From 344b1c803b90b6de478157133cd6849a27c3c47c Mon Sep 17 00:00:00 2001
From: Joshua Harlow
Date: Sun, 26 Jul 2015 21:19:22 -0700
Subject: [PATCH 01/54] Extend and improve failure logging

Add the ability to show the intention, result, and state of
predecessors of the atom that has failed, so that more contextual
information can be shown in the logs that are written.

Change-Id: Ic77c0d4e94a147e54da74976c1d148aef82eccb3
---
 doc/source/notifications.rst  |   5 ++
 taskflow/formatters.py        | 160 ++++++++++++++++++++++++++++++++++
 taskflow/listeners/logging.py |  85 +++++++++---------
 taskflow/utils/misc.py        |   8 ++
 4 files changed, 215 insertions(+), 43 deletions(-)
 create mode 100644 taskflow/formatters.py

diff --git a/doc/source/notifications.rst b/doc/source/notifications.rst
index a8924b6c..0e419e91 100644
--- a/doc/source/notifications.rst
+++ b/doc/source/notifications.rst
@@ -180,6 +180,11 @@ Capturing listener
 
 .. autoclass:: taskflow.listeners.capturing.CaptureListener
 
+Formatters
+----------
+
+.. automodule:: taskflow.formatters
+
 Hierarchy
 =========
 
diff --git a/taskflow/formatters.py b/taskflow/formatters.py
new file mode 100644
index 00000000..d36082b2
--- /dev/null
+++ b/taskflow/formatters.py
@@ -0,0 +1,160 @@
+# -*- coding: utf-8 -*-
+
+# Copyright (C) 2014 Yahoo! Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+ +import functools + +from taskflow import exceptions as exc +from taskflow import states +from taskflow.types import tree +from taskflow.utils import misc + + +def _cached_get(cache, cache_key, atom_name, fetch_func, *args, **kwargs): + """Tries to get a previously saved value or fetches it and caches it.""" + value, value_found = None, False + try: + value, value_found = cache[cache_key][atom_name] + except KeyError: + try: + value = fetch_func(*args, **kwargs) + value_found = True + except (exc.StorageFailure, exc.NotFound): + pass + cache[cache_key][atom_name] = value, value_found + return value, value_found + + +def _fetch_predecessor_tree(graph, atom): + """Creates a tree of predecessors, rooted at given atom.""" + root = tree.Node(atom) + stack = [(root, atom)] + seen = set() + while stack: + parent, node = stack.pop() + for pred_node in graph.predecessors_iter(node): + child = tree.Node(pred_node) + parent.add(child) + stack.append((child, pred_node)) + seen.add(pred_node) + return len(seen), root + + +class FailureFormatter(object): + """Formats a failure and connects it to associated atoms & engine.""" + + _BUILDERS = { + states.EXECUTE: (_fetch_predecessor_tree, 'predecessors'), + } + + def __init__(self, engine, hide_inputs_outputs_of=()): + self._hide_inputs_outputs_of = hide_inputs_outputs_of + self._engine = engine + + def _format_node(self, storage, cache, node): + """Formats a single tree node (atom) into a string version.""" + atom = node.item + atom_name = atom.name + atom_attrs = {} + intention, intention_found = _cached_get(cache, 'intentions', + atom_name, + storage.get_atom_intention, + atom_name) + if intention_found: + atom_attrs['intention'] = intention + state, state_found = _cached_get(cache, 'states', atom_name, + storage.get_atom_state, atom_name) + if state_found: + atom_attrs['state'] = state + if atom_name not in self._hide_inputs_outputs_of: + requires, requires_found = _cached_get(cache, 'requires', + atom_name, + # When the cache does not + # exist for this atom this + # will be called with the + # rest of these arguments + # used to populate the + # cache. + storage.fetch_mapped_args, + atom.rebind, + atom_name=atom_name, + optional_args=atom.optional) + if requires_found: + atom_attrs['requires'] = requires + provides, provides_found = _cached_get(cache, 'provides', + atom_name, + storage.get_execute_result, + atom_name) + if provides_found: + atom_attrs['provides'] = provides + if atom_attrs: + return "Atom '%s' %s" % (atom_name, atom_attrs) + else: + return "Atom '%s'" % (atom_name) + + def format(self, fail, atom_matcher): + """Returns a (exc_info, details) tuple about the failure. + + The ``exc_info`` tuple should be a standard three element + (exctype, value, traceback) tuple that will be used for further + logging. A non-empty string is typically returned for ``details``; it + should contain any string info about the failure (with any specific + details the ``exc_info`` may not have/contain). + """ + buff = misc.StringIO() + storage = self._engine.storage + compilation = self._engine.compilation + if fail.exc_info is None: + # Remote failures will not have a 'exc_info' tuple, so just use + # the captured traceback that was captured by the creator when it + # failed... + buff.write_nl(fail.pformat(traceback=True)) + if storage is None or compilation is None: + # Somehow we got called before prepared and/or compiled; ok + # that's weird, skip doing the rest... 
+ return (fail.exc_info, buff.getvalue()) + hierarchy = compilation.hierarchy + graph = compilation.execution_graph + atom_node = hierarchy.find_first_match(atom_matcher) + atom = None + priors = 0 + atom_intention = None + if atom_node is not None: + atom = atom_node.item + atom_intention = storage.get_atom_intention(atom.name) + priors = sum(c for (_n, c) in graph.in_degree_iter([atom])) + if atom is not None and priors and atom_intention in self._BUILDERS: + # Cache as much as we can, since the path of various atoms + # may cause the same atom to be seen repeatedly depending on + # the graph structure... + cache = { + 'intentions': {}, + 'provides': {}, + 'requires': {}, + 'states': {}, + } + builder, kind = self._BUILDERS[atom_intention] + count, rooted_tree = builder(graph, atom) + buff.write_nl('%s %s (most recent atoms first):' % (count, kind)) + formatter = functools.partial(self._format_node, storage, cache) + child_count = rooted_tree.child_count() + for i, child in enumerate(rooted_tree, 1): + if i == child_count: + buff.write(child.pformat(stringify_node=formatter, + starting_prefix=" ")) + else: + buff.write_nl(child.pformat(stringify_node=formatter, + starting_prefix=" ")) + return (fail.exc_info, buff.getvalue()) diff --git a/taskflow/listeners/logging.py b/taskflow/listeners/logging.py index 37fd58ac..219f6ac8 100644 --- a/taskflow/listeners/logging.py +++ b/taskflow/listeners/logging.py @@ -18,9 +18,11 @@ from __future__ import absolute_import import os +from taskflow import formatters from taskflow.listeners import base from taskflow import logging from taskflow import states +from taskflow import task from taskflow.types import failure from taskflow.utils import misc @@ -56,6 +58,16 @@ class LoggingListener(base.DumpingListener): self._logger.log(self._level, message, *args, **kwargs) +def _make_matcher(task_name): + """Returns a function that matches a node with task item with same name.""" + + def _task_matcher(node): + item = node.item + return isinstance(item, task.BaseTask) and item.name == task_name + + return _task_matcher + + class DynamicLoggingListener(base.Listener): """Listener that logs notifications it receives. @@ -99,7 +111,7 @@ class DynamicLoggingListener(base.Listener): flow_listen_for=base.DEFAULT_LISTEN_FOR, retry_listen_for=base.DEFAULT_LISTEN_FOR, log=None, failure_level=logging.WARNING, - level=logging.DEBUG): + level=logging.DEBUG, hide_inputs_outputs_of=()): super(DynamicLoggingListener, self).__init__( engine, task_listen_for=task_listen_for, flow_listen_for=flow_listen_for, retry_listen_for=retry_listen_for) @@ -115,33 +127,10 @@ class DynamicLoggingListener(base.Listener): states.FAILURE: self._failure_level, states.REVERTED: self._failure_level, } + self._hide_inputs_outputs_of = frozenset(hide_inputs_outputs_of) self._logger = misc.pick_first_not_none(log, self._LOGGER, LOG) - - @staticmethod - def _format_failure(fail): - """Returns a (exc_info, exc_details) tuple about the failure. - - The ``exc_info`` tuple should be a standard three element - (exctype, value, traceback) tuple that will be used for further - logging. If a non-empty string is returned for ``exc_details`` it - should contain any string info about the failure (with any specific - details the ``exc_info`` may not have/contain). If the ``exc_info`` - tuple is returned as ``None`` then it will cause the logging - system to avoid outputting any traceback information (read - the python documentation on the logger interaction with ``exc_info`` - to learn more). 
- """ - if fail.exc_info: - exc_info = fail.exc_info - exc_details = '' - else: - # When a remote failure occurs (or somehow the failure - # object lost its traceback), we will not have a valid - # exc_info that can be used but we *should* have a string - # version that we can use instead... - exc_info = None - exc_details = "%s%s" % (os.linesep, fail.pformat(traceback=True)) - return (exc_info, exc_details) + self._fail_formatter = formatters.FailureFormatter( + self._engine, hide_inputs_outputs_of=self._hide_inputs_outputs_of) def _flow_receiver(self, state, details): """Gets called on flow state changes.""" @@ -152,39 +141,49 @@ class DynamicLoggingListener(base.Listener): def _task_receiver(self, state, details): """Gets called on task state changes.""" + task_name = details['task_name'] + task_uuid = details['task_uuid'] if 'result' in details and state in base.FINISH_STATES: # If the task failed, it's useful to show the exception traceback # and any other available exception information. result = details.get('result') if isinstance(result, failure.Failure): - exc_info, exc_details = self._format_failure(result) - self._logger.log(self._failure_level, - "Task '%s' (%s) transitioned into state" - " '%s' from state '%s'%s", - details['task_name'], details['task_uuid'], - state, details['old_state'], exc_details, - exc_info=exc_info) + exc_info, fail_details = self._fail_formatter.format( + result, _make_matcher(task_name)) + if fail_details: + self._logger.log(self._failure_level, + "Task '%s' (%s) transitioned into state" + " '%s' from state '%s'%s%s", + task_name, task_uuid, state, + details['old_state'], os.linesep, + fail_details, exc_info=exc_info) + else: + self._logger.log(self._failure_level, + "Task '%s' (%s) transitioned into state" + " '%s' from state '%s'", task_name, + task_uuid, state, details['old_state'], + exc_info=exc_info) else: # Otherwise, depending on the enabled logging level/state we # will show or hide results that the task may have produced # during execution. level = self._task_log_levels.get(state, self._level) - if (self._logger.isEnabledFor(self._level) - or state in self._FAILURE_STATES): + show_result = (self._logger.isEnabledFor(self._level) + or state == states.FAILURE) + if show_result and \ + task_name not in self._hide_inputs_outputs_of: self._logger.log(level, "Task '%s' (%s) transitioned into" " state '%s' from state '%s' with" - " result '%s'", details['task_name'], - details['task_uuid'], state, - details['old_state'], result) + " result '%s'", task_name, task_uuid, + state, details['old_state'], result) else: self._logger.log(level, "Task '%s' (%s) transitioned into" " state '%s' from state '%s'", - details['task_name'], - details['task_uuid'], state, + task_name, task_uuid, state, details['old_state']) else: # Just a intermediary state, carry on! 
             level = self._task_log_levels.get(state, self._level)
             self._logger.log(level, "Task '%s' (%s) transitioned into state"
-                             " '%s' from state '%s'", details['task_name'],
-                             details['task_uuid'], state, details['old_state'])
+                             " '%s' from state '%s'", task_name, task_uuid,
+                             state, details['old_state'])
diff --git a/taskflow/utils/misc.py b/taskflow/utils/misc.py
index bd2e6e38..aa89aa81 100644
--- a/taskflow/utils/misc.py
+++ b/taskflow/utils/misc.py
@@ -60,6 +60,14 @@ class StrEnum(str, enum.Enum):
         return super(StrEnum, cls).__new__(cls, *args, **kwargs)
 
 
+class StringIO(six.StringIO):
+    """String buffer with some small additions."""
+
+    def write_nl(self, value, linesep=os.linesep):
+        self.write(value)
+        self.write(linesep)
+
+
 def match_type(obj, matchers):
     """Matches a given object using the given matchers list/iterable.
 

From 971e5258145e0296a9e8d2a01257cfc6bc7368bd Mon Sep 17 00:00:00 2001
From: Joshua Harlow
Date: Fri, 7 Aug 2015 16:21:17 -0700
Subject: [PATCH 02/54] Only remove all 'next_nodes' that were done

Instead of removing all the nodes (even if some of them failed), we
should only remove the ones that actually worked out and keep the
nodes that were not scheduled and/or that failed to be scheduled.

Change-Id: I13d9db09c256026363f6b8c6c180ddaf191bcc00
---
 taskflow/engines/action_engine/builder.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/taskflow/engines/action_engine/builder.py b/taskflow/engines/action_engine/builder.py
index 9ab26d4a..52f2f596 100644
--- a/taskflow/engines/action_engine/builder.py
+++ b/taskflow/engines/action_engine/builder.py
@@ -157,7 +157,7 @@ class MachineBuilder(object):
                 memory.not_done.update(not_done)
             if failures:
                 memory.failures.extend(failures)
-            memory.next_nodes.clear()
+            memory.next_nodes.intersection_update(not_done)
             return WAIT
 
         def wait(old_state, new_state, event):

From bb626d50abc6f9f3740f58692545ce5b15d0542c Mon Sep 17 00:00:00 2001
From: Joshua Harlow
Date: Sun, 9 Aug 2015 10:37:02 -0700
Subject: [PATCH 03/54] Remove some temporary variables not needed

In the orderedset code we can remove these temp variables and just
inline them instead, making the code a little bit easier to read.

Change-Id: I6454f45a4e202a087cd6b195095f22c43e7bdab4
---
 taskflow/types/sets.py | 15 ++++-----------
 1 file changed, 4 insertions(+), 11 deletions(-)

diff --git a/taskflow/types/sets.py b/taskflow/types/sets.py
index a462189f..0db43bfe 100644
--- a/taskflow/types/sets.py
+++ b/taskflow/types/sets.py
@@ -72,9 +72,7 @@ class OrderedSet(collections.Set, collections.Hashable):
 
     def copy(self):
         """Return a shallow copy of a set."""
-        it = iter(self)
-        c = self._from_iterable(it)
-        return c
+        return self._from_iterable(iter(self))
 
     def intersection(self, *sets):
         """Return the intersection of two or more sets as a new set.
@@ -91,9 +89,7 @@
                     break
             if matches == len(sets):
                 yield value
-        it = absorb_it(sets)
-        c = self._from_iterable(it)
-        return c
+        return self._from_iterable(absorb_it(sets))
 
     def issuperset(self, other):
         """Report whether this set contains another set."""
@@ -123,14 +119,11 @@
                     break
             if not seen:
                 yield value
-        it = absorb_it(sets)
-        c = self._from_iterable(it)
-        return c
+        return self._from_iterable(absorb_it(sets))
 
     def union(self, *sets):
         """Return the union of sets as a new set.
 
         (i.e. all elements that are in either set.)
""" - it = itertools.chain(iter(self), *sets) - return self._from_iterable(it) + return self._from_iterable(itertools.chain(iter(self), *sets)) From bedd23882f38b78c735b1f7dcb881b427f33e8f7 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Thu, 13 Aug 2015 21:34:20 -0700 Subject: [PATCH 04/54] Avoid running this example if zookeeper is not found Change-Id: I4461dcbfc28bed839deeb4e4564daa07c55b42b0 --- taskflow/examples/99_bottles.py | 23 +++++++++++++++++++++-- 1 file changed, 21 insertions(+), 2 deletions(-) diff --git a/taskflow/examples/99_bottles.py b/taskflow/examples/99_bottles.py index 983bc201..328d6fdf 100644 --- a/taskflow/examples/99_bottles.py +++ b/taskflow/examples/99_bottles.py @@ -20,6 +20,9 @@ import logging import os import sys import time +import traceback + +from kazoo import client top_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir, @@ -201,14 +204,30 @@ def main_local(): run_conductor(only_run_once=True) +def check_for_zookeeper(timeout=1): + sys.stderr.write("Testing for the existence of a zookeeper server...\n") + sys.stderr.write("Please wait....\n") + with contextlib.closing(client.KazooClient()) as test_client: + try: + test_client.start(timeout=timeout) + except test_client.handler.timeout_exception: + sys.stderr.write("Zookeeper is needed for running this example!\n") + traceback.print_exc() + return False + else: + test_client.stop() + return True + + def main(): + logging.basicConfig(level=logging.ERROR) + if not check_for_zookeeper(): + return if len(sys.argv) == 1: main_local() elif sys.argv[1] in ('p', 'c'): if sys.argv[-1] == "v": logging.basicConfig(level=5) - else: - logging.basicConfig(level=logging.ERROR) if sys.argv[1] == 'p': run_poster() else: From f95c3824e0a8eff5703b33c26010c104b148fa4d Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Sun, 23 Aug 2015 10:00:51 -0700 Subject: [PATCH 05/54] Fix flow states link Change-Id: Ia1d468516db65ab82ce6bd6be0051cebb1b73409 --- taskflow/states.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/taskflow/states.py b/taskflow/states.py index 1939012b..aef7a231 100644 --- a/taskflow/states.py +++ b/taskflow/states.py @@ -87,7 +87,7 @@ def check_job_transition(old_state, new_state): # Flow state transitions -# See: http://docs.openstack.org/developer/taskflow/states.html +# See: http://docs.openstack.org/developer/taskflow/states.html#flow _ALLOWED_FLOW_TRANSITIONS = frozenset(( (PENDING, RUNNING), # run it! From 139816bd081d147d4f9dcde1ced7d98a8438b22c Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Fri, 7 Aug 2015 16:33:34 -0700 Subject: [PATCH 06/54] Use 'iter_utils.count' to determine how many unfinished nodes left On state machine 'game_over' reaction callback we currently use a routine to determine how many nodes are unfinished and then returning the final state (SUSPENDED) if any are; since we just added the iterator utils code we can just use the utility function 'count' provided from that instead. This explicit usage should make it more clear what this small reaction callback is doing, and why it returns SUSPENDED. This also switches it so that in the 'game_over' reaction node deciders aren't ran (as there is no point in running them in this state). 
Change-Id: Ief18598a537d40e5a9b7c50133f158083457755d
---
 taskflow/engines/action_engine/builder.py | 22 ++++++++++++++++++----
 1 file changed, 18 insertions(+), 4 deletions(-)

diff --git a/taskflow/engines/action_engine/builder.py b/taskflow/engines/action_engine/builder.py
index 9ab26d4a..dc1bdcc5 100644
--- a/taskflow/engines/action_engine/builder.py
+++ b/taskflow/engines/action_engine/builder.py
@@ -21,6 +21,7 @@ from automaton import machines
 from taskflow import logging
 from taskflow import states as st
 from taskflow.types import failure
+from taskflow.utils import iter_utils
 
 # Default waiting state timeout (in seconds).
 WAITING_TIMEOUT = 60
@@ -114,12 +115,15 @@ class MachineBuilder(object):
             # Checks if the storage says the flow is still runnable...
             return self._storage.get_flow_state() == st.RUNNING
 
-        def iter_next_nodes(target_node=None):
+        def iter_next_nodes(target_node=None, apply_deciders=True):
             # Yields and filters and tweaks the next nodes to execute...
             maybe_nodes = self._analyzer.get_next_nodes(node=target_node)
             for node, late_decider in maybe_nodes:
-                proceed = late_decider.check_and_affect(self._runtime)
-                if proceed:
+                if apply_deciders:
+                    proceed = late_decider.check_and_affect(self._runtime)
+                    if proceed:
+                        yield node
+                else:
                     yield node
 
         def resume(old_state, new_state, event):
@@ -138,7 +142,17 @@ class MachineBuilder(object):
             # it is *always* called before the final state is entered.
             if memory.failures:
                 return FAILED
-            if any(1 for node in iter_next_nodes()):
+            leftover_nodes = iter_utils.count(
+                # Avoid activating the deciders, since at this point
+                # the engine is finishing and there will be no further
+                # work done anyway...
+                iter_next_nodes(apply_deciders=False))
+            if leftover_nodes:
+                # Ok we didn't finish (either reverting or executing...) so
+                # that means we must have been stopped at some point...
+                LOG.blather("Suspension determined to have been reacted to"
+                            " since (at least) %s nodes have been left in an"
+                            " unfinished state", leftover_nodes)
                 return SUSPENDED
             elif self._analyzer.is_success():
                 return SUCCESS

From 60a9e6a817dbcf803ef22066fea0e8034b0a9145 Mon Sep 17 00:00:00 2001
From: Timofey Durakov
Date: Thu, 13 Aug 2015 14:30:51 +0300
Subject: [PATCH 07/54] iter_nodes method added to flows

The new method allows iterating over a flow's nodes and getting
access to node metadata during iteration.

Change-Id: Ib0fc77f0597961602fbc3b49ba09e4df815d8230
---
 taskflow/flow.py                              |  9 +++++++
 taskflow/patterns/graph_flow.py               | 10 +++++---
 taskflow/patterns/linear_flow.py              |  4 +++
 taskflow/patterns/unordered_flow.py           |  4 +++
 .../tests/unit/patterns/test_graph_flow.py    | 25 +++++++++++++++++++
 .../tests/unit/patterns/test_linear_flow.py   | 21 ++++++++++++++++
 .../unit/patterns/test_unordered_flow.py      | 18 +++++++++++++
 7 files changed, 88 insertions(+), 3 deletions(-)

diff --git a/taskflow/flow.py b/taskflow/flow.py
index 56786d4d..4d93edf4 100644
--- a/taskflow/flow.py
+++ b/taskflow/flow.py
@@ -98,6 +98,15 @@ class Flow(object):
        * ``meta`` is link metadata, a dictionary.
     """
 
+    @abc.abstractmethod
+    def iter_nodes(self):
+        """Iterate over nodes of the flow.
+
+        Iterates over 2-tuples ``(A, meta)``, where
+            * ``A`` is a child (atom or subflow) of current flow;
+            * ``meta`` is node metadata, a dictionary.
+ """ + def __str__(self): return "%s: %s(len=%d)" % (reflection.get_class_name(self), self.name, len(self)) diff --git a/taskflow/patterns/graph_flow.py b/taskflow/patterns/graph_flow.py index 37da34a6..c0745e1e 100644 --- a/taskflow/patterns/graph_flow.py +++ b/taskflow/patterns/graph_flow.py @@ -266,12 +266,16 @@ class Flow(flow.Flow): return self._get_subgraph().number_of_nodes() def __iter__(self): - for n in self._get_subgraph().topological_sort(): + for n, _n_data in self.iter_nodes(): yield n def iter_links(self): - for (u, v, e_data) in self._get_subgraph().edges_iter(data=True): - yield (u, v, e_data) + return self._get_subgraph().edges_iter(data=True) + + def iter_nodes(self): + g = self._get_subgraph() + for n in g.topological_sort(): + yield n, g.node[n] @property def requires(self): diff --git a/taskflow/patterns/linear_flow.py b/taskflow/patterns/linear_flow.py index 3067076c..f581ce45 100644 --- a/taskflow/patterns/linear_flow.py +++ b/taskflow/patterns/linear_flow.py @@ -60,3 +60,7 @@ class Flow(flow.Flow): def iter_links(self): for src, dst in zip(self._children[:-1], self._children[1:]): yield (src, dst, _LINK_METADATA.copy()) + + def iter_nodes(self): + for n in self._children: + yield (n, {}) diff --git a/taskflow/patterns/unordered_flow.py b/taskflow/patterns/unordered_flow.py index 52bd286e..036ca2f9 100644 --- a/taskflow/patterns/unordered_flow.py +++ b/taskflow/patterns/unordered_flow.py @@ -48,6 +48,10 @@ class Flow(flow.Flow): # between each other due to invariants retained during construction. return iter(()) + def iter_nodes(self): + for n in self._children: + yield (n, {}) + @property def requires(self): requires = set() diff --git a/taskflow/tests/unit/patterns/test_graph_flow.py b/taskflow/tests/unit/patterns/test_graph_flow.py index 588361c9..1d876d56 100644 --- a/taskflow/tests/unit/patterns/test_graph_flow.py +++ b/taskflow/tests/unit/patterns/test_graph_flow.py @@ -212,6 +212,31 @@ class GraphFlowTest(test.TestCase): f = gf.Flow('test').add(task1, task2) self.assertRaises(exc.DependencyFailure, f.add, task3) + def test_iter_nodes(self): + task1 = _task('task1', provides=['a'], requires=['c']) + task2 = _task('task2', provides=['b'], requires=['a']) + task3 = _task('task3', provides=['c']) + f1 = gf.Flow('nested') + f1.add(task3) + tasks = set([task1, task2, f1]) + f = gf.Flow('test').add(task1, task2, f1) + for (n, data) in f.iter_nodes(): + self.assertTrue(n in tasks) + self.assertDictEqual({}, data) + + def test_iter_links(self): + task1 = _task('task1') + task2 = _task('task2') + task3 = _task('task3') + f1 = gf.Flow('nested') + f1.add(task3) + tasks = set([task1, task2, f1]) + f = gf.Flow('test').add(task1, task2, f1) + for (u, v, data) in f.iter_links(): + self.assertTrue(u in tasks) + self.assertTrue(v in tasks) + self.assertDictEqual({}, data) + class TargetedGraphFlowTest(test.TestCase): diff --git a/taskflow/tests/unit/patterns/test_linear_flow.py b/taskflow/tests/unit/patterns/test_linear_flow.py index 48f8f8de..fa39e173 100644 --- a/taskflow/tests/unit/patterns/test_linear_flow.py +++ b/taskflow/tests/unit/patterns/test_linear_flow.py @@ -118,3 +118,24 @@ class LinearFlowTest(test.TestCase): self.assertEqual(f.requires, set(['a'])) self.assertEqual(f.provides, set(['b'])) + + def test_iter_nodes(self): + task1 = _task(name='task1') + task2 = _task(name='task2') + task3 = _task(name='task3') + f = lf.Flow('test').add(task1, task2, task3) + tasks = set([task1, task2, task3]) + for (node, data) in f.iter_nodes(): + self.assertTrue(node in tasks) 
+            self.assertDictEqual({}, data)
+
+    def test_iter_links(self):
+        task1 = _task(name='task1')
+        task2 = _task(name='task2')
+        task3 = _task(name='task3')
+        f = lf.Flow('test').add(task1, task2, task3)
+        tasks = set([task1, task2, task3])
+        for (u, v, data) in f.iter_links():
+            self.assertTrue(u in tasks)
+            self.assertTrue(v in tasks)
+            self.assertDictEqual(lf._LINK_METADATA, data)
diff --git a/taskflow/tests/unit/patterns/test_unordered_flow.py b/taskflow/tests/unit/patterns/test_unordered_flow.py
index 195516b6..eeb3bb2b 100644
--- a/taskflow/tests/unit/patterns/test_unordered_flow.py
+++ b/taskflow/tests/unit/patterns/test_unordered_flow.py
@@ -108,3 +108,21 @@ class UnorderedFlowTest(test.TestCase):
         self.assertEqual(ret.name, 'test_retry')
         self.assertEqual(f.requires, set([]))
         self.assertEqual(f.provides, set(['b', 'a']))
+
+    def test_iter_nodes(self):
+        task1 = _task(name='task1', provides=['a', 'b'])
+        task2 = _task(name='task2', provides=['a', 'c'])
+        tasks = set([task1, task2])
+        f = uf.Flow('test')
+        f.add(task2, task1)
+        for (node, data) in f.iter_nodes():
+            self.assertTrue(node in tasks)
+            self.assertDictEqual({}, data)
+
+    def test_iter_links(self):
+        task1 = _task(name='task1', provides=['a', 'b'])
+        task2 = _task(name='task2', provides=['a', 'c'])
+        f = uf.Flow('test')
+        f.add(task2, task1)
+        for (u, v, data) in f.iter_links():
+            raise AssertionError('links iterator should be empty')

From bf31caab1a1e49285529b296f5aafd3a5b9fe33a Mon Sep 17 00:00:00 2001
From: Joshua Harlow
Date: Wed, 2 Sep 2015 11:39:51 -0700
Subject: [PATCH 08/54] Explain that jobs arch. diagram is only for zookeeper

Change-Id: I9081f373ca9f0a3bb803f3a798e09ab68ccd1b71
---
 doc/source/jobs.rst | 7 ++++++-
 1 file changed, 6 insertions(+), 1 deletion(-)

diff --git a/doc/source/jobs.rst b/doc/source/jobs.rst
index dbbc6c7f..b8d5c705 100644
--- a/doc/source/jobs.rst
+++ b/doc/source/jobs.rst
@@ -48,10 +48,15 @@ Jobboards
 High level architecture
 =======================
 
-.. image:: img/jobboard.png
+.. figure:: img/jobboard.png
    :height: 350px
    :align: right
 
+   **Note:** This shows the high-level architecture of the zookeeper
+   implementation (which further parts of this documentation also
+   refer to); other implementations will typically have different
+   architectures.
+
 Features
 ========
 
From f6450d9d6b848c59c0ca0fa1fc95f0e5eae1a1fe Mon Sep 17 00:00:00 2001
From: Joshua Harlow
Date: Fri, 4 Sep 2015 11:08:11 -0700
Subject: [PATCH 09/54] Fix how the dir persistence backend was not listing logbooks

Due to the usage of the os.path.islink check, no logbooks would be
returned when get_logbooks was called, which is not the behavior we
want.
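To make the failure mode concrete (a hedged sketch, not the backend
source; the directory/symlink layout is assumed from the description
above): logbooks are stored as real directories under the books path,
so a symlink-only filter matches none of them:

    import os

    def list_children(path, links_only=True):
        # With links_only=True every logbook directory is skipped over,
        # which is why get_logbooks() came back empty.
        check = os.path.islink if links_only else os.path.isdir
        return [child for child in os.listdir(path)
                if check(os.path.join(path, child))]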
Closes-Bug: #1492403 Change-Id: Ife6a5bec777c9e2d820391914ce2c6fbbadf4f79 --- taskflow/persistence/backends/impl_dir.py | 8 ++++++-- taskflow/tests/unit/persistence/base.py | 21 +++++++++++++++++++++ 2 files changed, 27 insertions(+), 2 deletions(-) diff --git a/taskflow/persistence/backends/impl_dir.py b/taskflow/persistence/backends/impl_dir.py index f91c4e72..9d7b3ca2 100644 --- a/taskflow/persistence/backends/impl_dir.py +++ b/taskflow/persistence/backends/impl_dir.py @@ -136,9 +136,13 @@ class Connection(path_based.PathBasedConnection): shutil.rmtree(path) def _get_children(self, path): + if path == self.book_path: + filter_func = os.path.isdir + else: + filter_func = os.path.islink with _storagefailure_wrapper(): - return [link for link in os.listdir(path) - if os.path.islink(self._join_path(path, link))] + return [child for child in os.listdir(path) + if filter_func(self._join_path(path, child))] def _ensure_path(self, path): with _storagefailure_wrapper(): diff --git a/taskflow/tests/unit/persistence/base.py b/taskflow/tests/unit/persistence/base.py index 0b56617f..f5a20bd0 100644 --- a/taskflow/tests/unit/persistence/base.py +++ b/taskflow/tests/unit/persistence/base.py @@ -69,6 +69,27 @@ class PersistenceTestMixin(object): self.assertIsNotNone(lb2.find(fd.uuid)) self.assertIsNotNone(lb2.find(fd2.uuid)) + def test_logbook_save_retrieve_many(self): + lb_ids = {} + for i in range(0, 10): + lb_id = uuidutils.generate_uuid() + lb_name = 'lb-%s-%s' % (i, lb_id) + lb = models.LogBook(name=lb_name, uuid=lb_id) + lb_ids[lb_id] = True + + # Should not already exist + with contextlib.closing(self._get_connection()) as conn: + self.assertRaises(exc.NotFound, conn.get_logbook, lb_id) + conn.save_logbook(lb) + + # Now fetch them all + with contextlib.closing(self._get_connection()) as conn: + lbs = conn.get_logbooks() + for lb in lbs: + self.assertIn(lb.uuid, lb_ids) + lb_ids.pop(lb.uuid) + self.assertEqual(0, len(lb_ids)) + def test_logbook_save_retrieve(self): lb_id = uuidutils.generate_uuid() lb_meta = {'1': 2} From f8624a136b77c68dbf3e75406f2903586cf0762b Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Wed, 16 Sep 2015 15:22:25 -0700 Subject: [PATCH 10/54] Fix '_cache_get' multiple keyword argument name overlap The argument 'atom_name' is also used by the fetch function so when it is provided a conflict occurs and this ends badly. To avoid this capture the needed variables used for fetching a cached value into a functools.partial object and use that instead of passing further arguments. Closes-Bug: #1496608 Change-Id: Ic012f7687037bf876d041c4bc62b3f6606a8a845 --- taskflow/formatters.py | 17 ++--- taskflow/tests/unit/test_formatters.py | 102 +++++++++++++++++++++++++ 2 files changed, 109 insertions(+), 10 deletions(-) create mode 100644 taskflow/tests/unit/test_formatters.py diff --git a/taskflow/formatters.py b/taskflow/formatters.py index d36082b2..33fb7088 100644 --- a/taskflow/formatters.py +++ b/taskflow/formatters.py @@ -79,18 +79,15 @@ class FailureFormatter(object): if state_found: atom_attrs['state'] = state if atom_name not in self._hide_inputs_outputs_of: + # When the cache does not exist for this atom this + # will be called with the rest of these arguments + # used to populate the cache. 
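+            # (binding these up-front, rather than forwarding them through
+            # _cached_get, is what avoids the collision with _cached_get's
+            # own 'atom_name' positional argument)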
+ fetch_mapped_args = functools.partial( + storage.fetch_mapped_args, atom.rebind, + atom_name=atom_name, optional_args=atom.optional) requires, requires_found = _cached_get(cache, 'requires', atom_name, - # When the cache does not - # exist for this atom this - # will be called with the - # rest of these arguments - # used to populate the - # cache. - storage.fetch_mapped_args, - atom.rebind, - atom_name=atom_name, - optional_args=atom.optional) + fetch_mapped_args) if requires_found: atom_attrs['requires'] = requires provides, provides_found = _cached_get(cache, 'provides', diff --git a/taskflow/tests/unit/test_formatters.py b/taskflow/tests/unit/test_formatters.py new file mode 100644 index 00000000..c4db9513 --- /dev/null +++ b/taskflow/tests/unit/test_formatters.py @@ -0,0 +1,102 @@ +# -*- coding: utf-8 -*- + +# Copyright (C) 2015 Yahoo! Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from taskflow import engines +from taskflow import formatters +from taskflow.listeners import logging as logging_listener +from taskflow.patterns import linear_flow +from taskflow import states +from taskflow import test +from taskflow.test import mock +from taskflow.test import utils as test_utils + + +class FormattersTest(test.TestCase): + + @staticmethod + def _broken_atom_matcher(node): + return node.item.name == 'Broken' + + def _make_test_flow(self): + b = test_utils.TaskWithFailure("Broken") + h_1 = test_utils.ProgressingTask("Happy-1") + h_2 = test_utils.ProgressingTask("Happy-2") + flo = linear_flow.Flow("test") + flo.add(h_1, h_2, b) + return flo + + def test_exc_info_format(self): + flo = self._make_test_flow() + e = engines.load(flo) + self.assertRaises(RuntimeError, e.run) + + fails = e.storage.get_execute_failures() + self.assertEqual(1, len(fails)) + self.assertIn('Broken', fails) + fail = fails['Broken'] + + f = formatters.FailureFormatter(e) + (exc_info, details) = f.format(fail, self._broken_atom_matcher) + self.assertEqual(3, len(exc_info)) + self.assertEqual("", details) + + @mock.patch('taskflow.formatters.FailureFormatter._format_node') + def test_exc_info_with_details_format(self, mock_format_node): + mock_format_node.return_value = 'A node' + + flo = self._make_test_flow() + e = engines.load(flo) + self.assertRaises(RuntimeError, e.run) + fails = e.storage.get_execute_failures() + self.assertEqual(1, len(fails)) + self.assertIn('Broken', fails) + fail = fails['Broken'] + + # Doing this allows the details to be shown... 
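+        # (the formatter only builds a predecessor tree for atoms whose
+        # stored intention is EXECUTE, hence forcing that intention here)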
+        e.storage.set_atom_intention("Broken", states.EXECUTE)
+        f = formatters.FailureFormatter(e)
+        (exc_info, details) = f.format(fail, self._broken_atom_matcher)
+        self.assertEqual(3, len(exc_info))
+        self.assertTrue(mock_format_node.called)
+
+    @mock.patch('taskflow.storage.Storage.get_execute_result')
+    def test_exc_info_with_details_format_hidden(self, mock_get_execute):
+        flo = self._make_test_flow()
+        e = engines.load(flo)
+        self.assertRaises(RuntimeError, e.run)
+        fails = e.storage.get_execute_failures()
+        self.assertEqual(1, len(fails))
+        self.assertIn('Broken', fails)
+        fail = fails['Broken']
+
+        # Doing this allows the details to be shown...
+        e.storage.set_atom_intention("Broken", states.EXECUTE)
+        hide_inputs_outputs_of = ['Broken', "Happy-1", "Happy-2"]
+        f = formatters.FailureFormatter(
+            e, hide_inputs_outputs_of=hide_inputs_outputs_of)
+        (exc_info, details) = f.format(fail, self._broken_atom_matcher)
+        self.assertEqual(3, len(exc_info))
+        self.assertFalse(mock_get_execute.called)
+
+    @mock.patch('taskflow.formatters.FailureFormatter._format_node')
+    def test_formatted_via_listener(self, mock_format_node):
+        mock_format_node.return_value = 'A node'
+
+        flo = self._make_test_flow()
+        e = engines.load(flo)
+        with logging_listener.DynamicLoggingListener(e):
+            self.assertRaises(RuntimeError, e.run)
+        self.assertTrue(mock_format_node.called)

From 965adc914da418bab5da6033fc411037e633cde5 Mon Sep 17 00:00:00 2001
From: OpenStack Proposal Bot
Date: Fri, 18 Sep 2015 16:43:06 +0000
Subject: [PATCH 11/54] Updated from global requirements

Change-Id: I3a35a4207ec2eaf53d24e150257efae062899217
---
 requirements.txt | 2 +-
 setup.py         | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/requirements.txt b/requirements.txt
index 760dfcfa..60cc0839 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -3,7 +3,7 @@
 # process, which may cause wedges in the gate later.
 # See: https://bugs.launchpad.net/pbr/+bug/1384919 for why this is here...
-pbr<2.0,>=1.6
+pbr>=1.6
 
 # Packages needed for using this library.
diff --git a/setup.py b/setup.py
index d8080d05..782bb21f 100644
--- a/setup.py
+++ b/setup.py
@@ -25,5 +25,5 @@ except ImportError:
     pass
 
 setuptools.setup(
-    setup_requires=['pbr>=1.3'],
+    setup_requires=['pbr>=1.8'],
     pbr=True)

From 64583e075f41a5a239de15d6d521ce4bfde69a7d Mon Sep 17 00:00:00 2001
From: Joshua Harlow
Date: Thu, 6 Aug 2015 17:18:45 -0700
Subject: [PATCH 12/54] Use graphs as the underlying structure of patterns

This unifies all the patterns to be graph based so that they are more
uniform and their underlying constraints are easier to understand
(taskflow basically processes graphs).

Change-Id: Ib2ab07c1c87165cf40a06508128010887f658391
---
 taskflow/patterns/graph_flow.py               |  2 +-
 taskflow/patterns/linear_flow.py              | 43 ++++++++----
 taskflow/patterns/unordered_flow.py           | 25 ++++---
 .../tests/unit/patterns/test_linear_flow.py   |  2 +-
 taskflow/types/graph.py                       | 68 +++++++++++++++----
 5 files changed, 96 insertions(+), 44 deletions(-)

diff --git a/taskflow/patterns/graph_flow.py b/taskflow/patterns/graph_flow.py
index c0745e1e..c769124f 100644
--- a/taskflow/patterns/graph_flow.py
+++ b/taskflow/patterns/graph_flow.py
@@ -67,7 +67,7 @@ class Flow(flow.Flow):
 
     def __init__(self, name, retry=None):
         super(Flow, self).__init__(name, retry)
-        self._graph = gr.DiGraph()
+        self._graph = gr.DiGraph(name=name)
         self._graph.freeze()
 
     #: Extracts the unsatisified symbol requirements of a single node.
diff --git a/taskflow/patterns/linear_flow.py b/taskflow/patterns/linear_flow.py index f581ce45..747f4d26 100644 --- a/taskflow/patterns/linear_flow.py +++ b/taskflow/patterns/linear_flow.py @@ -15,9 +15,7 @@ # under the License. from taskflow import flow - - -_LINK_METADATA = {flow.LINK_INVARIANT: True} +from taskflow.types import graph as gr class Flow(flow.Flow): @@ -28,22 +26,37 @@ class Flow(flow.Flow): the reverse order that the *tasks/flows* have been applied in. """ + _no_last_item = object() + """Sentinel object used to denote no last item has been assigned. + + This is used to track no last item being added, since at creation there + is no last item, but since the :meth:`.add` routine can take any object + including none, we have to use a different object to be able to + distinguish the lack of any last item... + """ + def __init__(self, name, retry=None): super(Flow, self).__init__(name, retry) - self._children = [] + self._graph = gr.OrderedDiGraph(name=name) + self._last_item = self._no_last_item def add(self, *items): """Adds a given task/tasks/flow/flows to this flow.""" - items = [i for i in items if i not in self._children] - self._children.extend(items) + for item in items: + if not self._graph.has_node(item): + self._graph.add_node(item) + if self._last_item is not self._no_last_item: + self._graph.add_edge(self._last_item, item, + attr_dict={flow.LINK_INVARIANT: True}) + self._last_item = item return self def __len__(self): - return len(self._children) + return len(self._graph) def __iter__(self): - for child in self._children: - yield child + for item in self._graph.nodes_iter(): + yield item @property def requires(self): @@ -57,10 +70,10 @@ class Flow(flow.Flow): prior_provides.update(item.provides) return frozenset(requires) - def iter_links(self): - for src, dst in zip(self._children[:-1], self._children[1:]): - yield (src, dst, _LINK_METADATA.copy()) - def iter_nodes(self): - for n in self._children: - yield (n, {}) + for (n, n_data) in self._graph.nodes_iter(data=True): + yield (n, n_data) + + def iter_links(self): + for (u, v, e_data) in self._graph.edges_iter(data=True): + yield (u, v, e_data) diff --git a/taskflow/patterns/unordered_flow.py b/taskflow/patterns/unordered_flow.py index 036ca2f9..3de005c6 100644 --- a/taskflow/patterns/unordered_flow.py +++ b/taskflow/patterns/unordered_flow.py @@ -15,6 +15,7 @@ # under the License. from taskflow import flow +from taskflow.types import graph as gr class Flow(flow.Flow): @@ -26,31 +27,29 @@ class Flow(flow.Flow): def __init__(self, name, retry=None): super(Flow, self).__init__(name, retry) - # NOTE(imelnikov): A unordered flow is unordered, so we use - # set instead of list to save children, children so that - # people using it don't depend on the ordering. - self._children = set() + self._graph = gr.Graph(name=name) def add(self, *items): """Adds a given task/tasks/flow/flows to this flow.""" - self._children.update(items) + for item in items: + if not self._graph.has_node(item): + self._graph.add_node(item) return self def __len__(self): - return len(self._children) + return len(self._graph) def __iter__(self): - for child in self._children: - yield child + for item in self._graph: + yield item def iter_links(self): - # NOTE(imelnikov): children in unordered flow have no dependencies - # between each other due to invariants retained during construction. 
-        return iter(())
+        for (u, v, e_data) in self._graph.edges_iter(data=True):
+            yield (u, v, e_data)
 
     def iter_nodes(self):
-        for n in self._children:
-            yield (n, {})
+        for n, n_data in self._graph.nodes_iter(data=True):
+            yield (n, n_data)
 
     @property
     def requires(self):
diff --git a/taskflow/tests/unit/patterns/test_linear_flow.py b/taskflow/tests/unit/patterns/test_linear_flow.py
index fa39e173..05f4253a 100644
--- a/taskflow/tests/unit/patterns/test_linear_flow.py
+++ b/taskflow/tests/unit/patterns/test_linear_flow.py
@@ -138,4 +138,4 @@ class LinearFlowTest(test.TestCase):
         for (u, v, data) in f.iter_links():
             self.assertTrue(u in tasks)
             self.assertTrue(v in tasks)
-            self.assertDictEqual(lf._LINK_METADATA, data)
+            self.assertDictEqual({'invariant': True}, data)
diff --git a/taskflow/types/graph.py b/taskflow/types/graph.py
index 53eddba6..7462c9bd 100644
--- a/taskflow/types/graph.py
+++ b/taskflow/types/graph.py
@@ -21,8 +21,49 @@ import networkx as nx
 import six
 
 
+def _common_format(g, edge_notation):
+    lines = []
+    lines.append("Name: %s" % g.name)
+    lines.append("Type: %s" % type(g).__name__)
+    lines.append("Frozen: %s" % nx.is_frozen(g))
+    lines.append("Density: %0.3f" % nx.density(g))
+    lines.append("Nodes: %s" % g.number_of_nodes())
+    for n in g.nodes_iter():
+        lines.append("  - %s" % n)
+    lines.append("Edges: %s" % g.number_of_edges())
+    for (u, v, e_data) in g.edges_iter(data=True):
+        if e_data:
+            lines.append("  %s %s %s (%s)" % (u, edge_notation, v, e_data))
+        else:
+            lines.append("  %s %s %s" % (u, edge_notation, v))
+    return lines
+
+
+class Graph(nx.Graph):
+    """A graph subclass with useful utility functions."""
+
+    def __init__(self, data=None, name=''):
+        super(Graph, self).__init__(name=name, data=data)
+        self.frozen = False
+
+    def freeze(self):
+        """Freezes the graph so that no more mutations can occur."""
+        if not self.frozen:
+            nx.freeze(self)
+        return self
+
+    def export_to_dot(self):
+        """Exports the graph to a dot format (requires pydot library)."""
+        return nx.to_pydot(self).to_string()
+
+    def pformat(self):
+        """Pretty formats your graph into a string."""
+        return os.linesep.join(_common_format(self, "<->"))
+
+
 class DiGraph(nx.DiGraph):
     """A directed graph subclass with useful utility functions."""
+
     def __init__(self, data=None, name=''):
         super(DiGraph, self).__init__(name=name, data=data)
         self.frozen = False
@@ -56,20 +97,7 @@
         details about your graph, including; name, type, frozeness, node
         count, nodes, edge count, edges, graph density and graph cycles
         (if any).
         """
-        lines = []
-        lines.append("Name: %s" % self.name)
-        lines.append("Type: %s" % type(self).__name__)
-        lines.append("Frozen: %s" % nx.is_frozen(self))
-        lines.append("Nodes: %s" % self.number_of_nodes())
-        for n in self.nodes_iter():
-            lines.append("  - %s" % n)
-        lines.append("Edges: %s" % self.number_of_edges())
-        for (u, v, e_data) in self.edges_iter(data=True):
-            if e_data:
-                lines.append("  %s -> %s (%s)" % (u, v, e_data))
-            else:
-                lines.append("  %s -> %s" % (u, v))
-        lines.append("Density: %0.3f" % nx.density(self))
+        lines = _common_format(self, "->")
         cycles = list(nx.cycles.recursive_simple_cycles(self))
         lines.append("Cycles: %s" % len(cycles))
         for cycle in cycles:
@@ -122,6 +150,18 @@
                     queue.append(pred_pred)
 
 
+class OrderedDiGraph(DiGraph):
+    """A directed graph subclass with useful utility functions.
+
+    This derivative retains node, edge, insertion and iteration
+    ordering (so that the iteration order matches the insertion
+    order).
+ """ + node_dict_factory = collections.OrderedDict + adjlist_dict_factory = collections.OrderedDict + edge_attr_dict_factory = collections.OrderedDict + + def merge_graphs(graph, *graphs, **kwargs): """Merges a bunch of graphs into a new graph. From 1a69143046455be2e0191ce5417d17b49c208bdc Mon Sep 17 00:00:00 2001 From: Monty Taylor Date: Mon, 21 Sep 2015 14:55:54 +0000 Subject: [PATCH 13/54] Change ignore-errors to ignore_errors Needed for coverage 4.0 Change-Id: Ib63d81068db5353b1d341e22bb875839689b40ac --- .coveragerc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.coveragerc b/.coveragerc index a733a288..1403ceec 100644 --- a/.coveragerc +++ b/.coveragerc @@ -4,5 +4,5 @@ source = taskflow omit = taskflow/tests/*,taskflow/openstack/*,taskflow/test.py [report] -ignore-errors = True +ignore_errors = True From f6bff9f095dd8aa86b1de1d58d1d81fbb379bbc9 Mon Sep 17 00:00:00 2001 From: venkatamahesh Date: Tue, 22 Sep 2015 22:44:33 +0530 Subject: [PATCH 14/54] Fix the sphinx build path in .gitignore file Change-Id: I80b930c164f5453204d7aea0954d7a98d90e3a51 --- .gitignore | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.gitignore b/.gitignore index b645cf69..986f39d5 100644 --- a/.gitignore +++ b/.gitignore @@ -55,7 +55,7 @@ AUTHORS ChangeLog # doc -doc/_build/ +doc/build/ .idea env From a0ca0af79f8aad346ca262b95f01296abfb92c99 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Thu, 27 Aug 2015 17:32:14 -0700 Subject: [PATCH 15/54] Refactor common parts of 'get_maybe_ready_for' methods These share quite a bit of common-like code, so refactor both of them to share the same function and differentiate certain provided arguments as needed. Also tweaks the storage class 'get_atoms_states' (which is badly named also) to not do two queries into the flow detail when one will suffice and to not request the same atom name twice when once will suffice. 
Change-Id: Ifdf2f3efb78d189ed5a8104614b1bf6a84c9339a --- taskflow/engines/action_engine/analyzer.py | 81 +++++++++++----------- taskflow/storage.py | 10 +-- 2 files changed, 46 insertions(+), 45 deletions(-) diff --git a/taskflow/engines/action_engine/analyzer.py b/taskflow/engines/action_engine/analyzer.py index 78d4c29f..77f7df37 100644 --- a/taskflow/engines/action_engine/analyzer.py +++ b/taskflow/engines/action_engine/analyzer.py @@ -166,54 +166,53 @@ class Analyzer(object): ready_nodes.append((node, late_decider)) return ready_nodes + def _get_maybe_ready(self, atom, transition_to, allowed_intentions, + connected_fetcher, connected_checker, + decider_fetcher): + state = self.get_state(atom) + ok_to_transition = self._runtime.check_atom_transition(atom, state, + transition_to) + if not ok_to_transition: + return (False, None) + intention = self._storage.get_atom_intention(atom.name) + if intention not in allowed_intentions: + return (False, None) + connected_states = self._storage.get_atoms_states( + connected_atom.name for connected_atom in connected_fetcher(atom)) + ok_to_run = connected_checker(six.itervalues(connected_states)) + if not ok_to_run: + return (False, None) + else: + return (True, decider_fetcher(atom)) + def _get_maybe_ready_for_execute(self, atom): """Returns if an atom is *likely* ready to be executed.""" - state = self.get_state(atom) - intention = self._storage.get_atom_intention(atom.name) - transition = self._runtime.check_atom_transition(atom, state, - st.RUNNING) - if not transition or intention != st.EXECUTE: - return (False, None) - - predecessor_names = [] - for previous_atom in self._execution_graph.predecessors(atom): - predecessor_names.append(previous_atom.name) - - predecessor_states = self._storage.get_atoms_states(predecessor_names) - predecessor_states_iter = six.itervalues(predecessor_states) - ok_to_run = all(state == st.SUCCESS and intention == st.EXECUTE - for state, intention in predecessor_states_iter) - - if not ok_to_run: - return (False, None) - else: + def decider_fetcher(atom): edge_deciders = self._runtime.fetch_edge_deciders(atom) - return (True, IgnoreDecider(atom, edge_deciders)) + if edge_deciders: + return IgnoreDecider(atom, edge_deciders) + else: + return NoOpDecider() + + connected_checker = lambda connected_iter: \ + all(state == st.SUCCESS and intention == st.EXECUTE + for state, intention in connected_iter) + connected_fetcher = self._execution_graph.predecessors_iter + return self._get_maybe_ready(atom, st.RUNNING, [st.EXECUTE], + connected_fetcher, connected_checker, + decider_fetcher) def _get_maybe_ready_for_revert(self, atom): """Returns if an atom is *likely* ready to be reverted.""" - - state = self.get_state(atom) - intention = self._storage.get_atom_intention(atom.name) - transition = self._runtime.check_atom_transition(atom, state, - st.REVERTING) - if not transition or intention not in (st.REVERT, st.RETRY): - return (False, None) - - predecessor_names = [] - for previous_atom in self._execution_graph.successors(atom): - predecessor_names.append(previous_atom.name) - - predecessor_states = self._storage.get_atoms_states(predecessor_names) - predecessor_states_iter = six.itervalues(predecessor_states) - ok_to_run = all(state in (st.PENDING, st.REVERTED) - for state, intention in predecessor_states_iter) - - if not ok_to_run: - return (False, None) - else: - return (True, NoOpDecider()) + connected_checker = lambda connected_iter: \ + all(state in (st.PENDING, st.REVERTED) + for state, _intention in connected_iter) + 
decider_fetcher = lambda atom: NoOpDecider()
+        connected_fetcher = self._execution_graph.successors_iter
+        return self._get_maybe_ready(atom, st.REVERTING, [st.REVERT, st.RETRY],
+                                     connected_fetcher, connected_checker,
+                                     decider_fetcher)
 
     def iterate_subgraph(self, atom):
         """Iterates a subgraph connected to given atom."""
diff --git a/taskflow/storage.py b/taskflow/storage.py
index cab68f6d..f6769369 100644
--- a/taskflow/storage.py
+++ b/taskflow/storage.py
@@ -385,10 +385,12 @@ class Storage(object):
 
     @fasteners.read_locked
     def get_atoms_states(self, atom_names):
-        """Gets all atoms states given a set of names."""
-        return dict((name, (self.get_atom_state(name),
-                            self.get_atom_intention(name)))
-                    for name in atom_names)
+        """Gets a dict of atom name => (state, intention) given atom names."""
+        details = {}
+        for name in set(atom_names):
+            source, _clone = self._atomdetail_by_name(name)
+            details[name] = (source.state, source.intention)
+        return details
 
     @fasteners.write_locked
     def _update_atom_metadata(self, atom_name, update_with,

From c7c9647a0b0bbcf6115b885895207af3343d4835 Mon Sep 17 00:00:00 2001
From: Zhao Lei
Date: Wed, 23 Sep 2015 18:49:26 +0800
Subject: [PATCH 16/54] Remove quotes from subshell call in bash script

A $() statement does not need surrounding quotes.

Quotes are not needed to preserve blanks in the result:
 # i=$(echo 1 2 3)
 # echo $i
 1 2 3
 #

And quotes can actually break things in some cases:
 # i=$(echo '!')
 #
 # i="$(echo '!')"
 -bash: !: event not found
 #

There is no real problem with the current code; this is only a
style improvement.

Change-Id: I0f43bb936f956c99603dc15aaad9db851ecf0acd
Signed-off-by: Zhao Lei
---
 tools/update_states.sh | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tools/update_states.sh b/tools/update_states.sh
index 60ca3d4b..61b54e8c 100755
--- a/tools/update_states.sh
+++ b/tools/update_states.sh
@@ -12,7 +12,7 @@ if [ ! -d "$PWD/.diagram-tools" ]; then
     git clone "https://github.com/vidarh/diagram-tools.git" "$PWD/.diagram-tools"
 fi
 
-script_dir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
+script_dir=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
 img_dir="$script_dir/../doc/source/img"
 
 echo "---- Updating task state diagram ----"

From ec17ad0d76a835c2db65af8a41f3e4d9fc359ee2 Mon Sep 17 00:00:00 2001
From: Joshua Harlow
Date: Thu, 24 Sep 2015 11:09:51 -0700
Subject: [PATCH 17/54] Remove ./taskflow/openstack/common as it no longer exists

Change-Id: I3123a6d7cf4323fb111c2b4c5545fd6a1d2fd77b
---
 tox.ini | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tox.ini b/tox.ini
index 5ee35ac8..3c1924f2 100644
--- a/tox.ini
+++ b/tox.ini
@@ -49,7 +49,7 @@ commands = {posargs}
 
 [flake8]
 builtins = _
-exclude = .venv,.tox,dist,doc,./taskflow/openstack/common,*egg,.git,build,tools
+exclude = .venv,.tox,dist,doc,*egg,.git,build,tools
 
 [hacking]
 import_exceptions = six.moves

From e6fc3aeb2f3a4ab1c60f9d703bb9f1513b4d86fc Mon Sep 17 00:00:00 2001
From: Joshua Harlow
Date: Thu, 24 Sep 2015 14:10:55 -0700
Subject: [PATCH 18/54] Remove dummy/placeholder 'ChangeLog' as it's not needed

Just enforce that `python setup.py build_sphinx` will be the way to
build docs (which also builds a 'ChangeLog') so we don't need to
maintain a dummy file here anymore.

Change-Id: Icec9173c7970d84c105f151ac74dc9080ab3b1f7
---
 ChangeLog | 1 -
 tox.ini   | 4 ++--
 2 files changed, 2 insertions(+), 3 deletions(-)
 delete mode 100644 ChangeLog

diff --git a/ChangeLog b/ChangeLog
deleted file mode 100644
index ba4c69b7..00000000
--- a/ChangeLog
+++ /dev/null
@@ -1 +0,0 @@
-.. This is a generated file! Do not edit.
diff --git a/tox.ini b/tox.ini
index 5ee35ac8..f7b7cc8f 100644
--- a/tox.ini
+++ b/tox.ini
@@ -57,7 +57,7 @@ import_exceptions = six.moves
   unittest.mock
 
 [testenv:py27]
-commands = 
+commands =
     python setup.py testr --slowest --testr-args='{posargs}'
     sphinx-build -b doctest doc/source doc/build
-    doc8 doc/source
+    doc8 --ignore-path "doc/source/history.rst" doc/source

From 000ae21927e6311bdcca41bb350392e146a0d473 Mon Sep 17 00:00:00 2001
From: Joshua Harlow
Date: Fri, 25 Sep 2015 08:12:33 -0700
Subject: [PATCH 19/54] Rename 'history' -> 'Release notes'

This seems to better match what the other oslo libraries are calling
this section, so we might as well call it that too.

Change-Id: I67773ef2241dcf2f2a1cd65756ac0f8a8081c9cb
---
 doc/source/index.rst | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/doc/source/index.rst b/doc/source/index.rst
index 4c0ab593..2bbc0019 100644
--- a/doc/source/index.rst
+++ b/doc/source/index.rst
@@ -104,8 +104,8 @@ projects, frameworks and libraries.
 
    shelf
 
-History
--------
+Release notes
+-------------
 
 .. toctree::
    :maxdepth: 2

From ffcccd1fc756f159018c2858a51d0c2b0d44423b Mon Sep 17 00:00:00 2001
From: Davanum Srinivas
Date: Fri, 25 Sep 2015 12:45:27 -0400
Subject: [PATCH 20/54] docs - Set pbr warnerrors option for doc build

By setting this pbr option in setup.cfg, the doc build will fail if
any warnings or errors occur during the build process.

Change-Id: I1f70ea5c6b26b30d5c5583934333053ef065fbf6
---
 setup.cfg | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/setup.cfg b/setup.cfg
index 31fc17ab..ff241553 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -58,6 +58,9 @@ taskflow.engines =
 cover-erase = true
 verbosity = 2
 
+[pbr]
+warnerrors = True
+
 [wheel]
 universal = 1
 
From dc0bdb50f4ffcb43a15360a4eee936f5cf4f32b5 Mon Sep 17 00:00:00 2001
From: Joshua Harlow
Date: Tue, 29 Sep 2015 17:01:45 -0700
Subject: [PATCH 21/54] Fix 'dependened upon' spelling error

Change-Id: I2f9888381805d584a2d1e17c14e85f3d6885404c
---
 taskflow/types/failure.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/taskflow/types/failure.py b/taskflow/types/failure.py
index a0084bb8..34c30473 100644
--- a/taskflow/types/failure.py
+++ b/taskflow/types/failure.py
@@ -86,7 +86,7 @@ class Failure(mixins.StrMixin):
     re-used later to re-raise, inspect, examine, log, print, serialize,
     deserialize...
 
-    One example where they are dependened upon is in the WBE engine. When a
+    One example where they are depended upon is in the WBE engine. When a
     remote worker throws an exception, the WBE based engine will receive that
     exception and desire to reraise it to the user/caller of the WBE based
     engine for appropriate handling (this matches the behavior of non-remote

From 79d25e69e8300db5debdfd717ffd80f91c246c10 Mon Sep 17 00:00:00 2001
From: Joshua Harlow
Date: Fri, 4 Sep 2015 13:14:25 -0700
Subject: [PATCH 22/54] Simplify flow action engine compilation

Instead of the added complexity of discarding flow nodes we can
simplify the compilation process by just retaining them and jumping
over them in further iteration and graph and tree runtime usage.

This change moves toward a model that does just this, which also
makes it easier to use the newly added flow graph nodes in the future
to do meaningful things (like use them as a point to change which
flow_detail is used).
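The "jumping over" boils down to tagging the retained flow boundary
nodes and skipping them while still traversing through them; a toy
sketch (using networkx 1.x style node attributes, mirroring the
helper added in the diff below):

    import networkx as nx

    g = nx.DiGraph()
    g.add_node('my-flow', kind='flow', noop=True)  # retained, never yielded
    g.add_node('my-task', kind='task')
    g.add_edge('my-flow', 'my-task')

    # Only non-noop nodes are yielded; traversal still passes through
    # the flow node to reach the atoms connected to it.
    visible = [n for n in g.nodes_iter() if not g.node[n].get('noop')]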
Change-Id: Icb1695f4b995a0392f940837514774768f222db4
---
 taskflow/engines/action_engine/analyzer.py    | 160 ++++----
 taskflow/engines/action_engine/builder.py     |  55 +--
 taskflow/engines/action_engine/compiler.py    | 265 +++++---------
 taskflow/engines/action_engine/completer.py   |  38 +-
 taskflow/engines/action_engine/engine.py      |  28 +-
 taskflow/engines/action_engine/runtime.py     |  78 ++--
 taskflow/engines/action_engine/scopes.py      |  39 +-
 taskflow/formatters.py                        |  21 +-
 .../tests/unit/action_engine/test_builder.py  |  13 +-
 .../tests/unit/action_engine/test_compile.py  | 344 ++++++++++--------
 taskflow/types/graph.py                       |   7 +-
 taskflow/utils/iter_utils.py                  |  13 +
 12 files changed, 535 insertions(+), 526 deletions(-)

diff --git a/taskflow/engines/action_engine/analyzer.py b/taskflow/engines/action_engine/analyzer.py
index 77f7df37..bdde8975 100644
--- a/taskflow/engines/action_engine/analyzer.py
+++ b/taskflow/engines/action_engine/analyzer.py
@@ -18,10 +18,31 @@ import abc
 import itertools
 import weakref
 
-from networkx.algorithms import traversal
 import six
 
+from taskflow.engines.action_engine import compiler as co
 from taskflow import states as st
+from taskflow.utils import iter_utils
+
+
+def _depth_first_iterate(graph, connected_to_functors, initial_nodes_iter):
+    """Iterates connected nodes in execution graph (from starting set).
+
+    Jumps over nodes with ``noop`` attribute (does not yield them back).
+    """
+    stack = list(initial_nodes_iter)
+    while stack:
+        node = stack.pop()
+        node_attrs = graph.node[node]
+        if not node_attrs.get('noop'):
+            yield node
+        try:
+            node_kind = node_attrs['kind']
+            connected_to_functor = connected_to_functors[node_kind]
+        except KeyError:
+            pass
+        else:
+            stack.extend(connected_to_functor(node))
 
 
 @six.add_metaclass(abc.ABCMeta)
@@ -74,8 +95,8 @@ class IgnoreDecider(Decider):
         state to ``IGNORE`` so that they are ignored in future runtime
         activities.
         """
-        successors_iter = runtime.analyzer.iterate_subgraph(self._atom)
-        runtime.reset_nodes(itertools.chain([self._atom], successors_iter),
+        successors_iter = runtime.analyzer.iterate_connected_atoms(self._atom)
+        runtime.reset_atoms(itertools.chain([self._atom], successors_iter),
                             state=st.IGNORE, intention=st.IGNORE)
 
@@ -105,66 +126,67 @@ class Analyzer(object):
         self._storage = runtime.storage
         self._execution_graph = runtime.compilation.execution_graph
 
-    def get_next_nodes(self, node=None):
-        """Get next nodes to run (originating from node or all nodes)."""
-        if node is None:
-            execute = self.browse_nodes_for_execute()
-            revert = self.browse_nodes_for_revert()
-            return execute + revert
-        state = self.get_state(node)
-        intention = self._storage.get_atom_intention(node.name)
+    def iter_next_atoms(self, atom=None):
+        """Iterate next atoms to run (originating from atom or all atoms)."""
+        if atom is None:
+            return iter_utils.unique_seen(self.browse_atoms_for_execute(),
+                                          self.browse_atoms_for_revert())
+        state = self.get_state(atom)
+        intention = self._storage.get_atom_intention(atom.name)
         if state == st.SUCCESS:
             if intention == st.REVERT:
-                return [
-                    (node, NoOpDecider()),
-                ]
+                return iter([
+                    (atom, NoOpDecider()),
+                ])
             elif intention == st.EXECUTE:
-                return self.browse_nodes_for_execute(node)
+                return self.browse_atoms_for_execute(atom=atom)
             else:
-                return []
+                return iter([])
         elif state == st.REVERTED:
-            return self.browse_nodes_for_revert(node)
+            return self.browse_atoms_for_revert(atom=atom)
        elif state == st.FAILURE:
-            return self.browse_nodes_for_revert()
+            return self.browse_atoms_for_revert()
         else:
-            return []
+            return iter([])
 
-    def browse_nodes_for_execute(self, node=None):
-        """Browse next nodes to execute.
+    def browse_atoms_for_execute(self, atom=None):
+        """Browse next atoms to execute.
 
-        This returns a collection of nodes that *may* be ready to be
-        executed, if given a specific node it will only examine the successors
-        of that node, otherwise it will examine the whole graph.
+        This returns an iterator of atoms that *may* be ready to be
+        executed; if given a specific atom, it will only examine the successors
+        of that atom, otherwise it will examine the whole graph.
         """
-        if node is not None:
-            nodes = self._execution_graph.successors(node)
+        if atom is None:
+            atom_it = self.iterate_nodes(co.ATOMS)
         else:
-            nodes = self._execution_graph.nodes_iter()
-        ready_nodes = []
-        for node in nodes:
-            is_ready, late_decider = self._get_maybe_ready_for_execute(node)
+            successors_iter = self._execution_graph.successors_iter
+            atom_it = _depth_first_iterate(self._execution_graph,
+                                           {co.FLOW: successors_iter},
+                                           successors_iter(atom))
+        for atom in atom_it:
+            is_ready, late_decider = self._get_maybe_ready_for_execute(atom)
             if is_ready:
-                ready_nodes.append((node, late_decider))
-        return ready_nodes
+                yield (atom, late_decider)
 
-    def browse_nodes_for_revert(self, node=None):
-        """Browse next nodes to revert.
+    def browse_atoms_for_revert(self, atom=None):
+        """Browse next atoms to revert.
 
-        This returns a collection of nodes that *may* be ready to be be
-        reverted, if given a specific node it will only examine the
-        predecessors of that node, otherwise it will examine the whole
+        This returns an iterator of atoms that *may* be ready to be
+        reverted; if given a specific atom it will only examine the
+        predecessors of that atom, otherwise it will examine the whole
         graph.
         """
-        if node is not None:
-            nodes = self._execution_graph.predecessors(node)
+        if atom is None:
+            atom_it = self.iterate_nodes(co.ATOMS)
         else:
-            nodes = self._execution_graph.nodes_iter()
-        ready_nodes = []
-        for node in nodes:
-            is_ready, late_decider = self._get_maybe_ready_for_revert(node)
+            predecessors_iter = self._execution_graph.predecessors_iter
+            atom_it = _depth_first_iterate(self._execution_graph,
+                                           {co.FLOW: predecessors_iter},
+                                           predecessors_iter(atom))
+        for atom in atom_it:
+            is_ready, late_decider = self._get_maybe_ready_for_revert(atom)
             if is_ready:
-                ready_nodes.append((node, late_decider))
-        return ready_nodes
+                yield (atom, late_decider)
 
     def _get_maybe_ready(self, atom, transition_to, allowed_intentions,
                          connected_fetcher, connected_checker,
@@ -187,59 +209,71 @@ class Analyzer(object):
 
     def _get_maybe_ready_for_execute(self, atom):
         """Returns if an atom is *likely* ready to be executed."""
-
         def decider_fetcher(atom):
             edge_deciders = self._runtime.fetch_edge_deciders(atom)
             if edge_deciders:
                 return IgnoreDecider(atom, edge_deciders)
             else:
                 return NoOpDecider()
-
+        predecessors_iter = self._execution_graph.predecessors_iter
+        connected_fetcher = lambda atom: \
+            _depth_first_iterate(self._execution_graph,
+                                 {co.FLOW: predecessors_iter},
+                                 predecessors_iter(atom))
         connected_checker = lambda connected_iter: \
             all(state == st.SUCCESS and intention == st.EXECUTE
                 for state, intention in connected_iter)
-        connected_fetcher = self._execution_graph.predecessors_iter
         return self._get_maybe_ready(atom, st.RUNNING, [st.EXECUTE],
                                      connected_fetcher, connected_checker,
                                      decider_fetcher)
 
     def _get_maybe_ready_for_revert(self, atom):
         """Returns if an atom is *likely* ready to be reverted."""
+        successors_iter = self._execution_graph.successors_iter
+        connected_fetcher = lambda atom: \
+            _depth_first_iterate(self._execution_graph,
+                                 {co.FLOW: successors_iter},
+                                 successors_iter(atom))
         connected_checker = lambda connected_iter: \
             all(state in (st.PENDING, st.REVERTED)
                 for state, _intention in connected_iter)
         decider_fetcher = lambda atom: NoOpDecider()
-        connected_fetcher = self._execution_graph.successors_iter
         return self._get_maybe_ready(atom, st.REVERTING, [st.REVERT, st.RETRY],
                                      connected_fetcher, connected_checker,
                                      decider_fetcher)
 
-    def iterate_subgraph(self, atom):
-        """Iterates a subgraph connected to given atom."""
-        for _src, dst in traversal.dfs_edges(self._execution_graph, atom):
-            yield dst
+    def iterate_connected_atoms(self, atom):
+        """Iterates **all** successor atoms connected to given atom."""
+        successors_iter = self._execution_graph.successors_iter
+        return _depth_first_iterate(
+            self._execution_graph, {
+                co.FLOW: successors_iter,
+                co.TASK: successors_iter,
+                co.RETRY: successors_iter,
+            }, successors_iter(atom))
 
     def iterate_retries(self, state=None):
         """Iterates retry atoms that match the provided state.
 
         If no state is provided it will yield back all retry atoms.
         """
-        for atom in self._runtime.fetch_atoms_by_kind('retry'):
+        for atom in self.iterate_nodes((co.RETRY,)):
             if not state or self.get_state(atom) == state:
                 yield atom
 
-    def iterate_all_nodes(self):
-        """Yields back all nodes in the execution graph."""
-        for node in self._execution_graph.nodes_iter():
-            yield node
+    def iterate_nodes(self, allowed_kinds):
+        """Yields back all nodes of specified kinds in the execution graph."""
+        for node, node_data in self._execution_graph.nodes_iter(data=True):
+            if node_data['kind'] in allowed_kinds:
+                yield node
 
-    def find_atom_retry(self, atom):
-        """Returns the retry atom associated to the given atom (or none)."""
-        return self._execution_graph.node[atom].get('retry')
+    def find_retry(self, node):
+        """Returns the retry atom associated to the given node (or none)."""
+        return self._execution_graph.node[node].get(co.RETRY)
 
     def is_success(self):
-        """Checks if all nodes in the execution graph are in 'happy' state."""
-        for atom in self.iterate_all_nodes():
+        """Checks if all atoms in the execution graph are in 'happy' state."""
+        for atom in self.iterate_nodes(co.ATOMS):
             atom_state = self.get_state(atom)
             if atom_state == st.IGNORE:
                 continue
diff --git a/taskflow/engines/action_engine/builder.py b/taskflow/engines/action_engine/builder.py
index 034e64a3..cdf36466 100644
--- a/taskflow/engines/action_engine/builder.py
+++ b/taskflow/engines/action_engine/builder.py
@@ -49,7 +49,7 @@ class MachineMemory(object):
     """State machine memory."""
 
     def __init__(self):
-        self.next_nodes = set()
+        self.next_up = set()
         self.not_done = set()
         self.failures = []
         self.done = set()
@@ -115,24 +115,25 @@ class MachineBuilder(object):
             # Checks if the storage says the flow is still runnable...
             return self._storage.get_flow_state() == st.RUNNING
 
-        def iter_next_nodes(target_node=None, apply_deciders=True):
-            # Yields and filters and tweaks the next nodes to execute...
-            maybe_nodes = self._analyzer.get_next_nodes(node=target_node)
-            for node, late_decider in maybe_nodes:
+        def iter_next_atoms(atom=None, apply_deciders=True):
+            # Yields and filters and tweaks the next atoms to run...
+            maybe_atoms_it = self._analyzer.iter_next_atoms(atom=atom)
+            for atom, late_decider in maybe_atoms_it:
                 if apply_deciders:
                     proceed = late_decider.check_and_affect(self._runtime)
                     if proceed:
-                        yield node
+                        yield atom
                 else:
-                    yield node
+                    yield atom
 
         def resume(old_state, new_state, event):
             # This reaction function just updates the state machines memory
             # to include any nodes that need to be executed (from a previous
             # attempt, which may be empty if never ran before) and any nodes
             # that are now ready to be ran.
-            memory.next_nodes.update(self._completer.resume())
-            memory.next_nodes.update(iter_next_nodes())
+            memory.next_up.update(
+                iter_utils.unique_seen(self._completer.resume(),
+                                       iter_next_atoms()))
             return SCHEDULE
 
         def game_over(old_state, new_state, event):
@@ -142,17 +143,17 @@ class MachineBuilder(object):
             # it is *always* called before the final state is entered.
             if memory.failures:
                 return FAILED
-            leftover_nodes = iter_utils.count(
+            leftover_atoms = iter_utils.count(
                 # Avoid activating the deciders, since at this point
                 # the engine is finishing and there will be no more further
                 # work done anyway...
-                iter_next_nodes(apply_deciders=False))
-            if leftover_nodes:
+                iter_next_atoms(apply_deciders=False))
+            if leftover_atoms:
                 # Ok we didn't finish (either reverting or executing...) so
                 # that means we must of been stopped at some point...
                 LOG.blather("Suspension determined to have been reacted to"
-                            " since (at least) %s nodes have been left in an"
-                            " unfinished state", leftover_nodes)
+                            " since (at least) %s atoms have been left in an"
+                            " unfinished state", leftover_atoms)
                 return SUSPENDED
             elif self._analyzer.is_success():
                 return SUCCESS
@@ -165,13 +166,13 @@ class MachineBuilder(object):
             # if the user of this engine has requested the engine/storage
             # that holds this information to stop or suspend); handles failures
             # that occur during this process safely...
-            if is_runnable() and memory.next_nodes:
-                not_done, failures = do_schedule(memory.next_nodes)
+            if is_runnable() and memory.next_up:
+                not_done, failures = do_schedule(memory.next_up)
                 if not_done:
                     memory.not_done.update(not_done)
                 if failures:
                     memory.failures.extend(failures)
-                memory.next_nodes.intersection_update(not_done)
+                memory.next_up.intersection_update(not_done)
             return WAIT
 
         def wait(old_state, new_state, event):
@@ -190,13 +191,13 @@ class MachineBuilder(object):
             # out what nodes are now ready to be ran (and then triggering those
             # nodes to be scheduled in the future); handles failures that
             # occur during this process safely...
-            next_nodes = set()
+            next_up = set()
             while memory.done:
                 fut = memory.done.pop()
-                node = fut.atom
+                atom = fut.atom
                 try:
                     event, result = fut.result()
-                    retain = do_complete(node, event, result)
+                    retain = do_complete(atom, event, result)
                     if isinstance(result, failure.Failure):
                         if retain:
                             memory.failures.append(result)
@@ -208,24 +209,24 @@ class MachineBuilder(object):
                             # is not enabled, which would suck...)
                             if LOG.isEnabledFor(logging.DEBUG):
                                 intention = self._storage.get_atom_intention(
-                                    node.name)
+                                    atom.name)
                                 LOG.debug("Discarding failure '%s' (in"
                                           " response to event '%s') under"
                                           " completion units request during"
-                                          " completion of node '%s' (intention"
+                                          " completion of atom '%s' (intention"
                                           " is to %s)", result, event,
-                                          node, intention)
+                                          atom, intention)
                 except Exception:
                     memory.failures.append(failure.Failure())
                 else:
                     try:
-                        more_nodes = set(iter_next_nodes(target_node=node))
+                        more_work = set(iter_next_atoms(atom=atom))
                     except Exception:
                         memory.failures.append(failure.Failure())
                     else:
-                        next_nodes.update(more_nodes)
-            if is_runnable() and next_nodes and not memory.failures:
-                memory.next_nodes.update(next_nodes)
+                        next_up.update(more_work)
+            if is_runnable() and next_up and not memory.failures:
+                memory.next_up.update(next_up)
                 return SCHEDULE
             elif memory.not_done:
                 return WAIT
diff --git a/taskflow/engines/action_engine/compiler.py b/taskflow/engines/action_engine/compiler.py
index 50ce4eb1..0d3e2883 100644
--- a/taskflow/engines/action_engine/compiler.py
+++ b/taskflow/engines/action_engine/compiler.py
@@ -14,10 +14,10 @@
 # License for the specific language governing permissions and limitations
 # under the License.
 
-import collections
 import threading
 
 import fasteners
+import six
 
 from taskflow import exceptions as exc
 from taskflow import flow
@@ -28,18 +28,35 @@ from taskflow.types import tree as tr
 from taskflow.utils import iter_utils
 from taskflow.utils import misc
 
+from taskflow.flow import (LINK_INVARIANT, LINK_RETRY)  # noqa
+
 LOG = logging.getLogger(__name__)
 
-_RETRY_EDGE_DATA = {
-    flow.LINK_RETRY: True,
-}
-_EDGE_INVARIANTS = (flow.LINK_INVARIANT, flow.LINK_MANUAL, flow.LINK_RETRY)
-_EDGE_REASONS = flow.LINK_REASONS
+# Constants attached to node attributes in the execution graph (and tree
+# node metadata), provided as constants here and constants in the compilation
+# class (so that users will not have to import this file to access them); but
+# provide them as module constants so that internal code can more
+# easily access them...
+TASK = 'task'
+RETRY = 'retry'
+FLOW = 'flow'
+
+# Quite often used together, so make a tuple everyone can share...
+ATOMS = (TASK, RETRY)
 
 
 class Compilation(object):
     """The result of a compilers compile() is this *immutable* object."""
 
+    #: Task nodes will have a ``kind`` attribute/metadata key with this value.
+    TASK = TASK
+
+    #: Retry nodes will have a ``kind`` attribute/metadata key with this value.
+    RETRY = RETRY
+
+    #: Flow nodes will have a ``kind`` attribute/metadata key with this value.
+    FLOW = FLOW
+
     def __init__(self, execution_graph, hierarchy):
         self._execution_graph = execution_graph
         self._hierarchy = hierarchy
@@ -55,6 +72,12 @@ class Compilation(object):
         return self._hierarchy
 
 
+def _overlap_occurence_detector(to_graph, from_graph):
+    """Returns how many nodes in 'from' graph are in 'to' graph (if any)."""
+    return iter_utils.count(node for node in from_graph.nodes_iter()
+                            if node in to_graph)
+
+
 def _add_update_edges(graph, nodes_from, nodes_to, attr_dict=None):
     """Adds/updates edges from nodes to other nodes in the specified graph.
 
@@ -79,118 +102,7 @@ def _add_update_edges(graph, nodes_from, nodes_to, attr_dict=None):
             graph.add_edge(u, v, attr_dict=attr_dict.copy())
 
 
-class Linker(object):
-    """Compiler helper that adds pattern(s) constraints onto a graph."""
-
-    @staticmethod
-    def _is_not_empty(graph):
-        # Returns true if the given graph is *not* empty...
-        return graph.number_of_nodes() > 0
-
-    @staticmethod
-    def _find_first_decomposed(node, priors,
-                               decomposed_members, decomposed_filter):
-        # How this works; traverse backwards and find only the predecessor
-        # items that are actually connected to this entity, and avoid any
-        # linkage that is not directly connected. This is guaranteed to be
-        # valid since we always iter_links() over predecessors before
-        # successors in all currently known patterns; a queue is used here
-        # since it is possible for a node to have 2+ different predecessors so
-        # we must search back through all of them in a reverse BFS order...
-        #
-        # Returns the first decomposed graph of those nodes (including the
-        # passed in node) that passes the provided filter
-        # function (returns none if none match).
-        frontier = collections.deque([node])
-        # NOTE(harowja): None is in this initial set since the first prior in
-        # the priors list has None as its predecessor (which we don't want to
-        # look for a decomposed member of).
-        visited = set([None])
-        while frontier:
-            node = frontier.popleft()
-            if node in visited:
-                continue
-            node_graph = decomposed_members[node]
-            if decomposed_filter(node_graph):
-                return node_graph
-            visited.add(node)
-            # TODO(harlowja): optimize this more to avoid searching through
-            # things already searched...
-            for (u, v) in reversed(priors):
-                if node == v:
-                    # Queue its predecessor to be searched in the future...
-                    frontier.append(u)
-        else:
-            return None
-
-    def apply_constraints(self, graph, flow, decomposed_members):
-        # This list is used to track the links that have been previously
-        # iterated over, so that when we are trying to find a entry to
-        # connect to that we iterate backwards through this list, finding
-        # connected nodes to the current target (lets call it v) and find
-        # the first (u_n, or u_n - 1, u_n - 2...) that was decomposed into
-        # a non-empty graph. We also retain all predecessors of v so that we
-        # can correctly locate u_n - 1 if u_n turns out to have decomposed into
-        # an empty graph (and so on).
-        priors = []
-        # NOTE(harlowja): u, v are flows/tasks (also graph terminology since
-        # we are compiling things down into a flattened graph), the meaning
-        # of this link iteration via iter_links() is that u -> v (with the
-        # provided dictionary attributes, if any).
-        for (u, v, attr_dict) in flow.iter_links():
-            if not priors:
-                priors.append((None, u))
-            v_g = decomposed_members[v]
-            if not v_g.number_of_nodes():
-                priors.append((u, v))
-                continue
-            invariant = any(attr_dict.get(k) for k in _EDGE_INVARIANTS)
-            if not invariant:
-                # This is a symbol *only* dependency, connect
-                # corresponding providers and consumers to allow the consumer
-                # to be executed immediately after the provider finishes (this
-                # is an optimization for these types of dependencies...)
-                u_g = decomposed_members[u]
-                if not u_g.number_of_nodes():
-                    # This must always exist, but incase it somehow doesn't...
-                    raise exc.CompilationFailure(
-                        "Non-invariant link being created from '%s' ->"
-                        " '%s' even though the target '%s' was found to be"
-                        " decomposed into an empty graph" % (v, u, u))
-                for u in u_g.nodes_iter():
-                    for v in v_g.nodes_iter():
-                        # This is using the intersection() method vs the &
-                        # operator since the latter doesn't work with frozen
-                        # sets (when used in combination with ordered sets).
-                        #
-                        # If this is not done the following happens...
-                        #
-                        # TypeError: unsupported operand type(s)
-                        # for &: 'frozenset' and 'OrderedSet'
-                        depends_on = u.provides.intersection(v.requires)
-                        if depends_on:
-                            edge_attrs = {
-                                _EDGE_REASONS: frozenset(depends_on),
-                            }
-                            _add_update_edges(graph,
                                              [u], [v],
-                                              attr_dict=edge_attrs)
-            else:
-                # Connect nodes with no predecessors in v to nodes with no
-                # successors in the *first* non-empty predecessor of v (thus
-                # maintaining the edge dependency).
-                match = self._find_first_decomposed(u, priors,
-                                                    decomposed_members,
-                                                    self._is_not_empty)
-                if match is not None:
-                    _add_update_edges(graph,
-                                      match.no_successors_iter(),
-                                      list(v_g.no_predecessors_iter()),
-                                      attr_dict=attr_dict)
-            priors.append((u, v))
-
-
-class _TaskCompiler(object):
+class TaskCompiler(object):
     """Non-recursive compiler of tasks."""
 
     @staticmethod
@@ -199,71 +111,67 @@ class _TaskCompiler(object):
 
     def compile(self, task, parent=None):
         graph = gr.DiGraph(name=task.name)
-        graph.add_node(task)
-        node = tr.Node(task)
+        graph.add_node(task, kind=TASK)
+        node = tr.Node(task, kind=TASK)
         if parent is not None:
             parent.add(node)
         return graph, node
 
 
-class _FlowCompiler(object):
+class FlowCompiler(object):
     """Recursive compiler of flows."""
 
     @staticmethod
     def handles(obj):
         return isinstance(obj, flow.Flow)
 
-    def __init__(self, deep_compiler_func, linker):
+    def __init__(self, deep_compiler_func):
         self._deep_compiler_func = deep_compiler_func
-        self._linker = linker
-
-    def _connect_retry(self, retry, graph):
-        graph.add_node(retry)
-
-        # All nodes that have no predecessors should depend on this retry.
-        nodes_to = [n for n in graph.no_predecessors_iter() if n is not retry]
-        if nodes_to:
-            _add_update_edges(graph, [retry], nodes_to,
-                              attr_dict=_RETRY_EDGE_DATA)
-
-        # Add association for each node of graph that has no existing retry.
-        for n in graph.nodes_iter():
-            if n is not retry and flow.LINK_RETRY not in graph.node[n]:
-                graph.node[n][flow.LINK_RETRY] = retry
-
-    @staticmethod
-    def _occurence_detector(to_graph, from_graph):
-        return iter_utils.count(node for node in from_graph.nodes_iter()
-                                if node in to_graph)
-
-    def _decompose_flow(self, flow, parent=None):
-        """Decomposes a flow into a graph, tree node + decomposed subgraphs."""
-        graph = gr.DiGraph(name=flow.name)
-        node = tr.Node(flow)
-        if parent is not None:
-            parent.add(node)
-        if flow.retry is not None:
-            node.add(tr.Node(flow.retry))
-        decomposed_members = {}
-        for item in flow:
-            subgraph, _subnode = self._deep_compiler_func(item, parent=node)
-            decomposed_members[item] = subgraph
-            if subgraph.number_of_nodes():
-                graph = gr.merge_graphs(
-                    graph, subgraph,
-                    # We can specialize this to be simpler than the default
-                    # algorithm which creates overhead that we don't
-                    # need for our purposes...
-                    overlap_detector=self._occurence_detector)
-        return graph, node, decomposed_members
 
     def compile(self, flow, parent=None):
-        graph, node, decomposed_members = self._decompose_flow(flow,
-                                                               parent=parent)
-        self._linker.apply_constraints(graph, flow, decomposed_members)
+        """Decomposes a flow into a graph and scope tree hierarchy."""
+        graph = gr.DiGraph(name=flow.name)
+        graph.add_node(flow, kind=FLOW, noop=True)
+        tree_node = tr.Node(flow, kind=FLOW, noop=True)
+        if parent is not None:
+            parent.add(tree_node)
         if flow.retry is not None:
-            self._connect_retry(flow.retry, graph)
-        return graph, node
+            tree_node.add(tr.Node(flow.retry, kind=RETRY))
+        decomposed = dict(
+            (child, self._deep_compiler_func(child, parent=tree_node)[0])
+            for child in flow)
+        decomposed_graphs = list(six.itervalues(decomposed))
+        graph = gr.merge_graphs(graph, *decomposed_graphs,
+                                overlap_detector=_overlap_occurence_detector)
+        for u, v, attr_dict in flow.iter_links():
+            u_graph = decomposed[u]
+            v_graph = decomposed[v]
+            _add_update_edges(graph, u_graph.no_successors_iter(),
+                              list(v_graph.no_predecessors_iter()),
+                              attr_dict=attr_dict)
+        if flow.retry is not None:
+            graph.add_node(flow.retry, kind=RETRY)
+            _add_update_edges(graph, [flow], [flow.retry],
+                              attr_dict={LINK_INVARIANT: True})
+            for node in graph.nodes_iter():
+                if node is not flow.retry and node is not flow:
+                    graph.node[node].setdefault(RETRY, flow.retry)
+            from_nodes = [flow.retry]
+            connected_attr_dict = {LINK_INVARIANT: True, LINK_RETRY: True}
+        else:
+            from_nodes = [flow]
+            connected_attr_dict = {LINK_INVARIANT: True}
+        connected_to = [
+            node for node in graph.no_predecessors_iter() if node is not flow
+        ]
+        if connected_to:
+            # Ensure all nodes in this graph(s) that have no
+            # predecessors depend on this flow (or this flow's retry) so that
+            # we can depend on the flow being traversed before its
+            # children (even though at the current time it will be skipped).
+            _add_update_edges(graph, from_nodes, connected_to,
+                              attr_dict=connected_attr_dict)
+        return graph, tree_node
 
 
 class PatternCompiler(object):
@@ -288,8 +196,8 @@ class PatternCompiler(object):
     the recursion (now with a decomposed mapping from contained patterns or
     atoms to there corresponding subgraph) we have to then connect the
     subgraphs (and the atom(s) there-in) that were decomposed for a pattern
-    correctly into a new graph (using a :py:class:`.Linker` object to ensure
-    the pattern mandated constraints are retained) and then return to the
+    correctly into a new graph and then ensure the pattern mandated
+    constraints are retained. Finally we return to the
     caller (and they will do the same thing up until the root node, which
     by that point one graph is created with all contained atoms in the
     pattern/nested patterns mandated ordering).
@@ -364,14 +272,10 @@ def __init__(self, root, freeze=True):
         self._root = root
         self._history = set()
-        self._linker = Linker()
         self._freeze = freeze
         self._lock = threading.Lock()
         self._compilation = None
-        self._matchers = [
-            _FlowCompiler(self._compile, self._linker),
-            _TaskCompiler(),
-        ]
+        self._matchers = (FlowCompiler(self._compile), TaskCompiler())
         self._level = 0
 
     def _compile(self, item, parent=None):
@@ -418,12 +322,17 @@ def _compile(self, item, parent=None):
 
     def _post_compile(self, graph, node):
         """Called after the compilation of the root finishes successfully."""
-        dup_names = misc.get_duplicate_keys(graph.nodes_iter(),
-                                            key=lambda node: node.name)
+        dup_names = misc.get_duplicate_keys(
+            (node for node, node_attrs in graph.nodes_iter(data=True)
+             if node_attrs['kind'] in ATOMS),
+            key=lambda node: node.name)
         if dup_names:
             raise exc.Duplicate(
                 "Atoms with duplicate names found: %s" % (sorted(dup_names)))
-        if graph.number_of_nodes() == 0:
+        atoms = iter_utils.count(
+            node for node, node_attrs in graph.nodes_iter(data=True)
+            if node_attrs['kind'] in ATOMS)
+        if atoms == 0:
             raise exc.Empty("Root container '%s' (%s) is empty"
                             % (self._root, type(self._root)))
         self._history.clear()
diff --git a/taskflow/engines/action_engine/completer.py b/taskflow/engines/action_engine/completer.py
index 0ab727a4..e3ab54d2 100644
--- a/taskflow/engines/action_engine/completer.py
+++ b/taskflow/engines/action_engine/completer.py
@@ -20,6 +20,7 @@ import weakref
 from oslo_utils import reflection
 import six
 
+from taskflow.engines.action_engine import compiler as co
 from taskflow.engines.action_engine import executor as ex
 from taskflow import logging
 from taskflow import retry as retry_atom
@@ -62,7 +63,7 @@ class RevertAndRetry(Strategy):
         self._retry = retry
 
     def apply(self):
-        tweaked = self._runtime.reset_nodes([self._retry], state=None,
+        tweaked = self._runtime.reset_atoms([self._retry], state=None,
                                             intention=st.RETRY)
         tweaked.extend(self._runtime.reset_subgraph(self._retry, state=None,
                                                     intention=st.REVERT))
@@ -79,8 +80,9 @@ class RevertAll(Strategy):
         self._analyzer = runtime.analyzer
 
     def apply(self):
-        return self._runtime.reset_nodes(self._analyzer.iterate_all_nodes(),
-                                         state=None, intention=st.REVERT)
+        return self._runtime.reset_atoms(
+            self._analyzer.iterate_nodes(co.ATOMS),
+            state=None, intention=st.REVERT)
 
 
 class Revert(Strategy):
@@ -93,7 +95,7 @@ class Revert(Strategy):
         self._atom = atom
 
     def apply(self):
-        tweaked = self._runtime.reset_nodes([self._atom], state=None,
+        tweaked = self._runtime.reset_atoms([self._atom], state=None,
                                             intention=st.REVERT)
         tweaked.extend(self._runtime.reset_subgraph(self._atom, state=None,
                                                     intention=st.REVERT))
@@ -126,26 +128,26 @@ class Completer(object):
             self._retry_action.complete_reversion(retry, result)
 
     def resume(self):
-        """Resumes nodes in the contained graph.
+        """Resumes atoms in the contained graph.
 
-        This is done to allow any previously completed or failed nodes to
-        be analyzed, there results processed and any potential nodes affected
+        This is done to allow any previously completed or failed atoms to
+        be analyzed, their results processed and any potential atoms affected
         to be adjusted as needed.
 
-        This should return a set of nodes which should be the initial set of
-        nodes that were previously not finished (due to a RUNNING or REVERTING
+        This should return a set of atoms which should be the initial set of
+        atoms that were previously not finished (due to a RUNNING or REVERTING
         attempt not previously finishing).
         """
-        for node in self._analyzer.iterate_all_nodes():
-            if self._analyzer.get_state(node) == st.FAILURE:
-                self._process_atom_failure(node, self._storage.get(node.name))
+        for atom in self._analyzer.iterate_nodes(co.ATOMS):
+            if self._analyzer.get_state(atom) == st.FAILURE:
+                self._process_atom_failure(atom, self._storage.get(atom.name))
         for retry in self._analyzer.iterate_retries(st.RETRYING):
             self._runtime.retry_subflow(retry)
-        unfinished_nodes = set()
-        for node in self._analyzer.iterate_all_nodes():
-            if self._analyzer.get_state(node) in (st.RUNNING, st.REVERTING):
-                unfinished_nodes.add(node)
-        return unfinished_nodes
+        unfinished_atoms = set()
+        for atom in self._analyzer.iterate_nodes(co.ATOMS):
+            if self._analyzer.get_state(atom) in (st.RUNNING, st.REVERTING):
+                unfinished_atoms.add(atom)
+        return unfinished_atoms
 
     def complete(self, node, event, result):
         """Performs post-execution completion of a node.
@@ -167,7 +169,7 @@ class Completer(object):
 
     def _determine_resolution(self, atom, failure):
         """Determines which resolution strategy to activate/apply."""
-        retry = self._analyzer.find_atom_retry(atom)
+        retry = self._analyzer.find_retry(atom)
         if retry is not None:
             # Ask retry controller what to do in case of failure.
             strategy = self._retry_action.on_failure(retry, atom, failure)
diff --git a/taskflow/engines/action_engine/engine.py b/taskflow/engines/action_engine/engine.py
index cc6b1ac4..74e150c1 100644
--- a/taskflow/engines/action_engine/engine.py
+++ b/taskflow/engines/action_engine/engine.py
@@ -241,11 +241,10 @@ class ActionEngine(base.Engine):
         transient = strutils.bool_from_string(
             self._options.get('inject_transient', True))
         self.storage.ensure_atoms(
-            self._compilation.execution_graph.nodes_iter())
-        for node in self._compilation.execution_graph.nodes_iter():
-            if node.inject:
-                self.storage.inject_atom_args(node.name,
-                                              node.inject,
+            self._runtime.analyzer.iterate_nodes(compiler.ATOMS))
+        for atom in self._runtime.analyzer.iterate_nodes(compiler.ATOMS):
+            if atom.inject:
+                self.storage.inject_atom_args(atom.name, atom.inject,
                                               transient=transient)
 
     @fasteners.locked
@@ -255,8 +254,8 @@ class ActionEngine(base.Engine):
         # flow/task provided or storage provided, if there are still missing
         # dependencies then this flow will fail at runtime (which we can avoid
         # by failing at validation time).
-        execution_graph = self._compilation.execution_graph
         if LOG.isEnabledFor(logging.BLATHER):
+            execution_graph = self._compilation.execution_graph
             LOG.blather("Validating scoping and argument visibility for"
                         " execution graph with %s nodes and %s edges with"
                         " density %0.3f", execution_graph.number_of_nodes(),
@@ -269,18 +268,17 @@ class ActionEngine(base.Engine):
         last_cause = None
         last_node = None
         missing_nodes = 0
-        fetch_func = self.storage.fetch_unsatisfied_args
-        for node in execution_graph.nodes_iter():
-            node_missing = fetch_func(node.name, node.rebind,
-                                      optional_args=node.optional)
-            if node_missing:
-                cause = exc.MissingDependencies(node,
-                                                sorted(node_missing),
+        for atom in self._runtime.analyzer.iterate_nodes(compiler.ATOMS):
+            atom_missing = self.storage.fetch_unsatisfied_args(
+                atom.name, atom.rebind, optional_args=atom.optional)
+            if atom_missing:
+                cause = exc.MissingDependencies(atom,
+                                                sorted(atom_missing),
                                                 cause=last_cause)
                 last_cause = cause
-                last_node = node
+                last_node = atom
                 missing_nodes += 1
-                missing.update(node_missing)
+                missing.update(atom_missing)
         if missing:
             # For when a task is provided (instead of a flow) and that
             # task is the only item in the graph and its missing deps, avoid
diff --git a/taskflow/engines/action_engine/runtime.py b/taskflow/engines/action_engine/runtime.py
index d97ba967..6780e931 100644
--- a/taskflow/engines/action_engine/runtime.py
+++ b/taskflow/engines/action_engine/runtime.py
@@ -22,12 +22,13 @@ from taskflow.engines.action_engine.actions import retry as ra
 from taskflow.engines.action_engine.actions import task as ta
 from taskflow.engines.action_engine import analyzer as an
 from taskflow.engines.action_engine import builder as bu
+from taskflow.engines.action_engine import compiler as com
 from taskflow.engines.action_engine import completer as co
 from taskflow.engines.action_engine import scheduler as sched
 from taskflow.engines.action_engine import scopes as sc
-from taskflow import flow
+from taskflow import exceptions as exc
+from taskflow.flow import LINK_DECIDER
 from taskflow import states as st
-from taskflow import task
 from taskflow.utils import misc
 
@@ -47,7 +48,6 @@ class Runtime(object):
         self._storage = storage
         self._compilation = compilation
         self._atom_cache = {}
-        self._atoms_by_kind = {}
 
     def compile(self):
         """Compiles & caches frequently used execution helper objects.
@@ -59,47 +59,47 @@ class Runtime(object):
         specific scheduler and so-on).
         """
         change_state_handlers = {
-            'task': functools.partial(self.task_action.change_state,
-                                      progress=0.0),
-            'retry': self.retry_action.change_state,
+            com.TASK: functools.partial(self.task_action.change_state,
+                                        progress=0.0),
+            com.RETRY: self.retry_action.change_state,
         }
         schedulers = {
-            'retry': self.retry_scheduler,
-            'task': self.task_scheduler,
+            com.RETRY: self.retry_scheduler,
+            com.TASK: self.task_scheduler,
         }
-        execution_graph = self._compilation.execution_graph
-        all_retry_atoms = []
-        all_task_atoms = []
-        for atom in self.analyzer.iterate_all_nodes():
-            metadata = {}
-            walker = sc.ScopeWalker(self.compilation, atom, names_only=True)
-            if isinstance(atom, task.BaseTask):
-                check_transition_handler = st.check_task_transition
-                change_state_handler = change_state_handlers['task']
-                scheduler = schedulers['task']
-                all_task_atoms.append(atom)
+        check_transition_handlers = {
+            com.TASK: st.check_task_transition,
+            com.RETRY: st.check_retry_transition,
+        }
+        graph = self._compilation.execution_graph
+        for node, node_data in graph.nodes_iter(data=True):
+            node_kind = node_data['kind']
+            if node_kind == com.FLOW:
+                continue
+            elif node_kind in com.ATOMS:
+                check_transition_handler = check_transition_handlers[node_kind]
+                change_state_handler = change_state_handlers[node_kind]
+                scheduler = schedulers[node_kind]
             else:
-                check_transition_handler = st.check_retry_transition
-                change_state_handler = change_state_handlers['retry']
-                scheduler = schedulers['retry']
-                all_retry_atoms.append(atom)
+                raise exc.CompilationFailure("Unknown node kind '%s'"
+                                             " encountered" % node_kind)
+            metadata = {}
+            walker = sc.ScopeWalker(self.compilation, node, names_only=True)
             edge_deciders = {}
-            for previous_atom in execution_graph.predecessors(atom):
+            for prev_node in graph.predecessors_iter(node):
                 # If there is any link function that says if this connection
                 # is able to run (or should not) ensure we retain it and use
                 # it later as needed.
-                u_v_data = execution_graph.adj[previous_atom][atom]
-                u_v_decider = u_v_data.get(flow.LINK_DECIDER)
+                u_v_data = graph.adj[prev_node][node]
+                u_v_decider = u_v_data.get(LINK_DECIDER)
                 if u_v_decider is not None:
-                    edge_deciders[previous_atom.name] = u_v_decider
+                    edge_deciders[prev_node.name] = u_v_decider
             metadata['scope_walker'] = walker
             metadata['check_transition_handler'] = check_transition_handler
             metadata['change_state_handler'] = change_state_handler
             metadata['scheduler'] = scheduler
             metadata['edge_deciders'] = edge_deciders
-            self._atom_cache[atom.name] = metadata
-        self._atoms_by_kind['retry'] = all_retry_atoms
-        self._atoms_by_kind['task'] = all_task_atoms
+            self._atom_cache[node.name] = metadata
 
     @property
     def compilation(self):
@@ -162,15 +162,6 @@ class Runtime(object):
         metadata = self._atom_cache[atom.name]
         return metadata['edge_deciders']
 
-    def fetch_atoms_by_kind(self, kind):
-        """Fetches all the atoms of a given kind.
-
-        NOTE(harlowja): Currently only ``task`` or ``retry`` are valid
-                        kinds of atoms (requesting other kinds will just
-                        return empty lists).
-        """
-        return self._atoms_by_kind.get(kind, [])
-
     def fetch_scheduler(self, atom):
         """Fetches the cached specific scheduler for the given atom."""
         # This does not check if the name exists (since this is only used
@@ -197,7 +188,7 @@ class Runtime(object):
     # Various helper methods used by the runtime components; not for public
     # consumption...
 
-    def reset_nodes(self, atoms, state=st.PENDING, intention=st.EXECUTE):
+    def reset_atoms(self, atoms, state=st.PENDING, intention=st.EXECUTE):
         """Resets all the provided atoms to the given state and intention."""
         tweaked = []
         for atom in atoms:
@@ -213,7 +204,7 @@ class Runtime(object):
 
     def reset_all(self, state=st.PENDING, intention=st.EXECUTE):
         """Resets all atoms to the given state and intention."""
-        return self.reset_nodes(self.analyzer.iterate_all_nodes(),
+        return self.reset_atoms(self.analyzer.iterate_nodes(com.ATOMS),
                                 state=state, intention=intention)
 
     def reset_subgraph(self, atom, state=st.PENDING, intention=st.EXECUTE):
@@ -221,8 +212,9 @@ class Runtime(object):
 
         The subgraph is contained of all of the atoms successors.
         """
-        return self.reset_nodes(self.analyzer.iterate_subgraph(atom),
-                                state=state, intention=intention)
+        return self.reset_atoms(
+            self.analyzer.iterate_connected_atoms(atom),
+            state=state, intention=intention)
 
     def retry_subflow(self, retry):
         """Prepares a retrys + its subgraph for execution.
diff --git a/taskflow/engines/action_engine/scopes.py b/taskflow/engines/action_engine/scopes.py
index 5fd7ee6f..1d309d89 100644
--- a/taskflow/engines/action_engine/scopes.py
+++ b/taskflow/engines/action_engine/scopes.py
@@ -14,14 +14,14 @@
 # License for the specific language governing permissions and limitations
 # under the License.
 
-from taskflow import atom as atom_type
-from taskflow import flow as flow_type
+from taskflow.engines.action_engine import compiler as co
 from taskflow import logging
 
 LOG = logging.getLogger(__name__)
 
 
-def _extract_atoms_iter(node, idx=-1):
+def _depth_first_reverse_iterate(node, idx=-1):
+    """Iterates connected (in reverse) nodes in tree (from starting node)."""
     # Always go left to right, since right to left is the pattern order
     # and we want to go backwards and not forwards through that ordering...
     if idx == -1:
@@ -29,15 +29,17 @@ def _extract_atoms_iter(node, idx=-1):
     else:
         children_iter = reversed(node[0:idx])
     for child in children_iter:
-        if isinstance(child.item, flow_type.Flow):
-            for atom in _extract_atoms_iter(child):
+        child_kind = child.metadata['kind']
+        if child_kind == co.FLOW:
+            # Jump through these...
+            #
+            # TODO(harlowja): make this non-recursive and remove this
+            # style of doing this when
+            # https://review.openstack.org/#/c/205731/ merges...
+            for atom in _depth_first_reverse_iterate(child):
                 yield atom
-        elif isinstance(child.item, atom_type.Atom):
-            yield child.item
         else:
-            raise TypeError(
-                "Unknown extraction item '%s' (%s)" % (child.item,
-                                                       type(child.item)))
+            yield child.item
 
 
 class ScopeWalker(object):
@@ -57,13 +59,10 @@ class ScopeWalker(object):
                              " hierarchy" % atom)
         self._level_cache = {}
         self._atom = atom
-        self._graph = compilation.execution_graph
+        self._execution_graph = compilation.execution_graph
         self._names_only = names_only
         self._predecessors = None
 
-    #: Function that extracts the *associated* atoms of a given tree node.
-    _extract_atoms_iter = staticmethod(_extract_atoms_iter)
-
     def __iter__(self):
         """Iterates over the visible scopes.
 
@@ -99,10 +98,14 @@ class ScopeWalker(object):
         nodes (aka we have reached the top of the tree) or we run out of
         predecessors.
         """
+        graph = self._execution_graph
         if self._predecessors is None:
-            pred_iter = self._graph.bfs_predecessors_iter(self._atom)
-            self._predecessors = set(pred_iter)
-        predecessors = self._predecessors.copy()
+            predecessors = set(
+                node for node in graph.bfs_predecessors_iter(self._atom)
+                if graph.node[node]['kind'] in co.ATOMS)
+            self._predecessors = predecessors.copy()
+        else:
+            predecessors = self._predecessors.copy()
         last = self._node
         for lvl, parent in enumerate(self._node.path_iter(include_self=False)):
             if not predecessors:
@@ -114,7 +117,7 @@ class ScopeWalker(object):
             except KeyError:
                 visible = []
                 removals = set()
-                for atom in self._extract_atoms_iter(parent, idx=last_idx):
+                for atom in _depth_first_reverse_iterate(parent, idx=last_idx):
                     if atom in predecessors:
                         predecessors.remove(atom)
                         removals.add(atom)
diff --git a/taskflow/formatters.py b/taskflow/formatters.py
index 33fb7088..41b409c9 100644
--- a/taskflow/formatters.py
+++ b/taskflow/formatters.py
@@ -16,6 +16,7 @@
 
 import functools
 
+from taskflow.engines.action_engine import compiler
 from taskflow import exceptions as exc
 from taskflow import states
 from taskflow.types import tree
@@ -45,7 +46,8 @@ def _fetch_predecessor_tree(graph, atom):
     while stack:
         parent, node = stack.pop()
         for pred_node in graph.predecessors_iter(node):
-            child = tree.Node(pred_node)
+            child = tree.Node(pred_node,
+                              **graph.node[pred_node])
             parent.add(child)
             stack.append((child, pred_node))
             seen.add(pred_node)
@@ -62,8 +64,13 @@ class FailureFormatter(object):
     def __init__(self, engine, hide_inputs_outputs_of=()):
         self._hide_inputs_outputs_of = hide_inputs_outputs_of
         self._engine = engine
+        self._formatter_funcs = {
+            compiler.FLOW: self._format_flow,
+        }
+        for kind in compiler.ATOMS:
+            self._formatter_funcs[kind] = self._format_atom
 
-    def _format_node(self, storage, cache, node):
+    def _format_atom(self, storage, cache, node):
         """Formats a single tree node (atom) into a string version."""
         atom = node.item
         atom_name = atom.name
@@ -101,6 +108,16 @@ class FailureFormatter(object):
         else:
             return "Atom '%s'" % (atom_name)
 
+    def _format_flow(self, storage, cache, node):
+        """Formats a single tree node (flow) into a string version."""
+        flow = node.item
+        return flow.name
+
+    def _format_node(self, storage, cache, node):
+        """Formats a single tree node into a string version."""
+        formatter_func = self._formatter_funcs[node.metadata['kind']]
+        return formatter_func(storage, cache, node)
+
     def format(self, fail, atom_matcher):
         """Returns a (exc_info, details) tuple about the failure.
diff --git a/taskflow/tests/unit/action_engine/test_builder.py b/taskflow/tests/unit/action_engine/test_builder.py
index b4067449..08877f8e 100644
--- a/taskflow/tests/unit/action_engine/test_builder.py
+++ b/taskflow/tests/unit/action_engine/test_builder.py
@@ -37,18 +37,19 @@ class BuildersTest(test.TestCase):
         compilation = compiler.PatternCompiler(flow).compile()
         flow_detail = pu.create_flow_detail(flow)
         store = storage.Storage(flow_detail)
-        # This ensures the tasks exist in storage...
-        for task in compilation.execution_graph:
-            store.ensure_atom(task)
+        nodes_iter = compilation.execution_graph.nodes_iter(data=True)
+        for node, node_attrs in nodes_iter:
+            if node_attrs['kind'] in ('task', 'retry'):
+                store.ensure_atom(node)
         if initial_state:
             store.set_flow_state(initial_state)
-        task_notifier = notifier.Notifier()
+        atom_notifier = notifier.Notifier()
         task_executor = executor.SerialTaskExecutor()
         retry_executor = executor.SerialRetryExecutor()
         task_executor.start()
         self.addCleanup(task_executor.stop)
         r = runtime.Runtime(compilation, store,
-                            task_notifier, task_executor,
+                            atom_notifier, task_executor,
                             retry_executor)
         r.compile()
         return r
@@ -305,6 +306,6 @@ class BuildersTest(test.TestCase):
         self.assertEqual(1, occurrences.get((builder.GAME_OVER, st.SUCCESS)))
         self.assertEqual(1, occurrences.get((builder.UNDEFINED, st.RESUMING)))
 
-        self.assertEqual(0, len(memory.next_nodes))
+        self.assertEqual(0, len(memory.next_up))
         self.assertEqual(0, len(memory.not_done))
         self.assertEqual(0, len(memory.failures))
diff --git a/taskflow/tests/unit/action_engine/test_compile.py b/taskflow/tests/unit/action_engine/test_compile.py
index 884cd8d5..b676e0ea 100644
--- a/taskflow/tests/unit/action_engine/test_compile.py
+++ b/taskflow/tests/unit/action_engine/test_compile.py
@@ -49,21 +49,22 @@ class PatternCompileTest(test.TestCase):
         a, b, c, d = test_utils.make_many(4)
         flo = lf.Flow("test")
         flo.add(a, b, c)
-        sflo = lf.Flow("sub-test")
-        sflo.add(d)
-        flo.add(sflo)
+        inner_flo = lf.Flow("sub-test")
+        inner_flo.add(d)
+        flo.add(inner_flo)
 
         compilation = compiler.PatternCompiler(flo).compile()
         g = compilation.execution_graph
-        self.assertEqual(4, len(g))
+        self.assertEqual(6, len(g))
 
         order = g.topological_sort()
-        self.assertEqual([a, b, c, d], order)
-        self.assertTrue(g.has_edge(c, d))
-        self.assertEqual(g.get_edge_data(c, d), {'invariant': True})
+        self.assertEqual([flo, a, b, c, inner_flo, d], order)
+        self.assertTrue(g.has_edge(c, inner_flo))
+        self.assertTrue(g.has_edge(inner_flo, d))
+        self.assertEqual(g.get_edge_data(inner_flo, d), {'invariant': True})
 
         self.assertEqual([d], list(g.no_successors_iter()))
-        self.assertEqual([a], list(g.no_predecessors_iter()))
+        self.assertEqual([flo], list(g.no_predecessors_iter()))
 
     def test_invalid(self):
         a, b, c = test_utils.make_many(3)
@@ -79,36 +80,42 @@ class PatternCompileTest(test.TestCase):
         flo.add(a, b, c, d)
 
         compilation = compiler.PatternCompiler(flo).compile()
         g = compilation.execution_graph
-        self.assertEqual(4, len(g))
-        self.assertEqual(0, g.number_of_edges())
+        self.assertEqual(5, len(g))
+        self.assertItemsEqual(g.edges(), [
+            (flo, a),
+            (flo, b),
+            (flo, c),
+            (flo, d),
+        ])
         self.assertEqual(set([a, b, c, d]),
                         set(g.no_successors_iter()))
-        self.assertEqual(set([a, b, c, d]),
+        self.assertEqual(set([flo]),
                         set(g.no_predecessors_iter()))
 
     def test_linear_nested(self):
         a, b, c, d = test_utils.make_many(4)
         flo = lf.Flow("test")
         flo.add(a, b)
-        flo2 = uf.Flow("test2")
-        flo2.add(c, d)
-        flo.add(flo2)
+        inner_flo = uf.Flow("test2")
+        inner_flo.add(c, d)
+        flo.add(inner_flo)
 
         compilation = compiler.PatternCompiler(flo).compile()
-        g = compilation.execution_graph
-        self.assertEqual(4, len(g))
+        graph = compilation.execution_graph
+        self.assertEqual(6, len(graph))
 
-        lb = g.subgraph([a, b])
+        lb = graph.subgraph([a, b])
         self.assertFalse(lb.has_edge(b, a))
         self.assertTrue(lb.has_edge(a, b))
-        self.assertEqual(g.get_edge_data(a, b), {'invariant': True})
+        self.assertEqual(graph.get_edge_data(a, b), {'invariant': True})
 
-        ub = g.subgraph([c, d])
+        ub = graph.subgraph([c, d])
         self.assertEqual(0, ub.number_of_edges())
 
         # This ensures that c and d do not start executing until after b.
-        self.assertTrue(g.has_edge(b, c))
-        self.assertTrue(g.has_edge(b, d))
+        self.assertTrue(graph.has_edge(b, inner_flo))
+        self.assertTrue(graph.has_edge(inner_flo, c))
+        self.assertTrue(graph.has_edge(inner_flo, d))
 
     def test_unordered_nested(self):
         a, b, c, d = test_utils.make_many(4)
@@ -120,34 +127,30 @@ class PatternCompileTest(test.TestCase):
         flo.add(a, b)
         flo2 = lf.Flow("test2")
         flo2.add(c, d)
         flo.add(flo2)
 
         compilation = compiler.PatternCompiler(flo).compile()
         g = compilation.execution_graph
-        self.assertEqual(4, len(g))
-        for n in [a, b]:
-            self.assertFalse(g.has_edge(n, c))
-            self.assertFalse(g.has_edge(n, d))
-        self.assertFalse(g.has_edge(d, c))
-        self.assertTrue(g.has_edge(c, d))
-        self.assertEqual(g.get_edge_data(c, d), {'invariant': True})
-
-        ub = g.subgraph([a, b])
-        self.assertEqual(0, ub.number_of_edges())
-        lb = g.subgraph([c, d])
-        self.assertEqual(1, lb.number_of_edges())
+        self.assertEqual(6, len(g))
+        self.assertItemsEqual(g.edges(), [
+            (flo, a),
+            (flo, b),
+            (flo, flo2),
+            (flo2, c),
+            (c, d)
+        ])
 
     def test_unordered_nested_in_linear(self):
         a, b, c, d = test_utils.make_many(4)
-        flo = lf.Flow('lt').add(
-            a,
-            uf.Flow('ut').add(b, c),
-            d)
+        inner_flo = uf.Flow('ut').add(b, c)
+        flo = lf.Flow('lt').add(a, inner_flo, d)
 
         compilation = compiler.PatternCompiler(flo).compile()
         g = compilation.execution_graph
-        self.assertEqual(4, len(g))
+        self.assertEqual(6, len(g))
         self.assertItemsEqual(g.edges(), [
-            (a, b),
-            (a, c),
+            (flo, a),
+            (a, inner_flo),
+            (inner_flo, b),
+            (inner_flo, c),
             (b, d),
-            (c, d)
+            (c, d),
         ])
 
     def test_graph(self):
@@ -157,8 +160,8 @@ class PatternCompileTest(test.TestCase):
         a, b, c, d = test_utils.make_many(4)
         flo = gf.Flow("test")
         flo.add(a, b, c, d)
 
         compilation = compiler.PatternCompiler(flo).compile()
         g = compilation.execution_graph
-        self.assertEqual(4, len(g))
-        self.assertEqual(0, g.number_of_edges())
+        self.assertEqual(5, len(g))
+        self.assertEqual(4, g.number_of_edges())
 
     def test_graph_nested(self):
         a, b, c, d, e, f, g = test_utils.make_many(7)
@@ -171,10 +174,17 @@ class PatternCompileTest(test.TestCase):
         flo = gf.Flow("test")
         flo.add(a, b, c, d)
         flo2 = lf.Flow("test2")
         flo2.add(e, f, g)
         flo.add(flo2)
 
         compilation = compiler.PatternCompiler(flo).compile()
         graph = compilation.execution_graph
-        self.assertEqual(7, len(graph))
-        self.assertItemsEqual(graph.edges(data=True), [
-            (e, f, {'invariant': True}),
-            (f, g, {'invariant': True})
+        self.assertEqual(9, len(graph))
+        self.assertItemsEqual(graph.edges(), [
+            (flo, a),
+            (flo, b),
+            (flo, c),
+            (flo, d),
+            (flo, flo2),
+
+            (flo2, e),
+            (e, f),
+            (f, g),
         ])
 
     def test_graph_nested_graph(self):
@@ -187,9 +197,19 @@ class PatternCompileTest(test.TestCase):
         flo2 = gf.Flow("test2")
         flo2.add(e, f, g)
         flo.add(flo2)
 
         compilation = compiler.PatternCompiler(flo).compile()
-        g = compilation.execution_graph
-        self.assertEqual(7, len(g))
-        self.assertEqual(0, g.number_of_edges())
+        graph = compilation.execution_graph
+        self.assertEqual(9, len(graph))
+        self.assertItemsEqual(graph.edges(), [
+            (flo, a),
+            (flo, b),
+            (flo, c),
+            (flo, d),
+            (flo, flo2),
+
+            (flo2, e),
+            (flo2, f),
+            (flo2, g),
+        ])
 
     def test_graph_links(self):
         a, b, c, d = test_utils.make_many(4)
@@ -201,13 +221,15 @@ class PatternCompileTest(test.TestCase):
 
         compilation = compiler.PatternCompiler(flo).compile()
         g = compilation.execution_graph
-        self.assertEqual(4, len(g))
+        self.assertEqual(5, len(g))
         self.assertItemsEqual(g.edges(data=True), [
+            (flo, a, {'invariant': True}),
+
             (a, b, {'manual': True}),
             (b, c, {'manual': True}),
             (c, d, {'manual': True}),
         ])
-        self.assertItemsEqual([a], g.no_predecessors_iter())
+        self.assertItemsEqual([flo], g.no_predecessors_iter())
         self.assertItemsEqual([d], g.no_successors_iter())
 
@@ -217,96 +239,112 @@ def test_graph_dependencies(self):
         a = test_utils.ProvidesRequiresTask('a', provides=['x'], requires=[])
         b = test_utils.ProvidesRequiresTask('b', provides=[], requires=['x'])
         flo = gf.Flow("test").add(a, b)
 
         compilation = compiler.PatternCompiler(flo).compile()
         g = compilation.execution_graph
-        self.assertEqual(2, len(g))
+        self.assertEqual(3, len(g))
         self.assertItemsEqual(g.edges(data=True), [
+            (flo, a, {'invariant': True}),
             (a, b, {'reasons': set(['x'])})
         ])
-        self.assertItemsEqual([a], g.no_predecessors_iter())
+        self.assertItemsEqual([flo], g.no_predecessors_iter())
         self.assertItemsEqual([b], g.no_successors_iter())
 
     def test_graph_nested_requires(self):
         a = test_utils.ProvidesRequiresTask('a', provides=['x'], requires=[])
         b = test_utils.ProvidesRequiresTask('b', provides=[], requires=[])
         c = test_utils.ProvidesRequiresTask('c', provides=[], requires=['x'])
-        flo = gf.Flow("test").add(
-            a,
-            lf.Flow("test2").add(b, c)
-        )
+        inner_flo = lf.Flow("test2").add(b, c)
+        flo = gf.Flow("test").add(a, inner_flo)
 
         compilation = compiler.PatternCompiler(flo).compile()
-        g = compilation.execution_graph
-        self.assertEqual(3, len(g))
-        self.assertItemsEqual(g.edges(data=True), [
-            (a, c, {'reasons': set(['x'])}),
-            (b, c, {'invariant': True})
+        graph = compilation.execution_graph
+        self.assertEqual(5, len(graph))
+        self.assertItemsEqual(graph.edges(data=True), [
+            (flo, a, {'invariant': True}),
+            (inner_flo, b, {'invariant': True}),
+            (a, inner_flo, {'reasons': set(['x'])}),
+            (b, c, {'invariant': True}),
         ])
-        self.assertItemsEqual([a, b], g.no_predecessors_iter())
-        self.assertItemsEqual([c], g.no_successors_iter())
+        self.assertItemsEqual([flo], graph.no_predecessors_iter())
+        self.assertItemsEqual([c], graph.no_successors_iter())
 
     def test_graph_nested_provides(self):
         a = test_utils.ProvidesRequiresTask('a', provides=[], requires=['x'])
         b = test_utils.ProvidesRequiresTask('b', provides=['x'], requires=[])
         c = test_utils.ProvidesRequiresTask('c', provides=[], requires=[])
-        flo = gf.Flow("test").add(
-            a,
-            lf.Flow("test2").add(b, c)
-        )
+        inner_flo = lf.Flow("test2").add(b, c)
+        flo = gf.Flow("test").add(a, inner_flo)
 
         compilation = compiler.PatternCompiler(flo).compile()
-        g = compilation.execution_graph
-        self.assertEqual(3, len(g))
-        self.assertItemsEqual(g.edges(data=True), [
+        graph = compilation.execution_graph
+        self.assertEqual(5, len(graph))
+        self.assertItemsEqual(graph.edges(data=True), [
+            (flo, inner_flo, {'invariant': True}),
+
+            (inner_flo, b, {'invariant': True}),
             (b, c, {'invariant': True}),
-            (b, a, {'reasons': set(['x'])})
+            (c, a, {'reasons': set(['x'])}),
         ])
-        self.assertItemsEqual([b], g.no_predecessors_iter())
-        self.assertItemsEqual([a, c], g.no_successors_iter())
+        self.assertItemsEqual([flo], graph.no_predecessors_iter())
+        self.assertItemsEqual([a], graph.no_successors_iter())
 
     def test_empty_flow_in_linear_flow(self):
-        flow = lf.Flow('lf')
+        flo = lf.Flow('lf')
         a = test_utils.ProvidesRequiresTask('a', provides=[], requires=[])
         b = test_utils.ProvidesRequiresTask('b', provides=[], requires=[])
-        empty_flow = gf.Flow("empty")
-        flow.add(a, empty_flow, b)
+        empty_flo = gf.Flow("empty")
+        flo.add(a, empty_flo, b)
 
-        compilation = compiler.PatternCompiler(flow).compile()
-        g = compilation.execution_graph
-        self.assertItemsEqual(g.edges(data=True), [
-            (a, b, {'invariant': True}),
+        compilation = compiler.PatternCompiler(flo).compile()
+        graph = compilation.execution_graph
+        self.assertItemsEqual(graph.edges(), [
+            (flo, a),
+            (a, empty_flo),
+            (empty_flo, b),
         ])
 
     def test_many_empty_in_graph_flow(self):
-        flow = gf.Flow('root')
+        flo = gf.Flow('root')
 
         a = test_utils.ProvidesRequiresTask('a', provides=[], requires=[])
-        flow.add(a)
+        flo.add(a)
 
         b = lf.Flow('b')
         b_0 = test_utils.ProvidesRequiresTask('b.0', provides=[], requires=[])
+        b_1 = lf.Flow('b.1')
+        b_2 = lf.Flow('b.2')
         b_3 = test_utils.ProvidesRequiresTask('b.3', provides=[], requires=[])
-        b.add(
-            b_0,
-            lf.Flow('b.1'), lf.Flow('b.2'),
-            b_3,
-        )
-        flow.add(b)
+        b.add(b_0, b_1, b_2, b_3)
+        flo.add(b)
 
         c = lf.Flow('c')
-        c.add(lf.Flow('c.0'), lf.Flow('c.1'), lf.Flow('c.2'))
-        flow.add(c)
+        c_0 = lf.Flow('c.0')
+        c_1 = lf.Flow('c.1')
+        c_2 = lf.Flow('c.2')
+        c.add(c_0, c_1, c_2)
+        flo.add(c)
 
         d = test_utils.ProvidesRequiresTask('d', provides=[], requires=[])
-        flow.add(d)
+        flo.add(d)
 
-        flow.link(b, d)
-        flow.link(a, d)
-        flow.link(c, d)
+        flo.link(b, d)
+        flo.link(a, d)
+        flo.link(c, d)
 
-        compilation = compiler.PatternCompiler(flow).compile()
-        g = compilation.execution_graph
-        self.assertTrue(g.has_edge(b_0, b_3))
-        self.assertTrue(g.has_edge(b_3, d))
-        self.assertEqual(4, len(g))
+        compilation = compiler.PatternCompiler(flo).compile()
+        graph = compilation.execution_graph
+
+        self.assertTrue(graph.has_edge(flo, a))
+
+        self.assertTrue(graph.has_edge(flo, b))
+        self.assertTrue(graph.has_edge(b_0, b_1))
+        self.assertTrue(graph.has_edge(b_1, b_2))
+        self.assertTrue(graph.has_edge(b_2, b_3))
+
+        self.assertTrue(graph.has_edge(flo, c))
+        self.assertTrue(graph.has_edge(c_0, c_1))
+        self.assertTrue(graph.has_edge(c_1, c_2))
+
+        self.assertTrue(graph.has_edge(b_3, d))
+        self.assertEqual(12, len(graph))
 
     def test_empty_flow_in_nested_flow(self):
         flow = lf.Flow('lf')
@@ -323,9 +361,10 @@ def test_empty_flow_in_nested_flow(self):
 
         compilation = compiler.PatternCompiler(flow).compile()
         g = compilation.execution_graph
-        self.assertTrue(g.has_edge(a, c))
-        self.assertTrue(g.has_edge(c, d))
-        self.assertTrue(g.has_edge(d, b))
+        for source, target in [(flow, a), (a, flow2),
+                               (flow2, c), (c, empty_flow),
+                               (empty_flow, d), (d, b)]:
+            self.assertTrue(g.has_edge(source, target))
 
     def test_empty_flow_in_graph_flow(self):
         flow = lf.Flow('lf')
@@ -336,19 +375,9 @@ def test_empty_flow_in_graph_flow(self):
         a = test_utils.ProvidesRequiresTask('a', provides=[], requires=[])
         b = test_utils.ProvidesRequiresTask('b', provides=[], requires=[])
         empty_flow = lf.Flow("empty")
         flow.add(a, empty_flow, b)
 
         compilation = compiler.PatternCompiler(flow).compile()
         g = compilation.execution_graph
-        self.assertTrue(g.has_edge(a, b))
-
-    def test_empty_flow_in_graph_flow_empty_linkage(self):
-        flow = gf.Flow('lf')
-        a = test_utils.ProvidesRequiresTask('a', provides=[], requires=[])
-        b = test_utils.ProvidesRequiresTask('b', provides=[], requires=[])
-        empty_flow = lf.Flow("empty")
-        flow.add(a, empty_flow, b)
-        flow.link(empty_flow, b)
-
-        compilation = compiler.PatternCompiler(flow).compile()
-        g = compilation.execution_graph
-        self.assertEqual(0, len(g.edges()))
+        self.assertTrue(g.has_edge(flow, a))
+        self.assertTrue(g.has_edge(a, empty_flow))
+        self.assertTrue(g.has_edge(empty_flow, b))
 
     def test_empty_flow_in_graph_flow_linkage(self):
         flow = gf.Flow('lf')
@@ -360,8 +389,9 @@ def test_empty_flow_in_graph_flow_linkage(self):
 
         compilation = compiler.PatternCompiler(flow).compile()
         g = compilation.execution_graph
-        self.assertEqual(1, len(g.edges()))
         self.assertTrue(g.has_edge(a, b))
+        self.assertTrue(g.has_edge(flow, a))
+        self.assertTrue(g.has_edge(flow, empty_flow))
 
     def test_checks_for_dups(self):
         flo = gf.Flow("test").add(
@@ -384,36 +414,39 @@ def test_retry_in_linear_flow(self):
         flo = lf.Flow("test", retry.AlwaysRevert("c"))
         compilation = compiler.PatternCompiler(flo).compile()
         g = compilation.execution_graph
-        self.assertEqual(1, len(g))
-        self.assertEqual(0, g.number_of_edges())
+        self.assertEqual(2, len(g))
+        self.assertEqual(1, g.number_of_edges())
 
     def test_retry_in_unordered_flow(self):
         flo = uf.Flow("test", retry.AlwaysRevert("c"))
         compilation = compiler.PatternCompiler(flo).compile()
         g = compilation.execution_graph
-        self.assertEqual(1, len(g))
-        self.assertEqual(0, g.number_of_edges())
+        self.assertEqual(2, len(g))
+        self.assertEqual(1, g.number_of_edges())
 
     def test_retry_in_graph_flow(self):
         flo = gf.Flow("test", retry.AlwaysRevert("c"))
         compilation = compiler.PatternCompiler(flo).compile()
         g = compilation.execution_graph
-        self.assertEqual(1, len(g))
-        self.assertEqual(0, g.number_of_edges())
+        self.assertEqual(2, len(g))
+        self.assertEqual(1, g.number_of_edges())
 
     def test_retry_in_nested_flows(self):
         c1 = retry.AlwaysRevert("c1")
         c2 = retry.AlwaysRevert("c2")
-        flo = lf.Flow("test", c1).add(lf.Flow("test2", c2))
+        inner_flo = lf.Flow("test2", c2)
+        flo = lf.Flow("test", c1).add(inner_flo)
         compilation = compiler.PatternCompiler(flo).compile()
 
         g = compilation.execution_graph
-        self.assertEqual(2, len(g))
+        self.assertEqual(4, len(g))
         self.assertItemsEqual(g.edges(data=True), [
-            (c1, c2, {'retry': True})
+            (flo, c1, {'invariant': True}),
+            (c1, inner_flo, {'invariant': True, 'retry': True}),
+            (inner_flo, c2, {'invariant': True}),
         ])
         self.assertIs(c1, g.node[c2]['retry'])
-        self.assertItemsEqual([c1], g.no_predecessors_iter())
+        self.assertItemsEqual([flo], g.no_predecessors_iter())
         self.assertItemsEqual([c2], g.no_successors_iter())
 
     def test_retry_in_linear_flow_with_tasks(self):
@@ -423,13 +456,14 @@ def test_retry_in_linear_flow_with_tasks(self):
         c = retry.AlwaysRevert("c")
         a, b = test_utils.make_many(2)
         flo = lf.Flow("test", c).add(a, b)
 
         compilation = compiler.PatternCompiler(flo).compile()
         g = compilation.execution_graph
-        self.assertEqual(3, len(g))
+        self.assertEqual(4, len(g))
         self.assertItemsEqual(g.edges(data=True), [
+            (flo, c, {'invariant': True}),
             (a, b, {'invariant': True}),
-            (c, a, {'retry': True})
+            (c, a, {'invariant': True, 'retry': True})
         ])
 
-        self.assertItemsEqual([c], g.no_predecessors_iter())
+        self.assertItemsEqual([flo], g.no_predecessors_iter())
         self.assertItemsEqual([b], g.no_successors_iter())
         self.assertIs(c, g.node[a]['retry'])
         self.assertIs(c, g.node[b]['retry'])
@@ -441,13 +475,14 @@ def test_retry_in_unordered_flow_with_tasks(self):
         c = retry.AlwaysRevert("c")
         a, b = test_utils.make_many(2)
         flo = uf.Flow("test", c).add(a, b)
 
         compilation = compiler.PatternCompiler(flo).compile()
         g = compilation.execution_graph
-        self.assertEqual(3, len(g))
+        self.assertEqual(4, len(g))
         self.assertItemsEqual(g.edges(data=True), [
-            (c, a, {'retry': True}),
-            (c, b, {'retry': True})
+            (flo, c, {'invariant': True}),
+            (c, a, {'invariant': True, 'retry': True}),
+            (c, b, {'invariant': True, 'retry': True}),
         ])
 
-        self.assertItemsEqual([c], g.no_predecessors_iter())
+        self.assertItemsEqual([flo], g.no_predecessors_iter())
         self.assertItemsEqual([a, b], g.no_successors_iter())
         self.assertIs(c, g.node[a]['retry'])
         self.assertIs(c, g.node[b]['retry'])
@@ -458,15 +493,16 @@ def test_retry_in_graph_flow_with_tasks(self):
         r = retry.AlwaysRevert("cp")
         a, b, c = test_utils.make_many(3)
         flo = gf.Flow("test", r).add(a, b, c).link(b, c)
 
         compilation = compiler.PatternCompiler(flo).compile()
         g = compilation.execution_graph
-        self.assertEqual(4, len(g))
+        self.assertEqual(5, len(g))
         self.assertItemsEqual(g.edges(data=True), [
-            (r, a, {'retry': True}),
-            (r, b, {'retry': True}),
+            (flo, r, {'invariant': True}),
+            (r, a, {'invariant': True, 'retry': True}),
+            (r, b, {'invariant': True, 'retry': True}),
             (b, c, {'manual': True})
         ])
 
-        self.assertItemsEqual([r], g.no_predecessors_iter())
+        self.assertItemsEqual([flo], g.no_predecessors_iter())
         self.assertItemsEqual([a, c], g.no_successors_iter())
         self.assertIs(r, g.node[a]['retry'])
         self.assertIs(r, g.node[b]['retry'])
@@ -476,18 +512,18 @@ def test_retries_hierarchy(self):
         c1 = retry.AlwaysRevert("cp1")
         c2 = retry.AlwaysRevert("cp2")
         a, b, c, d = test_utils.make_many(4)
-        flo = lf.Flow("test", c1).add(
-            a,
-            lf.Flow("test", c2).add(b, c),
-            d)
+        inner_flo = lf.Flow("test", c2).add(b, c)
+        flo = lf.Flow("test", c1).add(a, inner_flo, d)
         compilation = compiler.PatternCompiler(flo).compile()
 
         g = compilation.execution_graph
-        self.assertEqual(6, len(g))
+        self.assertEqual(8, len(g))
         self.assertItemsEqual(g.edges(data=True), [
-            (c1, a, {'retry': True}),
-            (a, c2, {'invariant': True}),
-            (c2, b, {'retry': True}),
+            (flo, c1, {'invariant': True}),
+            (c1, a, {'invariant': True, 'retry': True}),
+            (a, inner_flo, {'invariant': True}),
+            (inner_flo, c2, {'invariant': True}),
+            (c2, b, {'invariant': True, 'retry': True}),
             (b, c, {'invariant': True}),
             (c, d, {'invariant': True}),
         ])
@@ -501,17 +537,17 @@ def test_retry_subflows_hierarchy(self):
         c1 = retry.AlwaysRevert("cp1")
         a, b, c, d = test_utils.make_many(4)
-        flo = lf.Flow("test", c1).add(
-            a,
-            lf.Flow("test").add(b, c),
-            d)
+        inner_flo = lf.Flow("test").add(b, c)
+        flo = lf.Flow("test", c1).add(a, inner_flo, d)
         compilation = compiler.PatternCompiler(flo).compile()
 
         g = compilation.execution_graph
-        self.assertEqual(5, len(g))
+        self.assertEqual(7, len(g))
         self.assertItemsEqual(g.edges(data=True), [
-            (c1, a, {'retry': True}),
-            (a, b, {'invariant': True}),
+            (flo, c1, {'invariant': True}),
+            (c1, a, {'invariant': True, 'retry': True}),
+            (a, inner_flo, {'invariant': True}),
+            (inner_flo, b, {'invariant': True}),
             (b, c, {'invariant': True}),
             (c, d, {'invariant': True}),
         ])
diff --git a/taskflow/types/graph.py b/taskflow/types/graph.py
index 7462c9bd..349dc09c 100644
--- a/taskflow/types/graph.py
+++ b/taskflow/types/graph.py
@@ -28,8 +28,11 @@ def _common_format(g, edge_notation):
     lines.append("Frozen: %s" % nx.is_frozen(g))
     lines.append("Density: %0.3f" % nx.density(g))
     lines.append("Nodes: %s" % g.number_of_nodes())
-    for n in g.nodes_iter():
-        lines.append("  - %s" % n)
+    for n, n_data in g.nodes_iter(data=True):
+        if n_data:
+            lines.append("  - %s (%s)" % (n, n_data))
+        else:
+            lines.append("  - %s" % n)
     lines.append("Edges: %s" % g.number_of_edges())
     for (u, v, e_data) in g.edges_iter(data=True):
         if e_data:
diff --git a/taskflow/utils/iter_utils.py b/taskflow/utils/iter_utils.py
index 68810e8c..a96e9cfc 100644
--- a/taskflow/utils/iter_utils.py
+++ b/taskflow/utils/iter_utils.py
@@ -16,12 +16,25 @@
 # License for the specific language governing permissions and limitations
 # under the License.
+import itertools + def count(it): """Returns how many values in the iterator (depletes the iterator).""" return sum(1 for _value in it) +def unique_seen(it, *its): + """Yields unique values from iterator(s) (and retains order).""" + seen = set() + for value in itertools.chain(it, *its): + if value in seen: + continue + else: + yield value + seen.add(value) + + def find_first_match(it, matcher, not_found_value=None): """Searches iterator for first value that matcher callback returns true.""" for value in it: From 75517eb0e01c876894ddfd60d6a8f3441de4c38a Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Fri, 24 Jul 2015 18:10:55 -0700 Subject: [PATCH 23/54] Use the node built-in 'dfs_iter' instead of recursion We can just use the non-recursive depth first iteration of nodes when scanning for atoms to select for a given scope level instead of using recursive calls to achieve the same effect. This makes it possible to have large and heavily nested flows that are not restricted by the python stack limit. Change-Id: I0d18565680f777adbdfca9d4983636c6b3e848da --- taskflow/engines/action_engine/scopes.py | 18 ++++----- taskflow/tests/unit/test_types.py | 24 +++++++++--- taskflow/types/tree.py | 48 +++++++++++++++++------- 3 files changed, 62 insertions(+), 28 deletions(-) diff --git a/taskflow/engines/action_engine/scopes.py b/taskflow/engines/action_engine/scopes.py index 1d309d89..4dbfa76f 100644 --- a/taskflow/engines/action_engine/scopes.py +++ b/taskflow/engines/action_engine/scopes.py @@ -21,7 +21,11 @@ LOG = logging.getLogger(__name__) def _depth_first_reverse_iterate(node, idx=-1): - """Iterates connected (in reverse) nodes in tree (from starting node).""" + """Iterates connected (in reverse) nodes (from starting node). + + Jumps through nodes with ``FLOW`` ``kind`` attribute (does not yield + them back). + """ # Always go left to right, since right to left is the pattern order # and we want to go backwards and not forwards through that ordering... if idx == -1: @@ -29,15 +33,11 @@ def _depth_first_reverse_iterate(node, idx=-1): else: children_iter = reversed(node[0:idx]) for child in children_iter: - child_kind = child.metadata['kind'] - if child_kind == co.FLOW: + if child.metadata['kind'] == co.FLOW: # Jump through these... - # - # TODO(harlowja): make this non-recursive and remove this - # style of doing this when - # https://review.openstack.org/#/c/205731/ merges... 
- for atom in _depth_first_reverse_iterate(child): - yield atom + for child_child in child.dfs_iter(right_to_left=False): + if child_child.metadata['kind'] in co.ATOMS: + yield child_child.item else: yield child.item diff --git a/taskflow/tests/unit/test_types.py b/taskflow/tests/unit/test_types.py index 79044923..9399c893 100644 --- a/taskflow/tests/unit/test_types.py +++ b/taskflow/tests/unit/test_types.py @@ -467,24 +467,38 @@ CEO self.assertEqual(set(['animal', 'reptile', 'mammal', 'horse', 'primate', 'monkey', 'human']), set(things)) - def test_dfs_itr_order(self): + def test_dfs_itr_left_to_right(self): + root = self._make_species() + it = root.dfs_iter(include_self=False, right_to_left=False) + things = list([n.item for n in it]) + self.assertEqual(['reptile', 'mammal', 'primate', + 'human', 'monkey', 'horse'], things) + + def test_dfs_itr_no_self(self): root = self._make_species() - things = list([n.item for n in root.dfs_iter(include_self=True)]) - self.assertEqual(['animal', 'mammal', 'horse', 'primate', - 'monkey', 'human', 'reptile'], things) things = list([n.item for n in root.dfs_iter(include_self=False)]) self.assertEqual(['mammal', 'horse', 'primate', 'monkey', 'human', 'reptile'], things) - def test_bfs_iter(self): + def test_bfs_itr(self): root = self._make_species() things = list([n.item for n in root.bfs_iter(include_self=True)]) self.assertEqual(['animal', 'reptile', 'mammal', 'primate', 'horse', 'human', 'monkey'], things) + + def test_bfs_itr_no_self(self): + root = self._make_species() things = list([n.item for n in root.bfs_iter(include_self=False)]) self.assertEqual(['reptile', 'mammal', 'primate', 'horse', 'human', 'monkey'], things) + def test_bfs_itr_right_to_left(self): + root = self._make_species() + it = root.bfs_iter(include_self=False, right_to_left=True) + things = list([n.item for n in it]) + self.assertEqual(['mammal', 'reptile', 'horse', + 'primate', 'monkey', 'human'], things) + class OrderedSetTest(test.TestCase): diff --git a/taskflow/types/tree.py b/taskflow/types/tree.py index 94c009e1..56c96bbb 100644 --- a/taskflow/types/tree.py +++ b/taskflow/types/tree.py @@ -36,8 +36,9 @@ class FrozenNode(Exception): class _DFSIter(object): """Depth first iterator (non-recursive) over the child nodes.""" - def __init__(self, root, include_self=False): + def __init__(self, root, include_self=False, right_to_left=True): self.root = root + self.right_to_left = bool(right_to_left) self.include_self = bool(include_self) def __iter__(self): @@ -45,20 +46,28 @@ class _DFSIter(object): if self.include_self: stack.append(self.root) else: - stack.extend(self.root.reverse_iter()) + if self.right_to_left: + stack.extend(self.root.reverse_iter()) + else: + # Traverse the left nodes first to the right nodes. + stack.extend(iter(self.root)) while stack: - node = stack.pop() # Visit the node. + node = stack.pop() yield node - # Traverse the left & right subtree. - stack.extend(node.reverse_iter()) + if self.right_to_left: + stack.extend(node.reverse_iter()) + else: + # Traverse the left nodes first to the right nodes. 
+                stack.extend(iter(node))
 
 
 class _BFSIter(object):
     """Breadth first iterator (non-recursive) over the child nodes."""
 
-    def __init__(self, root, include_self=False):
+    def __init__(self, root, include_self=False, right_to_left=False):
         self.root = root
+        self.right_to_left = bool(right_to_left)
         self.include_self = bool(include_self)
 
     def __iter__(self):
@@ -66,13 +75,20 @@
         if self.include_self:
             q.append(self.root)
         else:
-            q.extend(self.root.reverse_iter())
+            if self.right_to_left:
+                q.extend(iter(self.root))
+            else:
+                # Traverse the left nodes first to the right nodes.
+                q.extend(self.root.reverse_iter())
         while q:
-            node = q.popleft()
             # Visit the node.
+            node = q.popleft()
             yield node
-            # Traverse the left & right subtree.
-            q.extend(node.reverse_iter())
+            if self.right_to_left:
+                q.extend(iter(node))
+            else:
+                # Traverse the left nodes first to the right nodes.
+                q.extend(node.reverse_iter())
 
 
 class Node(object):
@@ -361,10 +377,14 @@ class Node(object):
             raise ValueError("%s is not contained in any child" % (item))
         return index_at
 
-    def dfs_iter(self, include_self=False):
+    def dfs_iter(self, include_self=False, right_to_left=True):
         """Depth first iteration (non-recursive) over the child nodes."""
-        return _DFSIter(self, include_self=include_self)
+        return _DFSIter(self,
+                        include_self=include_self,
+                        right_to_left=right_to_left)
 
-    def bfs_iter(self, include_self=False):
+    def bfs_iter(self, include_self=False, right_to_left=False):
         """Breadth first iteration (non-recursive) over the child nodes."""
-        return _BFSIter(self, include_self=include_self)
+        return _BFSIter(self,
+                        include_self=include_self,
+                        right_to_left=right_to_left)

From 6170deaf00cc16a4ae90634bd602df36d397484c Mon Sep 17 00:00:00 2001
From: Davanum Srinivas
Date: Sun, 4 Oct 2015 19:40:09 -0400
Subject: [PATCH 24/54] No need for Oslo Incubator Sync

We can remove openstack-common.conf as we don't sync any code from
oslo-incubator any more.

Change-Id: Idd645486a71ffd560a71fb3731ad3b5ae5941afa
---
 openstack-common.conf | 4 ----
 1 file changed, 4 deletions(-)
 delete mode 100644 openstack-common.conf

diff --git a/openstack-common.conf b/openstack-common.conf
deleted file mode 100644
index 5eb87cc9..00000000
--- a/openstack-common.conf
+++ /dev/null
@@ -1,4 +0,0 @@
-[DEFAULT]
-
-# The base module to hold the copy of openstack.common
-base=taskflow

From 6f6e9a3b2c86a0cf56097683c51b06b8a59ca548 Mon Sep 17 00:00:00 2001
From: Joshua Harlow
Date: Sat, 22 Aug 2015 08:28:59 -0700
Subject: [PATCH 25/54] Relabel internal engine 'event' -> 'outcome'

Instead of calling these variables 'event' it is more appropriate to
call them 'outcome' (or 'outcomes'), since they represent the outcome
(EXECUTED or REVERTED) of an atom and are not event types. To avoid
confusion, just relabel these to be outcome(s) instead.
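For illustration, a rough sketch of the distinction (the constants
mirror the ones in taskflow.engines.action_engine.executor, but the
'complete' function and the engine wiring around it are elided and
hypothetical):

    EXECUTED = 'executed'
    REVERTED = 'reverted'

    def complete(atom_name, outcome, result):
        # Dispatch on what happened to the atom (its outcome), not on
        # a notification event type.
        if outcome == EXECUTED:
            print("atom '%s' finished executing: %r" % (atom_name, result))
        elif outcome == REVERTED:
            print("atom '%s' finished reverting: %r" % (atom_name, result))

    complete('some-task', EXECUTED, 42)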
Change-Id: Ia9caebe279145e4887491151d40a4f59650d40d4 --- taskflow/engines/action_engine/builder.py | 8 ++++---- taskflow/engines/action_engine/completer.py | 16 ++++++++-------- taskflow/engines/action_engine/executor.py | 2 +- 3 files changed, 13 insertions(+), 13 deletions(-) diff --git a/taskflow/engines/action_engine/builder.py b/taskflow/engines/action_engine/builder.py index cdf36466..9013cd8a 100644 --- a/taskflow/engines/action_engine/builder.py +++ b/taskflow/engines/action_engine/builder.py @@ -196,8 +196,8 @@ class MachineBuilder(object): fut = memory.done.pop() atom = fut.atom try: - event, result = fut.result() - retain = do_complete(atom, event, result) + outcome, result = fut.result() + retain = do_complete(atom, outcome, result) if isinstance(result, failure.Failure): if retain: memory.failures.append(result) @@ -211,10 +211,10 @@ class MachineBuilder(object): intention = self._storage.get_atom_intention( atom.name) LOG.debug("Discarding failure '%s' (in" - " response to event '%s') under" + " response to outcome '%s') under" " completion units request during" " completion of atom '%s' (intention" - " is to %s)", result, event, + " is to %s)", result, outcome, atom, intention) except Exception: memory.failures.append(failure.Failure()) diff --git a/taskflow/engines/action_engine/completer.py b/taskflow/engines/action_engine/completer.py index e3ab54d2..1dcb326b 100644 --- a/taskflow/engines/action_engine/completer.py +++ b/taskflow/engines/action_engine/completer.py @@ -113,16 +113,16 @@ class Completer(object): self._retry_action = runtime.retry_action self._undefined_resolver = RevertAll(self._runtime) - def _complete_task(self, task, event, result): + def _complete_task(self, task, outcome, result): """Completes the given task, processes task failure.""" - if event == ex.EXECUTED: + if outcome == ex.EXECUTED: self._task_action.complete_execution(task, result) else: self._task_action.complete_reversion(task, result) - def _complete_retry(self, retry, event, result): + def _complete_retry(self, retry, outcome, result): """Completes the given retry, processes retry failure.""" - if event == ex.EXECUTED: + if outcome == ex.EXECUTED: self._retry_action.complete_execution(retry, result) else: self._retry_action.complete_reversion(retry, result) @@ -149,18 +149,18 @@ class Completer(object): unfinished_atoms.add(atom) return unfinished_atoms - def complete(self, node, event, result): + def complete(self, node, outcome, result): """Performs post-execution completion of a node. Returns whether the result should be saved into an accumulator of failures or whether this should not be done. """ if isinstance(node, task_atom.BaseTask): - self._complete_task(node, event, result) + self._complete_task(node, outcome, result) else: - self._complete_retry(node, event, result) + self._complete_retry(node, outcome, result) if isinstance(result, failure.Failure): - if event == ex.EXECUTED: + if outcome == ex.EXECUTED: self._process_atom_failure(node, result) else: # Reverting failed, always retain the failure... diff --git a/taskflow/engines/action_engine/executor.py b/taskflow/engines/action_engine/executor.py index b47322d7..cfc999c2 100644 --- a/taskflow/engines/action_engine/executor.py +++ b/taskflow/engines/action_engine/executor.py @@ -35,7 +35,7 @@ from taskflow.types import failure from taskflow.types import notifier from taskflow.utils import threading_utils -# Execution and reversion events. +# Execution and reversion outcomes. 
EXECUTED = 'executed'
REVERTED = 'reverted'

From 256670952119bce1180511efe27cb87d7e7a7a0e Mon Sep 17 00:00:00 2001
From: Joshua Harlow
Date: Mon, 5 Oct 2015 17:17:41 -0700
Subject: [PATCH 26/54] Fix bad sphinx module reference

Closes-Bug: #1503085

Change-Id: I7baa826a5db02d8f1e10639e0648676da4cfb4f8
---
 doc/source/notifications.rst | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/doc/source/notifications.rst b/doc/source/notifications.rst
index 0e419e91..56e92c73 100644
--- a/doc/source/notifications.rst
+++ b/doc/source/notifications.rst
@@ -183,7 +183,7 @@ Capturing listener
 Formatters
 ----------
 
-.. automodule:: taskflow.listeners.formatters
+.. automodule:: taskflow.formatters
 
 Hierarchy
 =========

From 9658952b4270155187572bb00eba65dd7a0e108c Mon Sep 17 00:00:00 2001
From: Joshua Harlow
Date: Wed, 30 Sep 2015 18:23:18 -0700
Subject: [PATCH 27/54] Make more of the WBE logging and '__repr__' messages
 more useful

When running the examples, especially at the TRACE/BLATHER/DEBUG
logging level, these updates make it clearer what is being processed,
which messages are being sent/acked/received, and what their contents
are.

Change-Id: I94a497c9064df30197454ae480fe3d471ba1dc7d
---
 taskflow/engines/worker_based/executor.py |  4 ++--
 taskflow/engines/worker_based/protocol.py | 15 ++++++++++++---
 taskflow/engines/worker_based/types.py    | 18 +++++++++---------
 3 files changed, 23 insertions(+), 14 deletions(-)

diff --git a/taskflow/engines/worker_based/executor.py b/taskflow/engines/worker_based/executor.py
index 6ad86d7b..1fb688b0 100644
--- a/taskflow/engines/worker_based/executor.py
+++ b/taskflow/engines/worker_based/executor.py
@@ -95,8 +95,8 @@ class WorkerTaskExecutor(executor.TaskExecutor):
         request = self._requests_cache.get(task_uuid)
         if request is not None:
             response = pr.Response.from_dict(response)
-            LOG.debug("Response with state '%s' received for '%s'",
-                      response.state, request)
+            LOG.debug("Extracted response '%s' and matched it to"
+                      " request '%s'", response, request)
             if response.state == pr.RUNNING:
                 request.transition_and_log_error(pr.RUNNING, logger=LOG)
             elif response.state == pr.EVENT:
diff --git a/taskflow/engines/worker_based/protocol.py b/taskflow/engines/worker_based/protocol.py
index 44913064..63556c25 100644
--- a/taskflow/engines/worker_based/protocol.py
+++ b/taskflow/engines/worker_based/protocol.py
@@ -104,9 +104,10 @@ LOG = logging.getLogger(__name__)
 class Message(object):
     """Base class for all message types."""
 
-    def __str__(self):
-        cls_name = reflection.get_class_name(self, fully_qualified=False)
-        return "<%s> %s" % (cls_name, self.to_dict())
+    def __repr__(self):
+        return ("<%s object at 0x%x with contents %s>"
+                % (reflection.get_class_name(self, fully_qualified=False),
+                   id(self), self.to_dict()))
 
     @abc.abstractmethod
     def to_dict(self):
@@ -150,6 +151,14 @@ class Notify(Message):
     def __init__(self, **data):
         self._data = data
 
+    @property
+    def topic(self):
+        return self._data.get('topic')
+
+    @property
+    def tasks(self):
+        return self._data.get('tasks')
+
     def to_dict(self):
         return self._data
diff --git a/taskflow/engines/worker_based/types.py b/taskflow/engines/worker_based/types.py
index 09a41ab3..2a049aa1 100644
--- a/taskflow/engines/worker_based/types.py
+++ b/taskflow/engines/worker_based/types.py
@@ -206,18 +206,18 @@ class ProxyWorkerFinder(WorkerFinder):
             self._workers[topic] = worker
             return (worker, True)
 
-    def _process_response(self, response, message):
-        """Process notify message from remote side."""
-        LOG.debug("Started processing notify message '%s'",
-                  ku.DelayedPretty(message))
-        topic = response['topic']
-        tasks = response['tasks']
+    def _process_response(self, data, message):
+        """Process notify message sent from remote side."""
+        LOG.debug("Started processing notify response message '%s'",
+                  ku.DelayedPretty(message))
+        response = pr.Notify(**data)
+        LOG.debug("Extracted notify response '%s'", response)
         with self._cond:
-            worker, new_or_updated = self._add(topic, tasks)
+            worker, new_or_updated = self._add(response.topic,
+                                               response.tasks)
             if new_or_updated:
-                LOG.debug("Received notification about worker '%s' (%s"
-                          " total workers are currently known)", worker,
-                          self._total_workers())
+                LOG.debug("Updated worker '%s' (%s total workers are"
+                          " currently known)", worker, self._total_workers())
                 self._cond.notify_all()
         if new_or_updated:
             self.notifier.notify(self.WORKER_ARRIVED, {'worker': worker})

From bf209bdd05f540f122d4e9da2d5585d0010761b1 Mon Sep 17 00:00:00 2001
From: Joshua Harlow
Date: Mon, 12 Oct 2015 17:50:39 -0700
Subject: [PATCH 28/54] Use batch 'get_atoms_states' where we can

Instead of doing many single calls to 'get_atom_state' when we are
iterating over many atoms, prefer to use the bulk call via
'get_atoms_states', which can gather a large number of needed states
in a single call, optimizing more of the reading that taskflow does
with regard to states.

This also removes the proxy method 'get_state' from the engine
internal code, since it is no longer needed after this change.

Change-Id: I90eb43e754a7e5efb657468361d67dbe69d31844
---
 taskflow/engines/action_engine/analyzer.py  | 26 +++++++++++++--------
 taskflow/engines/action_engine/completer.py | 17 ++++++++++----
 taskflow/engines/action_engine/runtime.py   |  5 ++--
 3 files changed, 31 insertions(+), 17 deletions(-)

diff --git a/taskflow/engines/action_engine/analyzer.py b/taskflow/engines/action_engine/analyzer.py
index bdde8975..b0c4a7fd 100644
--- a/taskflow/engines/action_engine/analyzer.py
+++ b/taskflow/engines/action_engine/analyzer.py
@@ -131,7 +131,7 @@ class Analyzer(object):
         if atom is None:
             return iter_utils.unique_seen(self.browse_atoms_for_execute(),
                                           self.browse_atoms_for_revert())
-        state = self.get_state(atom)
+        state = self._storage.get_atom_state(atom.name)
         intention = self._storage.get_atom_intention(atom.name)
         if state == st.SUCCESS:
             if intention == st.REVERT:
@@ -191,7 +191,7 @@ class Analyzer(object):
     def _get_maybe_ready(self, atom, transition_to, allowed_intentions,
                          connected_fetcher, connected_checker,
                          decider_fetcher):
-        state = self.get_state(atom)
+        state = self._storage.get_atom_state(atom.name)
         ok_to_transition = self._runtime.check_atom_transition(atom, state,
                                                                transition_to)
         if not ok_to_transition:
@@ -257,8 +257,15 @@ class Analyzer(object):
 
         If no state is provided it will yield back all retry atoms.
""" - for atom in self.iterate_nodes((co.RETRY,)): - if not state or self.get_state(atom) == state: + if state: + atoms = list(self.iterate_nodes((co.RETRY,))) + atom_states = self._storage.get_atoms_states(atom.name + for atom in atoms) + for atom in atoms: + if atom_states[atom.name][0] == state: + yield atom + else: + for atom in self.iterate_nodes((co.RETRY,)): yield atom def iterate_nodes(self, allowed_kinds): @@ -273,14 +280,13 @@ class Analyzer(object): def is_success(self): """Checks if all atoms in the execution graph are in 'happy' state.""" - for atom in self.iterate_nodes(co.ATOMS): - atom_state = self.get_state(atom) + atoms = list(self.iterate_nodes(co.ATOMS)) + atom_states = self._storage.get_atoms_states(atom.name + for atom in atoms) + for atom in atoms: + atom_state = atom_states[atom.name][0] if atom_state == st.IGNORE: continue if atom_state != st.SUCCESS: return False return True - - def get_state(self, atom): - """Gets the state of a given atom (from the backend storage unit).""" - return self._storage.get_atom_state(atom.name) diff --git a/taskflow/engines/action_engine/completer.py b/taskflow/engines/action_engine/completer.py index 1dcb326b..0b2f820d 100644 --- a/taskflow/engines/action_engine/completer.py +++ b/taskflow/engines/action_engine/completer.py @@ -138,14 +138,21 @@ class Completer(object): atoms that were previously not finished (due to a RUNNING or REVERTING attempt not previously finishing). """ - for atom in self._analyzer.iterate_nodes(co.ATOMS): - if self._analyzer.get_state(atom) == st.FAILURE: + atoms = list(self._analyzer.iterate_nodes(co.ATOMS)) + atom_states = self._storage.get_atoms_states(atom.name + for atom in atoms) + for atom in atoms: + atom_state = atom_states[atom.name][0] + if atom_state == st.FAILURE: self._process_atom_failure(atom, self._storage.get(atom.name)) for retry in self._analyzer.iterate_retries(st.RETRYING): - self._runtime.retry_subflow(retry) + for atom, state, intention in self._runtime.retry_subflow(retry): + if state: + atom_states[atom.name] = (state, intention) unfinished_atoms = set() - for atom in self._analyzer.iterate_nodes(co.ATOMS): - if self._analyzer.get_state(atom) in (st.RUNNING, st.REVERTING): + for atom in atoms: + atom_state = atom_states[atom.name][0] + if atom_state in (st.RUNNING, st.REVERTING): unfinished_atoms.add(atom) return unfinished_atoms diff --git a/taskflow/engines/action_engine/runtime.py b/taskflow/engines/action_engine/runtime.py index 6780e931..441df288 100644 --- a/taskflow/engines/action_engine/runtime.py +++ b/taskflow/engines/action_engine/runtime.py @@ -223,5 +223,6 @@ class Runtime(object): subgraph (its successors) to the ``PENDING`` state with an ``EXECUTE`` intention. """ - self.storage.set_atom_intention(retry.name, st.EXECUTE) - self.reset_subgraph(retry) + tweaked = self.reset_atoms([retry], state=None, intention=st.EXECUTE) + tweaked.extend(self.reset_subgraph(retry)) + return tweaked From 4f41c43ed0d2dc01c953d4904e9cca6f6f662da5 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Wed, 14 Oct 2015 22:45:13 -0700 Subject: [PATCH 29/54] Ensure node 'remove' and 'disassociate' can not be called when frozen When a tree node is frozen it should not be possible to remove nodes from it or cause that tree node to disassociate itself from its parent, so make sure that the appropriate decorator is used to ensure this can not happen (and add tests that verify an exception is raised when mutations are attempted). 
Change-Id: I27e4af7b891ea3fd1826d5a5e71e808ad5af7647
---
 taskflow/tests/unit/test_types.py | 12 ++++++++++++
 taskflow/types/tree.py            |  2 ++
 2 files changed, 14 insertions(+)

diff --git a/taskflow/tests/unit/test_types.py b/taskflow/tests/unit/test_types.py
index 9399c893..d991b684 100644
--- a/taskflow/tests/unit/test_types.py
+++ b/taskflow/tests/unit/test_types.py
@@ -390,6 +390,18 @@ CEO
         root = tree.Node("josh")
         self.assertTrue(root.empty())
 
+    def test_after_frozen(self):
+        root = tree.Node("josh")
+        root.add(tree.Node("josh.1"))
+        root.freeze()
+        self.assertTrue(
+            all(n.frozen for n in root.dfs_iter(include_self=True)))
+        self.assertRaises(tree.FrozenNode,
+                          root.remove, "josh.1")
+        self.assertRaises(tree.FrozenNode, root.disassociate)
+        self.assertRaises(tree.FrozenNode, root.add,
+                          tree.Node("josh.2"))
+
     def test_removal(self):
         root = self._make_species()
         self.assertIsNotNone(root.remove('reptile'))
diff --git a/taskflow/types/tree.py b/taskflow/types/tree.py
index 56c96bbb..e0f61670 100644
--- a/taskflow/types/tree.py
+++ b/taskflow/types/tree.py
@@ -200,6 +200,7 @@ class Node(object):
                              only_direct=only_direct,
                              include_self=include_self)
 
+    @misc.disallow_when_frozen(FrozenNode)
     def disassociate(self):
         """Removes this node from its parent (if any).
 
@@ -219,6 +220,7 @@ class Node(object):
                 occurrences += 1
         return occurrences
 
+    @misc.disallow_when_frozen(FrozenNode)
     def remove(self, item, only_direct=False, include_self=True):
         """Removes a item from this nodes children.

From 5f5fdd181142f0957f0dfa27959010867def5d20 Mon Sep 17 00:00:00 2001
From: Sriram Madapusi Vasudevan
Date: Wed, 14 Oct 2015 11:59:08 -0400
Subject: [PATCH 30/54] feat: add max_dispatches arg to conductor's run

- This will cause the conductor to only do 'n' dispatches, after
  which it stops dispatching jobs.
- This will allow the code that calls the conductor to monitor
  conductor.dispatching and decide what is to be done with it, e.g.
  decommission the conductor, restart the conductor, etc.
- Backward compatible.

Change-Id: I3386c7050806806b5ee44a74ba93e50515a5ab7b
---
 taskflow/conductors/backends/impl_blocking.py | 34 +++++++++++++++----
 .../tests/unit/conductor/test_blocking.py     | 28 +++++++++++++++
 taskflow/utils/iter_utils.py                  | 15 ++++++++
 3 files changed, 70 insertions(+), 7 deletions(-)

diff --git a/taskflow/conductors/backends/impl_blocking.py b/taskflow/conductors/backends/impl_blocking.py
index e3f2f5ab..d8f2b4c3 100644
--- a/taskflow/conductors/backends/impl_blocking.py
+++ b/taskflow/conductors/backends/impl_blocking.py
@@ -11,7 +11,6 @@
 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 # License for the specific language governing permissions and limitations
 # under the License.
- import threading try: @@ -22,13 +21,13 @@ except ImportError: from debtcollector import removals from oslo_utils import excutils import six - from taskflow.conductors import base from taskflow import exceptions as excp from taskflow.listeners import logging as logging_listener from taskflow import logging from taskflow.types import timing as tt from taskflow.utils import async_utils +from taskflow.utils import iter_utils LOG = logging.getLogger(__name__) WAIT_TIMEOUT = 0.5 @@ -44,7 +43,7 @@ class BlockingConductor(base.Conductor): This conductor iterates over jobs in the provided jobboard (waiting for the given timeout if no jobs exist) and attempts to claim them, work on those jobs in its local thread (blocking further work from being claimed - and consumed) and then consume those work units after completetion. This + and consumed) and then consume those work units after completion. This process will repeat until the conductor has been stopped or other critical error occurs. @@ -160,13 +159,24 @@ class BlockingConductor(base.Conductor): LOG.info("Job completed successfully: %s", job) return async_utils.make_completed_future(consume) - def run(self): + def run(self, max_dispatches=None): self._dead.clear() + + total_dispatched = 0 try: + + if max_dispatches is None: + # NOTE(TheSriram): if max_dispatches is not set, + # then the conductor will run indefinitely, and not + # stop after 'n' number of dispatches + max_dispatches = -1 + + dispatch_gen = iter_utils.iter_forever(max_dispatches) + while True: if self._wait_timeout.is_stopped(): break - dispatched = 0 + local_dispatched = 0 for job in self._jobboard.iterjobs(): if self._wait_timeout.is_stopped(): break @@ -186,7 +196,8 @@ class BlockingConductor(base.Conductor): LOG.warn("Job dispatching failed: %s", job, exc_info=True) else: - dispatched += 1 + + local_dispatched += 1 consume = f.result() try: if consume: @@ -200,8 +211,17 @@ class BlockingConductor(base.Conductor): else: LOG.warn("Failed job abandonment: %s", job, exc_info=True) - if dispatched == 0 and not self._wait_timeout.is_stopped(): + + total_dispatched = next(dispatch_gen) + + if local_dispatched == 0 and \ + not self._wait_timeout.is_stopped(): self._wait_timeout.wait() + + except StopIteration: + if max_dispatches >= 0 and total_dispatched >= max_dispatches: + LOG.info("Maximum dispatch limit of %s reached", + max_dispatches) finally: self._dead.set() diff --git a/taskflow/tests/unit/conductor/test_blocking.py b/taskflow/tests/unit/conductor/test_blocking.py index caab904f..29d211fc 100644 --- a/taskflow/tests/unit/conductor/test_blocking.py +++ b/taskflow/tests/unit/conductor/test_blocking.py @@ -121,6 +121,34 @@ class BlockingConductorTest(test_utils.EngineTestBase, test.TestCase): self.assertIsNotNone(fd) self.assertEqual(st.SUCCESS, fd.state) + def test_run_max_dispatches(self): + components = self.make_components() + components.conductor.connect() + consumed_event = threading.Event() + + def on_consume(state, details): + consumed_event.set() + + components.board.notifier.register(base.REMOVAL, on_consume) + with close_many(components.client, components.conductor): + t = threading_utils.daemon_thread( + lambda: components.conductor.run(max_dispatches=5)) + t.start() + lb, fd = pu.temporary_flow_detail(components.persistence) + engines.save_factory_details(fd, test_factory, + [False], {}, + backend=components.persistence) + for _ in range(5): + components.board.post('poke', lb, + details={'flow_uuid': fd.uuid}) + self.assertTrue(consumed_event.wait( + 
test_utils.WAIT_TIMEOUT)) + components.board.post('poke', lb, + details={'flow_uuid': fd.uuid}) + components.conductor.stop() + self.assertTrue(components.conductor.wait(test_utils.WAIT_TIMEOUT)) + self.assertFalse(components.conductor.dispatching) + def test_fail_run(self): components = self.make_components() components.conductor.connect() diff --git a/taskflow/utils/iter_utils.py b/taskflow/utils/iter_utils.py index a96e9cfc..1a366849 100644 --- a/taskflow/utils/iter_utils.py +++ b/taskflow/utils/iter_utils.py @@ -17,6 +17,7 @@ # under the License. import itertools +from six.moves import range as compat_range def count(it): @@ -53,3 +54,17 @@ def while_is_not(it, stop_value): yield value if value is stop_value: break + + +def iter_forever(limit): + """Yields values from iterator until a limit is reached. + + if limit is negative, we iterate forever. + """ + if limit < 0: + i = itertools.count() + while True: + yield next(i) + else: + for i in compat_range(0, limit): + yield i From dd22aff707386785f0437ff53f6ea4c9527a78a1 Mon Sep 17 00:00:00 2001 From: OpenStack Proposal Bot Date: Fri, 16 Oct 2015 03:07:14 +0000 Subject: [PATCH 31/54] Updated from global requirements Change-Id: I7d060db9690ab017eedf422316eacad5802878ca --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 60cc0839..0bdbf290 100644 --- a/requirements.txt +++ b/requirements.txt @@ -41,7 +41,7 @@ jsonschema!=2.5.0,<3.0.0,>=2.0.0 automaton>=0.5.0 # Apache-2.0 # For common utilities -oslo.utils>=2.0.0 # Apache-2.0 +oslo.utils>=2.4.0 # Apache-2.0 oslo.serialization>=1.4.0 # Apache-2.0 # For lru caches and such From 3dcd5a96f88acb54de002c7f49066efae93cdea9 Mon Sep 17 00:00:00 2001 From: lin-hua-cheng Date: Sat, 17 Oct 2015 00:59:32 -0700 Subject: [PATCH 32/54] Fix order of assertEqual for unit.action_engine First parameter should be the expected value. 
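A minimal sketch of why the ordering matters (hypothetical test, not
part of this change); frameworks such as testtools treat the first
argument to assertEqual as the expected/reference value when rendering
a failure, so reversed arguments produce misleading reports:

    import unittest

    class OrderingExample(unittest.TestCase):
        def test_expected_first(self):
            actual = sorted(['b', 'a'])
            # Expected value first, actual value second, so a failing
            # comparison attributes each value correctly.
            self.assertEqual(['a', 'b'], actual)

    if __name__ == '__main__':
        unittest.main()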
Partial-Bug: #1357117 Change-Id: I2c7345171571a063b649a319a18b1cd712ac6275 --- taskflow/tests/unit/action_engine/test_compile.py | 9 +++++---- taskflow/tests/unit/action_engine/test_scoping.py | 2 +- 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/taskflow/tests/unit/action_engine/test_compile.py b/taskflow/tests/unit/action_engine/test_compile.py index b676e0ea..fcebe891 100644 --- a/taskflow/tests/unit/action_engine/test_compile.py +++ b/taskflow/tests/unit/action_engine/test_compile.py @@ -29,8 +29,8 @@ class PatternCompileTest(test.TestCase): task = test_utils.DummyTask(name='a') compilation = compiler.PatternCompiler(task).compile() g = compilation.execution_graph - self.assertEqual(list(g.nodes()), [task]) - self.assertEqual(list(g.edges()), []) + self.assertEqual([task], list(g.nodes())) + self.assertEqual([], list(g.edges())) def test_retry(self): r = retry.AlwaysRevert('r1') @@ -61,7 +61,8 @@ class PatternCompileTest(test.TestCase): self.assertEqual([flo, a, b, c, inner_flo, d], order) self.assertTrue(g.has_edge(c, inner_flo)) self.assertTrue(g.has_edge(inner_flo, d)) - self.assertEqual(g.get_edge_data(inner_flo, d), {'invariant': True}) + self.assertEqual({'invariant': True}, + g.get_edge_data(inner_flo, d)) self.assertEqual([d], list(g.no_successors_iter())) self.assertEqual([flo], list(g.no_predecessors_iter())) @@ -107,7 +108,7 @@ class PatternCompileTest(test.TestCase): lb = graph.subgraph([a, b]) self.assertFalse(lb.has_edge(b, a)) self.assertTrue(lb.has_edge(a, b)) - self.assertEqual(graph.get_edge_data(a, b), {'invariant': True}) + self.assertEqual({'invariant': True}, graph.get_edge_data(a, b)) ub = graph.subgraph([c, d]) self.assertEqual(0, ub.number_of_edges()) diff --git a/taskflow/tests/unit/action_engine/test_scoping.py b/taskflow/tests/unit/action_engine/test_scoping.py index b4429264..a0e5fbea 100644 --- a/taskflow/tests/unit/action_engine/test_scoping.py +++ b/taskflow/tests/unit/action_engine/test_scoping.py @@ -245,7 +245,7 @@ class MixedPatternScopingTest(test.TestCase): first_subroot = i break self.assertGreater(first_subroot, first_root) - self.assertEqual(scope[0][-2:], ['root.2', 'root.1']) + self.assertEqual(['root.2', 'root.1'], scope[0][-2:]) def test_shadow_graph(self): r = gf.Flow("root") From 164d89b031974ba372d3d8d69ba56783c9bb49e1 Mon Sep 17 00:00:00 2001 From: lin-hua-cheng Date: Sat, 17 Oct 2015 01:12:30 -0700 Subject: [PATCH 33/54] Fix order of assertEqual for unit.jobs First parameter should be the expected value. 
Change-Id: I407cf3fa31fbd049a32b674b254b8f24c3f82c90 Partial-Bug: #1357117 --- taskflow/tests/unit/jobs/test_zk_job.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/taskflow/tests/unit/jobs/test_zk_job.py b/taskflow/tests/unit/jobs/test_zk_job.py index e42bed66..375c8dc4 100644 --- a/taskflow/tests/unit/jobs/test_zk_job.py +++ b/taskflow/tests/unit/jobs/test_zk_job.py @@ -97,7 +97,7 @@ class ZookeeperBoardTestMixin(base.BoardTestMixin): def test_board_iter(self): with base.connect_close(self.board): it = self.board.iterjobs() - self.assertEqual(it.board, self.board) + self.assertEqual(self.board, it.board) self.assertFalse(it.only_unclaimed) self.assertFalse(it.ensure_fresh) @@ -222,8 +222,8 @@ class ZakeJobboardTest(test.TestCase, ZookeeperBoardTestMixin): and not path.endswith(LOCK_POSTFIX)): jobs.append(path) - self.assertEqual(len(trashed), 1) - self.assertEqual(len(jobs), 0) + self.assertEqual(1, len(trashed)) + self.assertEqual(0, len(jobs)) def test_posting_received_raw(self): book = p_utils.temporary_log_book() From b3e3e05a028e6ead4db8e700ab26c1013a148cc8 Mon Sep 17 00:00:00 2001 From: lin-hua-cheng Date: Sat, 17 Oct 2015 01:22:21 -0700 Subject: [PATCH 34/54] Fix order of assertEqual for unit.patterns First parameter should be the expected value. Change-Id: I06140e4be87663136483d1e7be3bc471fbbbf92b Partial-Bug: #1357117 --- .../tests/unit/patterns/test_graph_flow.py | 63 ++++++++-------- .../tests/unit/patterns/test_linear_flow.py | 71 +++++++++---------- .../unit/patterns/test_unordered_flow.py | 48 ++++++------- 3 files changed, 88 insertions(+), 94 deletions(-) diff --git a/taskflow/tests/unit/patterns/test_graph_flow.py b/taskflow/tests/unit/patterns/test_graph_flow.py index 1d876d56..a1433d98 100644 --- a/taskflow/tests/unit/patterns/test_graph_flow.py +++ b/taskflow/tests/unit/patterns/test_graph_flow.py @@ -30,21 +30,21 @@ class GraphFlowTest(test.TestCase): def test_graph_flow_starts_as_empty(self): f = gf.Flow('test') - self.assertEqual(len(f), 0) - self.assertEqual(list(f), []) - self.assertEqual(list(f.iter_links()), []) + self.assertEqual(0, len(f)) + self.assertEqual([], list(f)) + self.assertEqual([], list(f.iter_links())) - self.assertEqual(f.requires, set()) - self.assertEqual(f.provides, set()) + self.assertEqual(set(), f.requires) + self.assertEqual(set(), f.provides) expected = 'taskflow.patterns.graph_flow.Flow: test(len=0)' - self.assertEqual(str(f), expected) + self.assertEqual(expected, str(f)) def test_graph_flow_add_nothing(self): f = gf.Flow('test') result = f.add() self.assertIs(f, result) - self.assertEqual(len(f), 0) + self.assertEqual(0, len(f)) def test_graph_flow_one_task(self): f = gf.Flow('test') @@ -53,45 +53,43 @@ class GraphFlowTest(test.TestCase): self.assertIs(f, result) - self.assertEqual(len(f), 1) - self.assertEqual(list(f), [task]) - self.assertEqual(list(f.iter_links()), []) - self.assertEqual(f.requires, set(['a', 'b'])) - self.assertEqual(f.provides, set(['c', 'd'])) + self.assertEqual(1, len(f)) + self.assertEqual([task], list(f)) + self.assertEqual([], list(f.iter_links())) + self.assertEqual(set(['a', 'b']), f.requires) + self.assertEqual(set(['c', 'd']), f.provides) def test_graph_flow_two_independent_tasks(self): task1 = _task(name='task1') task2 = _task(name='task2') f = gf.Flow('test').add(task1, task2) - self.assertEqual(len(f), 2) + self.assertEqual(2, len(f)) self.assertItemsEqual(f, [task1, task2]) - self.assertEqual(list(f.iter_links()), []) + self.assertEqual([], list(f.iter_links())) def 
test_graph_flow_two_dependent_tasks(self): task1 = _task(name='task1', provides=['a']) task2 = _task(name='task2', requires=['a']) f = gf.Flow('test').add(task1, task2) - self.assertEqual(len(f), 2) + self.assertEqual(2, len(f)) self.assertItemsEqual(f, [task1, task2]) - self.assertEqual(list(f.iter_links()), [ - (task1, task2, {'reasons': set(['a'])}) - ]) + self.assertEqual([(task1, task2, {'reasons': set(['a'])})], + list(f.iter_links())) - self.assertEqual(f.requires, set()) - self.assertEqual(f.provides, set(['a'])) + self.assertEqual(set(), f.requires) + self.assertEqual(set(['a']), f.provides) def test_graph_flow_two_dependent_tasks_two_different_calls(self): task1 = _task(name='task1', provides=['a']) task2 = _task(name='task2', requires=['a']) f = gf.Flow('test').add(task1).add(task2) - self.assertEqual(len(f), 2) + self.assertEqual(2, len(f)) self.assertItemsEqual(f, [task1, task2]) - self.assertEqual(list(f.iter_links()), [ - (task1, task2, {'reasons': set(['a'])}) - ]) + self.assertEqual([(task1, task2, {'reasons': set(['a'])})], + list(f.iter_links())) def test_graph_flow_two_task_same_provide(self): task1 = _task(name='task1', provides=['a', 'b']) @@ -136,10 +134,10 @@ class GraphFlowTest(test.TestCase): ret = retry.AlwaysRevert(requires=['a'], provides=['b']) f = gf.Flow('test', ret) self.assertIs(f.retry, ret) - self.assertEqual(ret.name, 'test_retry') + self.assertEqual('test_retry', ret.name) - self.assertEqual(f.requires, set(['a'])) - self.assertEqual(f.provides, set(['b'])) + self.assertEqual(set(['a']), f.requires) + self.assertEqual(set(['b']), f.provides) def test_graph_flow_ordering(self): task1 = _task('task1', provides=set(['a', 'b'])) @@ -248,7 +246,7 @@ class TargetedGraphFlowTest(test.TestCase): task4 = _task('task4', provides=[], requires=['b']) f.add(task1, task2, task3, task4) f.set_target(task3) - self.assertEqual(len(f), 3) + self.assertEqual(3, len(f)) self.assertItemsEqual(f, [task1, task2, task3]) self.assertNotIn('c', f.provides) @@ -261,7 +259,7 @@ class TargetedGraphFlowTest(test.TestCase): f.add(task1, task2, task3, task4) f.set_target(task3) f.reset_target() - self.assertEqual(len(f), 4) + self.assertEqual(4, len(f)) self.assertItemsEqual(f, [task1, task2, task3, task4]) self.assertIn('c', f.provides) @@ -278,7 +276,7 @@ class TargetedGraphFlowTest(test.TestCase): task1 = _task('task1', provides=['a'], requires=[]) f.add(task1) f.set_target(task1) - self.assertEqual(len(f), 1) + self.assertEqual(1, len(f)) self.assertItemsEqual(f, [task1]) def test_recache_on_add(self): @@ -311,6 +309,5 @@ class TargetedGraphFlowTest(test.TestCase): f.link(task2, task1) self.assertEqual(2, len(f)) - self.assertEqual(list(f.iter_links()), [ - (task2, task1, {'manual': True}) - ]) + self.assertEqual([(task2, task1, {'manual': True})], + list(f.iter_links()), ) diff --git a/taskflow/tests/unit/patterns/test_linear_flow.py b/taskflow/tests/unit/patterns/test_linear_flow.py index 05f4253a..046c8f31 100644 --- a/taskflow/tests/unit/patterns/test_linear_flow.py +++ b/taskflow/tests/unit/patterns/test_linear_flow.py @@ -29,21 +29,21 @@ class LinearFlowTest(test.TestCase): def test_linear_flow_starts_as_empty(self): f = lf.Flow('test') - self.assertEqual(len(f), 0) - self.assertEqual(list(f), []) - self.assertEqual(list(f.iter_links()), []) + self.assertEqual(0, len(f)) + self.assertEqual([], list(f)) + self.assertEqual([], list(f.iter_links())) - self.assertEqual(f.requires, set()) - self.assertEqual(f.provides, set()) + self.assertEqual(set(), f.requires) + 
self.assertEqual(set(), f.provides) expected = 'taskflow.patterns.linear_flow.Flow: test(len=0)' - self.assertEqual(str(f), expected) + self.assertEqual(expected, str(f)) def test_linear_flow_add_nothing(self): f = lf.Flow('test') result = f.add() self.assertIs(f, result) - self.assertEqual(len(f), 0) + self.assertEqual(0, len(f)) def test_linear_flow_one_task(self): f = lf.Flow('test') @@ -52,47 +52,44 @@ class LinearFlowTest(test.TestCase): self.assertIs(f, result) - self.assertEqual(len(f), 1) - self.assertEqual(list(f), [task]) - self.assertEqual(list(f.iter_links()), []) - self.assertEqual(f.requires, set(['a', 'b'])) - self.assertEqual(f.provides, set(['c', 'd'])) + self.assertEqual(1, len(f)) + self.assertEqual([task], list(f)) + self.assertEqual([], list(f.iter_links())) + self.assertEqual(set(['a', 'b']), f.requires) + self.assertEqual(set(['c', 'd']), f.provides) def test_linear_flow_two_independent_tasks(self): task1 = _task(name='task1') task2 = _task(name='task2') f = lf.Flow('test').add(task1, task2) - self.assertEqual(len(f), 2) - self.assertEqual(list(f), [task1, task2]) - self.assertEqual(list(f.iter_links()), [ - (task1, task2, {'invariant': True}) - ]) + self.assertEqual(2, len(f)) + self.assertEqual([task1, task2], list(f)) + self.assertEqual([(task1, task2, {'invariant': True})], + list(f.iter_links())) def test_linear_flow_two_dependent_tasks(self): task1 = _task(name='task1', provides=['a']) task2 = _task(name='task2', requires=['a']) f = lf.Flow('test').add(task1, task2) - self.assertEqual(len(f), 2) - self.assertEqual(list(f), [task1, task2]) - self.assertEqual(list(f.iter_links()), [ - (task1, task2, {'invariant': True}) - ]) + self.assertEqual(2, len(f)) + self.assertEqual([task1, task2], list(f)) + self.assertEqual([(task1, task2, {'invariant': True})], + list(f.iter_links())) - self.assertEqual(f.requires, set()) - self.assertEqual(f.provides, set(['a'])) + self.assertEqual(set(), f.requires) + self.assertEqual(set(['a']), f.provides) def test_linear_flow_two_dependent_tasks_two_different_calls(self): task1 = _task(name='task1', provides=['a']) task2 = _task(name='task2', requires=['a']) f = lf.Flow('test').add(task1).add(task2) - self.assertEqual(len(f), 2) - self.assertEqual(list(f), [task1, task2]) - self.assertEqual(list(f.iter_links()), [ - (task1, task2, {'invariant': True}) - ]) + self.assertEqual(2, len(f)) + self.assertEqual([task1, task2], list(f)) + self.assertEqual([(task1, task2, {'invariant': True})], + list(f.iter_links()), ) def test_linear_flow_three_tasks(self): task1 = _task(name='task1') @@ -100,24 +97,24 @@ class LinearFlowTest(test.TestCase): task3 = _task(name='task3') f = lf.Flow('test').add(task1, task2, task3) - self.assertEqual(len(f), 3) - self.assertEqual(list(f), [task1, task2, task3]) - self.assertEqual(list(f.iter_links()), [ + self.assertEqual(3, len(f)) + self.assertEqual([task1, task2, task3], list(f)) + self.assertEqual([ (task1, task2, {'invariant': True}), (task2, task3, {'invariant': True}) - ]) + ], list(f.iter_links())) expected = 'taskflow.patterns.linear_flow.Flow: test(len=3)' - self.assertEqual(str(f), expected) + self.assertEqual(expected, str(f)) def test_linear_flow_with_retry(self): ret = retry.AlwaysRevert(requires=['a'], provides=['b']) f = lf.Flow('test', ret) self.assertIs(f.retry, ret) - self.assertEqual(ret.name, 'test_retry') + self.assertEqual('test_retry', ret.name) - self.assertEqual(f.requires, set(['a'])) - self.assertEqual(f.provides, set(['b'])) + self.assertEqual(set(['a']), f.requires) + 
self.assertEqual(set(['b']), f.provides) def test_iter_nodes(self): task1 = _task(name='task1') diff --git a/taskflow/tests/unit/patterns/test_unordered_flow.py b/taskflow/tests/unit/patterns/test_unordered_flow.py index eeb3bb2b..d14c4e70 100644 --- a/taskflow/tests/unit/patterns/test_unordered_flow.py +++ b/taskflow/tests/unit/patterns/test_unordered_flow.py @@ -29,21 +29,21 @@ class UnorderedFlowTest(test.TestCase): def test_unordered_flow_starts_as_empty(self): f = uf.Flow('test') - self.assertEqual(len(f), 0) - self.assertEqual(list(f), []) - self.assertEqual(list(f.iter_links()), []) + self.assertEqual(0, len(f)) + self.assertEqual([], list(f)) + self.assertEqual([], list(f.iter_links())) - self.assertEqual(f.requires, set()) - self.assertEqual(f.provides, set()) + self.assertEqual(set(), f.requires) + self.assertEqual(set(), f.provides) expected = 'taskflow.patterns.unordered_flow.Flow: test(len=0)' - self.assertEqual(str(f), expected) + self.assertEqual(expected, str(f)) def test_unordered_flow_add_nothing(self): f = uf.Flow('test') result = f.add() self.assertIs(f, result) - self.assertEqual(len(f), 0) + self.assertEqual(0, len(f)) def test_unordered_flow_one_task(self): f = uf.Flow('test') @@ -52,27 +52,27 @@ class UnorderedFlowTest(test.TestCase): self.assertIs(f, result) - self.assertEqual(len(f), 1) - self.assertEqual(list(f), [task]) - self.assertEqual(list(f.iter_links()), []) - self.assertEqual(f.requires, set(['a', 'b'])) - self.assertEqual(f.provides, set(['c', 'd'])) + self.assertEqual(1, len(f)) + self.assertEqual([task], list(f)) + self.assertEqual([], list(f.iter_links())) + self.assertEqual(set(['a', 'b']), f.requires) + self.assertEqual(set(['c', 'd']), f.provides) def test_unordered_flow_two_tasks(self): task1 = _task(name='task1') task2 = _task(name='task2') f = uf.Flow('test').add(task1, task2) - self.assertEqual(len(f), 2) - self.assertEqual(set(f), set([task1, task2])) - self.assertEqual(list(f.iter_links()), []) + self.assertEqual(2, len(f)) + self.assertEqual(set([task1, task2]), set(f)) + self.assertEqual([], list(f.iter_links())) def test_unordered_flow_two_tasks_two_different_calls(self): task1 = _task(name='task1', provides=['a']) task2 = _task(name='task2', requires=['a']) f = uf.Flow('test').add(task1) f.add(task2) - self.assertEqual(len(f), 2) + self.assertEqual(2, len(f)) self.assertEqual(set(['a']), f.requires) self.assertEqual(set(['a']), f.provides) @@ -80,7 +80,7 @@ class UnorderedFlowTest(test.TestCase): task1 = _task(name='task1', provides=['a']) task2 = _task(name='task2', requires=['a']) f = uf.Flow('test').add(task2).add(task1) - self.assertEqual(len(f), 2) + self.assertEqual(2, len(f)) self.assertEqual(set(['a']), f.requires) self.assertEqual(set(['a']), f.provides) @@ -89,25 +89,25 @@ class UnorderedFlowTest(test.TestCase): task2 = _task(name='task2', provides=['a', 'c']) f = uf.Flow('test') f.add(task2, task1) - self.assertEqual(len(f), 2) + self.assertEqual(2, len(f)) def test_unordered_flow_with_retry(self): ret = retry.AlwaysRevert(requires=['a'], provides=['b']) f = uf.Flow('test', ret) self.assertIs(f.retry, ret) - self.assertEqual(ret.name, 'test_retry') + self.assertEqual('test_retry', ret.name) - self.assertEqual(f.requires, set(['a'])) - self.assertEqual(f.provides, set(['b'])) + self.assertEqual(set(['a']), f.requires) + self.assertEqual(set(['b']), f.provides) def test_unordered_flow_with_retry_fully_satisfies(self): ret = retry.AlwaysRevert(provides=['b', 'a']) f = uf.Flow('test', ret) f.add(_task(name='task1', requires=['a'])) 
self.assertIs(f.retry, ret) - self.assertEqual(ret.name, 'test_retry') - self.assertEqual(f.requires, set([])) - self.assertEqual(f.provides, set(['b', 'a'])) + self.assertEqual('test_retry', ret.name) + self.assertEqual(set([]), f.requires) + self.assertEqual(set(['b', 'a']), f.provides) def test_iter_nodes(self): task1 = _task(name='task1', provides=['a', 'b']) From a6f47a0c5733856d2b9e4785fa43ad72d67c2ef8 Mon Sep 17 00:00:00 2001 From: lin-hua-cheng Date: Sat, 17 Oct 2015 01:29:23 -0700 Subject: [PATCH 35/54] Fix order of assertEqual for unit.persistence First parameter should be the expected value. Change-Id: If255cc908cf013b6c58f5bf666ccaa50f04f7759 Partial-Bug: #1357117 --- taskflow/tests/unit/persistence/base.py | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/taskflow/tests/unit/persistence/base.py b/taskflow/tests/unit/persistence/base.py index f5a20bd0..a52db508 100644 --- a/taskflow/tests/unit/persistence/base.py +++ b/taskflow/tests/unit/persistence/base.py @@ -149,7 +149,7 @@ class PersistenceTestMixin(object): with contextlib.closing(self._get_connection()) as conn: lb2 = conn.get_logbook(lb_id) fd2 = lb2.find(fd.uuid) - self.assertEqual(fd2.meta.get('test'), 43) + self.assertEqual(43, fd2.meta.get('test')) def test_flow_detail_lazy_fetch(self): lb_id = uuidutils.generate_uuid() @@ -212,7 +212,7 @@ class PersistenceTestMixin(object): lb2 = conn.get_logbook(lb_id) fd2 = lb2.find(fd.uuid) td2 = fd2.find(td.uuid) - self.assertEqual(td2.meta.get('test'), 43) + self.assertEqual(43, td2.meta.get('test')) self.assertIsInstance(td2, models.TaskDetail) def test_task_detail_with_failure(self): @@ -240,9 +240,9 @@ class PersistenceTestMixin(object): lb2 = conn.get_logbook(lb_id) fd2 = lb2.find(fd.uuid) td2 = fd2.find(td.uuid) - self.assertEqual(td2.failure.exception_str, 'Woot!') + self.assertEqual('Woot!', td2.failure.exception_str) self.assertIs(td2.failure.check(RuntimeError), RuntimeError) - self.assertEqual(td2.failure.traceback_str, td.failure.traceback_str) + self.assertEqual(td.failure.traceback_str, td2.failure.traceback_str) self.assertIsInstance(td2, models.TaskDetail) def test_logbook_merge_flow_detail(self): @@ -312,9 +312,9 @@ class PersistenceTestMixin(object): fd2 = lb2.find(fd.uuid) td2 = fd2.find(td.uuid) self.assertIsNot(td2, None) - self.assertEqual(td2.name, 'detail-1') - self.assertEqual(td2.version, '4.2') - self.assertEqual(td2.intention, states.EXECUTE) + self.assertEqual('detail-1', td2.name) + self.assertEqual('4.2', td2.version) + self.assertEqual(states.EXECUTE, td2.intention) def test_logbook_delete(self): lb_id = uuidutils.generate_uuid() @@ -350,7 +350,7 @@ class PersistenceTestMixin(object): lb2 = conn.get_logbook(lb_id) fd2 = lb2.find(fd.uuid) rd2 = fd2.find(rd.uuid) - self.assertEqual(rd2.intention, states.REVERT) + self.assertEqual(states.REVERT, rd2.intention) self.assertIsInstance(rd2, models.RetryDetail) def test_retry_detail_save_with_task_failure(self): @@ -405,5 +405,5 @@ class PersistenceTestMixin(object): lb2 = conn.get_logbook(lb_id) fd2 = lb2.find(fd.uuid) rd2 = fd2.find(rd.uuid) - self.assertEqual(rd2.intention, states.REVERT) + self.assertEqual(states.REVERT, rd2.intention) self.assertIsInstance(rd2, models.RetryDetail) From 4052c9816581a078f76cb09880ba3179e031999a Mon Sep 17 00:00:00 2001 From: lin-hua-cheng Date: Sat, 17 Oct 2015 02:03:44 -0700 Subject: [PATCH 36/54] Fix order of assertEqual for unit.worker_based First parameter should be the expected value. 
Change-Id: I209cc4be621c62f60fca3584a21457988129c014 Partial-Bug: #1357117 --- .../tests/unit/worker_based/test_creation.py | 4 +-- .../tests/unit/worker_based/test_endpoint.py | 14 +++++----- .../tests/unit/worker_based/test_executor.py | 28 +++++++++---------- .../tests/unit/worker_based/test_protocol.py | 20 ++++++------- .../tests/unit/worker_based/test_proxy.py | 4 +-- .../tests/unit/worker_based/test_server.py | 26 ++++++++--------- .../tests/unit/worker_based/test_worker.py | 24 ++++++++-------- 7 files changed, 60 insertions(+), 60 deletions(-) diff --git a/taskflow/tests/unit/worker_based/test_creation.py b/taskflow/tests/unit/worker_based/test_creation.py index 887498ce..5b689ded 100644 --- a/taskflow/tests/unit/worker_based/test_creation.py +++ b/taskflow/tests/unit/worker_based/test_creation.py @@ -52,7 +52,7 @@ class TestWorkerBasedActionEngine(test.MockTestCase): transition_timeout=mock.ANY, retry_options=None) ] - self.assertEqual(self.master_mock.mock_calls, expected_calls) + self.assertEqual(expected_calls, self.master_mock.mock_calls) def test_creation_custom(self): executor_mock, executor_inst_mock = self._patch_in_executor() @@ -77,7 +77,7 @@ class TestWorkerBasedActionEngine(test.MockTestCase): transition_timeout=200, retry_options={}) ] - self.assertEqual(self.master_mock.mock_calls, expected_calls) + self.assertEqual(expected_calls, self.master_mock.mock_calls) def test_creation_custom_executor(self): ex = executor.WorkerTaskExecutor('a', 'test-exchange', ['test-topic']) diff --git a/taskflow/tests/unit/worker_based/test_endpoint.py b/taskflow/tests/unit/worker_based/test_endpoint.py index 6f13c8be..92d63128 100644 --- a/taskflow/tests/unit/worker_based/test_endpoint.py +++ b/taskflow/tests/unit/worker_based/test_endpoint.py @@ -44,16 +44,16 @@ class TestEndpoint(test.TestCase): def test_creation(self): task = self.task_ep.generate() - self.assertEqual(self.task_ep.name, self.task_cls_name) + self.assertEqual(self.task_cls_name, self.task_ep.name) self.assertIsInstance(task, self.task_cls) - self.assertEqual(task.name, self.task_cls_name) + self.assertEqual(self.task_cls_name, task.name) def test_creation_with_task_name(self): task_name = 'test' task = self.task_ep.generate(name=task_name) - self.assertEqual(self.task_ep.name, self.task_cls_name) + self.assertEqual(self.task_cls_name, self.task_ep.name) self.assertIsInstance(task, self.task_cls) - self.assertEqual(task.name, task_name) + self.assertEqual(task_name, task.name) def test_creation_task_with_constructor_args(self): # NOTE(skudriashev): Exception is expected here since task @@ -62,7 +62,7 @@ class TestEndpoint(test.TestCase): self.assertRaises(TypeError, endpoint.generate) def test_to_str(self): - self.assertEqual(str(self.task_ep), self.task_cls_name) + self.assertEqual(self.task_cls_name, str(self.task_ep)) def test_execute(self): task = self.task_ep.generate(self.task_cls_name) @@ -70,7 +70,7 @@ class TestEndpoint(test.TestCase): task_uuid=self.task_uuid, arguments=self.task_args, progress_callback=None) - self.assertEqual(result, self.task_result) + self.assertEqual(self.task_result, result) def test_revert(self): task = self.task_ep.generate(self.task_cls_name) @@ -80,4 +80,4 @@ class TestEndpoint(test.TestCase): progress_callback=None, result=self.task_result, failures={}) - self.assertEqual(result, None) + self.assertEqual(None, result) diff --git a/taskflow/tests/unit/worker_based/test_executor.py b/taskflow/tests/unit/worker_based/test_executor.py index 0fad2bd3..d01a294e 100644 --- 
a/taskflow/tests/unit/worker_based/test_executor.py +++ b/taskflow/tests/unit/worker_based/test_executor.py @@ -91,7 +91,7 @@ class TestWorkerTaskExecutor(test.MockTestCase): type_handlers=mock.ANY), mock.call.proxy.dispatcher.type_handlers.update(mock.ANY), ] - self.assertEqual(self.master_mock.mock_calls, master_mock_calls) + self.assertEqual(master_mock_calls, self.master_mock.mock_calls) def test_on_message_response_state_running(self): response = pr.Response(pr.RUNNING) @@ -126,7 +126,7 @@ class TestWorkerTaskExecutor(test.MockTestCase): ex._requests_cache[self.task_uuid] = self.request_inst_mock ex._process_response(response.to_dict(), self.message_mock) - self.assertEqual(len(ex._requests_cache), 0) + self.assertEqual(0, len(ex._requests_cache)) expected_calls = [ mock.call.transition_and_log_error(pr.FAILURE, logger=mock.ANY), mock.call.set_result(result=test_utils.FailureMatcher(a_failure)) @@ -152,7 +152,7 @@ class TestWorkerTaskExecutor(test.MockTestCase): ex._requests_cache[self.task_uuid] = self.request_inst_mock ex._process_response(response.to_dict(), self.message_mock) - self.assertEqual(self.request_inst_mock.mock_calls, []) + self.assertEqual([], self.request_inst_mock.mock_calls) def test_on_message_response_unknown_task(self): self.message_mock.properties['correlation_id'] = '' @@ -161,7 +161,7 @@ class TestWorkerTaskExecutor(test.MockTestCase): ex._requests_cache[self.task_uuid] = self.request_inst_mock ex._process_response(response.to_dict(), self.message_mock) - self.assertEqual(self.request_inst_mock.mock_calls, []) + self.assertEqual([], self.request_inst_mock.mock_calls) def test_on_message_response_no_correlation_id(self): self.message_mock.properties = {'type': pr.RESPONSE} @@ -170,15 +170,15 @@ class TestWorkerTaskExecutor(test.MockTestCase): ex._requests_cache[self.task_uuid] = self.request_inst_mock ex._process_response(response.to_dict(), self.message_mock) - self.assertEqual(self.request_inst_mock.mock_calls, []) + self.assertEqual([], self.request_inst_mock.mock_calls) def test_on_wait_task_not_expired(self): ex = self.executor() ex._requests_cache[self.task_uuid] = self.request_inst_mock - self.assertEqual(len(ex._requests_cache), 1) + self.assertEqual(1, len(ex._requests_cache)) ex._on_wait() - self.assertEqual(len(ex._requests_cache), 1) + self.assertEqual(1, len(ex._requests_cache)) def test_on_wait_task_expired(self): now = timeutils.utcnow() @@ -191,24 +191,24 @@ class TestWorkerTaskExecutor(test.MockTestCase): ex = self.executor() ex._requests_cache[self.task_uuid] = self.request_inst_mock - self.assertEqual(len(ex._requests_cache), 1) + self.assertEqual(1, len(ex._requests_cache)) ex._on_wait() - self.assertEqual(len(ex._requests_cache), 0) + self.assertEqual(0, len(ex._requests_cache)) def test_remove_task_non_existent(self): ex = self.executor() ex._requests_cache[self.task_uuid] = self.request_inst_mock - self.assertEqual(len(ex._requests_cache), 1) + self.assertEqual(1, len(ex._requests_cache)) del ex._requests_cache[self.task_uuid] - self.assertEqual(len(ex._requests_cache), 0) + self.assertEqual(0, len(ex._requests_cache)) # delete non-existent try: del ex._requests_cache[self.task_uuid] except KeyError: pass - self.assertEqual(len(ex._requests_cache), 0) + self.assertEqual(0, len(ex._requests_cache)) def test_execute_task(self): ex = self.executor() @@ -255,7 +255,7 @@ class TestWorkerTaskExecutor(test.MockTestCase): mock.call.Request(self.task, self.task_uuid, 'execute', self.task_args, self.timeout), ] - 
self.assertEqual(self.master_mock.mock_calls, expected_calls) + self.assertEqual(expected_calls, self.master_mock.mock_calls) def test_execute_task_publish_error(self): self.proxy_inst_mock.publish.side_effect = Exception('Woot!') @@ -316,7 +316,7 @@ class TestWorkerTaskExecutor(test.MockTestCase): def test_stop_not_running(self): self.executor().stop() - self.assertEqual(self.master_mock.mock_calls, []) + self.assertEqual([], self.master_mock.mock_calls) def test_stop_not_alive(self): self.proxy_inst_mock.start.side_effect = None diff --git a/taskflow/tests/unit/worker_based/test_protocol.py b/taskflow/tests/unit/worker_based/test_protocol.py index 4647ae8c..71116864 100644 --- a/taskflow/tests/unit/worker_based/test_protocol.py +++ b/taskflow/tests/unit/worker_based/test_protocol.py @@ -133,34 +133,34 @@ class TestProtocol(test.TestCase): def test_creation(self): request = self.request() - self.assertEqual(request.uuid, self.task_uuid) - self.assertEqual(request.task, self.task) + self.assertEqual(self.task_uuid, request.uuid) + self.assertEqual(self.task, request.task) self.assertIsInstance(request.result, futurist.Future) self.assertFalse(request.result.done()) def test_to_dict_default(self): - self.assertEqual(self.request().to_dict(), self.request_to_dict()) + self.assertEqual(self.request_to_dict(), self.request().to_dict()) def test_to_dict_with_result(self): - self.assertEqual(self.request(result=333).to_dict(), - self.request_to_dict(result=('success', 333))) + self.assertEqual(self.request_to_dict(result=('success', 333)), + self.request(result=333).to_dict()) def test_to_dict_with_result_none(self): - self.assertEqual(self.request(result=None).to_dict(), - self.request_to_dict(result=('success', None))) + self.assertEqual(self.request_to_dict(result=('success', None)), + self.request(result=None).to_dict()) def test_to_dict_with_result_failure(self): a_failure = failure.Failure.from_exception(RuntimeError('Woot!')) expected = self.request_to_dict(result=('failure', a_failure.to_dict())) - self.assertEqual(self.request(result=a_failure).to_dict(), expected) + self.assertEqual(expected, self.request(result=a_failure).to_dict()) def test_to_dict_with_failures(self): a_failure = failure.Failure.from_exception(RuntimeError('Woot!')) request = self.request(failures={self.task.name: a_failure}) expected = self.request_to_dict( failures={self.task.name: a_failure.to_dict()}) - self.assertEqual(request.to_dict(), expected) + self.assertEqual(expected, request.to_dict()) @mock.patch('oslo_utils.timeutils.now') def test_pending_not_expired(self, now): @@ -189,4 +189,4 @@ class TestProtocol(test.TestCase): request = self.request() request.set_result(111) result = request.result.result() - self.assertEqual(result, (executor.EXECUTED, 111)) + self.assertEqual((executor.EXECUTED, 111), result) diff --git a/taskflow/tests/unit/worker_based/test_proxy.py b/taskflow/tests/unit/worker_based/test_proxy.py index 7ec91780..68688fa2 100644 --- a/taskflow/tests/unit/worker_based/test_proxy.py +++ b/taskflow/tests/unit/worker_based/test_proxy.py @@ -138,7 +138,7 @@ class TestProxy(test.MockTestCase): durable=False, auto_delete=True) ] - self.assertEqual(self.master_mock.mock_calls, master_mock_calls) + self.assertEqual(master_mock_calls, self.master_mock.mock_calls) def test_creation_custom(self): transport_opts = {'context': 'context'} @@ -151,7 +151,7 @@ class TestProxy(test.MockTestCase): durable=False, auto_delete=True) ] - self.assertEqual(self.master_mock.mock_calls, master_mock_calls) + 
self.assertEqual(master_mock_calls, self.master_mock.mock_calls) def test_publish(self): msg_mock = mock.MagicMock() diff --git a/taskflow/tests/unit/worker_based/test_server.py b/taskflow/tests/unit/worker_based/test_server.py index 9b7815c4..31d57810 100644 --- a/taskflow/tests/unit/worker_based/test_server.py +++ b/taskflow/tests/unit/worker_based/test_server.py @@ -91,7 +91,7 @@ class TestServer(test.MockTestCase): retry_options=mock.ANY) ] self.master_mock.assert_has_calls(master_mock_calls) - self.assertEqual(len(s._endpoints), 3) + self.assertEqual(3, len(s._endpoints)) def test_creation_with_endpoints(self): s = self.server(endpoints=self.endpoints) @@ -104,34 +104,34 @@ class TestServer(test.MockTestCase): retry_options=mock.ANY) ] self.master_mock.assert_has_calls(master_mock_calls) - self.assertEqual(len(s._endpoints), len(self.endpoints)) + self.assertEqual(len(self.endpoints), len(s._endpoints)) def test_parse_request(self): request = self.make_request() bundle = pr.Request.from_dict(request) task_cls, task_name, action, task_args = bundle - self.assertEqual((task_cls, task_name, action, task_args), - (self.task.name, self.task.name, self.task_action, - dict(arguments=self.task_args))) + self.assertEqual((self.task.name, self.task.name, self.task_action, + dict(arguments=self.task_args)), + (task_cls, task_name, action, task_args)) def test_parse_request_with_success_result(self): request = self.make_request(action='revert', result=1) bundle = pr.Request.from_dict(request) task_cls, task_name, action, task_args = bundle - self.assertEqual((task_cls, task_name, action, task_args), - (self.task.name, self.task.name, 'revert', + self.assertEqual((self.task.name, self.task.name, 'revert', dict(arguments=self.task_args, - result=1))) + result=1)), + (task_cls, task_name, action, task_args)) def test_parse_request_with_failure_result(self): a_failure = failure.Failure.from_exception(Exception('test')) request = self.make_request(action='revert', result=a_failure) bundle = pr.Request.from_dict(request) task_cls, task_name, action, task_args = bundle - self.assertEqual((task_cls, task_name, action, task_args), - (self.task.name, self.task.name, 'revert', + self.assertEqual((self.task.name, self.task.name, 'revert', dict(arguments=self.task_args, - result=utils.FailureMatcher(a_failure)))) + result=utils.FailureMatcher(a_failure))), + (task_cls, task_name, action, task_args)) def test_parse_request_with_failures(self): failures = {'0': failure.Failure.from_exception(Exception('test1')), @@ -140,11 +140,11 @@ class TestServer(test.MockTestCase): bundle = pr.Request.from_dict(request) task_cls, task_name, action, task_args = bundle self.assertEqual( - (task_cls, task_name, action, task_args), (self.task.name, self.task.name, 'revert', dict(arguments=self.task_args, failures=dict((i, utils.FailureMatcher(f)) - for i, f in six.iteritems(failures))))) + for i, f in six.iteritems(failures)))), + (task_cls, task_name, action, task_args)) @mock.patch("taskflow.engines.worker_based.server.LOG.critical") def test_reply_publish_failure(self, mocked_exception): diff --git a/taskflow/tests/unit/worker_based/test_worker.py b/taskflow/tests/unit/worker_based/test_worker.py index ab441e46..10521485 100644 --- a/taskflow/tests/unit/worker_based/test_worker.py +++ b/taskflow/tests/unit/worker_based/test_worker.py @@ -99,7 +99,7 @@ class TestWorker(test.MockTestCase): transport=mock.ANY, retry_options=mock.ANY) ] - self.assertEqual(self.master_mock.mock_calls, master_mock_calls) + 
self.assertEqual(master_mock_calls, self.master_mock.mock_calls) def test_run_with_no_tasks(self): self.worker(reset_master_mock=True).run() @@ -107,7 +107,7 @@ class TestWorker(test.MockTestCase): master_mock_calls = [ mock.call.server.start() ] - self.assertEqual(self.master_mock.mock_calls, master_mock_calls) + self.assertEqual(master_mock_calls, self.master_mock.mock_calls) def test_run_with_tasks(self): self.worker(reset_master_mock=True, @@ -116,7 +116,7 @@ class TestWorker(test.MockTestCase): master_mock_calls = [ mock.call.server.start() ] - self.assertEqual(self.master_mock.mock_calls, master_mock_calls) + self.assertEqual(master_mock_calls, self.master_mock.mock_calls) def test_run_with_custom_executor(self): executor_mock = mock.MagicMock(name='executor') @@ -126,7 +126,7 @@ class TestWorker(test.MockTestCase): master_mock_calls = [ mock.call.server.start() ] - self.assertEqual(self.master_mock.mock_calls, master_mock_calls) + self.assertEqual(master_mock_calls, self.master_mock.mock_calls) def test_wait(self): w = self.worker(reset_master_mock=True) @@ -137,7 +137,7 @@ class TestWorker(test.MockTestCase): mock.call.server.start(), mock.call.server.wait() ] - self.assertEqual(self.master_mock.mock_calls, master_mock_calls) + self.assertEqual(master_mock_calls, self.master_mock.mock_calls) def test_stop(self): self.worker(reset_master_mock=True).stop() @@ -146,20 +146,20 @@ class TestWorker(test.MockTestCase): mock.call.server.stop(), mock.call.executor.shutdown() ] - self.assertEqual(self.master_mock.mock_calls, master_mock_calls) + self.assertEqual(master_mock_calls, self.master_mock.mock_calls) def test_derive_endpoints_from_string_tasks(self): endpoints = worker.Worker._derive_endpoints( ['taskflow.tests.utils:DummyTask']) - self.assertEqual(len(endpoints), 1) + self.assertEqual(1, len(endpoints)) self.assertIsInstance(endpoints[0], endpoint.Endpoint) - self.assertEqual(endpoints[0].name, self.task_name) + self.assertEqual(self.task_name, endpoints[0].name) def test_derive_endpoints_from_string_modules(self): endpoints = worker.Worker._derive_endpoints(['taskflow.tests.utils']) - self.assertEqual(len(endpoints), self.endpoint_count) + self.assertEqual(self.endpoint_count, len(endpoints)) def test_derive_endpoints_from_string_non_existent_module(self): tasks = ['non.existent.module'] @@ -179,9 +179,9 @@ class TestWorker(test.MockTestCase): def test_derive_endpoints_from_tasks(self): endpoints = worker.Worker._derive_endpoints([self.task_cls]) - self.assertEqual(len(endpoints), 1) + self.assertEqual(1, len(endpoints)) self.assertIsInstance(endpoints[0], endpoint.Endpoint) - self.assertEqual(endpoints[0].name, self.task_name) + self.assertEqual(self.task_name, endpoints[0].name) def test_derive_endpoints_from_non_task_class(self): self.assertRaises(TypeError, worker.Worker._derive_endpoints, @@ -190,7 +190,7 @@ class TestWorker(test.MockTestCase): def test_derive_endpoints_from_modules(self): endpoints = worker.Worker._derive_endpoints([utils]) - self.assertEqual(len(endpoints), self.endpoint_count) + self.assertEqual(self.endpoint_count, len(endpoints)) def test_derive_endpoints_unexpected_task_type(self): self.assertRaises(TypeError, worker.Worker._derive_endpoints, [111]) From 9a7ed89133c70f909c8636006ef7e65f22d9f1e3 Mon Sep 17 00:00:00 2001 From: lin-hua-cheng Date: Sat, 17 Oct 2015 18:29:39 -0700 Subject: [PATCH 37/54] Fix order of assertEqual for unit.test_* First parameter should be the expected value. 
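A hedged aside, using stock unittest.TestCase and an invented dictionary
rather than anything from the changed tests: the expected-first convention
also keeps rendered diffs readable, because unittest builds its dictionary
diff with difflib, marking lines from the first argument with '-' and lines
from the second with '+', which matches the usual expected-versus-actual
reading:

    import unittest

    class DictOrderExample(unittest.TestCase):

        def test_expected_dict_first(self):
            fetched = {'x': 1, 'y': 4, 'result': 14}  # pretend engine output
            # Expected first: if this ever mismatched, the '-' lines of the
            # diff would show the expectation and the '+' lines would show
            # what actually came back.
            self.assertEqual({'x': 1, 'y': 4, 'result': 14}, fetched)

    if __name__ == '__main__':
        unittest.main()
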
Change-Id: I2941296e38c3245da298cc64aeb5636fbc4b2eb6 Partial-Bug: #1357117 --- taskflow/tests/test_examples.py | 2 +- taskflow/tests/unit/test_arguments_passing.py | 42 +++--- taskflow/tests/unit/test_engine_helpers.py | 16 +- taskflow/tests/unit/test_engines.py | 56 +++---- taskflow/tests/unit/test_failure.py | 48 +++--- taskflow/tests/unit/test_flow_dependencies.py | 134 ++++++++--------- taskflow/tests/unit/test_functor_task.py | 6 +- taskflow/tests/unit/test_progress.py | 6 +- taskflow/tests/unit/test_retries.py | 45 +++--- taskflow/tests/unit/test_storage.py | 138 +++++++++--------- taskflow/tests/unit/test_suspend.py | 20 +-- taskflow/tests/unit/test_utils.py | 14 +- taskflow/tests/unit/test_utils_binary.py | 14 +- 13 files changed, 271 insertions(+), 270 deletions(-) diff --git a/taskflow/tests/test_examples.py b/taskflow/tests/test_examples.py index ce795dd0..62142909 100644 --- a/taskflow/tests/test_examples.py +++ b/taskflow/tests/test_examples.py @@ -132,7 +132,7 @@ class ExamplesTestCase(test.TestCase): # replace them with some constant string output = UUID_RE.sub('', output) expected_output = UUID_RE.sub('', expected_output) - self.assertEqual(output, expected_output) + self.assertEqual(expected_output, output) def make_output_files(): diff --git a/taskflow/tests/unit/test_arguments_passing.py b/taskflow/tests/unit/test_arguments_passing.py index 2cde1dea..8676412d 100644 --- a/taskflow/tests/unit/test_arguments_passing.py +++ b/taskflow/tests/unit/test_arguments_passing.py @@ -30,24 +30,24 @@ class ArgumentsPassingTest(utils.EngineTestBase): flow = utils.TaskOneReturn(name='task1', provides='first_data') engine = self._make_engine(flow) engine.run() - self.assertEqual(engine.storage.fetch_all(), {'first_data': 1}) + self.assertEqual({'first_data': 1}, engine.storage.fetch_all()) def test_save_all_in_one(self): flow = utils.TaskMultiReturn(provides='all_data') engine = self._make_engine(flow) engine.run() - self.assertEqual(engine.storage.fetch_all(), - {'all_data': (1, 3, 5)}) + self.assertEqual({'all_data': (1, 3, 5)}, + engine.storage.fetch_all()) def test_save_several_values(self): flow = utils.TaskMultiReturn(provides=('badger', 'mushroom', 'snake')) engine = self._make_engine(flow) engine.run() - self.assertEqual(engine.storage.fetch_all(), { + self.assertEqual({ 'badger': 1, 'mushroom': 3, 'snake': 5 - }) + }, engine.storage.fetch_all()) def test_save_dict(self): flow = utils.TaskMultiDict(provides=set(['badger', @@ -55,11 +55,11 @@ class ArgumentsPassingTest(utils.EngineTestBase): 'snake'])) engine = self._make_engine(flow) engine.run() - self.assertEqual(engine.storage.fetch_all(), { + self.assertEqual({ 'badger': 0, 'mushroom': 1, 'snake': 2, - }) + }, engine.storage.fetch_all()) def test_bad_save_as_value(self): self.assertRaises(TypeError, @@ -71,10 +71,10 @@ class ArgumentsPassingTest(utils.EngineTestBase): engine = self._make_engine(flow) engine.storage.inject({'x': 1, 'y': 4, 'z': 9, 'a': 17}) engine.run() - self.assertEqual(engine.storage.fetch_all(), { + self.assertEqual({ 'x': 1, 'y': 4, 'z': 9, 'a': 17, 'result': 14, - }) + }, engine.storage.fetch_all()) def test_arguments_missing(self): flow = utils.TaskMultiArg() @@ -88,19 +88,19 @@ class ArgumentsPassingTest(utils.EngineTestBase): engine = self._make_engine(flow) engine.storage.inject({'x': 1, 'y': 4, 'z': 9, 'a': 17}) engine.run() - self.assertEqual(engine.storage.fetch_all(), { + self.assertEqual({ 'x': 1, 'y': 4, 'z': 9, 'a': 17, 'result': 30, - }) + }, engine.storage.fetch_all()) def 
test_argument_injection(self): flow = utils.TaskMultiArgOneReturn(provides='result', inject={'x': 1, 'y': 4, 'z': 9}) engine = self._make_engine(flow) engine.run() - self.assertEqual(engine.storage.fetch_all(), { + self.assertEqual({ 'result': 14, - }) + }, engine.storage.fetch_all()) def test_argument_injection_rebind(self): flow = utils.TaskMultiArgOneReturn(provides='result', @@ -108,9 +108,9 @@ class ArgumentsPassingTest(utils.EngineTestBase): inject={'a': 1, 'b': 4, 'c': 9}) engine = self._make_engine(flow) engine.run() - self.assertEqual(engine.storage.fetch_all(), { + self.assertEqual({ 'result': 14, - }) + }, engine.storage.fetch_all()) def test_argument_injection_required(self): flow = utils.TaskMultiArgOneReturn(provides='result', @@ -119,9 +119,9 @@ class ArgumentsPassingTest(utils.EngineTestBase): 'a': 0, 'b': 0, 'c': 0}) engine = self._make_engine(flow) engine.run() - self.assertEqual(engine.storage.fetch_all(), { + self.assertEqual({ 'result': 14, - }) + }, engine.storage.fetch_all()) def test_all_arguments_mapping(self): flow = utils.TaskMultiArgOneReturn(provides='result', @@ -131,10 +131,10 @@ class ArgumentsPassingTest(utils.EngineTestBase): 'a': 1, 'b': 2, 'c': 3, 'x': 4, 'y': 5, 'z': 6 }) engine.run() - self.assertEqual(engine.storage.fetch_all(), { + self.assertEqual({ 'a': 1, 'b': 2, 'c': 3, 'x': 4, 'y': 5, 'z': 6, 'result': 6, - }) + }, engine.storage.fetch_all()) def test_invalid_argument_name_map(self): flow = utils.TaskMultiArg(rebind={'z': 'b'}) @@ -159,9 +159,9 @@ class ArgumentsPassingTest(utils.EngineTestBase): engine = self._make_engine(flow) engine.storage.inject({'long_arg_name': 1}) engine.run() - self.assertEqual(engine.storage.fetch_all(), { + self.assertEqual({ 'long_arg_name': 1, 'result': 1 - }) + }, engine.storage.fetch_all()) class SerialEngineTest(ArgumentsPassingTest, test.TestCase): diff --git a/taskflow/tests/unit/test_engine_helpers.py b/taskflow/tests/unit/test_engine_helpers.py index 64be04b6..60fa7558 100644 --- a/taskflow/tests/unit/test_engine_helpers.py +++ b/taskflow/tests/unit/test_engine_helpers.py @@ -85,7 +85,7 @@ class FlowFromDetailTestCase(test.TestCase): return_value=lambda: 'RESULT') as mock_import: result = taskflow.engines.flow_from_detail(flow_detail) mock_import.assert_called_once_with(name) - self.assertEqual(result, 'RESULT') + self.assertEqual('RESULT', result) def test_factory_with_arg(self): name = 'some.test.factory' @@ -96,7 +96,7 @@ class FlowFromDetailTestCase(test.TestCase): return_value=lambda x: 'RESULT %s' % x) as mock_import: result = taskflow.engines.flow_from_detail(flow_detail) mock_import.assert_called_once_with(name) - self.assertEqual(result, 'RESULT foo') + self.assertEqual('RESULT foo', result) def my_flow_factory(task_name): @@ -121,12 +121,12 @@ class LoadFromFactoryTestCase(test.TestCase): self.assertIsInstance(engine._flow, test_utils.DummyTask) fd = engine.storage._flowdetail - self.assertEqual(fd.name, 'test1') - self.assertEqual(fd.meta.get('factory'), { + self.assertEqual('test1', fd.name) + self.assertEqual({ 'name': '%s.my_flow_factory' % __name__, 'args': [], 'kwargs': {'task_name': 'test1'}, - }) + }, fd.meta.get('factory')) def test_it_works_by_name(self): factory_name = '%s.my_flow_factory' % __name__ @@ -135,9 +135,9 @@ class LoadFromFactoryTestCase(test.TestCase): self.assertIsInstance(engine._flow, test_utils.DummyTask) fd = engine.storage._flowdetail - self.assertEqual(fd.name, 'test1') - self.assertEqual(fd.meta.get('factory'), { + self.assertEqual('test1', fd.name) + 
self.assertEqual({ 'name': factory_name, 'args': [], 'kwargs': {'task_name': 'test1'}, - }) + }, fd.meta.get('factory')) diff --git a/taskflow/tests/unit/test_engines.py b/taskflow/tests/unit/test_engines.py index c56d7569..ba003aec 100644 --- a/taskflow/tests/unit/test_engines.py +++ b/taskflow/tests/unit/test_engines.py @@ -72,14 +72,14 @@ class EngineTaskTest(object): with utils.CaptureListener(engine, values=values) as capturer: self.assertFailuresRegexp(RuntimeError, '^Woot', engine.run) self.assertEqual(expected, capturer.values) - self.assertEqual(engine.storage.get_flow_state(), states.REVERTED) + self.assertEqual(states.REVERTED, engine.storage.get_flow_state()) with utils.CaptureListener(engine, values=values) as capturer: self.assertFailuresRegexp(RuntimeError, '^Woot', engine.run) now_expected = list(expected) now_expected.extend(['fail.t PENDING', 'fail.f PENDING']) now_expected.extend(expected) self.assertEqual(now_expected, values) - self.assertEqual(engine.storage.get_flow_state(), states.REVERTED) + self.assertEqual(states.REVERTED, engine.storage.get_flow_state()) def test_invalid_flow_raises(self): @@ -123,33 +123,33 @@ class EngineOptionalRequirementsTest(utils.EngineTestBase): engine = self._make_engine(flow_no_inject, store={'a': 3}) engine.run() result = engine.storage.fetch_all() - self.assertEqual(result, {'a': 3, 'result': 15}) + self.assertEqual({'a': 3, 'result': 15}, result) engine = self._make_engine(flow_no_inject, store={'a': 3, 'b': 7}) engine.run() result = engine.storage.fetch_all() - self.assertEqual(result, {'a': 3, 'b': 7, 'result': 21}) + self.assertEqual({'a': 3, 'b': 7, 'result': 21}, result) engine = self._make_engine(flow_inject_a, store={'a': 3}) engine.run() result = engine.storage.fetch_all() - self.assertEqual(result, {'a': 3, 'result': 50}) + self.assertEqual({'a': 3, 'result': 50}, result) engine = self._make_engine(flow_inject_a, store={'a': 3, 'b': 7}) engine.run() result = engine.storage.fetch_all() - self.assertEqual(result, {'a': 3, 'b': 7, 'result': 70}) + self.assertEqual({'a': 3, 'b': 7, 'result': 70}, result) engine = self._make_engine(flow_inject_b, store={'a': 3}) engine.run() result = engine.storage.fetch_all() - self.assertEqual(result, {'a': 3, 'result': 3000}) + self.assertEqual({'a': 3, 'result': 3000}, result) engine = self._make_engine(flow_inject_b, store={'a': 3, 'b': 7}) engine.run() result = engine.storage.fetch_all() - self.assertEqual(result, {'a': 3, 'b': 7, 'result': 3000}) + self.assertEqual({'a': 3, 'b': 7, 'result': 3000}, result) class EngineMultipleResultsTest(utils.EngineTestBase): @@ -160,7 +160,7 @@ class EngineMultipleResultsTest(utils.EngineTestBase): engine = self._make_engine(flow) engine.run() result = engine.storage.fetch('x') - self.assertEqual(result, 1) + self.assertEqual(1, result) def test_many_results_visible_to(self): flow = lf.Flow("flow") @@ -223,7 +223,7 @@ class EngineMultipleResultsTest(utils.EngineTestBase): engine = self._make_engine(flow, store={'x': 0}) engine.run() result = engine.storage.fetch('x') - self.assertEqual(result, 0) + self.assertEqual(0, result) def test_fetch_all_with_a_single_result(self): flow = lf.Flow("flow") @@ -232,7 +232,7 @@ class EngineMultipleResultsTest(utils.EngineTestBase): engine = self._make_engine(flow) engine.run() result = engine.storage.fetch_all() - self.assertEqual(result, {'x': 1}) + self.assertEqual({'x': 1}, result) def test_fetch_all_with_two_results(self): flow = lf.Flow("flow") @@ -241,7 +241,7 @@ class 
EngineMultipleResultsTest(utils.EngineTestBase): engine = self._make_engine(flow, store={'x': 0}) engine.run() result = engine.storage.fetch_all() - self.assertEqual(result, {'x': [0, 1]}) + self.assertEqual({'x': [0, 1]}, result) def test_task_can_update_value(self): flow = lf.Flow("flow") @@ -250,7 +250,7 @@ class EngineMultipleResultsTest(utils.EngineTestBase): engine = self._make_engine(flow, store={'x': 0}) engine.run() result = engine.storage.fetch_all() - self.assertEqual(result, {'x': [0, 1]}) + self.assertEqual({'x': [0, 1]}, result) class EngineLinearFlowTest(utils.EngineTestBase): @@ -315,7 +315,7 @@ class EngineLinearFlowTest(utils.EngineTestBase): expected = ['task1.t RUNNING', 'task1.t SUCCESS(5)', 'task2.t RUNNING', 'task2.t SUCCESS(5)'] self.assertEqual(expected, capturer.values) - self.assertEqual(len(flow), 2) + self.assertEqual(2, len(flow)) def test_sequential_flow_two_tasks_iter(self): flow = lf.Flow('flow-2').add( @@ -329,7 +329,7 @@ class EngineLinearFlowTest(utils.EngineTestBase): expected = ['task1.t RUNNING', 'task1.t SUCCESS(5)', 'task2.t RUNNING', 'task2.t SUCCESS(5)'] self.assertEqual(expected, capturer.values) - self.assertEqual(len(flow), 2) + self.assertEqual(2, len(flow)) def test_sequential_flow_iter_suspend_resume(self): flow = lf.Flow('flow-2').add( @@ -373,7 +373,7 @@ class EngineLinearFlowTest(utils.EngineTestBase): ) engine = self._make_engine(flow) self.assertFailuresRegexp(RuntimeError, '^Woot', engine.run) - self.assertEqual(engine.storage.fetch_all(), {}) + self.assertEqual({}, engine.storage.fetch_all()) def test_revert_provided(self): flow = lf.Flow('revert').add( @@ -382,7 +382,7 @@ class EngineLinearFlowTest(utils.EngineTestBase): ) engine = self._make_engine(flow, store={'value': 0}) self.assertFailuresRegexp(RuntimeError, '^Woot', engine.run) - self.assertEqual(engine.storage.get_revert_result('giver'), 2) + self.assertEqual(2, engine.storage.get_revert_result('giver')) def test_nasty_revert(self): flow = lf.Flow('revert').add( @@ -470,7 +470,7 @@ class EngineParallelFlowTest(utils.EngineTestBase): engine.run() expected = ['task1.t RUNNING', 'task1.t SUCCESS(5)'] self.assertEqual(expected, capturer.values) - self.assertEqual(engine.storage.fetch_all(), {'a': 5}) + self.assertEqual({'a': 5}, engine.storage.fetch_all()) def test_parallel_flow_two_tasks(self): flow = uf.Flow('p-2').add( @@ -533,8 +533,8 @@ class EngineParallelFlowTest(utils.EngineTestBase): engine.run() expected = ['task2.t RUNNING', 'task2.t SUCCESS(5)'] self.assertEqual(expected, capturer.values) - self.assertEqual(engine.storage.fetch_all(), - {'x1': 17, 'x2': 5}) + self.assertEqual({'x1': 17, 'x2': 5}, + engine.storage.fetch_all()) class EngineLinearAndUnorderedExceptionsTest(utils.EngineTestBase): @@ -670,7 +670,7 @@ class EngineGraphFlowTest(utils.EngineTestBase): expected = set(['task2.t SUCCESS(5)', 'task2.t RUNNING', 'task1.t RUNNING', 'task1.t SUCCESS(5)']) self.assertEqual(expected, set(capturer.values)) - self.assertEqual(len(flow), 2) + self.assertEqual(2, len(flow)) def test_graph_flow_two_tasks(self): flow = gf.Flow('g-1-1').add( @@ -728,7 +728,7 @@ class EngineGraphFlowTest(utils.EngineTestBase): 'task1.t REVERTING', 'task1.t REVERTED(None)'] self.assertEqual(expected, capturer.values) - self.assertEqual(engine.storage.get_flow_state(), states.REVERTED) + self.assertEqual(states.REVERTED, engine.storage.get_flow_state()) def test_graph_flow_four_tasks_revert_failure(self): flow = gf.Flow('g-3-nasty').add( @@ -738,7 +738,7 @@ class 
EngineGraphFlowTest(utils.EngineTestBase): engine = self._make_engine(flow) self.assertFailuresRegexp(RuntimeError, '^Gotcha', engine.run) - self.assertEqual(engine.storage.get_flow_state(), states.FAILURE) + self.assertEqual(states.FAILURE, engine.storage.get_flow_state()) def test_graph_flow_with_multireturn_and_multiargs_tasks(self): flow = gf.Flow('g-3-multi').add( @@ -751,14 +751,14 @@ class EngineGraphFlowTest(utils.EngineTestBase): engine = self._make_engine(flow) engine.storage.inject({'x': 30}) engine.run() - self.assertEqual(engine.storage.fetch_all(), { + self.assertEqual({ 'a': 1, 'b': 3, 'c': 5, 'x': 30, 'y': 38, 'z': 42 - }) + }, engine.storage.fetch_all()) def test_task_graph_property(self): flow = gf.Flow('test').add( @@ -1110,11 +1110,11 @@ class EngineCheckingTaskTest(utils.EngineTestBase): return 'RESULT' def revert(m_self, result, flow_failures): - self.assertEqual(result, 'RESULT') - self.assertEqual(list(flow_failures.keys()), ['fail1']) + self.assertEqual('RESULT', result) + self.assertEqual(['fail1'], list(flow_failures.keys())) fail = flow_failures['fail1'] self.assertIsInstance(fail, failure.Failure) - self.assertEqual(str(fail), 'Failure: RuntimeError: Woot!') + self.assertEqual('Failure: RuntimeError: Woot!', str(fail)) flow = lf.Flow('test').add( CheckingTask(), diff --git a/taskflow/tests/unit/test_failure.py b/taskflow/tests/unit/test_failure.py index 9e5b4f79..4ecfa5c5 100644 --- a/taskflow/tests/unit/test_failure.py +++ b/taskflow/tests/unit/test_failure.py @@ -44,15 +44,15 @@ def _make_exc_info(msg): class GeneralFailureObjTestsMixin(object): def test_captures_message(self): - self.assertEqual(self.fail_obj.exception_str, 'Woot!') + self.assertEqual('Woot!', self.fail_obj.exception_str) def test_str(self): - self.assertEqual(str(self.fail_obj), - 'Failure: RuntimeError: Woot!') + self.assertEqual('Failure: RuntimeError: Woot!', + str(self.fail_obj)) def test_exception_types(self): - self.assertEqual(list(self.fail_obj), - test_utils.RUNTIME_ERROR_CLASSES[:-2]) + self.assertEqual(test_utils.RUNTIME_ERROR_CLASSES[:-2], + list(self.fail_obj)) def test_pformat_no_traceback(self): text = self.fail_obj.pformat() @@ -60,11 +60,11 @@ class GeneralFailureObjTestsMixin(object): def test_check_str(self): val = 'Exception' - self.assertEqual(self.fail_obj.check(val), val) + self.assertEqual(val, self.fail_obj.check(val)) def test_check_str_not_there(self): val = 'ValueError' - self.assertEqual(self.fail_obj.check(val), None) + self.assertEqual(None, self.fail_obj.check(val)) def test_check_type(self): self.assertIs(self.fail_obj.check(RuntimeError), RuntimeError) @@ -84,8 +84,8 @@ class CaptureFailureTestCase(test.TestCase, GeneralFailureObjTestsMixin): def test_captures_exc_info(self): exc_info = self.fail_obj.exc_info - self.assertEqual(len(exc_info), 3) - self.assertEqual(exc_info[0], RuntimeError) + self.assertEqual(3, len(exc_info)) + self.assertEqual(RuntimeError, exc_info[0]) self.assertIs(exc_info[1], self.fail_obj.exception) def test_reraises(self): @@ -181,7 +181,7 @@ class FailureObjectTestCase(test.TestCase): exc_type_names=['Exception'], hi='hi there') expected = "Failure.__init__ got unexpected keyword argument(s): hi" - self.assertEqual(str(exc), expected) + self.assertEqual(expected, str(exc)) def test_empty_does_not_reraise(self): self.assertIs(failure.Failure.reraise_if_any([]), None) @@ -198,7 +198,7 @@ class FailureObjectTestCase(test.TestCase): ] exc = self.assertRaises(exceptions.WrappedFailure, failure.Failure.reraise_if_any, fls) - 
self.assertEqual(list(exc), fls) + self.assertEqual(fls, list(exc)) def test_failure_copy(self): fail_obj = _captured_failure('Woot!') @@ -267,14 +267,14 @@ class WrappedFailureTestCase(test.TestCase): def test_simple_iter(self): fail_obj = _captured_failure('Woot!') wf = exceptions.WrappedFailure([fail_obj]) - self.assertEqual(len(wf), 1) - self.assertEqual(list(wf), [fail_obj]) + self.assertEqual(1, len(wf)) + self.assertEqual([fail_obj], list(wf)) def test_simple_check(self): fail_obj = _captured_failure('Woot!') wf = exceptions.WrappedFailure([fail_obj]) - self.assertEqual(wf.check(RuntimeError), RuntimeError) - self.assertEqual(wf.check(ValueError), None) + self.assertEqual(RuntimeError, wf.check(RuntimeError)) + self.assertEqual(None, wf.check(ValueError)) def test_two_failures(self): fls = [ @@ -282,8 +282,8 @@ class WrappedFailureTestCase(test.TestCase): _captured_failure('Oh, not again!') ] wf = exceptions.WrappedFailure(fls) - self.assertEqual(len(wf), 2) - self.assertEqual(list(wf), fls) + self.assertEqual(2, len(wf)) + self.assertEqual(fls, list(wf)) def test_flattening(self): f1 = _captured_failure('Wrap me') @@ -295,7 +295,7 @@ class WrappedFailureTestCase(test.TestCase): fail_obj = failure.Failure() wf = exceptions.WrappedFailure([fail_obj, f3]) - self.assertEqual(list(wf), [f1, f2, f3]) + self.assertEqual([f1, f2, f3], list(wf)) class NonAsciiExceptionsTestCase(test.TestCase): @@ -304,8 +304,8 @@ class NonAsciiExceptionsTestCase(test.TestCase): bad_string = chr(200) excp = ValueError(bad_string) fail = failure.Failure.from_exception(excp) - self.assertEqual(fail.exception_str, - encodeutils.exception_to_unicode(excp)) + self.assertEqual(encodeutils.exception_to_unicode(excp), + fail.exception_str) # This is slightly different on py2 vs py3... due to how # __str__ or __unicode__ is called and what is expected from # both... 
@@ -314,15 +314,15 @@ class NonAsciiExceptionsTestCase(test.TestCase): expected = 'Failure: ValueError: %s' % msg.encode('utf-8') else: expected = u'Failure: ValueError: \xc8' - self.assertEqual(str(fail), expected) + self.assertEqual(expected, str(fail)) def test_exception_non_ascii_unicode(self): hi_ru = u'привет' fail = failure.Failure.from_exception(ValueError(hi_ru)) - self.assertEqual(fail.exception_str, hi_ru) + self.assertEqual(hi_ru, fail.exception_str) self.assertIsInstance(fail.exception_str, six.text_type) - self.assertEqual(six.text_type(fail), - u'Failure: ValueError: %s' % hi_ru) + self.assertEqual(u'Failure: ValueError: %s' % hi_ru, + six.text_type(fail)) def test_wrapped_failure_non_ascii_unicode(self): hi_cn = u'嗨' diff --git a/taskflow/tests/unit/test_flow_dependencies.py b/taskflow/tests/unit/test_flow_dependencies.py index bdb427d9..54f857bf 100644 --- a/taskflow/tests/unit/test_flow_dependencies.py +++ b/taskflow/tests/unit/test_flow_dependencies.py @@ -66,29 +66,29 @@ class FlowDependenciesTest(test.TestCase): flow = lf.Flow('lf').add( utils.TaskOneArg('task1'), utils.TaskMultiArg('task2')) - self.assertEqual(flow.requires, set(['x', 'y', 'z'])) - self.assertEqual(flow.provides, set()) + self.assertEqual(set(['x', 'y', 'z']), flow.requires) + self.assertEqual(set(), flow.provides) def test_linear_flow_requires_rebind_values(self): flow = lf.Flow('lf').add( utils.TaskOneArg('task1', rebind=['q']), utils.TaskMultiArg('task2')) - self.assertEqual(flow.requires, set(['x', 'y', 'z', 'q'])) - self.assertEqual(flow.provides, set()) + self.assertEqual(set(['x', 'y', 'z', 'q']), flow.requires) + self.assertEqual(set(), flow.provides) def test_linear_flow_provides_values(self): flow = lf.Flow('lf').add( utils.TaskOneReturn('task1', provides='x'), utils.TaskMultiReturn('task2', provides=['a', 'b', 'c'])) - self.assertEqual(flow.requires, set()) - self.assertEqual(flow.provides, set(['x', 'a', 'b', 'c'])) + self.assertEqual(set(), flow.requires) + self.assertEqual(set(['x', 'a', 'b', 'c']), flow.provides) def test_linear_flow_provides_required_values(self): flow = lf.Flow('lf').add( utils.TaskOneReturn('task1', provides='x'), utils.TaskOneArg('task2')) - self.assertEqual(flow.requires, set()) - self.assertEqual(flow.provides, set(['x'])) + self.assertEqual(set(), flow.requires) + self.assertEqual(set(['x']), flow.provides) def test_linear_flow_multi_provides_and_requires_values(self): flow = lf.Flow('lf').add( @@ -97,36 +97,36 @@ class FlowDependenciesTest(test.TestCase): provides=['x', 'y', 'q']), utils.TaskMultiArgMultiReturn('task2', provides=['i', 'j', 'k'])) - self.assertEqual(flow.requires, set(['a', 'b', 'c', 'z'])) - self.assertEqual(flow.provides, set(['x', 'y', 'q', 'i', 'j', 'k'])) + self.assertEqual(set(['a', 'b', 'c', 'z']), flow.requires) + self.assertEqual(set(['x', 'y', 'q', 'i', 'j', 'k']), flow.provides) def test_unordered_flow_without_dependencies(self): flow = uf.Flow('uf').add( utils.TaskNoRequiresNoReturns('task1'), utils.TaskNoRequiresNoReturns('task2')) - self.assertEqual(flow.requires, set()) - self.assertEqual(flow.provides, set()) + self.assertEqual(set(), flow.requires) + self.assertEqual(set(), flow.provides) def test_unordered_flow_requires_values(self): flow = uf.Flow('uf').add( utils.TaskOneArg('task1'), utils.TaskMultiArg('task2')) - self.assertEqual(flow.requires, set(['x', 'y', 'z'])) - self.assertEqual(flow.provides, set()) + self.assertEqual(set(['x', 'y', 'z']), flow.requires) + self.assertEqual(set(), flow.provides) def 
test_unordered_flow_requires_rebind_values(self): flow = uf.Flow('uf').add( utils.TaskOneArg('task1', rebind=['q']), utils.TaskMultiArg('task2')) - self.assertEqual(flow.requires, set(['x', 'y', 'z', 'q'])) - self.assertEqual(flow.provides, set()) + self.assertEqual(set(['x', 'y', 'z', 'q']), flow.requires) + self.assertEqual(set(), flow.provides) def test_unordered_flow_provides_values(self): flow = uf.Flow('uf').add( utils.TaskOneReturn('task1', provides='x'), utils.TaskMultiReturn('task2', provides=['a', 'b', 'c'])) - self.assertEqual(flow.requires, set()) - self.assertEqual(flow.provides, set(['x', 'a', 'b', 'c'])) + self.assertEqual(set(), flow.requires) + self.assertEqual(set(['x', 'a', 'b', 'c']), flow.provides) def test_unordered_flow_provides_required_values(self): flow = uf.Flow('uf') @@ -159,8 +159,8 @@ class FlowDependenciesTest(test.TestCase): provides=['d', 'e', 'f']), utils.TaskMultiArgMultiReturn('task2', provides=['i', 'j', 'k'])) - self.assertEqual(flow.requires, set(['a', 'b', 'c', 'x', 'y', 'z'])) - self.assertEqual(flow.provides, set(['d', 'e', 'f', 'i', 'j', 'k'])) + self.assertEqual(set(['a', 'b', 'c', 'x', 'y', 'z']), flow.requires) + self.assertEqual(set(['d', 'e', 'f', 'i', 'j', 'k']), flow.provides) def test_unordered_flow_provides_same_values(self): flow = uf.Flow('uf').add(utils.TaskOneReturn(provides='x')) @@ -184,36 +184,36 @@ class FlowDependenciesTest(test.TestCase): rebind=['b'], provides=['z']), utils.TaskOneArgOneReturn('task4', rebind=['c'], provides=['q']))) - self.assertEqual(flow.requires, set(['a', 'b', 'c'])) - self.assertEqual(flow.provides, set(['x', 'y', 'z', 'q'])) + self.assertEqual(set(['a', 'b', 'c']), flow.requires) + self.assertEqual(set(['x', 'y', 'z', 'q']), flow.provides) def test_graph_flow_requires_values(self): flow = gf.Flow('gf').add( utils.TaskOneArg('task1'), utils.TaskMultiArg('task2')) - self.assertEqual(flow.requires, set(['x', 'y', 'z'])) - self.assertEqual(flow.provides, set()) + self.assertEqual(set(['x', 'y', 'z']), flow.requires) + self.assertEqual(set(), flow.provides) def test_graph_flow_requires_rebind_values(self): flow = gf.Flow('gf').add( utils.TaskOneArg('task1', rebind=['q']), utils.TaskMultiArg('task2')) - self.assertEqual(flow.requires, set(['x', 'y', 'z', 'q'])) - self.assertEqual(flow.provides, set()) + self.assertEqual(set(['x', 'y', 'z', 'q']), flow.requires) + self.assertEqual(set(), flow.provides) def test_graph_flow_provides_values(self): flow = gf.Flow('gf').add( utils.TaskOneReturn('task1', provides='x'), utils.TaskMultiReturn('task2', provides=['a', 'b', 'c'])) - self.assertEqual(flow.requires, set()) - self.assertEqual(flow.provides, set(['x', 'a', 'b', 'c'])) + self.assertEqual(set(), flow.requires) + self.assertEqual(set(['x', 'a', 'b', 'c']), flow.provides) def test_graph_flow_provides_required_values(self): flow = gf.Flow('gf').add( utils.TaskOneReturn('task1', provides='x'), utils.TaskOneArg('task2')) - self.assertEqual(flow.requires, set()) - self.assertEqual(flow.provides, set(['x'])) + self.assertEqual(set(), flow.requires) + self.assertEqual(set(['x']), flow.provides) def test_graph_flow_provides_provided_value_other_call(self): flow = gf.Flow('gf') @@ -228,8 +228,8 @@ class FlowDependenciesTest(test.TestCase): provides=['d', 'e', 'f']), utils.TaskMultiArgMultiReturn('task2', provides=['i', 'j', 'k'])) - self.assertEqual(flow.requires, set(['a', 'b', 'c', 'x', 'y', 'z'])) - self.assertEqual(flow.provides, set(['d', 'e', 'f', 'i', 'j', 'k'])) + self.assertEqual(set(['a', 'b', 'c', 'x', 'y', 
'z']), flow.requires) + self.assertEqual(set(['d', 'e', 'f', 'i', 'j', 'k']), flow.provides) def test_graph_cyclic_dependency(self): flow = gf.Flow('g-3-cyclic') @@ -245,81 +245,81 @@ class FlowDependenciesTest(test.TestCase): def test_task_requires_and_provides_same_values(self): flow = lf.Flow('lf', utils.TaskOneArgOneReturn('rt', requires='x', provides='x')) - self.assertEqual(flow.requires, set('x')) - self.assertEqual(flow.provides, set('x')) + self.assertEqual(set('x'), flow.requires) + self.assertEqual(set('x'), flow.provides) def test_retry_in_linear_flow_no_requirements_no_provides(self): flow = lf.Flow('lf', retry.AlwaysRevert('rt')) - self.assertEqual(flow.requires, set()) - self.assertEqual(flow.provides, set()) + self.assertEqual(set(), flow.requires) + self.assertEqual(set(), flow.provides) def test_retry_in_linear_flow_with_requirements(self): flow = lf.Flow('lf', retry.AlwaysRevert('rt', requires=['x', 'y'])) - self.assertEqual(flow.requires, set(['x', 'y'])) - self.assertEqual(flow.provides, set()) + self.assertEqual(set(['x', 'y']), flow.requires) + self.assertEqual(set(), flow.provides) def test_retry_in_linear_flow_with_provides(self): flow = lf.Flow('lf', retry.AlwaysRevert('rt', provides=['x', 'y'])) - self.assertEqual(flow.requires, set()) - self.assertEqual(flow.provides, set(['x', 'y'])) + self.assertEqual(set(), flow.requires) + self.assertEqual(set(['x', 'y']), flow.provides) def test_retry_in_linear_flow_requires_and_provides(self): flow = lf.Flow('lf', retry.AlwaysRevert('rt', requires=['x', 'y'], provides=['a', 'b'])) - self.assertEqual(flow.requires, set(['x', 'y'])) - self.assertEqual(flow.provides, set(['a', 'b'])) + self.assertEqual(set(['x', 'y']), flow.requires) + self.assertEqual(set(['a', 'b']), flow.provides) def test_retry_requires_and_provides_same_value(self): flow = lf.Flow('lf', retry.AlwaysRevert('rt', requires=['x', 'y'], provides=['x', 'y'])) - self.assertEqual(flow.requires, set(['x', 'y'])) - self.assertEqual(flow.provides, set(['x', 'y'])) + self.assertEqual(set(['x', 'y']), flow.requires) + self.assertEqual(set(['x', 'y']), flow.provides) def test_retry_in_unordered_flow_no_requirements_no_provides(self): flow = uf.Flow('uf', retry.AlwaysRevert('rt')) - self.assertEqual(flow.requires, set()) - self.assertEqual(flow.provides, set()) + self.assertEqual(set(), flow.requires) + self.assertEqual(set(), flow.provides) def test_retry_in_unordered_flow_with_requirements(self): flow = uf.Flow('uf', retry.AlwaysRevert('rt', requires=['x', 'y'])) - self.assertEqual(flow.requires, set(['x', 'y'])) - self.assertEqual(flow.provides, set()) + self.assertEqual(set(['x', 'y']), flow.requires) + self.assertEqual(set(), flow.provides) def test_retry_in_unordered_flow_with_provides(self): flow = uf.Flow('uf', retry.AlwaysRevert('rt', provides=['x', 'y'])) - self.assertEqual(flow.requires, set()) - self.assertEqual(flow.provides, set(['x', 'y'])) + self.assertEqual(set(), flow.requires) + self.assertEqual(set(['x', 'y']), flow.provides) def test_retry_in_unordered_flow_requires_and_provides(self): flow = uf.Flow('uf', retry.AlwaysRevert('rt', requires=['x', 'y'], provides=['a', 'b'])) - self.assertEqual(flow.requires, set(['x', 'y'])) - self.assertEqual(flow.provides, set(['a', 'b'])) + self.assertEqual(set(['x', 'y']), flow.requires) + self.assertEqual(set(['a', 'b']), flow.provides) def test_retry_in_graph_flow_no_requirements_no_provides(self): flow = gf.Flow('gf', retry.AlwaysRevert('rt')) - self.assertEqual(flow.requires, set()) - 
self.assertEqual(flow.provides, set()) + self.assertEqual(set(), flow.requires) + self.assertEqual(set(), flow.provides) def test_retry_in_graph_flow_with_requirements(self): flow = gf.Flow('gf', retry.AlwaysRevert('rt', requires=['x', 'y'])) - self.assertEqual(flow.requires, set(['x', 'y'])) - self.assertEqual(flow.provides, set()) + self.assertEqual(set(['x', 'y']), flow.requires) + self.assertEqual(set(), flow.provides) def test_retry_in_graph_flow_with_provides(self): flow = gf.Flow('gf', retry.AlwaysRevert('rt', provides=['x', 'y'])) - self.assertEqual(flow.requires, set()) - self.assertEqual(flow.provides, set(['x', 'y'])) + self.assertEqual(set(), flow.requires) + self.assertEqual(set(['x', 'y']), flow.provides) def test_retry_in_graph_flow_requires_and_provides(self): flow = gf.Flow('gf', retry.AlwaysRevert('rt', requires=['x', 'y'], provides=['a', 'b'])) - self.assertEqual(flow.requires, set(['x', 'y'])) - self.assertEqual(flow.provides, set(['a', 'b'])) + self.assertEqual(set(['x', 'y']), flow.requires) + self.assertEqual(set(['a', 'b']), flow.provides) def test_linear_flow_retry_and_task(self): flow = lf.Flow('lf', retry.AlwaysRevert('rt', @@ -328,8 +328,8 @@ class FlowDependenciesTest(test.TestCase): flow.add(utils.TaskMultiArgOneReturn(rebind=['a', 'x', 'c'], provides=['z'])) - self.assertEqual(flow.requires, set(['x', 'y', 'c'])) - self.assertEqual(flow.provides, set(['a', 'b', 'z'])) + self.assertEqual(set(['x', 'y', 'c']), flow.requires) + self.assertEqual(set(['a', 'b', 'z']), flow.provides) def test_unordered_flow_retry_and_task(self): flow = uf.Flow('uf', retry.AlwaysRevert('rt', @@ -338,8 +338,8 @@ class FlowDependenciesTest(test.TestCase): flow.add(utils.TaskMultiArgOneReturn(rebind=['a', 'x', 'c'], provides=['z'])) - self.assertEqual(flow.requires, set(['x', 'y', 'c'])) - self.assertEqual(flow.provides, set(['a', 'b', 'z'])) + self.assertEqual(set(['x', 'y', 'c']), flow.requires) + self.assertEqual(set(['a', 'b', 'z']), flow.provides) def test_unordered_flow_retry_and_task_same_requires_provides(self): flow = uf.Flow('uf', retry.AlwaysRevert('rt', requires=['x'])) @@ -365,8 +365,8 @@ class FlowDependenciesTest(test.TestCase): flow.add(utils.TaskMultiArgOneReturn(rebind=['a', 'x', 'c'], provides=['z'])) - self.assertEqual(flow.requires, set(['x', 'y', 'c'])) - self.assertEqual(flow.provides, set(['a', 'b', 'z'])) + self.assertEqual(set(['x', 'y', 'c']), flow.requires) + self.assertEqual(set(['a', 'b', 'z']), flow.provides) def test_graph_flow_retry_and_task_dependency_provide_require(self): flow = gf.Flow('gf', retry.AlwaysRevert('rt', requires=['x'])) @@ -389,4 +389,4 @@ class FlowDependenciesTest(test.TestCase): pass flow = lf.Flow('lf', retry=FullArgsRetry(requires='a')) - self.assertEqual(flow.requires, set(['a'])) + self.assertEqual(set(['a']), flow.requires) diff --git a/taskflow/tests/unit/test_functor_task.py b/taskflow/tests/unit/test_functor_task.py index 2deed5fe..cc45720d 100644 --- a/taskflow/tests/unit/test_functor_task.py +++ b/taskflow/tests/unit/test_functor_task.py @@ -49,11 +49,11 @@ class FunctorTaskTest(test.TestCase): def test_simple(self): task = base.FunctorTask(add) - self.assertEqual(task.name, __name__ + '.add') + self.assertEqual(__name__ + '.add', task.name) def test_other_name(self): task = base.FunctorTask(add, name='my task') - self.assertEqual(task.name, 'my task') + self.assertEqual('my task', task.name) def test_it_runs(self): values = [] @@ -67,7 +67,7 @@ class FunctorTaskTest(test.TestCase): ) self.assertRaisesRegexp(RuntimeError, 
'^Woot', taskflow.engines.run, flow) - self.assertEqual(values, ['one', 'fail', 'revert one']) + self.assertEqual(['one', 'fail', 'revert one'], values) def test_lambda_functors(self): t = base.FunctorTask diff --git a/taskflow/tests/unit/test_progress.py b/taskflow/tests/unit/test_progress.py index 943f93c0..7486c02b 100644 --- a/taskflow/tests/unit/test_progress.py +++ b/taskflow/tests/unit/test_progress.py @@ -117,11 +117,11 @@ class TestProgress(test.TestCase): end_progress = e.storage.get_task_progress("test") self.assertEqual(1.0, end_progress) end_details = e.storage.get_task_progress_details("test") - self.assertEqual(end_details.get('at_progress'), 0.5) - self.assertEqual(end_details.get('details'), { + self.assertEqual(0.5, end_details.get('at_progress')) + self.assertEqual({ 'test': 'test data', 'foo': 'bar' - }) + }, end_details.get('details')) def test_dual_storage_progress(self): fired_events = [] diff --git a/taskflow/tests/unit/test_retries.py b/taskflow/tests/unit/test_retries.py index ce957b3a..6dc01851 100644 --- a/taskflow/tests/unit/test_retries.py +++ b/taskflow/tests/unit/test_retries.py @@ -53,19 +53,19 @@ class RetryTest(utils.EngineTestBase): flow = lf.Flow('flow-1', utils.OneReturnRetry(provides='x')) engine = self._make_engine(flow) engine.run() - self.assertEqual(engine.storage.fetch_all(), {'x': 1}) + self.assertEqual({'x': 1}, engine.storage.fetch_all()) def test_run_empty_unordered_flow(self): flow = uf.Flow('flow-1', utils.OneReturnRetry(provides='x')) engine = self._make_engine(flow) engine.run() - self.assertEqual(engine.storage.fetch_all(), {'x': 1}) + self.assertEqual({'x': 1}, engine.storage.fetch_all()) def test_run_empty_graph_flow(self): flow = gf.Flow('flow-1', utils.OneReturnRetry(provides='x')) engine = self._make_engine(flow) engine.run() - self.assertEqual(engine.storage.fetch_all(), {'x': 1}) + self.assertEqual({'x': 1}, engine.storage.fetch_all()) def test_states_retry_success_linear_flow(self): flow = lf.Flow('flow-1', retry.Times(4, 'r1', provides='x')).add( @@ -76,7 +76,7 @@ class RetryTest(utils.EngineTestBase): engine.storage.inject({'y': 2}) with utils.CaptureListener(engine) as capturer: engine.run() - self.assertEqual(engine.storage.fetch_all(), {'y': 2, 'x': 2}) + self.assertEqual({'y': 2, 'x': 2}, engine.storage.fetch_all()) expected = ['flow-1.f RUNNING', 'r1.r RUNNING', 'r1.r SUCCESS(1)', 'task1.t RUNNING', 'task1.t SUCCESS(5)', @@ -105,7 +105,7 @@ class RetryTest(utils.EngineTestBase): engine.storage.inject({'y': 4}) with utils.CaptureListener(engine) as capturer: self.assertRaisesRegexp(RuntimeError, '^Woot', engine.run) - self.assertEqual(engine.storage.fetch_all(), {'y': 4}) + self.assertEqual({'y': 4}, engine.storage.fetch_all()) expected = ['flow-1.f RUNNING', 'r1.r RUNNING', 'r1.r SUCCESS(1)', @@ -144,7 +144,7 @@ class RetryTest(utils.EngineTestBase): engine.storage.inject({'y': 4}) with utils.CaptureListener(engine) as capturer: self.assertRaisesRegexp(RuntimeError, '^Gotcha', engine.run) - self.assertEqual(engine.storage.fetch_all(), {'y': 4, 'x': 1}) + self.assertEqual({'y': 4, 'x': 1}, engine.storage.fetch_all()) expected = ['flow-1.f RUNNING', 'r1.r RUNNING', 'r1.r SUCCESS(1)', @@ -172,7 +172,7 @@ class RetryTest(utils.EngineTestBase): engine.storage.inject({'y': 2}) with utils.CaptureListener(engine) as capturer: engine.run() - self.assertEqual(engine.storage.fetch_all(), {'y': 2, 'x': 2}) + self.assertEqual({'y': 2, 'x': 2}, engine.storage.fetch_all()) expected = ['flow-1.f RUNNING', 'r1.r RUNNING', 'r1.r 
SUCCESS(None)', @@ -215,8 +215,9 @@ class RetryTest(utils.EngineTestBase): engine.storage.inject({'y': 2}) with utils.CaptureListener(engine) as capturer: engine.run() - self.assertEqual(engine.storage.fetch_all(), {'y': 2, 'x1': 2, - 'x2': 1}) + self.assertEqual({'y': 2, 'x1': 2, + 'x2': 1}, + engine.storage.fetch_all()) expected = ['flow-1.f RUNNING', 'r1.r RUNNING', 'r1.r SUCCESS(1)', @@ -270,7 +271,7 @@ class RetryTest(utils.EngineTestBase): engine.storage.inject({'y': 2}) with utils.CaptureListener(engine) as capturer: engine.run() - self.assertEqual(engine.storage.fetch_all(), {'y': 2, 'x': 2}) + self.assertEqual({'y': 2, 'x': 2}, engine.storage.fetch_all()) expected = ['flow-1.f RUNNING', 'r.r RUNNING', 'r.r SUCCESS(1)', @@ -305,7 +306,7 @@ class RetryTest(utils.EngineTestBase): engine.storage.inject({'y': 2}) with utils.CaptureListener(engine) as capturer: engine.run() - self.assertEqual(engine.storage.fetch_all(), {'y': 2, 'x': 2, 'x2': 1}) + self.assertEqual({'y': 2, 'x': 2, 'x2': 1}, engine.storage.fetch_all()) expected = ['flow-1.f RUNNING', 'r1.r RUNNING', 'r1.r SUCCESS(1)', @@ -350,7 +351,7 @@ class RetryTest(utils.EngineTestBase): engine.run() except Exception: pass - self.assertEqual(engine.storage.fetch_all(), {'y': 2}) + self.assertEqual({'y': 2}, engine.storage.fetch_all()) expected = ['flow-1.f RUNNING', 'task1.t RUNNING', 'task1.t SUCCESS(5)', @@ -379,7 +380,7 @@ class RetryTest(utils.EngineTestBase): engine.run() except Exception: pass - self.assertEqual(engine.storage.fetch_all(), {'y': 2}) + self.assertEqual({'y': 2}, engine.storage.fetch_all()) expected = ['flow-1.f RUNNING', 'task1.t RUNNING', 'task1.t SUCCESS(5)', @@ -406,7 +407,7 @@ class RetryTest(utils.EngineTestBase): engine.storage.inject({'y': 2}) with utils.CaptureListener(engine) as capturer: self.assertRaisesRegexp(RuntimeError, '^Woot', engine.run) - self.assertEqual(engine.storage.fetch_all(), {'y': 2}) + self.assertEqual({'y': 2}, engine.storage.fetch_all()) expected = ['flow-1.f RUNNING', 'r1.r RUNNING', 'r1.r SUCCESS(1)', @@ -471,7 +472,7 @@ class RetryTest(utils.EngineTestBase): 't3.t RUNNING', 't3.t SUCCESS(5)', 'flow-1.f SUCCESS'] - self.assertEqual(capturer.values, expected) + self.assertEqual(expected, capturer.values) def test_resume_flow_that_should_be_retried(self): flow = lf.Flow('flow-1', retry.Times(3, 'r1')).add( @@ -525,7 +526,7 @@ class RetryTest(utils.EngineTestBase): 't1.t RUNNING', 't1.t SUCCESS(5)', 'flow-1.f SUCCESS'] - self.assertEqual(capturer.values, expected) + self.assertEqual(expected, capturer.values) def test_default_times_retry(self): flow = lf.Flow('flow-1', retry.Times(3, 'r1')).add( @@ -1040,7 +1041,7 @@ class RetryTest(utils.EngineTestBase): 'task1.t RUNNING', 'task1.t SUCCESS(5)', 'flow-1.f SUCCESS'] - self.assertEqual(capturer.values, expected) + self.assertEqual(expected, capturer.values) def test_retry_fails(self): r = FailingRetry() @@ -1048,7 +1049,7 @@ class RetryTest(utils.EngineTestBase): engine = self._make_engine(flow) self.assertRaisesRegexp(ValueError, '^OMG', engine.run) self.assertEqual(1, len(engine.storage.get_retry_histories())) - self.assertEqual(len(r.history), 0) + self.assertEqual(0, len(r.history)) self.assertEqual([], list(r.history.outcomes_iter())) self.assertIsNotNone(r.history.failure) self.assertTrue(r.history.caused_by(ValueError, include_retry=True)) @@ -1088,7 +1089,7 @@ class RetryTest(utils.EngineTestBase): 'c.t FAILURE(Failure: RuntimeError: Woot!)', 'b.t REVERTED(None)', ]) - self.assertEqual(engine.storage.get_flow_state(), 
st.REVERTED) + self.assertEqual(st.REVERTED, engine.storage.get_flow_state()) def test_nested_provides_graph_retried_correctly(self): flow = gf.Flow("test").add( @@ -1123,7 +1124,7 @@ class RetryTest(utils.EngineTestBase): 'a.t SUCCESS(5)', 'c.t SUCCESS(5)'] self.assertItemsEqual(expected, capturer.values[4:]) - self.assertEqual(engine.storage.get_flow_state(), st.SUCCESS) + self.assertEqual(st.SUCCESS, engine.storage.get_flow_state()) class RetryParallelExecutionTest(utils.EngineTestBase): @@ -1142,7 +1143,7 @@ class RetryParallelExecutionTest(utils.EngineTestBase): engine.storage.inject({'y': 2}) with utils.CaptureListener(engine, capture_flow=False) as capturer: engine.run() - self.assertEqual(engine.storage.fetch_all(), {'y': 2, 'x': 2}) + self.assertEqual({'y': 2, 'x': 2}, engine.storage.fetch_all()) expected = ['r.r RUNNING', 'r.r SUCCESS(1)', 'task1.t RUNNING', @@ -1178,7 +1179,7 @@ class RetryParallelExecutionTest(utils.EngineTestBase): engine.storage.inject({'y': 2}) with utils.CaptureListener(engine, capture_flow=False) as capturer: engine.run() - self.assertEqual(engine.storage.fetch_all(), {'y': 2, 'x': 2}) + self.assertEqual({'y': 2, 'x': 2}, engine.storage.fetch_all()) expected = ['r.r RUNNING', 'r.r SUCCESS(1)', 'task1.t RUNNING', diff --git a/taskflow/tests/unit/test_storage.py b/taskflow/tests/unit/test_storage.py index 0e1c47fc..53ca2106 100644 --- a/taskflow/tests/unit/test_storage.py +++ b/taskflow/tests/unit/test_storage.py @@ -63,13 +63,13 @@ class StorageTestMixin(object): def test_flow_name_and_uuid(self): flow_detail = models.FlowDetail(name='test-fd', uuid='aaaa') s = self._get_storage(flow_detail) - self.assertEqual(s.flow_name, 'test-fd') - self.assertEqual(s.flow_uuid, 'aaaa') + self.assertEqual('test-fd', s.flow_name) + self.assertEqual('aaaa', s.flow_uuid) def test_ensure_task(self): s = self._get_storage() s.ensure_atom(test_utils.NoopTask('my task')) - self.assertEqual(s.get_atom_state('my task'), states.PENDING) + self.assertEqual(states.PENDING, s.get_atom_state('my task')) self.assertTrue(uuidutils.is_uuid_like(s.get_atom_uuid('my task'))) def test_get_tasks_states(self): @@ -81,7 +81,7 @@ class StorageTestMixin(object): 'my task': (states.SUCCESS, states.EXECUTE), 'my task2': (states.PENDING, states.EXECUTE), } - self.assertEqual(s.get_atoms_states(['my task', 'my task2']), expected) + self.assertEqual(expected, s.get_atoms_states(['my task', 'my task2'])) def test_ensure_task_flow_detail(self): _lb, flow_detail = p_utils.temporary_flow_detail(self.backend) @@ -91,9 +91,9 @@ class StorageTestMixin(object): s.ensure_atom(t) td = flow_detail.find(s.get_atom_uuid('my task')) self.assertIsNotNone(td) - self.assertEqual(td.name, 'my task') - self.assertEqual(td.version, '3.11') - self.assertEqual(td.state, states.PENDING) + self.assertEqual('my task', td.name) + self.assertEqual('3.11', td.version) + self.assertEqual(states.PENDING, td.state) def test_get_without_save(self): _lb, flow_detail = p_utils.temporary_flow_detail(self.backend) @@ -114,26 +114,26 @@ class StorageTestMixin(object): s = self._get_storage() s.ensure_atom(test_utils.NoopTask('my task')) s.save('my task', 5) - self.assertEqual(s.get('my task'), 5) - self.assertEqual(s.fetch_all(), {}) - self.assertEqual(s.get_atom_state('my task'), states.SUCCESS) + self.assertEqual(5, s.get('my task')) + self.assertEqual({}, s.fetch_all()) + self.assertEqual(states.SUCCESS, s.get_atom_state('my task')) def test_save_and_get_cached_failure(self): a_failure = 
failure.Failure.from_exception(RuntimeError('Woot!')) s = self._get_storage() s.ensure_atom(test_utils.NoopTask('my task')) s.save('my task', a_failure, states.FAILURE) - self.assertEqual(s.get('my task'), a_failure) - self.assertEqual(s.get_atom_state('my task'), states.FAILURE) + self.assertEqual(a_failure, s.get('my task')) + self.assertEqual(states.FAILURE, s.get_atom_state('my task')) self.assertTrue(s.has_failures()) - self.assertEqual(s.get_failures(), {'my task': a_failure}) + self.assertEqual({'my task': a_failure}, s.get_failures()) def test_save_and_get_non_cached_failure(self): a_failure = failure.Failure.from_exception(RuntimeError('Woot!')) s = self._get_storage() s.ensure_atom(test_utils.NoopTask('my task')) s.save('my task', a_failure, states.FAILURE) - self.assertEqual(s.get('my task'), a_failure) + self.assertEqual(a_failure, s.get('my task')) s._failures['my task'] = {} self.assertTrue(a_failure.matches(s.get('my task'))) @@ -145,10 +145,10 @@ class StorageTestMixin(object): s.save('my task', a_failure, states.FAILURE) s.set_atom_state('my task', states.REVERTING) - self.assertEqual(s.get('my task'), a_failure) + self.assertEqual(a_failure, s.get('my task')) s.set_atom_state('my task', states.REVERTED) - self.assertEqual(s.get('my task'), a_failure) + self.assertEqual(a_failure, s.get('my task')) def test_get_failure_after_reload(self): a_failure = failure.Failure.from_exception(RuntimeError('Woot!')) @@ -159,7 +159,7 @@ class StorageTestMixin(object): self.assertTrue(s2.has_failures()) self.assertEqual(1, len(s2.get_failures())) self.assertTrue(a_failure.matches(s2.get('my task'))) - self.assertEqual(s2.get_atom_state('my task'), states.FAILURE) + self.assertEqual(states.FAILURE, s2.get_atom_state('my task')) def test_get_non_existing_var(self): s = self._get_storage() @@ -171,21 +171,21 @@ class StorageTestMixin(object): s.ensure_atom(test_utils.NoopTask('my task')) s.save('my task', 5) s.reset('my task') - self.assertEqual(s.get_atom_state('my task'), states.PENDING) + self.assertEqual(states.PENDING, s.get_atom_state('my task')) self.assertRaises(exceptions.NotFound, s.get, 'my task') def test_reset_unknown_task(self): s = self._get_storage() s.ensure_atom(test_utils.NoopTask('my task')) - self.assertEqual(s.reset('my task'), None) + self.assertEqual(None, s.reset('my task')) def test_fetch_by_name(self): s = self._get_storage() name = 'my result' s.ensure_atom(test_utils.NoopTask('my task', provides=name)) s.save('my task', 5) - self.assertEqual(s.fetch(name), 5) - self.assertEqual(s.fetch_all(), {name: 5}) + self.assertEqual(5, s.fetch(name)) + self.assertEqual({name: 5}, s.fetch_all()) def test_fetch_unknown_name(self): s = self._get_storage() @@ -203,108 +203,108 @@ class StorageTestMixin(object): s = self._get_storage() s.ensure_atom(test_utils.NoopTask('my task')) s.update_atom_metadata('my task', None) - self.assertEqual(s.get_task_progress('my task'), 0.0) + self.assertEqual(0.0, s.get_task_progress('my task')) s.set_task_progress('my task', 0.5) - self.assertEqual(s.get_task_progress('my task'), 0.5) + self.assertEqual(0.5, s.get_task_progress('my task')) s.update_atom_metadata('my task', None) - self.assertEqual(s.get_task_progress('my task'), 0.5) + self.assertEqual(0.5, s.get_task_progress('my task')) def test_default_task_progress(self): s = self._get_storage() s.ensure_atom(test_utils.NoopTask('my task')) - self.assertEqual(s.get_task_progress('my task'), 0.0) - self.assertEqual(s.get_task_progress_details('my task'), None) + self.assertEqual(0.0, 
s.get_task_progress('my task')) + self.assertEqual(None, s.get_task_progress_details('my task')) def test_task_progress(self): s = self._get_storage() s.ensure_atom(test_utils.NoopTask('my task')) s.set_task_progress('my task', 0.5, {'test_data': 11}) - self.assertEqual(s.get_task_progress('my task'), 0.5) - self.assertEqual(s.get_task_progress_details('my task'), { + self.assertEqual(0.5, s.get_task_progress('my task')) + self.assertEqual({ 'at_progress': 0.5, 'details': {'test_data': 11} - }) + }, s.get_task_progress_details('my task')) s.set_task_progress('my task', 0.7, {'test_data': 17}) - self.assertEqual(s.get_task_progress('my task'), 0.7) - self.assertEqual(s.get_task_progress_details('my task'), { + self.assertEqual(0.7, s.get_task_progress('my task')) + self.assertEqual({ 'at_progress': 0.7, 'details': {'test_data': 17} - }) + }, s.get_task_progress_details('my task')) s.set_task_progress('my task', 0.99) - self.assertEqual(s.get_task_progress('my task'), 0.99) - self.assertEqual(s.get_task_progress_details('my task'), { + self.assertEqual(0.99, s.get_task_progress('my task')) + self.assertEqual({ 'at_progress': 0.7, 'details': {'test_data': 17} - }) + }, s.get_task_progress_details('my task')) def test_task_progress_erase(self): s = self._get_storage() s.ensure_atom(test_utils.NoopTask('my task')) s.set_task_progress('my task', 0.8, {}) - self.assertEqual(s.get_task_progress('my task'), 0.8) - self.assertEqual(s.get_task_progress_details('my task'), None) + self.assertEqual(0.8, s.get_task_progress('my task')) + self.assertEqual(None, s.get_task_progress_details('my task')) def test_fetch_result_not_ready(self): s = self._get_storage() name = 'my result' s.ensure_atom(test_utils.NoopTask('my task', provides=name)) self.assertRaises(exceptions.NotFound, s.get, name) - self.assertEqual(s.fetch_all(), {}) + self.assertEqual({}, s.fetch_all()) def test_save_multiple_results(self): s = self._get_storage() s.ensure_atom(test_utils.NoopTask('my task', provides=['foo', 'bar'])) s.save('my task', ('spam', 'eggs')) - self.assertEqual(s.fetch_all(), { + self.assertEqual({ 'foo': 'spam', 'bar': 'eggs', - }) + }, s.fetch_all()) def test_mapping_none(self): s = self._get_storage() s.ensure_atom(test_utils.NoopTask('my task')) s.save('my task', 5) - self.assertEqual(s.fetch_all(), {}) + self.assertEqual({}, s.fetch_all()) def test_inject(self): s = self._get_storage() s.inject({'foo': 'bar', 'spam': 'eggs'}) - self.assertEqual(s.fetch('spam'), 'eggs') - self.assertEqual(s.fetch_all(), { + self.assertEqual('eggs', s.fetch('spam')) + self.assertEqual({ 'foo': 'bar', 'spam': 'eggs', - }) + }, s.fetch_all()) def test_inject_twice(self): s = self._get_storage() s.inject({'foo': 'bar'}) - self.assertEqual(s.fetch_all(), {'foo': 'bar'}) + self.assertEqual({'foo': 'bar'}, s.fetch_all()) s.inject({'spam': 'eggs'}) - self.assertEqual(s.fetch_all(), { + self.assertEqual({ 'foo': 'bar', 'spam': 'eggs', - }) + }, s.fetch_all()) def test_inject_resumed(self): s = self._get_storage() s.inject({'foo': 'bar', 'spam': 'eggs'}) # verify it's there - self.assertEqual(s.fetch_all(), { + self.assertEqual({ 'foo': 'bar', 'spam': 'eggs', - }) + }, s.fetch_all()) # imagine we are resuming, so we need to make new # storage from same flow details s2 = self._get_storage(s._flowdetail) # injected data should still be there: - self.assertEqual(s2.fetch_all(), { + self.assertEqual({ 'foo': 'bar', 'spam': 'eggs', - }) + }, s2.fetch_all()) def test_many_thread_ensure_same_task(self): s = self._get_storage() @@ -341,8 +341,8 
@@ class StorageTestMixin(object): def test_fetch_mapped_args(self): s = self._get_storage() s.inject({'foo': 'bar', 'spam': 'eggs'}) - self.assertEqual(s.fetch_mapped_args({'viking': 'spam'}), - {'viking': 'eggs'}) + self.assertEqual({'viking': 'eggs'}, + s.fetch_mapped_args({'viking': 'spam'})) def test_fetch_not_found_args(self): s = self._get_storage() @@ -353,23 +353,23 @@ class StorageTestMixin(object): def test_fetch_optional_args_found(self): s = self._get_storage() s.inject({'foo': 'bar', 'spam': 'eggs'}) - self.assertEqual(s.fetch_mapped_args({'viking': 'spam'}, - optional_args=set(['viking'])), - {'viking': 'eggs'}) + self.assertEqual({'viking': 'eggs'}, + s.fetch_mapped_args({'viking': 'spam'}, + optional_args=set(['viking']))) def test_fetch_optional_args_not_found(self): s = self._get_storage() s.inject({'foo': 'bar', 'spam': 'eggs'}) - self.assertEqual(s.fetch_mapped_args({'viking': 'helmet'}, - optional_args=set(['viking'])), - {}) + self.assertEqual({}, + s.fetch_mapped_args({'viking': 'helmet'}, + optional_args=set(['viking']))) def test_set_and_get_task_state(self): s = self._get_storage() state = states.PENDING s.ensure_atom(test_utils.NoopTask('my task')) s.set_atom_state('my task', state) - self.assertEqual(s.get_atom_state('my task'), state) + self.assertEqual(state, s.get_atom_state('my task')) def test_get_state_of_unknown_task(self): s = self._get_storage() @@ -418,7 +418,7 @@ class StorageTestMixin(object): def test_initial_flow_state(self): s = self._get_storage() - self.assertEqual(s.get_flow_state(), states.PENDING) + self.assertEqual(states.PENDING, s.get_flow_state()) def test_get_flow_state(self): _lb, flow_detail = p_utils.temporary_flow_detail(backend=self.backend) @@ -426,12 +426,12 @@ class StorageTestMixin(object): with contextlib.closing(self.backend.get_connection()) as conn: flow_detail.update(conn.update_flow_details(flow_detail)) s = self._get_storage(flow_detail) - self.assertEqual(s.get_flow_state(), states.FAILURE) + self.assertEqual(states.FAILURE, s.get_flow_state()) def test_set_and_get_flow_state(self): s = self._get_storage() s.set_flow_state(states.SUCCESS) - self.assertEqual(s.get_flow_state(), states.SUCCESS) + self.assertEqual(states.SUCCESS, s.get_flow_state()) def test_result_is_checked(self): s = self._get_storage() @@ -451,7 +451,7 @@ class StorageTestMixin(object): s = self._get_storage() s.ensure_atom(test_utils.NoopTask('my task', provides=['a', 'b'])) s.save('my task', ['result']) - self.assertEqual(s.fetch('a'), 'result') + self.assertEqual('result', s.fetch('a')) self.assertRaisesRegexp(exceptions.NotFound, '^Unable to find result', s.fetch, 'b') @@ -495,9 +495,9 @@ class StorageTestMixin(object): s.save('my retry', 'b') s.cleanup_retry_history('my retry', states.REVERTED) history = s.get_retry_history('my retry') - self.assertEqual(list(history), []) + self.assertEqual([], list(history)) self.assertEqual(0, len(history)) - self.assertEqual(s.fetch_all(), {}) + self.assertEqual({}, s.fetch_all()) def test_cached_retry_failure(self): a_failure = failure.Failure.from_exception(RuntimeError('Woot!')) @@ -511,7 +511,7 @@ class StorageTestMixin(object): self.assertIsNotNone(history.failure) self.assertEqual(1, len(history)) self.assertTrue(s.has_failures()) - self.assertEqual(s.get_failures(), {'my retry': a_failure}) + self.assertEqual({'my retry': a_failure}, s.get_failures()) def test_logbook_get_unknown_atom_type(self): self.assertRaisesRegexp(TypeError, @@ -523,14 +523,14 @@ class StorageTestMixin(object): 
s.ensure_atom(test_utils.NoopTask('my task')) s.set_atom_intention('my task', states.REVERT) intention = s.get_atom_intention('my task') - self.assertEqual(intention, states.REVERT) + self.assertEqual(states.REVERT, intention) def test_save_retry_intention(self): s = self._get_storage() s.ensure_atom(test_utils.NoopTask('my retry')) s.set_atom_intention('my retry', states.RETRY) intention = s.get_atom_intention('my retry') - self.assertEqual(intention, states.RETRY) + self.assertEqual(states.RETRY, intention) def test_inject_persistent_missing(self): t = test_utils.ProgressingTask('my retry', requires=['x']) diff --git a/taskflow/tests/unit/test_suspend.py b/taskflow/tests/unit/test_suspend.py index 07c96923..6559bfc2 100644 --- a/taskflow/tests/unit/test_suspend.py +++ b/taskflow/tests/unit/test_suspend.py @@ -49,13 +49,13 @@ class SuspendTest(utils.EngineTestBase): with SuspendingListener(engine, task_name='b', task_state=states.SUCCESS) as capturer: engine.run() - self.assertEqual(engine.storage.get_flow_state(), states.SUCCESS) + self.assertEqual(states.SUCCESS, engine.storage.get_flow_state()) expected = ['a.t RUNNING', 'a.t SUCCESS(5)'] self.assertEqual(expected, capturer.values) with SuspendingListener(engine, task_name='b', task_state=states.SUCCESS) as capturer: engine.run() - self.assertEqual(engine.storage.get_flow_state(), states.SUCCESS) + self.assertEqual(states.SUCCESS, engine.storage.get_flow_state()) expected = [] self.assertEqual(expected, capturer.values) @@ -69,13 +69,13 @@ class SuspendTest(utils.EngineTestBase): with SuspendingListener(engine, task_name='b', task_state=states.SUCCESS) as capturer: engine.run() - self.assertEqual(engine.storage.get_flow_state(), states.SUSPENDED) + self.assertEqual(states.SUSPENDED, engine.storage.get_flow_state()) expected = ['a.t RUNNING', 'a.t SUCCESS(5)', 'b.t RUNNING', 'b.t SUCCESS(5)'] self.assertEqual(expected, capturer.values) with utils.CaptureListener(engine, capture_flow=False) as capturer: engine.run() - self.assertEqual(engine.storage.get_flow_state(), states.SUCCESS) + self.assertEqual(states.SUCCESS, engine.storage.get_flow_state()) expected = ['c.t RUNNING', 'c.t SUCCESS(5)'] self.assertEqual(expected, capturer.values) @@ -89,7 +89,7 @@ class SuspendTest(utils.EngineTestBase): with SuspendingListener(engine, task_name='b', task_state=states.REVERTED) as capturer: engine.run() - self.assertEqual(engine.storage.get_flow_state(), states.SUSPENDED) + self.assertEqual(states.SUSPENDED, engine.storage.get_flow_state()) expected = ['a.t RUNNING', 'a.t SUCCESS(5)', 'b.t RUNNING', @@ -103,7 +103,7 @@ class SuspendTest(utils.EngineTestBase): self.assertEqual(expected, capturer.values) with utils.CaptureListener(engine, capture_flow=False) as capturer: self.assertRaisesRegexp(RuntimeError, '^Woot', engine.run) - self.assertEqual(engine.storage.get_flow_state(), states.REVERTED) + self.assertEqual(states.REVERTED, engine.storage.get_flow_state()) expected = ['a.t REVERTING', 'a.t REVERTED(None)'] self.assertEqual(expected, capturer.values) @@ -133,7 +133,7 @@ class SuspendTest(utils.EngineTestBase): engine2 = self._make_engine(flow, engine.storage._flowdetail) with utils.CaptureListener(engine2, capture_flow=False) as capturer2: self.assertRaisesRegexp(RuntimeError, '^Woot', engine2.run) - self.assertEqual(engine2.storage.get_flow_state(), states.REVERTED) + self.assertEqual(states.REVERTED, engine2.storage.get_flow_state()) expected = ['a.t REVERTING', 'a.t REVERTED(None)'] self.assertEqual(expected, capturer2.values) @@ -170,9 
+170,9 @@ class SuspendTest(utils.EngineTestBase): engine2 = self._make_engine(flow2, engine.storage._flowdetail) with utils.CaptureListener(engine2, capture_flow=False) as capturer2: self.assertRaisesRegexp(RuntimeError, '^Woot', engine2.run) - self.assertEqual(engine2.storage.get_flow_state(), states.REVERTED) + self.assertEqual(states.REVERTED, engine2.storage.get_flow_state()) expected = ['a.t REVERTING', 'a.t REVERTED(None)'] - self.assertEqual(capturer2.values, expected) + self.assertEqual(expected, capturer2.values) def test_storage_is_rechecked(self): flow = lf.Flow('linear').add( @@ -184,7 +184,7 @@ class SuspendTest(utils.EngineTestBase): with SuspendingListener(engine, task_name='b', task_state=states.SUCCESS): engine.run() - self.assertEqual(engine.storage.get_flow_state(), states.SUSPENDED) + self.assertEqual(states.SUSPENDED, engine.storage.get_flow_state()) # uninject everything: engine.storage.save(engine.storage.injector_name, {}, states.SUCCESS) diff --git a/taskflow/tests/unit/test_utils.py b/taskflow/tests/unit/test_utils.py index c136cb7c..64bb2330 100644 --- a/taskflow/tests/unit/test_utils.py +++ b/taskflow/tests/unit/test_utils.py @@ -179,19 +179,19 @@ class TestSequenceMinus(test.TestCase): def test_simple_case(self): result = misc.sequence_minus([1, 2, 3, 4], [2, 3]) - self.assertEqual(result, [1, 4]) + self.assertEqual([1, 4], result) def test_subtrahend_has_extra_elements(self): result = misc.sequence_minus([1, 2, 3, 4], [2, 3, 5, 7, 13]) - self.assertEqual(result, [1, 4]) + self.assertEqual([1, 4], result) def test_some_items_are_equal(self): result = misc.sequence_minus([1, 1, 1, 1], [1, 1, 3]) - self.assertEqual(result, [1, 1]) + self.assertEqual([1, 1], result) def test_equal_items_not_continious(self): result = misc.sequence_minus([1, 2, 3, 1], [1, 3]) - self.assertEqual(result, [2, 1]) + self.assertEqual([2, 1], result) class TestReversedEnumerate(testscenarios.TestWithScenarios, test.TestCase): @@ -301,11 +301,11 @@ class TestMergeUri(test.TestCase): class TestClamping(test.TestCase): def test_simple_clamp(self): result = misc.clamp(1.0, 2.0, 3.0) - self.assertEqual(result, 2.0) + self.assertEqual(2.0, result) result = misc.clamp(4.0, 2.0, 3.0) - self.assertEqual(result, 3.0) + self.assertEqual(3.0, result) result = misc.clamp(3.0, 4.0, 4.0) - self.assertEqual(result, 4.0) + self.assertEqual(4.0, result) def test_invalid_clamp(self): self.assertRaises(ValueError, misc.clamp, 0.0, 2.0, 1.0) diff --git a/taskflow/tests/unit/test_utils_binary.py b/taskflow/tests/unit/test_utils_binary.py index 92c496a2..773f3893 100644 --- a/taskflow/tests/unit/test_utils_binary.py +++ b/taskflow/tests/unit/test_utils_binary.py @@ -32,7 +32,7 @@ class BinaryEncodeTest(test.TestCase): def _check(self, data, expected_result): result = misc.binary_encode(data) self.assertIsInstance(result, six.binary_type) - self.assertEqual(result, expected_result) + self.assertEqual(expected_result, result) def test_simple_binary(self): data = _bytes('hello') @@ -51,7 +51,7 @@ class BinaryEncodeTest(test.TestCase): def test_unicode_other_encoding(self): result = misc.binary_encode(u'mañana', 'latin-1') self.assertIsInstance(result, six.binary_type) - self.assertEqual(result, u'mañana'.encode('latin-1')) + self.assertEqual(u'mañana'.encode('latin-1'), result) class BinaryDecodeTest(test.TestCase): @@ -59,7 +59,7 @@ class BinaryDecodeTest(test.TestCase): def _check(self, data, expected_result): result = misc.binary_decode(data) self.assertIsInstance(result, six.text_type) - 
self.assertEqual(result, expected_result)
+        self.assertEqual(expected_result, result)
 
     def test_simple_text(self):
         data = u'hello'
@@ -79,18 +79,18 @@ class BinaryDecodeTest(test.TestCase):
         data = u'mañana'.encode('latin-1')
         result = misc.binary_decode(data, 'latin-1')
         self.assertIsInstance(result, six.text_type)
-        self.assertEqual(result, u'mañana')
+        self.assertEqual(u'mañana', result)
 
 
 class DecodeJsonTest(test.TestCase):
 
     def test_it_works(self):
-        self.assertEqual(misc.decode_json(_bytes('{"foo": 1}')),
-                         {"foo": 1})
+        self.assertEqual({"foo": 1},
+                         misc.decode_json(_bytes('{"foo": 1}')))
 
     def test_it_works_with_unicode(self):
         data = _bytes('{"foo": "фуу"}')
-        self.assertEqual(misc.decode_json(data), {"foo": u'фуу'})
+        self.assertEqual({"foo": u'фуу'}, misc.decode_json(data))
 
     def test_handles_invalid_unicode(self):
         self.assertRaises(ValueError, misc.decode_json,

From 70e58977c944d23f2133cd1b3d6f3970f101e7ab Mon Sep 17 00:00:00 2001
From: Joshua Harlow
Date: Fri, 7 Aug 2015 14:46:24 -0700
Subject: [PATCH 38/54] Add atom priority ability

In situations where many atoms can execute at the same time it is
sometimes useful to denote that certain atoms should execute/revert
before other atoms (or that at least an attempt should be made to do
so) instead of the order being nearly arbitrary.

This adds a priority class attribute to the atom class (which can be
overridden or changed as needed) which is then used in the runtime
state machine to sort on, so that atoms with higher priority get
submitted (and therefore executed/reverted) first.

Closes-Bug: #1507755

Change-Id: I3dcc705959085cba167883c85278e394b5cb1d2b
---
 taskflow/atom.py                          | 27 +++++++++++++++++++++++
 taskflow/engines/action_engine/builder.py |  7 +++++-
 taskflow/tests/unit/test_engines.py       | 26 ++++++++++++++++++++++
 3 files changed, 59 insertions(+), 1 deletion(-)

diff --git a/taskflow/atom.py b/taskflow/atom.py
index fba04a97..a5ff4bc1 100644
--- a/taskflow/atom.py
+++ b/taskflow/atom.py
@@ -194,6 +194,33 @@ class Atom(object):
     this atom produces.
     """
 
+    priority = 0
+    """A numeric priority that instances of this class will have when running,
+    used when there are multiple *parallel* candidates to execute and/or
+    revert. In this situation the candidate list will be stably sorted
+    based on this priority attribute, which will result in atoms with higher
+    priorities executing (or reverting) before atoms with lower
+    priorities (higher being defined as a bigger number than that of an
+    atom with a lower priority number). By default all atoms have the same
+    priority (zero).
+
+    For example, when the following is combined into a
+    graph (where each node in the denoted graph is some task)::
+
+        a -> b
+        b -> c
+        b -> e
+        b -> f
+
+    When ``b`` finishes there will then be three candidates that can run
+    ``(c, e, f)`` and they may run in any order. What this priority does is
+    sort those three by their priority before submitting them to be
+    worked on (so that instead of a random run order they will now
+    run in their sorted order). This is also true when reverting (in that the
+    sort order of the potential nodes will be used to determine the
+    submission order).
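+
+    A rough sketch of the resulting ordering (mirroring the stable sort
+    the engine builder applies, though not its exact code) is::
+
+        candidates = [c, e, f]
+        ordered = sorted(candidates,
+                         key=lambda atom: atom.priority, reverse=True)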
+    """
+
     def __init__(self, name=None, provides=None, inject=None):
         self.name = name
         self.version = (1, 0)
diff --git a/taskflow/engines/action_engine/builder.py b/taskflow/engines/action_engine/builder.py
index 9013cd8a..4ef658a3 100644
--- a/taskflow/engines/action_engine/builder.py
+++ b/taskflow/engines/action_engine/builder.py
@@ -108,9 +108,14 @@ class MachineBuilder(object):
             timeout = WAITING_TIMEOUT
 
         # Cache some local functions/methods...
-        do_schedule = self._scheduler.schedule
         do_complete = self._completer.complete
 
+        def do_schedule(next_nodes):
+            return self._scheduler.schedule(
+                sorted(next_nodes,
+                       key=lambda node: getattr(node, 'priority', 0),
+                       reverse=True))
+
         def is_runnable():
             # Checks if the storage says the flow is still runnable...
             return self._storage.get_flow_state() == st.RUNNING
diff --git a/taskflow/tests/unit/test_engines.py b/taskflow/tests/unit/test_engines.py
index c56d7569..5f5b2ad6 100644
--- a/taskflow/tests/unit/test_engines.py
+++ b/taskflow/tests/unit/test_engines.py
@@ -461,6 +461,32 @@ class EngineParallelFlowTest(utils.EngineTestBase):
         engine = self._make_engine(flow)
         self.assertRaises(exc.Empty, engine.run)
 
+    def test_parallel_flow_with_priority(self):
+        flow = uf.Flow('p-1')
+        for i in range(0, 10):
+            t = utils.ProgressingTask(name='task%s' % i)
+            t.priority = i
+            flow.add(t)
+        engine = self._make_engine(flow)
+        with utils.CaptureListener(engine, capture_flow=False) as capturer:
+            engine.run()
+        expected = [
+            'task9.t RUNNING',
+            'task8.t RUNNING',
+            'task7.t RUNNING',
+            'task6.t RUNNING',
+            'task5.t RUNNING',
+            'task4.t RUNNING',
+            'task3.t RUNNING',
+            'task2.t RUNNING',
+            'task1.t RUNNING',
+            'task0.t RUNNING',
+        ]
+        # NOTE(harlowja): chop off the gathering of SUCCESS states, since we
+        # don't care if that's in order...
+        gotten = capturer.values[0:10]
+        self.assertEqual(expected, gotten)
+
     def test_parallel_flow_one_task(self):
         flow = uf.Flow('p-1').add(
             utils.ProgressingTask(name='task1', provides='a')

From 4388c24b0b1f33fb3f4847a8df0341c3ff2cd5bf Mon Sep 17 00:00:00 2001
From: tonytan4ever
Date: Mon, 12 Oct 2015 17:15:20 -0400
Subject: [PATCH 39/54] Register conductor information on jobboard

Change-Id: I3bf935280a6e8b265045b09fde43d0ec7dc56f07
---
 doc/source/types.rst                          |  5 +++
 taskflow/conductors/backends/impl_blocking.py | 26 ++++++++++++++
 taskflow/jobs/backends/impl_redis.py          |  4 +++
 taskflow/jobs/backends/impl_zookeeper.py      | 28 +++++++++++++++
 taskflow/jobs/base.py                         |  4 +++
 taskflow/tests/unit/jobs/test_zk_job.py       | 34 +++++++++++++++++++
 taskflow/types/entity.py                      | 33 ++++++++++++++++++
 7 files changed, 134 insertions(+)
 create mode 100644 taskflow/types/entity.py

diff --git a/doc/source/types.rst b/doc/source/types.rst
index 254ed28a..b27a3fb4 100644
--- a/doc/source/types.rst
+++ b/doc/source/types.rst
@@ -17,6 +17,11 @@ Cache
 
 .. automodule:: taskflow.types.cache
 
+Entity
+======
+
+.. automodule:: taskflow.types.entity
+
 Failure
 =======
 
diff --git a/taskflow/conductors/backends/impl_blocking.py b/taskflow/conductors/backends/impl_blocking.py
index d8f2b4c3..3fd5cb92 100644
--- a/taskflow/conductors/backends/impl_blocking.py
+++ b/taskflow/conductors/backends/impl_blocking.py
@@ -11,6 +11,10 @@
 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 # License for the specific language governing permissions and limitations
 # under the License.
+
+import os
+import socket
+
 import threading
 
 try:
@@ -25,6 +29,7 @@ from taskflow.conductors import base
 from taskflow import exceptions as excp
 from taskflow.listeners import logging as logging_listener
 from taskflow import logging
+from taskflow.types import entity
 from taskflow.types import timing as tt
 from taskflow.utils import async_utils
 from taskflow.utils import iter_utils
@@ -159,9 +164,30 @@ class BlockingConductor(base.Conductor):
             LOG.info("Job completed successfully: %s", job)
         return async_utils.make_completed_future(consume)
 
+    def _get_conductor_info(self):
+        """For right now we just register the conductor name as:
+
+        ``<conductor_name>@<hostname>:<pid>``
+
+        """
+        hostname = socket.gethostname()
+        pid = os.getpid()
+        name = '@'.join([
+            self._name, hostname+":"+str(pid)])
+        # Can add a lot more information here later...
+        metadata = {
+            "hostname": hostname,
+            "pid": pid
+        }
+        return entity.Entity("conductor", name, metadata)
+
     def run(self, max_dispatches=None):
         self._dead.clear()
 
+        # Register a conductor type entity
+        self._jobboard.register_entity(self._get_conductor_info())
+
         total_dispatched = 0
         try:
diff --git a/taskflow/jobs/backends/impl_redis.py b/taskflow/jobs/backends/impl_redis.py
index 92a13dae..6f210ac3 100644
--- a/taskflow/jobs/backends/impl_redis.py
+++ b/taskflow/jobs/backends/impl_redis.py
@@ -808,6 +808,10 @@ return cmsgpack.pack(result)
             ensure_fresh=ensure_fresh,
             board_fetch_func=lambda ensure_fresh: self._fetch_jobs())
 
+    def register_entity(self, entity):
+        # Will implement a redis jobboard conductor register later
+        pass
+
     @base.check_who
     def consume(self, job, who):
         script = self._get_script('consume')
diff --git a/taskflow/jobs/backends/impl_zookeeper.py b/taskflow/jobs/backends/impl_zookeeper.py
index 15b54b13..dc38a0e4 100644
--- a/taskflow/jobs/backends/impl_zookeeper.py
+++ b/taskflow/jobs/backends/impl_zookeeper.py
@@ -236,6 +236,10 @@ class ZookeeperJobBoard(base.NotifyingJobBoard):
     #: Znode child path created under root path that contains trashed jobs.
     TRASH_FOLDER = ".trash"
 
+    #: Znode child path created under root path that contains registered
+    #: entities.
+    ENTITY_FOLDER = ".entities"
+
     #: Znode **prefix** that job entries have.
     JOB_PREFIX = 'job'
 
@@ -259,6 +263,9 @@ class ZookeeperJobBoard(base.NotifyingJobBoard):
         self._path = path
         self._trash_path = self._path.replace(k_paths.basename(self._path),
                                               self.TRASH_FOLDER)
+        self._entity_path = self._path.replace(
+            k_paths.basename(self._path),
+            self.ENTITY_FOLDER)
         # The backend to load the full logbooks from, since what is sent over
         # the data connection is only the logbook uuid and name, and not the
         # full logbook.
@@ -300,6 +307,11 @@ class ZookeeperJobBoard(base.NotifyingJobBoard):
         """Path where all trashed job znodes will be stored."""
         return self._trash_path
 
+    @property
+    def entity_path(self):
+        """Path where all conductor info znodes will be stored."""
+        return self._entity_path
+
     @property
     def job_count(self):
         return len(self._known_jobs)
@@ -552,6 +564,22 @@ class ZookeeperJobBoard(base.NotifyingJobBoard):
         return (misc.decode_json(lock_data), lock_stat,
                 misc.decode_json(job_data), job_stat)
 
+    def register_entity(self, entity):
+        entity_type = entity.kind
+        if entity_type == 'conductor':
+            entity_path = k_paths.join(self.entity_path, entity_type)
+            self._client.ensure_path(entity_path)
+
+            conductor_name = entity.name
+            self._client.create(k_paths.join(entity_path,
+                                             conductor_name),
+                                value=misc.binary_encode(
+                                    jsonutils.dumps(entity.to_dict())),
+                                ephemeral=True)
+        else:
+            raise excp.NotImplementedError(
+                "Not implemented for other entity type '%s'" % entity_type)
+
     @base.check_who
     def consume(self, job, who):
         with self._wrap(job.uuid, job.path,
diff --git a/taskflow/jobs/base.py b/taskflow/jobs/base.py
index 81e4a574..9e95ee1c 100644
--- a/taskflow/jobs/base.py
+++ b/taskflow/jobs/base.py
@@ -386,6 +386,10 @@ class JobBoard(object):
         this must be the same name that was used for claiming this job.
         """
 
+    @abc.abstractmethod
+    def register_entity(self, entity):
+        """Register an entity on the jobboard('s backend), e.g. a conductor."""
+
     @abc.abstractproperty
     def connected(self):
         """Returns if this jobboard is connected."""
diff --git a/taskflow/tests/unit/jobs/test_zk_job.py b/taskflow/tests/unit/jobs/test_zk_job.py
index e42bed66..f729fc3c 100644
--- a/taskflow/tests/unit/jobs/test_zk_job.py
+++ b/taskflow/tests/unit/jobs/test_zk_job.py
@@ -17,6 +17,7 @@
 import contextlib
 import threading
 
+from kazoo.protocol import paths as k_paths
 from kazoo.recipe import watchers
 from oslo_serialization import jsonutils
 from oslo_utils import uuidutils
@@ -25,12 +26,14 @@ import testtools
 from zake import fake_client
 from zake import utils as zake_utils
 
+from taskflow import exceptions as excp
 from taskflow.jobs.backends import impl_zookeeper
 from taskflow import states
 from taskflow import test
 from taskflow.test import mock
 from taskflow.tests.unit.jobs import base
 from taskflow.tests import utils as test_utils
+from taskflow.types import entity
 from taskflow.utils import kazoo_utils
 from taskflow.utils import misc
 from taskflow.utils import persistence_utils as p_utils
@@ -259,3 +262,34 @@ class ZakeJobboardTest(test.TestCase, ZookeeperBoardTestMixin):
             },
             'details': {},
         }, jsonutils.loads(misc.binary_decode(paths[path_key]['data'])))
+
+    def test_register_entity(self):
+        conductor_name = "conductor-abc@localhost:4123"
+        entity_instance = entity.Entity("conductor",
+                                        conductor_name,
+                                        {})
+        with base.connect_close(self.board):
+            self.board.register_entity(entity_instance)
+        # Check the '.entities' node has been created
+        self.assertTrue(self.board.entity_path in self.client.storage.paths)
+
+        conductor_entity_path = k_paths.join(self.board.entity_path,
+                                             'conductor',
+                                             conductor_name)
+        self.assertTrue(conductor_entity_path in self.client.storage.paths)
+        conductor_data = (
+            self.client.storage.paths[conductor_entity_path]['data'])
+        self.assertTrue(len(conductor_data) > 0)
+        self.assertDictEqual({
+            'name': conductor_name,
+            'kind': 'conductor',
+            'metadata': {},
+        }, jsonutils.loads(misc.binary_decode(conductor_data)))
+
+        entity_instance_2 = entity.Entity("non-sense",
+                                          "other_name",
+                                          {})
+        with base.connect_close(self.board):
+            self.assertRaises(excp.NotImplementedError,
+                              self.board.register_entity,
+                              entity_instance_2)
diff --git a/taskflow/types/entity.py b/taskflow/types/entity.py
new file mode 100644
index 00000000..d46927ce
--- /dev/null
+++ b/taskflow/types/entity.py
@@ -0,0 +1,33 @@
+# -*- coding: utf-8 -*-
+
+# Copyright (C) 2015 Rackspace Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+class Entity(object):
+    """Entity object(s) to be registered on jobboard.
+
+    Now only supports 'kind' of 'conductor'.
+    """
+    def __init__(self, kind, name, metadata):
+        self.kind = kind
+        self.name = name
+        self.metadata = metadata
+
+    def to_dict(self):
+        return {
+            'kind': self.kind,
+            'name': self.name,
+            'metadata': self.metadata
+        }

From 31dc9e0e079c5a44c0e09819e02b130bd7dc029a Mon Sep 17 00:00:00 2001
From: OpenStack Proposal Bot
Date: Mon, 19 Oct 2015 23:33:44 +0000
Subject: [PATCH 40/54] Updated from global requirements

Change-Id: I0b87f05e9100e38f11773af15b498cd5bd126005
---
 requirements.txt | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/requirements.txt b/requirements.txt
index 0bdbf290..339b4a5b 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -41,7 +41,7 @@ jsonschema!=2.5.0,<3.0.0,>=2.0.0
 automaton>=0.5.0 # Apache-2.0
 
 # For common utilities
-oslo.utils>=2.4.0 # Apache-2.0
+oslo.utils!=2.6.0,>=2.4.0 # Apache-2.0
 oslo.serialization>=1.4.0 # Apache-2.0
 
 # For lru caches and such

From 0095b0439aa8aa4d0826fc09751481930ceff619 Mon Sep 17 00:00:00 2001
From: OpenStack Proposal Bot
Date: Fri, 23 Oct 2015 17:59:30 +0000
Subject: [PATCH 41/54] Updated from global requirements

Change-Id: I9511da3628f709cb32961835a651c4461e6b299e
---
 requirements.txt | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/requirements.txt b/requirements.txt
index 339b4a5b..3605ea06 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -42,7 +42,7 @@ automaton>=0.5.0 # Apache-2.0
 
 # For common utilities
 oslo.utils!=2.6.0,>=2.4.0 # Apache-2.0
-oslo.serialization>=1.4.0 # Apache-2.0
+oslo.serialization>=1.10.0 # Apache-2.0
 
 # For lru caches and such
 cachetools>=1.0.0 # MIT License

From 16abe31be1fe375a2a01ed81db027c1c9ae39a63 Mon Sep 17 00:00:00 2001
From: Joshua Harlow
Date: Mon, 19 Oct 2015 17:18:01 -0700
Subject: [PATCH 42/54] Move 'fill_iter' to 'iter_utils.fill'

This is better placed in the iterator utility module since it acts on
iterables and provides its own iterator that yields from a provided
iterator up to (and, via a filler value, potentially beyond) that
iterator's own length.

Also adds tests to make sure 'fill' keeps working as expected, as well
as tests for other parts of iter_utils so they keep working as
expected too.
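For example (a sketch of the moved function's behavior, derived from
the tests added below):

    from taskflow.utils import iter_utils

    list(iter_utils.fill([1, 2], 4))                # -> [1, 2, None, None]
    "".join(iter_utils.fill("abc", 4, filler='d'))  # -> 'abcd'
    list(iter_utils.fill([1, 2, 3], 0))             # -> []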
Change-Id: Ica90816cbdedfd87f3861a111d7a852655c1fb74 --- taskflow/tests/unit/test_utils_iter_utils.py | 51 +++++++++++++++ taskflow/types/failure.py | 20 +----- taskflow/utils/iter_utils.py | 65 +++++++++++++++++--- 3 files changed, 111 insertions(+), 25 deletions(-) diff --git a/taskflow/tests/unit/test_utils_iter_utils.py b/taskflow/tests/unit/test_utils_iter_utils.py index 82d470f3..88879811 100644 --- a/taskflow/tests/unit/test_utils_iter_utils.py +++ b/taskflow/tests/unit/test_utils_iter_utils.py @@ -30,6 +30,51 @@ def forever_it(): class IterUtilsTest(test.TestCase): + def test_fill_empty(self): + self.assertEqual([], list(iter_utils.fill([1, 2, 3], 0))) + + def test_bad_unique_seen(self): + iters = [ + ['a', 'b'], + 2, + None, + ] + self.assertRaises(ValueError, + iter_utils.unique_seen, *iters) + + def test_unique_seen(self): + iters = [ + ['a', 'b'], + ['a', 'c', 'd'], + ['a', 'e', 'f'], + ['f', 'm', 'n'], + ] + self.assertEqual(['a', 'b', 'c', 'd', 'e', 'f', 'm', 'n'], + list(iter_utils.unique_seen(*iters))) + + def test_bad_fill(self): + self.assertRaises(ValueError, iter_utils.fill, 2, 2) + + def test_fill_many_empty(self): + result = list(iter_utils.fill(compat_range(0, 50), 500)) + self.assertEqual(450, sum(1 for x in result if x is None)) + self.assertEqual(50, sum(1 for x in result if x is not None)) + + def test_fill_custom_filler(self): + self.assertEqual("abcd", + "".join(iter_utils.fill("abc", 4, filler='d'))) + + def test_fill_less_needed(self): + self.assertEqual("ab", "".join(iter_utils.fill("abc", 2))) + + def test_fill(self): + self.assertEqual([None, None], list(iter_utils.fill([], 2))) + self.assertEqual((None, None), tuple(iter_utils.fill([], 2))) + + def test_bad_find_first_match(self): + self.assertRaises(ValueError, + iter_utils.find_first_match, 2, lambda v: False) + def test_find_first_match(self): it = forever_it() self.assertEqual(100, iter_utils.find_first_match(it, @@ -40,6 +85,9 @@ class IterUtilsTest(test.TestCase): self.assertIsNone(iter_utils.find_first_match(it, lambda v: v == '')) + def test_bad_count(self): + self.assertRaises(ValueError, iter_utils.count, 2) + def test_count(self): self.assertEqual(0, iter_utils.count([])) self.assertEqual(1, iter_utils.count(['a'])) @@ -48,6 +96,9 @@ class IterUtilsTest(test.TestCase): self.assertEqual(0, iter_utils.count(compat_range(0))) self.assertEqual(0, iter_utils.count(compat_range(-1))) + def test_bad_while_is_not(self): + self.assertRaises(ValueError, iter_utils.while_is_not, 2, 'a') + def test_while_is_not(self): it = iter(string.ascii_lowercase) self.assertEqual(['a'], diff --git a/taskflow/types/failure.py b/taskflow/types/failure.py index 34c30473..2663fb81 100644 --- a/taskflow/types/failure.py +++ b/taskflow/types/failure.py @@ -24,6 +24,7 @@ from oslo_utils import reflection import six from taskflow import exceptions as exc +from taskflow.utils import iter_utils from taskflow.utils import mixins from taskflow.utils import schema_utils as su @@ -40,23 +41,6 @@ def _copy_exc_info(exc_info): return (exc_type, copy.copy(exc_value), tb) -def _fill_iter(it, desired_len, filler=None): - """Iterates over a provided iterator up to the desired length. - - If the source iterator does not have enough values then the filler - value is yielded until the desired length is reached. 
- """ - count = 0 - for value in it: - if count >= desired_len: - return - yield value - count += 1 - while count < desired_len: - yield filler - count += 1 - - def _are_equal_exc_info_tuples(ei1, ei2): if ei1 == ei2: return True @@ -444,7 +428,7 @@ class Failure(mixins.StrMixin): # what the twisted people have done, see for example # twisted-13.0.0/twisted/python/failure.py#L89 for how they # created a fake traceback object... - self._exc_info = tuple(_fill_iter(dct['exc_info'], 3)) + self._exc_info = tuple(iter_utils.fill(dct['exc_info'], 3)) else: self._exc_info = None causes = dct.get('causes') diff --git a/taskflow/utils/iter_utils.py b/taskflow/utils/iter_utils.py index 1a366849..5d0aff10 100644 --- a/taskflow/utils/iter_utils.py +++ b/taskflow/utils/iter_utils.py @@ -16,10 +16,45 @@ # License for the specific language governing permissions and limitations # under the License. +import collections import itertools + +import six from six.moves import range as compat_range +def _ensure_iterable(func): + + @six.wraps(func) + def wrapper(it, *args, **kwargs): + if not isinstance(it, collections.Iterable): + raise ValueError("Iterable expected, but '%s' is not" + " iterable" % it) + return func(it, *args, **kwargs) + + return wrapper + + +@_ensure_iterable +def fill(it, desired_len, filler=None): + """Iterates over a provided iterator up to the desired length. + + If the source iterator does not have enough values then the filler + value is yielded until the desired length is reached. + """ + if desired_len > 0: + count = 0 + for value in it: + yield value + count += 1 + if count >= desired_len: + return + while count < desired_len: + yield filler + count += 1 + + +@_ensure_iterable def count(it): """Returns how many values in the iterator (depletes the iterator).""" return sum(1 for _value in it) @@ -27,15 +62,30 @@ def count(it): def unique_seen(it, *its): """Yields unique values from iterator(s) (and retains order).""" - seen = set() - for value in itertools.chain(it, *its): - if value in seen: - continue - else: - yield value - seen.add(value) + + def _gen_it(all_its): + # NOTE(harlowja): Generation is delayed so that validation + # can happen before generation/iteration... (instead of + # during generation/iteration) + seen = set() + while all_its: + it = all_its.popleft() + for value in it: + if value not in seen: + yield value + seen.add(value) + + all_its = collections.deque([it]) + if its: + all_its.extend(its) + for it in all_its: + if not isinstance(it, collections.Iterable): + raise ValueError("Iterable expected, but '%s' is" + " not iterable" % it) + return _gen_it(all_its) +@_ensure_iterable def find_first_match(it, matcher, not_found_value=None): """Searches iterator for first value that matcher callback returns true.""" for value in it: @@ -44,6 +94,7 @@ def find_first_match(it, matcher, not_found_value=None): return not_found_value +@_ensure_iterable def while_is_not(it, stop_value): """Yields given values from iterator until stop value is passed. From d7afc216b81f390eea4416f5481d058469874789 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Mon, 26 Oct 2015 17:23:46 -0700 Subject: [PATCH 43/54] Update docstrings on entity type Describe the instance variables and there types with docstrings and comments and such, so that it is more known what this type is for. Also removes docstring saying this is only for jobs and jobboards since it is really a more generic entity class and has no direct tie to jobboards. 
Change-Id: I98adb161adcb4ce096c1ee0e6c3377eb71383a90
---
 taskflow/types/entity.py | 14 ++++++++++++--
 1 file changed, 12 insertions(+), 2 deletions(-)

diff --git a/taskflow/types/entity.py b/taskflow/types/entity.py
index d46927ce..a644277f 100644
--- a/taskflow/types/entity.py
+++ b/taskflow/types/entity.py
@@ -16,9 +16,19 @@
 
 
 class Entity(object):
-    """Entity object(s) to be registered on jobboard.
+    """Entity object that identifies some resource/item/other.
 
-    Now only supports 'kind' of 'conductor'.
+    :ivar kind: **immutable** type/kind that identifies this
+                entity (typically unique to a library/application)
+    :type kind: string
+    :ivar name: **immutable** name that can be used to uniquely
+                identify this entity among many other entities
+    :type name: string
+    :ivar metadata: **immutable** dictionary of metadata that is
+                    associated with this entity (and typically
+                    has keys/values that further describe this
+                    entity)
+    :type metadata: dict
     """
     def __init__(self, kind, name, metadata):
         self.kind = kind

From ae9c701f9073941fbe063d2b7854ff6eed5b5fc0 Mon Sep 17 00:00:00 2001
From: Joshua Harlow
Date: Tue, 14 Jul 2015 16:13:06 -0700
Subject: [PATCH 44/54] Add an executor backed conductor and have existing
 impl. use it

This adds an executor backed job dispatching base class and has the
existing blocking conductor use it by running and dispatching jobs
into a sync executor. It also allows for dispatching jobs into a
thread executor, or another executor, via a new '_executor_factory'
method that can generate executors (it can be overridden in the
non-blocking conductor to provide your own executor instances).

This does alter the behavior in that, now that jobs are dispatched
into an executor, we can no longer immediately know whether a job's
dispatch raised an exception or whether it will raise one in the
future, so 'local_dispatched' is now just a boolean used to determine
if any dispatches happened (failure or not).

Change-Id: I485770e8f4c85d3833892a453c9fb5168d8f0407
---
 doc/source/conductors.rst                     |  10 +-
 setup.cfg                                     |   1 +
 taskflow/conductors/backends/impl_blocking.py | 258 +-------------
 taskflow/conductors/backends/impl_executor.py | 333 ++++++++++++++++++
 .../conductors/backends/impl_nonblocking.py   |  69 ++++
 taskflow/conductors/base.py                   |  31 +-
 taskflow/jobs/base.py                         |   7 +-
 taskflow/tests/unit/conductor/__init__.py     |   0
 .../test_blocking.py => test_conductors.py}   |  58 ++-
 taskflow/types/timing.py                      |   4 +-
 taskflow/utils/misc.py                        |  14 +
 taskflow/utils/threading_utils.py             |  12 +
 12 files changed, 543 insertions(+), 254 deletions(-)
 create mode 100644 taskflow/conductors/backends/impl_executor.py
 create mode 100644 taskflow/conductors/backends/impl_nonblocking.py
 delete mode 100644 taskflow/tests/unit/conductor/__init__.py
 rename taskflow/tests/unit/{conductor/test_blocking.py => test_conductors.py} (75%)

diff --git a/doc/source/conductors.rst b/doc/source/conductors.rst
index d6d99a2c..5d78d53f 100644
--- a/doc/source/conductors.rst
+++ b/doc/source/conductors.rst
@@ -9,7 +9,7 @@ Conductors
 Overview
 ========
 
-Conductors in TaskFlow provide a mechanism that unifies the various TaskFlow
+Conductors provide a mechanism that unifies the various
 concepts under a single easy to use (as plug-and-play as we can make it)
 construct.
 
@@ -66,6 +66,7 @@ Interfaces
 
 .. automodule:: taskflow.conductors.base
 .. automodule:: taskflow.conductors.backends
+.. automodule:: taskflow.conductors.backends.impl_executor
 
 Implementations
 ===============
@@ -75,12 +76,19 @@ Blocking
 --------
 
 .. automodule:: taskflow.conductors.backends.impl_blocking
 
+Non-blocking
+------------
+
+.. automodule:: taskflow.conductors.backends.impl_nonblocking
+
 Hierarchy
 =========
 
 .. inheritance-diagram::
     taskflow.conductors.base
     taskflow.conductors.backends.impl_blocking
+    taskflow.conductors.backends.impl_nonblocking
+    taskflow.conductors.backends.impl_executor
     :parts: 1
 
 .. _musical conductors: http://en.wikipedia.org/wiki/Conducting
diff --git a/setup.cfg b/setup.cfg
index ff241553..f903e7b5 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -37,6 +37,7 @@ taskflow.jobboards =
 
 taskflow.conductors =
     blocking = taskflow.conductors.backends.impl_blocking:BlockingConductor
+    nonblocking = taskflow.conductors.backends.impl_nonblocking:NonBlockingConductor
 
 taskflow.persistence =
     dir = taskflow.persistence.backends.impl_dir:DirBackend
diff --git a/taskflow/conductors/backends/impl_blocking.py b/taskflow/conductors/backends/impl_blocking.py
index 3fd5cb92..797338a0 100644
--- a/taskflow/conductors/backends/impl_blocking.py
+++ b/taskflow/conductors/backends/impl_blocking.py
@@ -12,254 +12,30 @@
 # License for the specific language governing permissions and limitations
 # under the License.
 
-import os
-import socket
+import futurist
 
-import threading
-
-try:
-    from contextlib import ExitStack  # noqa
-except ImportError:
-    from contextlib2 import ExitStack  # noqa
-
-from debtcollector import removals
-from oslo_utils import excutils
-import six
-
-from taskflow.conductors import base
-from taskflow import exceptions as excp
-from taskflow.listeners import logging as logging_listener
-from taskflow import logging
-from taskflow.types import entity
-from taskflow.types import timing as tt
-from taskflow.utils import async_utils
-from taskflow.utils import iter_utils
+from taskflow.conductors.backends import impl_executor
 
-LOG = logging.getLogger(__name__)
-WAIT_TIMEOUT = 0.5
-NO_CONSUME_EXCEPTIONS = tuple([
-    excp.ExecutionFailure,
-    excp.StorageFailure,
-])
 
-class BlockingConductor(base.Conductor):
-    """A conductor that runs jobs in its own dispatching loop.
+class BlockingConductor(impl_executor.ExecutorConductor):
+    """Blocking conductor that processes job(s) in a blocking manner."""
 
-    This conductor iterates over jobs in the provided jobboard (waiting for
-    the given timeout if no jobs exist) and attempts to claim them, work on
-    those jobs in its local thread (blocking further work from being claimed
-    and consumed) and then consume those work units after completion. This
-    process will repeat until the conductor has been stopped or other critical
-    error occurs.
-
-    NOTE(harlowja): consumption occurs even if a engine fails to run due to
-    a task failure. This is only skipped when an execution failure or
-    a storage failure occurs which are *usually* correctable by re-running on
-    a different conductor (storage failures and execution failures may be
-    transient issues that can be worked around by later execution). If a job
-    after completing can not be consumed or abandoned the conductor relies
-    upon the jobboard capabilities to automatically abandon these jobs.
+    MAX_SIMULTANEOUS_JOBS = 1
+    """
+    Default maximum number of jobs that can be in progress at the same time.
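+
+    (A value of one means at most a single job is worked on at a time,
+    which preserves this conductor's prior blocking behavior.)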
""" - START_FINISH_EVENTS_EMITTED = tuple([ - 'compilation', 'preparation', - 'validation', 'running', - ]) - """Events will be emitted for the start and finish of each engine - activity defined above, the actual event name that can be registered - to subscribe to will be ``${event}_start`` and ``${event}_end`` where - the ``${event}`` in this pseudo-variable will be one of these events. - """ + @staticmethod + def _executor_factory(): + return futurist.SynchronousExecutor() def __init__(self, name, jobboard, persistence=None, engine=None, - engine_options=None, wait_timeout=None): + engine_options=None, wait_timeout=None, + log=None, max_simultaneous_jobs=MAX_SIMULTANEOUS_JOBS): super(BlockingConductor, self).__init__( - name, jobboard, persistence=persistence, - engine=engine, engine_options=engine_options) - if wait_timeout is None: - wait_timeout = WAIT_TIMEOUT - if isinstance(wait_timeout, (int, float) + six.string_types): - self._wait_timeout = tt.Timeout(float(wait_timeout)) - elif isinstance(wait_timeout, tt.Timeout): - self._wait_timeout = wait_timeout - else: - raise ValueError("Invalid timeout literal: %s" % (wait_timeout)) - self._dead = threading.Event() - - @removals.removed_kwarg('timeout', version="0.8", removal_version="2.0") - def stop(self, timeout=None): - """Requests the conductor to stop dispatching. - - This method can be used to request that a conductor stop its - consumption & dispatching loop. - - The method returns immediately regardless of whether the conductor has - been stopped. - - .. deprecated:: 0.8 - - The ``timeout`` parameter is **deprecated** and is present for - backward compatibility **only**. In order to wait for the - conductor to gracefully shut down, :py:meth:`wait` should be used - instead. - """ - self._wait_timeout.interrupt() - - @property - def dispatching(self): - return not self._dead.is_set() - - def _listeners_from_job(self, job, engine): - listeners = super(BlockingConductor, self)._listeners_from_job(job, - engine) - listeners.append(logging_listener.LoggingListener(engine, log=LOG)) - return listeners - - def _dispatch_job(self, job): - engine = self._engine_from_job(job) - listeners = self._listeners_from_job(job, engine) - with ExitStack() as stack: - for listener in listeners: - stack.enter_context(listener) - LOG.debug("Dispatching engine for job '%s'", job) - consume = True - try: - for stage_func, event_name in [(engine.compile, 'compilation'), - (engine.prepare, 'preparation'), - (engine.validate, 'validation'), - (engine.run, 'running')]: - self._notifier.notify("%s_start" % event_name, { - 'job': job, - 'engine': engine, - 'conductor': self, - }) - stage_func() - self._notifier.notify("%s_end" % event_name, { - 'job': job, - 'engine': engine, - 'conductor': self, - }) - except excp.WrappedFailure as e: - if all((f.check(*NO_CONSUME_EXCEPTIONS) for f in e)): - consume = False - if LOG.isEnabledFor(logging.WARNING): - if consume: - LOG.warn("Job execution failed (consumption being" - " skipped): %s [%s failures]", job, len(e)) - else: - LOG.warn("Job execution failed (consumption" - " proceeding): %s [%s failures]", job, len(e)) - # Show the failure/s + traceback (if possible)... - for i, f in enumerate(e): - LOG.warn("%s. 
%s", i + 1, f.pformat(traceback=True)) - except NO_CONSUME_EXCEPTIONS: - LOG.warn("Job execution failed (consumption being" - " skipped): %s", job, exc_info=True) - consume = False - except Exception: - LOG.warn("Job execution failed (consumption proceeding): %s", - job, exc_info=True) - else: - LOG.info("Job completed successfully: %s", job) - return async_utils.make_completed_future(consume) - - def _get_conductor_info(self): - """For right now we just register the conductor name as: - - @: - - """ - hostname = socket.gethostname() - pid = os.getpid() - name = '@'.join([ - self._name, hostname+":"+str(pid)]) - # Can add a lot more information here, - metadata = { - "hostname": hostname, - "pid": pid - } - - return entity.Entity("conductor", name, metadata) - - def run(self, max_dispatches=None): - self._dead.clear() - - # Register a conductor type entity - self._jobboard.register_entity(self._get_conductor_info()) - - total_dispatched = 0 - try: - - if max_dispatches is None: - # NOTE(TheSriram): if max_dispatches is not set, - # then the conductor will run indefinitely, and not - # stop after 'n' number of dispatches - max_dispatches = -1 - - dispatch_gen = iter_utils.iter_forever(max_dispatches) - - while True: - if self._wait_timeout.is_stopped(): - break - local_dispatched = 0 - for job in self._jobboard.iterjobs(): - if self._wait_timeout.is_stopped(): - break - LOG.debug("Trying to claim job: %s", job) - try: - self._jobboard.claim(job, self._name) - except (excp.UnclaimableJob, excp.NotFound): - LOG.debug("Job already claimed or consumed: %s", job) - continue - consume = False - try: - f = self._dispatch_job(job) - except KeyboardInterrupt: - with excutils.save_and_reraise_exception(): - LOG.warn("Job dispatching interrupted: %s", job) - except Exception: - LOG.warn("Job dispatching failed: %s", job, - exc_info=True) - else: - - local_dispatched += 1 - consume = f.result() - try: - if consume: - self._jobboard.consume(job, self._name) - else: - self._jobboard.abandon(job, self._name) - except (excp.JobFailure, excp.NotFound): - if consume: - LOG.warn("Failed job consumption: %s", job, - exc_info=True) - else: - LOG.warn("Failed job abandonment: %s", job, - exc_info=True) - - total_dispatched = next(dispatch_gen) - - if local_dispatched == 0 and \ - not self._wait_timeout.is_stopped(): - self._wait_timeout.wait() - - except StopIteration: - if max_dispatches >= 0 and total_dispatched >= max_dispatches: - LOG.info("Maximum dispatch limit of %s reached", - max_dispatches) - finally: - self._dead.set() - - def wait(self, timeout=None): - """Waits for the conductor to gracefully exit. - - This method waits for the conductor to gracefully exit. An optional - timeout can be provided, which will cause the method to return - within the specified timeout. If the timeout is reached, the returned - value will be False. - - :param timeout: Maximum number of seconds that the :meth:`wait` method - should block for. 
- """ - return self._dead.wait(timeout) + name, jobboard, + persistence=persistence, engine=engine, + engine_options=engine_options, + wait_timeout=wait_timeout, log=log, + max_simultaneous_jobs=max_simultaneous_jobs) diff --git a/taskflow/conductors/backends/impl_executor.py b/taskflow/conductors/backends/impl_executor.py new file mode 100644 index 00000000..c47488da --- /dev/null +++ b/taskflow/conductors/backends/impl_executor.py @@ -0,0 +1,333 @@ +# -*- coding: utf-8 -*- + +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import abc +import functools +import itertools +import threading + +try: + from contextlib import ExitStack # noqa +except ImportError: + from contextlib2 import ExitStack # noqa + +from debtcollector import removals +from oslo_utils import excutils +import six + +from taskflow.conductors import base +from taskflow import exceptions as excp +from taskflow.listeners import logging as logging_listener +from taskflow import logging +from taskflow.types import timing as tt +from taskflow.utils import iter_utils +from taskflow.utils import misc + +LOG = logging.getLogger(__name__) + + +def _convert_to_timeout(value=None, default_value=None, event_factory=None): + if value is None: + value = default_value + if isinstance(value, (int, float) + six.string_types): + return tt.Timeout(float(value), event_factory=event_factory) + elif isinstance(value, tt.Timeout): + return value + else: + raise ValueError("Invalid timeout literal '%s'" % (value)) + + +@six.add_metaclass(abc.ABCMeta) +class ExecutorConductor(base.Conductor): + """Dispatches jobs from blocking :py:meth:`.run` method to some executor. + + This conductor iterates over jobs in the provided jobboard (waiting for + the given timeout if no jobs exist) and attempts to claim them, work on + those jobs using an executor (potentially blocking further work from being + claimed and consumed) and then consume those work units after + completion. This process will repeat until the conductor has been stopped + or other critical error occurs. + + NOTE(harlowja): consumption occurs even if a engine fails to run due to + a atom failure. This is only skipped when an execution failure or + a storage failure occurs which are *usually* correctable by re-running on + a different conductor (storage failures and execution failures may be + transient issues that can be worked around by later execution). If a job + after completing can not be consumed or abandoned the conductor relies + upon the jobboard capabilities to automatically abandon these jobs. + """ + + LOG = None + """ + Logger that will be used for listening to events (if none then the module + level logger will be used instead). + """ + + #: Default timeout used to idle/wait when no jobs have been found. + WAIT_TIMEOUT = 0.5 + + MAX_SIMULTANEOUS_JOBS = -1 + """ + Default maximum number of jobs that can be in progress at the same time. 
+
+    Negative or zero values imply no limit (do note that if an executor that
+    is built on a queue is used, as most are, this will imply that the
+    queue may contain a potentially large & unfinished backlog of
+    submitted jobs). This *may* get better someday if
+    https://bugs.python.org/issue22737 is ever implemented and released.
+    """
+
+    #: Exceptions that will **not** cause consumption to occur.
+    NO_CONSUME_EXCEPTIONS = tuple([
+        excp.ExecutionFailure,
+        excp.StorageFailure,
+    ])
+
+    _event_factory = threading.Event
+    """This attribute *can* be overridden by subclasses (for example if
+    an eventlet *green* event works better for the conductor user)."""
+
+    START_FINISH_EVENTS_EMITTED = tuple([
+        'compilation', 'preparation',
+        'validation', 'running',
+    ])
+    """Events will be emitted for the start and finish of each engine
+    activity defined above, the actual event name that can be registered
+    to subscribe to will be ``${event}_start`` and ``${event}_end`` where
+    the ``${event}`` in this pseudo-variable will be one of these events.
+    """
+
+    def __init__(self, name, jobboard,
+                 persistence=None, engine=None,
+                 engine_options=None, wait_timeout=None,
+                 log=None, max_simultaneous_jobs=MAX_SIMULTANEOUS_JOBS):
+        super(ExecutorConductor, self).__init__(
+            name, jobboard, persistence=persistence,
+            engine=engine, engine_options=engine_options)
+        self._wait_timeout = _convert_to_timeout(
+            value=wait_timeout, default_value=self.WAIT_TIMEOUT,
+            event_factory=self._event_factory)
+        self._dead = self._event_factory()
+        self._log = misc.pick_first_not_none(log, self.LOG, LOG)
+        self._max_simultaneous_jobs = int(
+            misc.pick_first_not_none(max_simultaneous_jobs,
+                                     self.MAX_SIMULTANEOUS_JOBS))
+        self._dispatched = set()
+
+    def _executor_factory(self):
+        """Creates an executor to be used during dispatching."""
+        raise excp.NotImplementedError("This method must be implemented but"
+                                       " it has not been")
+
+    @removals.removed_kwarg('timeout', version="0.8", removal_version="2.0")
+    def stop(self, timeout=None):
+        """Requests the conductor to stop dispatching.
+
+        This method can be used to request that a conductor stop its
+        consumption & dispatching loop.
+
+        The method returns immediately regardless of whether the conductor has
+        been stopped.
+
+        .. deprecated:: 0.8
+
+            The ``timeout`` parameter is **deprecated** and is present for
+            backward compatibility **only**. In order to wait for the
+            conductor to gracefully shut down, :py:meth:`wait` should be used
+            instead.
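+
+        A typical graceful shutdown (a sketch, assuming :py:meth:`run`
+        is executing in some other thread) would be::
+
+            conductor.stop()   # interrupt the idle/wait timeout
+            conductor.wait()   # block until dispatching has exited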
+ """ + self._wait_timeout.interrupt() + + @property + def dispatching(self): + """Whether or not the dispatching loop is still dispatching.""" + return not self._dead.is_set() + + def _listeners_from_job(self, job, engine): + listeners = super(ExecutorConductor, self)._listeners_from_job( + job, engine) + listeners.append(logging_listener.LoggingListener(engine, + log=self._log)) + return listeners + + def _dispatch_job(self, job): + engine = self._engine_from_job(job) + listeners = self._listeners_from_job(job, engine) + with ExitStack() as stack: + for listener in listeners: + stack.enter_context(listener) + self._log.debug("Dispatching engine for job '%s'", job) + consume = True + try: + for stage_func, event_name in [(engine.compile, 'compilation'), + (engine.prepare, 'preparation'), + (engine.validate, 'validation'), + (engine.run, 'running')]: + self._notifier.notify("%s_start" % event_name, { + 'job': job, + 'engine': engine, + 'conductor': self, + }) + stage_func() + self._notifier.notify("%s_end" % event_name, { + 'job': job, + 'engine': engine, + 'conductor': self, + }) + except excp.WrappedFailure as e: + if all((f.check(*self.NO_CONSUME_EXCEPTIONS) for f in e)): + consume = False + if self._log.isEnabledFor(logging.WARNING): + if consume: + self._log.warn( + "Job execution failed (consumption being" + " skipped): %s [%s failures]", job, len(e)) + else: + self._log.warn( + "Job execution failed (consumption" + " proceeding): %s [%s failures]", job, len(e)) + # Show the failure/s + traceback (if possible)... + for i, f in enumerate(e): + self._log.warn("%s. %s", i + 1, + f.pformat(traceback=True)) + except self.NO_CONSUME_EXCEPTIONS: + self._log.warn("Job execution failed (consumption being" + " skipped): %s", job, exc_info=True) + consume = False + except Exception: + self._log.warn( + "Job execution failed (consumption proceeding): %s", + job, exc_info=True) + else: + self._log.info("Job completed successfully: %s", job) + return consume + + def _try_finish_job(self, job, consume): + try: + if consume: + self._jobboard.consume(job, self._name) + else: + self._jobboard.abandon(job, self._name) + except (excp.JobFailure, excp.NotFound): + if consume: + self._log.warn("Failed job consumption: %s", job, + exc_info=True) + else: + self._log.warn("Failed job abandonment: %s", job, + exc_info=True) + + def _on_job_done(self, job, fut): + consume = False + try: + consume = fut.result() + except KeyboardInterrupt: + with excutils.save_and_reraise_exception(): + self._log.warn("Job dispatching interrupted: %s", job) + except Exception: + self._log.warn("Job dispatching failed: %s", job, exc_info=True) + try: + self._try_finish_job(job, consume) + finally: + self._dispatched.discard(fut) + + def _can_claim_more_jobs(self, job): + if self._wait_timeout.is_stopped(): + return False + if self._max_simultaneous_jobs <= 0: + return True + if len(self._dispatched) >= self._max_simultaneous_jobs: + return False + else: + return True + + def _run_until_dead(self, executor, max_dispatches=None): + total_dispatched = 0 + if max_dispatches is None: + # NOTE(TheSriram): if max_dispatches is not set, + # then the conductor will run indefinitely, and not + # stop after 'n' number of dispatches + max_dispatches = -1 + dispatch_gen = iter_utils.iter_forever(max_dispatches) + is_stopped = self._wait_timeout.is_stopped + try: + # Don't even do any work in the first place... 
+ if max_dispatches == 0: + raise StopIteration + while not is_stopped(): + any_dispatched = False + for job in itertools.takewhile(self._can_claim_more_jobs, + self._jobboard.iterjobs()): + self._log.debug("Trying to claim job: %s", job) + try: + self._jobboard.claim(job, self._name) + except (excp.UnclaimableJob, excp.NotFound): + self._log.debug("Job already claimed or" + " consumed: %s", job) + else: + try: + fut = executor.submit(self._dispatch_job, job) + except RuntimeError: + with excutils.save_and_reraise_exception(): + self._log.warn("Job dispatch submitting" + " failed: %s", job) + self._try_finish_job(job, False) + else: + fut.job = job + self._dispatched.add(fut) + any_dispatched = True + fut.add_done_callback( + functools.partial(self._on_job_done, job)) + total_dispatched = next(dispatch_gen) + if not any_dispatched and not is_stopped(): + self._wait_timeout.wait() + except StopIteration: + # This will be raised from 'dispatch_gen' if it reaches its + # max dispatch number (which implies we should do no more work). + with excutils.save_and_reraise_exception(): + if max_dispatches >= 0 and total_dispatched >= max_dispatches: + self._log.info("Maximum dispatch limit of %s reached", + max_dispatches) + + def run(self, max_dispatches=None): + self._dead.clear() + self._dispatched.clear() + try: + self._jobboard.register_entity(self.conductor) + with self._executor_factory() as executor: + self._run_until_dead(executor, + max_dispatches=max_dispatches) + except StopIteration: + pass + except KeyboardInterrupt: + with excutils.save_and_reraise_exception(): + self._log.warn("Job dispatching interrupted") + finally: + self._dead.set() + + # Inherit the docs, so we can reference them in our class docstring, + # if we don't do this sphinx gets confused... + run.__doc__ = base.Conductor.run.__doc__ + + def wait(self, timeout=None): + """Waits for the conductor to gracefully exit. + + This method waits for the conductor to gracefully exit. An optional + timeout can be provided, which will cause the method to return + within the specified timeout. If the timeout is reached, the returned + value will be ``False``, otherwise it will be ``True``. + + :param timeout: Maximum number of seconds that the :meth:`wait` method + should block for. + """ + return self._dead.wait(timeout) diff --git a/taskflow/conductors/backends/impl_nonblocking.py b/taskflow/conductors/backends/impl_nonblocking.py new file mode 100644 index 00000000..76893d70 --- /dev/null +++ b/taskflow/conductors/backends/impl_nonblocking.py @@ -0,0 +1,69 @@ +# -*- coding: utf-8 -*- + +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import futurist +import six + +from taskflow.conductors.backends import impl_executor +from taskflow.utils import threading_utils as tu + + +class NonBlockingConductor(impl_executor.ExecutorConductor): + """Non-blocking conductor that processes job(s) using a thread executor. 
+
+    NOTE(harlowja): A custom executor factory can be provided via the
+                    keyword argument ``executor_factory``; if provided, it
+                    will be invoked at
+                    :py:meth:`~taskflow.conductors.base.Conductor.run` time
+                    and it must return a compatible `executor`_ which can
+                    be used to submit jobs to. If ``None`` is provided, a
+                    thread-pool-backed executor is selected by default (it
+                    will have as many workers as this conductor's
+                    simultaneous job count).
+
+    .. _executor: https://docs.python.org/dev/library/\
+                  concurrent.futures.html#executor-objects
+    """
+
+    MAX_SIMULTANEOUS_JOBS = tu.get_optimal_thread_count()
+    """
+    Default maximum number of jobs that can be in progress at the same time.
+    """
+
+    def _default_executor_factory(self):
+        max_simultaneous_jobs = self._max_simultaneous_jobs
+        if max_simultaneous_jobs <= 0:
+            max_workers = tu.get_optimal_thread_count()
+        else:
+            max_workers = max_simultaneous_jobs
+        return futurist.ThreadPoolExecutor(max_workers=max_workers)
+
+    def __init__(self, name, jobboard,
+                 persistence=None, engine=None,
+                 engine_options=None, wait_timeout=None,
+                 log=None, max_simultaneous_jobs=MAX_SIMULTANEOUS_JOBS,
+                 executor_factory=None):
+        super(NonBlockingConductor, self).__init__(
+            name, jobboard,
+            persistence=persistence, engine=engine,
+            engine_options=engine_options, wait_timeout=wait_timeout,
+            log=log, max_simultaneous_jobs=max_simultaneous_jobs)
+        if executor_factory is None:
+            self._executor_factory = self._default_executor_factory
+        else:
+            if not six.callable(executor_factory):
+                raise ValueError("Provided keyword argument 'executor_factory'"
+                                 " must be callable")
+            self._executor_factory = executor_factory
diff --git a/taskflow/conductors/base.py b/taskflow/conductors/base.py
index 69424232..750d8cff 100644
--- a/taskflow/conductors/base.py
+++ b/taskflow/conductors/base.py
@@ -13,6 +13,7 @@
 # under the License.
 
 import abc
+import os
 import threading
 
 import fasteners
@@ -20,7 +21,9 @@ import six
 
 from taskflow import engines
 from taskflow import exceptions as excp
+from taskflow.types import entity
 from taskflow.types import notifier
+from taskflow.utils import misc
 
 
 @six.add_metaclass(abc.ABCMeta)
@@ -35,6 +38,9 @@ class Conductor(object):
     period of time will finish up the prior failed conductors work.
     """
 
+    #: Entity kind used when creating new entity objects
+    ENTITY_KIND = 'conductor'
+
     def __init__(self, name, jobboard,
                  persistence=None, engine=None, engine_options=None):
         self._name = name
@@ -48,6 +54,18 @@ class Conductor(object):
         self._lock = threading.RLock()
         self._notifier = notifier.Notifier()
 
+    @misc.cachedproperty
+    def conductor(self):
+        """Entity object that represents this conductor."""
+        hostname = misc.get_hostname()
+        pid = os.getpid()
+        name = '@'.join([self._name, hostname + ":" + str(pid)])
+        metadata = {
+            'hostname': hostname,
+            'pid': pid,
+        }
+        return entity.Entity(self.ENTITY_KIND, name, metadata)
+
     @property
     def notifier(self):
         """The conductor actions (or other state changes) notifier.
@@ -134,8 +152,17 @@ class Conductor(object):
         self._jobboard.close()
 
     @abc.abstractmethod
-    def run(self):
-        """Continuously claims, runs, and consumes jobs (and repeat)."""
+    def run(self, max_dispatches=None):
+        """Continuously claims, runs, and consumes jobs (and repeat).
+ + :param max_dispatches: An upper bound on the number of jobs that will + be dispatched, if none or negative this implies + there is no limit to the number of jobs that + will be dispatched, otherwise if positive this + run method will return when that amount of jobs + has been dispatched (instead of running + forever and/or until stopped). + """ @abc.abstractmethod def _dispatch_job(self, job): diff --git a/taskflow/jobs/base.py b/taskflow/jobs/base.py index 9e95ee1c..8e5d77c3 100644 --- a/taskflow/jobs/base.py +++ b/taskflow/jobs/base.py @@ -388,7 +388,12 @@ class JobBoard(object): @abc.abstractmethod def register_entity(self, entity): - """Register an entity to the jobboard('s backend), e.g: a conductor""" + """Register an entity to the jobboard('s backend), e.g: a conductor. + + :param entity: entity to register as being associated with the + jobboard('s backend) + :type entity: :py:class:`~taskflow.types.entity.Entity` + """ @abc.abstractproperty def connected(self): diff --git a/taskflow/tests/unit/conductor/__init__.py b/taskflow/tests/unit/conductor/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/taskflow/tests/unit/conductor/test_blocking.py b/taskflow/tests/unit/test_conductors.py similarity index 75% rename from taskflow/tests/unit/conductor/test_blocking.py rename to taskflow/tests/unit/test_conductors.py index 29d211fc..d7f84d50 100644 --- a/taskflow/tests/unit/conductor/test_blocking.py +++ b/taskflow/tests/unit/test_conductors.py @@ -18,6 +18,8 @@ import collections import contextlib import threading +import futurist +import testscenarios from zake import fake_client from taskflow.conductors import backends @@ -51,23 +53,39 @@ def test_factory(blowup): return f +def single_factory(): + return futurist.ThreadPoolExecutor(max_workers=1) + + ComponentBundle = collections.namedtuple('ComponentBundle', ['board', 'client', 'persistence', 'conductor']) -class BlockingConductorTest(test_utils.EngineTestBase, test.TestCase): - KIND = 'blocking' +class ManyConductorTest(testscenarios.TestWithScenarios, + test_utils.EngineTestBase, test.TestCase): + scenarios = [ + ('blocking', {'kind': 'blocking', + 'conductor_kwargs': {'wait_timeout': 0.1}}), + ('nonblocking_many_thread', + {'kind': 'nonblocking', 'conductor_kwargs': {'wait_timeout': 0.1}}), + ('nonblocking_one_thread', {'kind': 'nonblocking', + 'conductor_kwargs': { + 'executor_factory': single_factory, + 'wait_timeout': 0.1, + }}) + ] - def make_components(self, name='testing', wait_timeout=0.1): + def make_components(self): client = fake_client.FakeClient() persistence = impl_memory.MemoryBackend() - board = impl_zookeeper.ZookeeperJobBoard(name, {}, + board = impl_zookeeper.ZookeeperJobBoard('testing', {}, client=client, persistence=persistence) - conductor = backends.fetch(self.KIND, name, board, - persistence=persistence, - wait_timeout=wait_timeout) + conductor_kwargs = self.conductor_kwargs.copy() + conductor_kwargs['persistence'] = persistence + conductor = backends.fetch(self.kind, 'testing', board, + **conductor_kwargs) return ComponentBundle(board, client, persistence, conductor) def test_connection(self): @@ -178,3 +196,29 @@ class BlockingConductorTest(test_utils.EngineTestBase, test.TestCase): fd = lb.find(fd.uuid) self.assertIsNotNone(fd) self.assertEqual(st.REVERTED, fd.state) + + +class NonBlockingExecutorTest(test.TestCase): + def test_bad_wait_timeout(self): + persistence = impl_memory.MemoryBackend() + client = fake_client.FakeClient() + board = 
impl_zookeeper.ZookeeperJobBoard('testing', {},
+                                                 client=client,
+                                                 persistence=persistence)
+        self.assertRaises(ValueError,
+                          backends.fetch,
+                          'nonblocking', 'testing', board,
+                          persistence=persistence,
+                          wait_timeout='testing')
+
+    def test_bad_factory(self):
+        persistence = impl_memory.MemoryBackend()
+        client = fake_client.FakeClient()
+        board = impl_zookeeper.ZookeeperJobBoard('testing', {},
+                                                 client=client,
+                                                 persistence=persistence)
+        self.assertRaises(ValueError,
+                          backends.fetch,
+                          'nonblocking', 'testing', board,
+                          persistence=persistence,
+                          executor_factory='testing')
diff --git a/taskflow/types/timing.py b/taskflow/types/timing.py
index 2fa7d20a..99aeac56 100644
--- a/taskflow/types/timing.py
+++ b/taskflow/types/timing.py
@@ -31,11 +31,11 @@ class Timeout(object):
     This object has the ability to be interrupted before the actual
     timeout is reached.
     """
-    def __init__(self, timeout):
+    def __init__(self, timeout, event_factory=threading.Event):
         if timeout < 0:
             raise ValueError("Timeout must be >= 0 and not %s" % (timeout))
         self._timeout = timeout
-        self._event = threading.Event()
+        self._event = event_factory()
 
     def interrupt(self):
         self._event.set()
diff --git a/taskflow/utils/misc.py b/taskflow/utils/misc.py
index aa89aa81..ca8faa5e 100644
--- a/taskflow/utils/misc.py
+++ b/taskflow/utils/misc.py
@@ -22,6 +22,7 @@ import errno
 import inspect
 import os
 import re
+import socket
 import sys
 import threading
 import types
@@ -42,6 +43,7 @@ from taskflow.types import notifier
 from taskflow.utils import deprecation
 
 
+UNKNOWN_HOSTNAME = ""
 NUMERIC_TYPES = six.integer_types + (float,)
 
 # NOTE(imelnikov): regular expression to get scheme from URI,
@@ -68,6 +70,18 @@ class StringIO(six.StringIO):
         self.write(linesep)
 
 
+def get_hostname(unknown_hostname=UNKNOWN_HOSTNAME):
+    """Gets the machine's hostname; if unable to, returns the default."""
+    try:
+        hostname = socket.getfqdn()
+        if not hostname:
+            return unknown_hostname
+        else:
+            return hostname
+    except socket.error:
+        return unknown_hostname
+
+
 def match_type(obj, matchers):
     """Matches a given object using the given matchers list/iterable.
 
diff --git a/taskflow/utils/threading_utils.py b/taskflow/utils/threading_utils.py
index 7de0151d..ed554683 100644
--- a/taskflow/utils/threading_utils.py
+++ b/taskflow/utils/threading_utils.py
@@ -15,6 +15,7 @@
 # under the License.
 
 import collections
+import multiprocessing
 import threading
 
 import six
@@ -35,6 +36,17 @@ def get_ident():
     return _thread.get_ident()
 
 
+def get_optimal_thread_count(default=2):
+    """Tries to guess an optimal thread count for the current system."""
+    try:
+        return multiprocessing.cpu_count() + 1
+    except NotImplementedError:
+        # NOTE(harlowja): cpu_count() apparently may raise, so in this case
+        # we will just set up two threads since it's hard to know what else
+        # we should do in this situation.
+        return default
+
+
 def daemon_thread(target, *args, **kwargs):
     """Makes a daemon thread that calls the given target when started."""
     thread = threading.Thread(target=target, args=args, kwargs=kwargs)

From 41a399c0fe4a9f705a0f5616fa1055116a005b8b Mon Sep 17 00:00:00 2001
From: Joshua Harlow
Date: Tue, 20 Oct 2015 17:46:42 -0700
Subject: [PATCH 45/54] Use conductor entity class constant instead of raw
 string

Now that the conductor base class provides a constant for the entity
kind that its entity objects carry, we can use that constant to ensure
we only register a limited set of entity kinds.
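Putting the pieces from this series together, wiring up the new
non-blocking conductor might look like the following minimal sketch. It
mirrors the test fixtures above (the zake fake client and the in-memory
persistence backend stand in for a real ZooKeeper and storage; names are
illustrative)::

    import futurist
    from zake import fake_client

    from taskflow.conductors import backends
    from taskflow.jobs.backends import impl_zookeeper
    from taskflow.persistence.backends import impl_memory
    from taskflow.utils import threading_utils

    persistence = impl_memory.MemoryBackend()
    client = fake_client.FakeClient()
    board = impl_zookeeper.ZookeeperJobBoard('testing', {},
                                             client=client,
                                             persistence=persistence)

    def single_factory():
        # The new 'executor_factory' keyword expects a callable like this;
        # here it caps the conductor at a single worker thread.
        return futurist.ThreadPoolExecutor(max_workers=1)

    conductor = backends.fetch('nonblocking', 'testing', board,
                               persistence=persistence,
                               wait_timeout=0.1,
                               executor_factory=single_factory)
    conductor.connect()
    t = threading_utils.daemon_thread(conductor.run)
    t.start()
    # ... post jobs to the board here ...
    conductor.stop()
    conductor.wait()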
Change-Id: I0e71a8da64a228fdc68c9de941aaccf54d493d96 --- taskflow/jobs/backends/impl_zookeeper.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/taskflow/jobs/backends/impl_zookeeper.py b/taskflow/jobs/backends/impl_zookeeper.py index dc38a0e4..a6237889 100644 --- a/taskflow/jobs/backends/impl_zookeeper.py +++ b/taskflow/jobs/backends/impl_zookeeper.py @@ -30,6 +30,7 @@ from oslo_utils import timeutils from oslo_utils import uuidutils import six +from taskflow.conductors import base as c_base from taskflow import exceptions as excp from taskflow.jobs import base from taskflow import logging @@ -566,7 +567,7 @@ class ZookeeperJobBoard(base.NotifyingJobBoard): def register_entity(self, entity): entity_type = entity.kind - if entity_type == 'conductor': + if entity_type == c_base.Conductor.ENTITY_KIND: entity_path = k_paths.join(self.entity_path, entity_type) self._client.ensure_path(entity_path) From 18974b5fe580f053cb7fe0fd4f624c72a67aa5cc Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Thu, 12 Nov 2015 15:17:38 -0800 Subject: [PATCH 46/54] Correctly apply deciders across flow boundaries When a flow is added to another flow and there is a decider placed on that link we need to make sure that we retain that decider. This also ensures that any predecessor of that flow that has a decider that affects the flow nodes execution also gets used in the decision process. Closes-Bug: #1515748 Change-Id: Ifdf20378b26cdd13e0a3ff87cec8990fe89c0661 --- taskflow/engines/action_engine/analyzer.py | 10 ++- taskflow/engines/action_engine/runtime.py | 41 ++++++++--- taskflow/tests/unit/test_engines.py | 81 ++++++++++++++++++++++ 3 files changed, 119 insertions(+), 13 deletions(-) diff --git a/taskflow/engines/action_engine/analyzer.py b/taskflow/engines/action_engine/analyzer.py index bdde8975..6f9aa669 100644 --- a/taskflow/engines/action_engine/analyzer.py +++ b/taskflow/engines/action_engine/analyzer.py @@ -80,10 +80,14 @@ class IgnoreDecider(Decider): def check(self, runtime): """Returns bool of whether this decider should allow running.""" + # Gather all atoms results so that those results can be used + # by the decider(s) that are making a decision as to pass or + # not pass... results = {} - for name in six.iterkeys(self._edge_deciders): - results[name] = runtime.storage.get(name) - for local_decider in six.itervalues(self._edge_deciders): + for node, node_kind, _local_decider in self._edge_deciders: + if node_kind in co.ATOMS: + results[node.name] = runtime.storage.get(node.name) + for _node, _node_kind, local_decider in self._edge_deciders: if not local_decider(history=results): return False return True diff --git a/taskflow/engines/action_engine/runtime.py b/taskflow/engines/action_engine/runtime.py index 6780e931..dc9aa276 100644 --- a/taskflow/engines/action_engine/runtime.py +++ b/taskflow/engines/action_engine/runtime.py @@ -14,6 +14,7 @@ # License for the specific language governing permissions and limitations # under the License. +import collections import functools from futurist import waiters @@ -49,6 +50,34 @@ class Runtime(object): self._compilation = compilation self._atom_cache = {} + @staticmethod + def _walk_edge_deciders(graph, atom): + """Iterates through all nodes, deciders that alter atoms execution.""" + # This is basically a reverse breadth first exploration, with + # special logic to further traverse down flow nodes... 
+ predecessors_iter = graph.predecessors_iter + nodes = collections.deque((u_node, atom) + for u_node in predecessors_iter(atom)) + visited = set() + while nodes: + u_node, v_node = nodes.popleft() + u_node_kind = graph.node[u_node]['kind'] + try: + yield (u_node, u_node_kind, + graph.adj[u_node][v_node][LINK_DECIDER]) + except KeyError: + pass + if u_node_kind == com.FLOW and u_node not in visited: + # Avoid re-exploring the same flow if we get to this + # same flow by a different *future* path... + visited.add(u_node) + # Since we *currently* jump over flow node(s), we need to make + # sure that any prior decider that was directed at this flow + # node also gets used during future decisions about this + # atom node. + nodes.extend((u_u_node, u_node) + for u_u_node in predecessors_iter(u_node)) + def compile(self): """Compiles & caches frequently used execution helper objects. @@ -84,21 +113,13 @@ class Runtime(object): raise exc.CompilationFailure("Unknown node kind '%s'" " encountered" % node_kind) metadata = {} + deciders_it = self._walk_edge_deciders(graph, node) walker = sc.ScopeWalker(self.compilation, node, names_only=True) - edge_deciders = {} - for prev_node in graph.predecessors_iter(node): - # If there is any link function that says if this connection - # is able to run (or should not) ensure we retain it and use - # it later as needed. - u_v_data = graph.adj[prev_node][node] - u_v_decider = u_v_data.get(LINK_DECIDER) - if u_v_decider is not None: - edge_deciders[prev_node.name] = u_v_decider metadata['scope_walker'] = walker metadata['check_transition_handler'] = check_transition_handler metadata['change_state_handler'] = change_state_handler metadata['scheduler'] = scheduler - metadata['edge_deciders'] = edge_deciders + metadata['edge_deciders'] = tuple(deciders_it) self._atom_cache[node.name] = metadata @property diff --git a/taskflow/tests/unit/test_engines.py b/taskflow/tests/unit/test_engines.py index ba003aec..7c8b6015 100644 --- a/taskflow/tests/unit/test_engines.py +++ b/taskflow/tests/unit/test_engines.py @@ -933,6 +933,87 @@ class EngineResetTests(utils.EngineTestBase): class EngineGraphConditionalFlowTest(utils.EngineTestBase): + def test_graph_flow_conditional_jumps_across_2(self): + histories = [] + + def should_go(history): + histories.append(history) + return False + + task1 = utils.ProgressingTask(name='task1') + task2 = utils.ProgressingTask(name='task2') + task3 = utils.ProgressingTask(name='task3') + task4 = utils.ProgressingTask(name='task4') + + subflow = lf.Flow("more-work") + subsub_flow = lf.Flow("more-more-work") + subsub_flow.add(task3, task4) + subflow.add(subsub_flow) + + flow = gf.Flow("main-work") + flow.add(task1, task2) + flow.link(task1, task2) + flow.add(subflow) + flow.link(task2, subflow, decider=should_go) + + engine = self._make_engine(flow) + with utils.CaptureListener(engine, capture_flow=False) as capturer: + engine.run() + + expected = [ + 'task1.t RUNNING', + 'task1.t SUCCESS(5)', + + 'task2.t RUNNING', + 'task2.t SUCCESS(5)', + + 'task3.t IGNORE', + 'task4.t IGNORE', + ] + self.assertEqual(expected, capturer.values) + self.assertEqual(1, len(histories)) + self.assertIn('task2', histories[0]) + + def test_graph_flow_conditional_jumps_across(self): + histories = [] + + def should_go(history): + histories.append(history) + return False + + task1 = utils.ProgressingTask(name='task1') + task2 = utils.ProgressingTask(name='task2') + task3 = utils.ProgressingTask(name='task3') + task4 = utils.ProgressingTask(name='task4') + + subflow = 
lf.Flow("more-work") + subflow.add(task3, task4) + flow = gf.Flow("main-work") + flow.add(task1, task2) + flow.link(task1, task2) + flow.add(subflow) + flow.link(task2, subflow, decider=should_go) + flow.link(task1, subflow, decider=should_go) + + engine = self._make_engine(flow) + with utils.CaptureListener(engine, capture_flow=False) as capturer: + engine.run() + + expected = [ + 'task1.t RUNNING', + 'task1.t SUCCESS(5)', + + 'task2.t RUNNING', + 'task2.t SUCCESS(5)', + + 'task3.t IGNORE', + 'task4.t IGNORE', + ] + self.assertEqual(expected, capturer.values) + self.assertEqual(1, len(histories)) + self.assertIn('task1', histories[0]) + self.assertIn('task2', histories[0]) + def test_graph_flow_conditional(self): flow = gf.Flow('root') From 9e96331a9d1a0479a4631836badd6f4b0ce6c13b Mon Sep 17 00:00:00 2001 From: Davanum Srinivas Date: Sat, 14 Nov 2015 23:06:56 -0500 Subject: [PATCH 47/54] Remove python 2.6 and cleanup tox.ini * Remove support for python 2.6 * Remove skipsdist : Needs to be set only if sdist is expensive * Remove usedevelop : only needed when skipsdist is set to True * Remove install_command : We can just use the default, we don't need to override * Remove setenv : We can just use the default as we don't need any extra environment variables * Remove requirements.txt from deps, as this is already added automatically Change-Id: I3564e6d2dfaf64b573382d776e6a79a442750139 --- tox.ini | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) diff --git a/tox.ini b/tox.ini index 7f1dd065..d0bddf2b 100644 --- a/tox.ini +++ b/tox.ini @@ -1,21 +1,15 @@ [tox] minversion = 1.6 -skipsdist = True envlist = cover, docs, pep8, - py26, py27, py34, pylint, update-states [testenv] -usedevelop = True -install_command = pip install {opts} {packages} -setenv = VIRTUAL_ENV={envdir} -deps = -r{toxinidir}/requirements.txt - -r{toxinidir}/test-requirements.txt +deps = -r{toxinidir}/test-requirements.txt commands = python setup.py testr --slowest --testr-args='{posargs}' [testenv:docs] From 6918b8fab0d303bb7596df657f24897bbc67a1fd Mon Sep 17 00:00:00 2001 From: Min Pae Date: Wed, 4 Nov 2015 09:27:25 -0800 Subject: [PATCH 48/54] Adding notification points for job completion Adding notifications for job completion, both consumed and abandoned, so that a listener can take some action based on job completion. Change-Id: I826285d4bfccd2406df7b59e53a9b724702ed094 --- taskflow/conductors/backends/impl_executor.py | 24 ++++++++++++++ taskflow/tests/unit/test_conductors.py | 32 +++++++++++++++++++ 2 files changed, 56 insertions(+) diff --git a/taskflow/conductors/backends/impl_executor.py b/taskflow/conductors/backends/impl_executor.py index c47488da..d61f3e0d 100644 --- a/taskflow/conductors/backends/impl_executor.py +++ b/taskflow/conductors/backends/impl_executor.py @@ -106,6 +106,20 @@ class ExecutorConductor(base.Conductor): activity defined above, the actual event name that can be registered to subscribe to will be ``${event}_start`` and ``${event}_end`` where the ``${event}`` in this pseudo-variable will be one of these events. + + .. deprecated:: 1.23.0 + Use :py:attr:`~EVENTS_EMITTED` + """ + + EVENTS_EMITTED = tuple([ + 'compilation_start', 'compilation_end', + 'preparation_start', 'preparation_end', + 'validation_start', 'validation_end', + 'running_start', 'running_end', + 'job_consumed', 'job_abandoned', + ]) + """Events will be emitted for each of the events above. The event is + emitted to listeners registered with the conductor. 
""" def __init__(self, name, jobboard, @@ -217,8 +231,18 @@ class ExecutorConductor(base.Conductor): try: if consume: self._jobboard.consume(job, self._name) + self._notifier.notify("job_consumed", { + 'job': job, + 'conductor': self, + 'persistence': self._persistence, + }) else: self._jobboard.abandon(job, self._name) + self._notifier.notify("job_abandoned", { + 'job': job, + 'conductor': self, + 'persistence': self._persistence, + }) except (excp.JobFailure, excp.NotFound): if consume: self._log.warn("Failed job consumption: %s", job, diff --git a/taskflow/tests/unit/test_conductors.py b/taskflow/tests/unit/test_conductors.py index d7f84d50..9fa46f98 100644 --- a/taskflow/tests/unit/test_conductors.py +++ b/taskflow/tests/unit/test_conductors.py @@ -113,11 +113,25 @@ class ManyConductorTest(testscenarios.TestWithScenarios, components = self.make_components() components.conductor.connect() consumed_event = threading.Event() + job_consumed_event = threading.Event() + job_abandoned_event = threading.Event() def on_consume(state, details): consumed_event.set() + def on_job_consumed(event, details): + if event == 'job_consumed': + job_consumed_event.set() + + def on_job_abandoned(event, details): + if event == 'job_abandoned': + job_abandoned_event.set() + components.board.notifier.register(base.REMOVAL, on_consume) + components.conductor.notifier.register("job_consumed", + on_job_consumed) + components.conductor.notifier.register("job_abandoned", + on_job_abandoned) with close_many(components.conductor, components.client): t = threading_utils.daemon_thread(components.conductor.run) t.start() @@ -128,6 +142,8 @@ class ManyConductorTest(testscenarios.TestWithScenarios, components.board.post('poke', lb, details={'flow_uuid': fd.uuid}) self.assertTrue(consumed_event.wait(test_utils.WAIT_TIMEOUT)) + self.assertTrue(job_consumed_event.wait(test_utils.WAIT_TIMEOUT)) + self.assertFalse(job_abandoned_event.wait(1)) components.conductor.stop() self.assertTrue(components.conductor.wait(test_utils.WAIT_TIMEOUT)) self.assertFalse(components.conductor.dispatching) @@ -171,11 +187,25 @@ class ManyConductorTest(testscenarios.TestWithScenarios, components = self.make_components() components.conductor.connect() consumed_event = threading.Event() + job_consumed_event = threading.Event() + job_abandoned_event = threading.Event() def on_consume(state, details): consumed_event.set() + def on_job_consumed(event, details): + if event == 'job_consumed': + job_consumed_event.set() + + def on_job_abandoned(event, details): + if event == 'job_abandoned': + job_abandoned_event.set() + components.board.notifier.register(base.REMOVAL, on_consume) + components.conductor.notifier.register("job_consumed", + on_job_consumed) + components.conductor.notifier.register("job_abandoned", + on_job_abandoned) with close_many(components.conductor, components.client): t = threading_utils.daemon_thread(components.conductor.run) t.start() @@ -186,6 +216,8 @@ class ManyConductorTest(testscenarios.TestWithScenarios, components.board.post('poke', lb, details={'flow_uuid': fd.uuid}) self.assertTrue(consumed_event.wait(test_utils.WAIT_TIMEOUT)) + self.assertTrue(job_consumed_event.wait(test_utils.WAIT_TIMEOUT)) + self.assertFalse(job_abandoned_event.wait(1)) components.conductor.stop() self.assertTrue(components.conductor.wait(test_utils.WAIT_TIMEOUT)) self.assertFalse(components.conductor.dispatching) From cd922d4e466f27a470208ceba4beb8c1f4da2db3 Mon Sep 17 00:00:00 2001 From: Greg Hill Date: Tue, 10 Nov 2015 19:42:37 -0600 Subject: [PATCH 
49/54] Add optional 'defer_reverts' behavior This makes it possible to REVERT a subflow and have it also revert the parent flow if the parent flow doesn't have its own retry strategy. We will probably want to make this new behavior the default or only behavior in a future release. Change-Id: Iea5ac366380ba7396a87d0185703549fb0c2f825 --- taskflow/engines/action_engine/completer.py | 15 ++++ taskflow/engines/action_engine/engine.py | 4 +- taskflow/engines/action_engine/runtime.py | 7 +- taskflow/engines/base.py | 6 +- taskflow/retry.py | 20 ++++- taskflow/tests/unit/test_retries.py | 98 ++++++++++++++++++--- taskflow/tests/unit/test_utils.py | 24 +++++ taskflow/utils/misc.py | 8 ++ 8 files changed, 160 insertions(+), 22 deletions(-) diff --git a/taskflow/engines/action_engine/completer.py b/taskflow/engines/action_engine/completer.py index 1dcb326b..ee988c4a 100644 --- a/taskflow/engines/action_engine/completer.py +++ b/taskflow/engines/action_engine/completer.py @@ -18,6 +18,7 @@ import abc import weakref from oslo_utils import reflection +from oslo_utils import strutils import six from taskflow.engines.action_engine import compiler as co @@ -178,6 +179,20 @@ class Completer(object): elif strategy == retry_atom.REVERT: # Ask parent retry and figure out what to do... parent_resolver = self._determine_resolution(retry, failure) + + # In the future, this will be the only behavior. REVERT + # should defer to the parent retry if it exists, or use the + # default REVERT_ALL if it doesn't. This lets you safely nest + # flows with retries inside flows without retries and it still + # behave as a user would expect, i.e. if the retry gets + # exhausted it reverts the outer flow unless the outer flow + # has a separate retry behavior. + defer_reverts = strutils.bool_from_string( + self._runtime.options.get('defer_reverts', False) + ) + if defer_reverts: + return parent_resolver + # Ok if the parent resolver says something not REVERT, and # it isn't just using the undefined resolver, assume the # parent knows best. 
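To see the new option end to end, here is a small hedged sketch (the task
classes and flow names are made up for illustration) of loading an engine
with ``defer_reverts`` turned on; once the inner retry is exhausted, the
REVERT decision is deferred upward and the outer flow reverts too::

    import taskflow.engines
    from taskflow.patterns import linear_flow as lf
    from taskflow import retry
    from taskflow import task

    class Okay(task.Task):
        def execute(self):
            return 'ok'

    class Boom(task.Task):
        def execute(self):
            raise RuntimeError('Woot!')

    flow = lf.Flow('outer').add(
        Okay('task1'),
        lf.Flow('inner', retry.Times(1, 'r1')).add(Boom('task2')),
    )

    # With defer_reverts=True, exhausting 'r1' defers to the (non-existent)
    # parent retry, so 'task1' in the outer flow is reverted as well
    # (previously only the inner subflow would have been reverted).
    engine = taskflow.engines.load(flow, engine='serial',
                                   defer_reverts=True)
    try:
        engine.run()
    except RuntimeError:
        pass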
diff --git a/taskflow/engines/action_engine/engine.py b/taskflow/engines/action_engine/engine.py index 74e150c1..845b702a 100644 --- a/taskflow/engines/action_engine/engine.py +++ b/taskflow/engines/action_engine/engine.py @@ -319,11 +319,13 @@ class ActionEngine(base.Engine): if self._compiled: return self._compilation = self._compiler.compile() + self._runtime = runtime.Runtime(self._compilation, self.storage, self.atom_notifier, self._task_executor, - self._retry_executor) + self._retry_executor, + options=self._options) self._runtime.compile() self._compiled = True diff --git a/taskflow/engines/action_engine/runtime.py b/taskflow/engines/action_engine/runtime.py index dc9aa276..719f7563 100644 --- a/taskflow/engines/action_engine/runtime.py +++ b/taskflow/engines/action_engine/runtime.py @@ -42,13 +42,14 @@ class Runtime(object): """ def __init__(self, compilation, storage, atom_notifier, - task_executor, retry_executor): + task_executor, retry_executor, options=None): self._atom_notifier = atom_notifier self._task_executor = task_executor self._retry_executor = retry_executor self._storage = storage self._compilation = compilation self._atom_cache = {} + self._options = misc.ensure_dict(options) @staticmethod def _walk_edge_deciders(graph, atom): @@ -130,6 +131,10 @@ class Runtime(object): def storage(self): return self._storage + @property + def options(self): + return self._options + @misc.cachedproperty def analyzer(self): return an.Analyzer(self) diff --git a/taskflow/engines/base.py b/taskflow/engines/base.py index a500dd47..5330fc18 100644 --- a/taskflow/engines/base.py +++ b/taskflow/engines/base.py @@ -21,6 +21,7 @@ from debtcollector import moves import six from taskflow.types import notifier +from taskflow.utils import misc @six.add_metaclass(abc.ABCMeta) @@ -41,10 +42,7 @@ class Engine(object): self._flow = flow self._flow_detail = flow_detail self._backend = backend - if not options: - self._options = {} - else: - self._options = dict(options) + self._options = misc.ensure_dict(options) self._notifier = notifier.Notifier() self._atom_notifier = notifier.Notifier() diff --git a/taskflow/retry.py b/taskflow/retry.py index 93991326..aa9208e3 100644 --- a/taskflow/retry.py +++ b/taskflow/retry.py @@ -34,13 +34,25 @@ class Decision(misc.StrEnum): This strategy first consults the parent atom before reverting the associated subflow to determine if the parent retry object provides a - different reconciliation strategy (if no parent retry object exists - then reverting will proceed, if one does exist the parent retry may - override this reconciliation strategy with its own). + different reconciliation strategy. This allows for safe nesting of + flows with different retry strategies. + + If the parent flow has no retry strategy, the default behavior is + to just revert the atoms in the associated subflow. This is + generally not the desired behavior, but is left as the default in + order to keep backwards-compatibility. The ``defer_reverts`` + engine option will let you change this behavior. If that is set + to True, a REVERT will always defer to the parent, meaning that + if the parent has no retry strategy, it will be reverted as well. """ - #: Completely reverts the whole flow. REVERT_ALL = "REVERT_ALL" + """Reverts the entire flow, regardless of parent strategy. + + This strategy will revert every atom that has executed thus + far, regardless of whether the parent flow has a separate + retry strategy associated with it. + """ #: Retries the surrounding/associated subflow again. 
RETRY = "RETRY" diff --git a/taskflow/tests/unit/test_retries.py b/taskflow/tests/unit/test_retries.py index 6dc01851..5f6a22e1 100644 --- a/taskflow/tests/unit/test_retries.py +++ b/taskflow/tests/unit/test_retries.py @@ -202,6 +202,69 @@ class RetryTest(utils.EngineTestBase): 'flow-1.f SUCCESS'] self.assertEqual(expected, capturer.values) + def test_new_revert_vs_old(self): + flow = lf.Flow('flow-1').add( + utils.TaskNoRequiresNoReturns("task1"), + lf.Flow('flow-2', retry.Times(1, 'r1', provides='x')).add( + utils.TaskNoRequiresNoReturns("task2"), + utils.ConditionalTask("task3") + ), + utils.TaskNoRequiresNoReturns("task4") + ) + engine = self._make_engine(flow) + engine.storage.inject({'y': 2}) + with utils.CaptureListener(engine) as capturer: + try: + engine.run() + except Exception: + pass + + expected = ['flow-1.f RUNNING', + 'task1.t RUNNING', + 'task1.t SUCCESS(None)', + 'r1.r RUNNING', + 'r1.r SUCCESS(1)', + 'task2.t RUNNING', + 'task2.t SUCCESS(None)', + 'task3.t RUNNING', + 'task3.t FAILURE(Failure: RuntimeError: Woot!)', + 'task3.t REVERTING', + 'task3.t REVERTED(None)', + 'task2.t REVERTING', + 'task2.t REVERTED(None)', + 'r1.r REVERTING', + 'r1.r REVERTED(None)', + 'flow-1.f REVERTED'] + self.assertEqual(expected, capturer.values) + + engine = self._make_engine(flow, defer_reverts=True) + engine.storage.inject({'y': 2}) + with utils.CaptureListener(engine) as capturer: + try: + engine.run() + except Exception: + pass + + expected = ['flow-1.f RUNNING', + 'task1.t RUNNING', + 'task1.t SUCCESS(None)', + 'r1.r RUNNING', + 'r1.r SUCCESS(1)', + 'task2.t RUNNING', + 'task2.t SUCCESS(None)', + 'task3.t RUNNING', + 'task3.t FAILURE(Failure: RuntimeError: Woot!)', + 'task3.t REVERTING', + 'task3.t REVERTED(None)', + 'task2.t REVERTING', + 'task2.t REVERTED(None)', + 'r1.r REVERTING', + 'r1.r REVERTED(None)', + 'task1.t REVERTING', + 'task1.t REVERTED(None)', + 'flow-1.f REVERTED'] + self.assertEqual(expected, capturer.values) + def test_states_retry_failure_parent_flow_fails(self): flow = lf.Flow('flow-1', retry.Times(3, 'r1', provides='x1')).add( utils.TaskNoRequiresNoReturns("task1"), @@ -1210,11 +1273,12 @@ class RetryParallelExecutionTest(utils.EngineTestBase): class SerialEngineTest(RetryTest, test.TestCase): - def _make_engine(self, flow, flow_detail=None): + def _make_engine(self, flow, defer_reverts=None, flow_detail=None): return taskflow.engines.load(flow, flow_detail=flow_detail, engine='serial', - backend=self.backend) + backend=self.backend, + defer_reverts=defer_reverts) class ParallelEngineWithThreadsTest(RetryTest, @@ -1222,36 +1286,46 @@ class ParallelEngineWithThreadsTest(RetryTest, test.TestCase): _EXECUTOR_WORKERS = 2 - def _make_engine(self, flow, flow_detail=None, executor=None): + def _make_engine(self, flow, defer_reverts=None, flow_detail=None, + executor=None): if executor is None: executor = 'threads' - return taskflow.engines.load(flow, flow_detail=flow_detail, + return taskflow.engines.load(flow, + flow_detail=flow_detail, engine='parallel', backend=self.backend, executor=executor, - max_workers=self._EXECUTOR_WORKERS) + max_workers=self._EXECUTOR_WORKERS, + defer_reverts=defer_reverts) @testtools.skipIf(not eu.EVENTLET_AVAILABLE, 'eventlet is not available') class ParallelEngineWithEventletTest(RetryTest, test.TestCase): - def _make_engine(self, flow, flow_detail=None, executor=None): + def _make_engine(self, flow, defer_reverts=None, flow_detail=None, + executor=None): if executor is None: executor = futurist.GreenThreadPoolExecutor() 
            self.addCleanup(executor.shutdown)
-        return taskflow.engines.load(flow, flow_detail=flow_detail,
-                                     backend=self.backend, engine='parallel',
-                                     executor=executor)
+        return taskflow.engines.load(flow,
+                                     flow_detail=flow_detail,
+                                     backend=self.backend,
+                                     engine='parallel',
+                                     executor=executor,
+                                     defer_reverts=defer_reverts)
 
 
 class ParallelEngineWithProcessTest(RetryTest, test.TestCase):
     _EXECUTOR_WORKERS = 2
 
-    def _make_engine(self, flow, flow_detail=None, executor=None):
+    def _make_engine(self, flow, defer_reverts=None, flow_detail=None,
+                     executor=None):
         if executor is None:
             executor = 'processes'
-        return taskflow.engines.load(flow, flow_detail=flow_detail,
+        return taskflow.engines.load(flow,
+                                     flow_detail=flow_detail,
                                      engine='parallel',
                                      backend=self.backend, executor=executor,
-                                     max_workers=self._EXECUTOR_WORKERS)
+                                     max_workers=self._EXECUTOR_WORKERS,
+                                     defer_reverts=defer_reverts)
diff --git a/taskflow/tests/unit/test_utils.py b/taskflow/tests/unit/test_utils.py
index 64bb2330..6ea9f4fb 100644
--- a/taskflow/tests/unit/test_utils.py
+++ b/taskflow/tests/unit/test_utils.py
@@ -340,3 +340,27 @@ class TestIterable(test.TestCase):
 
     def test_dict(self):
         self.assertTrue(misc.is_iterable(dict()))
+
+
+class TestEnsureDict(testscenarios.TestWithScenarios):
+    scenarios = [
+        ('none', {'original': None, 'expected': {}}),
+        ('empty_dict', {'original': {}, 'expected': {}}),
+        ('empty_list', {'original': [], 'expected': {}}),
+        ('dict', {'original': {'a': 1, 'b': 2},
+                  'expected': {'a': 1, 'b': 2}}),
+    ]
+
+    def test_expected(self):
+        self.assertEqual(self.expected, misc.ensure_dict(self.original))
+        self.assertFalse(self.expected is misc.ensure_dict(self.original))
+
+
+class TestEnsureDictRaises(testscenarios.TestWithScenarios):
+    scenarios = [
+        ('list', {'original': [1, 2], 'exception': TypeError}),
+        ('tuple', {'original': (1, 2), 'exception': TypeError}),
+        ('set', {'original': set([1, 2]), 'exception': TypeError}),
+    ]
+
+    def test_exceptions(self):
+        self.assertRaises(self.exception, misc.ensure_dict, self.original)
diff --git a/taskflow/utils/misc.py b/taskflow/utils/misc.py
index ca8faa5e..e837a426 100644
--- a/taskflow/utils/misc.py
+++ b/taskflow/utils/misc.py
@@ -595,3 +595,11 @@ def is_iterable(obj):
     """
     return (not isinstance(obj, six.string_types) and
             isinstance(obj, collections.Iterable))
+
+
+def ensure_dict(obj):
+    """Copies an existing dictionary, or defaults to an empty dict."""
+    if not obj:
+        return {}
+    # Default to a shallow copy to avoid most ownership issues.
+    return dict(obj)

From 484ded4baa58bc4e02862523b744ef7e122c5684 Mon Sep 17 00:00:00 2001
From: Joshua Harlow
Date: Tue, 4 Aug 2015 11:43:39 -0700
Subject: [PATCH 50/54] Enable conversion of the tree nodes into a digraph

Just as it is useful to translate an execution graph into a dot
diagram, the same is true for the engine scoping hierarchy that is
created. To make this easily possible, provide a tree method that
converts a node (and its children) into a directed graph, which can
then easily be exported as a dot diagram (and rendered as needed).
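As a quick preview of the API this patch adds (the behavior below is
taken from the new unit tests), converting a small tree and querying the
resulting graph might look like::

    from taskflow.types import tree

    root = tree.Node('animal', kingdom='animalia')
    root.add(tree.Node('mammal'))
    root.add(tree.Node('reptile'))

    g = root.to_digraph()
    g.successors('animal')    # -> ['mammal', 'reptile']
    g.predecessors('mammal')  # -> ['animal']
    g.node['animal']          # -> {'kingdom': 'animalia'} (metadata kept)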
Change-Id: I0addc2dee4cdce03ee5f33832a419303abc77db4
---
 taskflow/tests/unit/test_types.py | 21 +++++++++++++++++++++
 taskflow/types/tree.py            | 18 ++++++++++++++++++
 2 files changed, 39 insertions(+)

diff --git a/taskflow/tests/unit/test_types.py b/taskflow/tests/unit/test_types.py
index 9399c893..178b9af3 100644
--- a/taskflow/tests/unit/test_types.py
+++ b/taskflow/tests/unit/test_types.py
@@ -499,6 +499,27 @@ CEO
         self.assertEqual(['mammal', 'reptile', 'horse', 'primate',
                           'monkey', 'human'], things)
 
+    def test_to_digraph(self):
+        root = self._make_species()
+        g = root.to_digraph()
+        self.assertEqual(root.child_count(only_direct=False) + 1, len(g))
+        for node in root.dfs_iter(include_self=True):
+            self.assertIn(node.item, g)
+        self.assertEqual([], g.predecessors('animal'))
+        self.assertEqual(['animal'], g.predecessors('reptile'))
+        self.assertEqual(['primate'], g.predecessors('human'))
+        self.assertEqual(['mammal'], g.predecessors('primate'))
+        self.assertEqual(['animal'], g.predecessors('mammal'))
+        self.assertEqual(['mammal', 'reptile'], g.successors('animal'))
+
+    def test_to_digraph_retains_metadata(self):
+        root = tree.Node("chickens", alive=True)
+        dead_chicken = tree.Node("chicken.1", alive=False)
+        root.add(dead_chicken)
+        g = root.to_digraph()
+        self.assertEqual(g.node['chickens'], {'alive': True})
+        self.assertEqual(g.node['chicken.1'], {'alive': False})
+
 
 class OrderedSetTest(test.TestCase):
 
diff --git a/taskflow/types/tree.py b/taskflow/types/tree.py
index 56c96bbb..d6a0df2c 100644
--- a/taskflow/types/tree.py
+++ b/taskflow/types/tree.py
@@ -22,6 +22,7 @@ import os
 
 import six
 
+from taskflow.types import graph
 from taskflow.utils import iter_utils
 from taskflow.utils import misc
 
@@ -388,3 +389,20 @@ class Node(object):
         return _BFSIter(self,
                         include_self=include_self,
                         right_to_left=right_to_left)
+
+    def to_digraph(self):
+        """Converts this node + its children into an ordered directed graph.
+
+        The graph returned will have the same structure as this node and
+        its children (and tree node metadata will be translated into graph
+        node metadata).
+
+        :returns: a directed graph
+        :rtype: :py:class:`taskflow.types.graph.OrderedDiGraph`
+        """
+        g = graph.OrderedDiGraph()
+        for node in self.bfs_iter(include_self=True, right_to_left=True):
+            g.add_node(node.item, attr_dict=node.metadata)
+            if node is not self:
+                g.add_edge(node.parent.item, node.item)
+        return g

From 5d720219500bce112dd0accc92e75efdb84ef9b4 Mon Sep 17 00:00:00 2001
From: OpenStack Proposal Bot
Date: Wed, 18 Nov 2015 23:08:48 +0000
Subject: [PATCH 51/54] Updated from global requirements

Change-Id: Iec6ff125c540dd557832e3b8bddc4e9480e8a641
---
 requirements.txt | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/requirements.txt b/requirements.txt
index 3605ea06..7a26c1c6 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -41,7 +41,7 @@ jsonschema!=2.5.0,<3.0.0,>=2.0.0
 automaton>=0.5.0 # Apache-2.0
 
 # For common utilities
-oslo.utils!=2.6.0,>=2.4.0 # Apache-2.0
+oslo.utils>=2.8.0 # Apache-2.0
 oslo.serialization>=1.10.0 # Apache-2.0
 
 # For lru caches and such

From 8cfebccfbc928cb6f7adbf5b9d8dfe75bf6c5824 Mon Sep 17 00:00:00 2001
From: OpenStack Proposal Bot
Date: Thu, 19 Nov 2015 15:53:38 +0000
Subject: [PATCH 52/54] Updated from global requirements

Change-Id: I0cdda30829cf5bef6529c81948511137ca98b01f
---
 test-requirements.txt | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/test-requirements.txt b/test-requirements.txt
index e9bca9bc..1a6e23a1 100644
--- a/test-requirements.txt
+++ b/test-requirements.txt
@@ -32,4 +32,4 @@ eventlet>=0.17.4
 
 # Docs build jobs need these packages.
 sphinx!=1.2.0,!=1.3b1,<1.3,>=1.1.2
-oslosphinx>=2.5.0 # Apache-2.0
+oslosphinx!=3.4.0,>=2.5.0 # Apache-2.0

From 010b3fda2125dd9fe10e8ee8cfd5aba35575c43a Mon Sep 17 00:00:00 2001
From: Joshua Harlow
Date: Sun, 2 Aug 2015 20:48:12 -0700
Subject: [PATCH 53/54] Allow provided flow to be empty

If someone really wants to provide an empty flow to run, that is their
prerogative, so it doesn't seem valuable to blow up if they do this.
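With this change in place, running an empty flow simply progresses
straight to ``SUCCESS`` instead of raising ``exc.Empty``; roughly::

    import taskflow.engines
    from taskflow.patterns import linear_flow as lf

    engine = taskflow.engines.load(lf.Flow('flow-1'))
    engine.run()  # previously raised exc.Empty, now a no-op success
    # Or watch the engine state transitions directly:
    # list(engine.run_iter()) -> [RESUMING, SCHEDULING, WAITING,
    #                             ANALYZING, SUCCESS]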
Change-Id: I0ad89b0ade85a64f6ec107e2686454ef6dc97353 --- taskflow/engines/action_engine/compiler.py | 6 ----- .../tests/unit/action_engine/test_compile.py | 2 +- taskflow/tests/unit/test_engines.py | 23 ++++++++++++------- 3 files changed, 16 insertions(+), 15 deletions(-) diff --git a/taskflow/engines/action_engine/compiler.py b/taskflow/engines/action_engine/compiler.py index 0d3e2883..b50fcca7 100644 --- a/taskflow/engines/action_engine/compiler.py +++ b/taskflow/engines/action_engine/compiler.py @@ -329,12 +329,6 @@ class PatternCompiler(object): if dup_names: raise exc.Duplicate( "Atoms with duplicate names found: %s" % (sorted(dup_names))) - atoms = iter_utils.count( - node for node, node_attrs in graph.nodes_iter(data=True) - if node_attrs['kind'] in ATOMS) - if atoms == 0: - raise exc.Empty("Root container '%s' (%s) is empty" - % (self._root, type(self._root))) self._history.clear() @fasteners.locked diff --git a/taskflow/tests/unit/action_engine/test_compile.py b/taskflow/tests/unit/action_engine/test_compile.py index fcebe891..e8d01266 100644 --- a/taskflow/tests/unit/action_engine/test_compile.py +++ b/taskflow/tests/unit/action_engine/test_compile.py @@ -43,7 +43,7 @@ class PatternCompileTest(test.TestCase): def test_empty(self): flo = lf.Flow("test") - self.assertRaises(exc.Empty, compiler.PatternCompiler(flo).compile) + compiler.PatternCompiler(flo).compile() def test_linear(self): a, b, c, d = test_utils.make_many(4) diff --git a/taskflow/tests/unit/test_engines.py b/taskflow/tests/unit/test_engines.py index 5fb0f28c..288afb18 100644 --- a/taskflow/tests/unit/test_engines.py +++ b/taskflow/tests/unit/test_engines.py @@ -42,6 +42,13 @@ from taskflow.utils import persistence_utils as p_utils from taskflow.utils import threading_utils as tu +# Expected engine transitions when empty workflows are ran... 
+_EMPTY_TRANSITIONS = [ + states.RESUMING, states.SCHEDULING, states.WAITING, + states.ANALYZING, states.SUCCESS, +] + + class EngineTaskTest(object): def test_run_task_as_flow(self): @@ -255,10 +262,10 @@ class EngineMultipleResultsTest(utils.EngineTestBase): class EngineLinearFlowTest(utils.EngineTestBase): - def test_run_empty_flow(self): + def test_run_empty_linear_flow(self): flow = lf.Flow('flow-1') engine = self._make_engine(flow) - self.assertRaises(exc.Empty, engine.run) + self.assertEqual(_EMPTY_TRANSITIONS, list(engine.run_iter())) def test_overlap_parent_sibling_expected_result(self): flow = lf.Flow('flow-1') @@ -456,10 +463,10 @@ class EngineLinearFlowTest(utils.EngineTestBase): class EngineParallelFlowTest(utils.EngineTestBase): - def test_run_empty_flow(self): + def test_run_empty_unordered_flow(self): flow = uf.Flow('p-1') engine = self._make_engine(flow) - self.assertRaises(exc.Empty, engine.run) + self.assertEqual(_EMPTY_TRANSITIONS, list(engine.run_iter())) def test_parallel_flow_with_priority(self): flow = uf.Flow('p-1') @@ -664,16 +671,16 @@ class EngineLinearAndUnorderedExceptionsTest(utils.EngineTestBase): class EngineGraphFlowTest(utils.EngineTestBase): - def test_run_empty_flow(self): + def test_run_empty_graph_flow(self): flow = gf.Flow('g-1') engine = self._make_engine(flow) - self.assertRaises(exc.Empty, engine.run) + self.assertEqual(_EMPTY_TRANSITIONS, list(engine.run_iter())) - def test_run_nested_empty_flows(self): + def test_run_empty_nested_graph_flows(self): flow = gf.Flow('g-1').add(lf.Flow('l-1'), gf.Flow('g-2')) engine = self._make_engine(flow) - self.assertRaises(exc.Empty, engine.run) + self.assertEqual(_EMPTY_TRANSITIONS, list(engine.run_iter())) def test_graph_flow_one_task(self): flow = gf.Flow('g-1').add( From 0b034d611f56d8d6d855cf147caeb3d0f8d6068f Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Wed, 7 Oct 2015 11:56:26 -0700 Subject: [PATCH 54/54] Move validation of compiled unit out of compiler Instead of having the compiler do any validation on the graph it has created instead have the compiler just compile and have the engine that uses that compiled result do any post compilation validation instead. This makes it more clear that the compiler just compiles a flow (and tasks and nested flows) into a graph, and that is all that it does. 
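After this refactor, the duplicate-name check fires when the engine
compiles rather than inside the compiler itself; a sketch of the new
behavior (the dummy task class is illustrative, mirroring the updated
tests)::

    from taskflow import engines
    from taskflow import exceptions as exc
    from taskflow.patterns import graph_flow as gf
    from taskflow import task

    class DummyTask(task.Task):
        def execute(self):
            pass

    flo = gf.Flow('test').add(DummyTask(name='a'), DummyTask(name='a'))
    engine = engines.load(flo)
    try:
        engine.compile()
    except exc.Duplicate:
        # "Atoms with duplicate names found: ['a']"
        pass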
Change-Id: I96a35d732dc2be9fc8bc8dc6466256a19ac2df6d --- taskflow/engines/action_engine/compiler.py | 31 ++++++++++--------- taskflow/engines/action_engine/engine.py | 21 +++++++++++-- .../tests/unit/action_engine/test_compile.py | 7 +++-- taskflow/utils/misc.py | 13 -------- 4 files changed, 40 insertions(+), 32 deletions(-) diff --git a/taskflow/engines/action_engine/compiler.py b/taskflow/engines/action_engine/compiler.py index b50fcca7..e27b1f8f 100644 --- a/taskflow/engines/action_engine/compiler.py +++ b/taskflow/engines/action_engine/compiler.py @@ -17,16 +17,15 @@ import threading import fasteners +from oslo_utils import excutils import six -from taskflow import exceptions as exc from taskflow import flow from taskflow import logging from taskflow import task from taskflow.types import graph as gr from taskflow.types import tree as tr from taskflow.utils import iter_utils -from taskflow.utils import misc from taskflow.flow import (LINK_INVARIANT, LINK_RETRY) # noqa @@ -322,24 +321,26 @@ class PatternCompiler(object): def _post_compile(self, graph, node): """Called after the compilation of the root finishes successfully.""" - dup_names = misc.get_duplicate_keys( - (node for node, node_attrs in graph.nodes_iter(data=True) - if node_attrs['kind'] in ATOMS), - key=lambda node: node.name) - if dup_names: - raise exc.Duplicate( - "Atoms with duplicate names found: %s" % (sorted(dup_names))) self._history.clear() + self._level = 0 @fasteners.locked def compile(self): """Compiles the contained item into a compiled equivalent.""" if self._compilation is None: self._pre_compile() - graph, node = self._compile(self._root, parent=None) - self._post_compile(graph, node) - if self._freeze: - graph.freeze() - node.freeze() - self._compilation = Compilation(graph, node) + try: + graph, node = self._compile(self._root, parent=None) + except Exception: + with excutils.save_and_reraise_exception(): + # Always clear the history, to avoid retaining junk + # in memory that isn't needed to be in memory if + # compilation fails... 
+ self._history.clear() + else: + self._post_compile(graph, node) + if self._freeze: + graph.freeze() + node.freeze() + self._compilation = Compilation(graph, node) return self._compilation diff --git a/taskflow/engines/action_engine/engine.py b/taskflow/engines/action_engine/engine.py index 845b702a..5d2bb085 100644 --- a/taskflow/engines/action_engine/engine.py +++ b/taskflow/engines/action_engine/engine.py @@ -222,6 +222,24 @@ class ActionEngine(base.Engine): six.itervalues(self.storage.get_revert_failures())) failure.Failure.reraise_if_any(it) + @staticmethod + def _check_compilation(compilation): + """Performs post compilation validation/checks.""" + seen = set() + dups = set() + execution_graph = compilation.execution_graph + for node, node_attrs in execution_graph.nodes_iter(data=True): + if node_attrs['kind'] in compiler.ATOMS: + atom_name = node.name + if atom_name in seen: + dups.add(atom_name) + else: + seen.add(atom_name) + if dups: + raise exc.Duplicate( + "Atoms with duplicate names found: %s" % (sorted(dups))) + return compilation + def _change_state(self, state): with self._state_lock: old_state = self.storage.get_flow_state() @@ -318,8 +336,7 @@ class ActionEngine(base.Engine): def compile(self): if self._compiled: return - self._compilation = self._compiler.compile() - + self._compilation = self._check_compilation(self._compiler.compile()) self._runtime = runtime.Runtime(self._compilation, self.storage, self.atom_notifier, diff --git a/taskflow/tests/unit/action_engine/test_compile.py b/taskflow/tests/unit/action_engine/test_compile.py index e8d01266..6ccf3588 100644 --- a/taskflow/tests/unit/action_engine/test_compile.py +++ b/taskflow/tests/unit/action_engine/test_compile.py @@ -14,6 +14,7 @@ # License for the specific language governing permissions and limitations # under the License. +from taskflow import engines from taskflow.engines.action_engine import compiler from taskflow import exceptions as exc from taskflow.patterns import graph_flow as gf @@ -399,17 +400,19 @@ class PatternCompileTest(test.TestCase): test_utils.DummyTask(name="a"), test_utils.DummyTask(name="a") ) + e = engines.load(flo) self.assertRaisesRegexp(exc.Duplicate, '^Atoms with duplicate names', - compiler.PatternCompiler(flo).compile) + e.compile) def test_checks_for_dups_globally(self): flo = gf.Flow("test").add( gf.Flow("int1").add(test_utils.DummyTask(name="a")), gf.Flow("int2").add(test_utils.DummyTask(name="a"))) + e = engines.load(flo) self.assertRaisesRegexp(exc.Duplicate, '^Atoms with duplicate names', - compiler.PatternCompiler(flo).compile) + e.compile) def test_retry_in_linear_flow(self): flo = lf.Flow("test", retry.AlwaysRevert("c")) diff --git a/taskflow/utils/misc.py b/taskflow/utils/misc.py index e837a426..3f3da487 100644 --- a/taskflow/utils/misc.py +++ b/taskflow/utils/misc.py @@ -35,7 +35,6 @@ from oslo_utils import importutils from oslo_utils import netutils from oslo_utils import reflection import six -from six.moves import map as compat_map from six.moves import range as compat_range from taskflow.types import failure @@ -453,18 +452,6 @@ def sequence_minus(seq1, seq2): return result -def get_duplicate_keys(iterable, key=None): - if key is not None: - iterable = compat_map(key, iterable) - keys = set() - duplicates = set() - for item in iterable: - if item in keys: - duplicates.add(item) - keys.add(item) - return duplicates - - class ExponentialBackoff(object): """An iterable object that will yield back an exponential delay sequence.
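A quick illustration of the small ``ensure_dict`` helper introduced
earlier in this series (behavior matching its new unit tests), which is
what normalizes the engine/runtime ``options``::

    from taskflow.utils import misc

    misc.ensure_dict(None)      # -> {} (falsey values normalize to empty)
    misc.ensure_dict({'a': 1})  # -> {'a': 1} (a shallow copy, not the
                                #    same object that was passed in)
    misc.ensure_dict([1, 2])    # raises TypeError (not dict-convertible)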