From 926994e2b8f9d083085b4a959fa5c640d23953b9 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Tue, 1 Apr 2014 13:57:04 -0700 Subject: [PATCH 001/188] Avoid holding the state lock while notifying To avoid dead lock where a notifier will callback into the engine to perform a further state transition make sure we activate the state change notifiation after locking and not during it. Fixes bug 1301091 Change-Id: Ic81e15150e44d36489757372db32adfb5440feb4 --- taskflow/engines/action_engine/engine.py | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/taskflow/engines/action_engine/engine.py b/taskflow/engines/action_engine/engine.py index cd7acf95..4f3d85c1 100644 --- a/taskflow/engines/action_engine/engine.py +++ b/taskflow/engines/action_engine/engine.py @@ -110,17 +110,18 @@ class ActionEngine(base.EngineBase): failures = self.storage.get_failures() misc.Failure.reraise_if_any(failures.values()) - @lock_utils.locked(lock='_state_lock') def _change_state(self, state): - old_state = self.storage.get_flow_state() - if not states.check_flow_transition(old_state, state): - return - self.storage.set_flow_state(state) + with self._state_lock: + old_state = self.storage.get_flow_state() + if not states.check_flow_transition(old_state, state): + return + self.storage.set_flow_state(state) try: flow_uuid = self._flow.uuid except AttributeError: - # NOTE(harlowja): if the flow was just a single task, then it will - # not itself have a uuid, but the constructed flow_detail will. + # NOTE(harlowja): if the flow was just a single task, then it + # will not itself have a uuid, but the constructed flow_detail + # will. 
if self._flow_detail is not None: flow_uuid = self._flow_detail.uuid else: From 11ae7f64fbb183ce32c9e3790dd39dc0a7c66a9a Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Thu, 3 Apr 2014 10:52:40 -0700 Subject: [PATCH 002/188] Exception in worker queue thread Fix how we should not try to use the partial function for kwargs in this case, but should just use the normal args instead to pass in the path to the callback function. Fixes bug 1302089 Change-Id: I98344de8d45683531e5b0bf7100e7d3c8877cbdb --- taskflow/jobs/backends/impl_zookeeper.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/taskflow/jobs/backends/impl_zookeeper.py b/taskflow/jobs/backends/impl_zookeeper.py index a8c4577d..38b5b990 100644 --- a/taskflow/jobs/backends/impl_zookeeper.py +++ b/taskflow/jobs/backends/impl_zookeeper.py @@ -262,9 +262,9 @@ class ZookeeperJobBoard(jobboard.JobBoard): # This method is called from a asynchronous handler so it's # better to exit from this quickly to allow other # asynchronous handlers to be executed. 
- func = functools.partial(self._process_child, path=path) + child_proc = functools.partial(self._process_child, path) result = self._client.get_async(path) - result.rawlink(func) + result.rawlink(child_proc) def _format_job(self, job): posting = { From 5fb1a166b6a39261bfe9119cad942e137217175f Mon Sep 17 00:00:00 2001 From: Doug Hellmann Date: Fri, 4 Apr 2014 06:43:25 -0700 Subject: [PATCH 003/188] import run_cross_tests.sh from incubator Change-Id: I0f35dcd5a3aece55a2924f18093efdaa62968640 --- openstack-common.conf | 2 + tools/run_cross_tests.sh | 91 ++++++++++++++++++++++++++++++++++++++++ 2 files changed, 93 insertions(+) create mode 100755 tools/run_cross_tests.sh diff --git a/openstack-common.conf b/openstack-common.conf index 932266ea..24d2fc0f 100644 --- a/openstack-common.conf +++ b/openstack-common.conf @@ -7,6 +7,8 @@ module=jsonutils module=timeutils module=uuidutils +script=tools/run_cross_tests.sh + # The base module to hold the copy of openstack.common base=taskflow diff --git a/tools/run_cross_tests.sh b/tools/run_cross_tests.sh new file mode 100755 index 00000000..5e7bc118 --- /dev/null +++ b/tools/run_cross_tests.sh @@ -0,0 +1,91 @@ +#!/bin/bash +# +# Run cross-project tests +# +# Usage: +# +# run_cross_tests.sh project_dir venv + +# Fail the build if any command fails +set -e + +project_dir="$1" +venv="$2" + +if [ -z "$project_dir" -o -z "$venv" ] +then + cat - < ./subunit_log.txt + fi + .tox/$venv/bin/python /usr/local/jenkins/slave_scripts/subunit2html.py ./subunit_log.txt testr_results.html + gzip -9 ./subunit_log.txt + gzip -9 ./testr_results.html + + export PYTHON=.tox/$venv/bin/python + set -e + rancount=$(.tox/$venv/bin/testr last | sed -ne 's/Ran \([0-9]\+\).*tests in.*/\1/p') + if [ "$rancount" -eq "0" ] ; then + echo + echo "Zero tests were run. At least one test should have been run." + echo "Failing this test as a result" + echo + exit 1 + fi +fi + +# If we make it this far, report status based on the tests that were +# run. 
+exit $result From 585908561d667c1856494d3d1340d7d81bc46827 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Fri, 4 Apr 2014 12:18:25 -0700 Subject: [PATCH 004/188] Update oslo-incubator to 46f2b697b6aacc67 Bring in the newer oslo-incubator changes into taskflow copy Change-Id: I23f0f7dbf1ccbf9f73d4d88d8b4d24cf60d90840 --- taskflow/openstack/common/__init__.py | 17 +++++ taskflow/openstack/common/excutils.py | 32 +++++++--- taskflow/openstack/common/gettextutils.py | 78 +++++++++++++---------- taskflow/openstack/common/importutils.py | 7 ++ taskflow/openstack/common/jsonutils.py | 10 +-- 5 files changed, 91 insertions(+), 53 deletions(-) diff --git a/taskflow/openstack/common/__init__.py b/taskflow/openstack/common/__init__.py index e69de29b..d1223eaf 100644 --- a/taskflow/openstack/common/__init__.py +++ b/taskflow/openstack/common/__init__.py @@ -0,0 +1,17 @@ +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import six + + +six.add_move(six.MovedModule('mox', 'mox', 'mox3.mox')) diff --git a/taskflow/openstack/common/excutils.py b/taskflow/openstack/common/excutils.py index 765a45c8..790fc0b1 100644 --- a/taskflow/openstack/common/excutils.py +++ b/taskflow/openstack/common/excutils.py @@ -24,7 +24,7 @@ import traceback import six -from taskflow.openstack.common.gettextutils import _ +from taskflow.openstack.common.gettextutils import _LE class save_and_reraise_exception(object): @@ -49,9 +49,22 @@ class save_and_reraise_exception(object): decide_if_need_reraise() if not should_be_reraised: ctxt.reraise = False + + If another exception occurs and reraise flag is False, + the saved exception will not be logged. + + If the caller wants to raise new exception during exception handling + he/she sets reraise to False initially with an ability to set it back to + True if needed:: + + except Exception: + with save_and_reraise_exception(reraise=False) as ctxt: + [if statements to determine whether to raise a new exception] + # Not raising a new exception, so reraise + ctxt.reraise = True """ - def __init__(self): - self.reraise = True + def __init__(self, reraise=True): + self.reraise = reraise def __enter__(self): self.type_, self.value, self.tb, = sys.exc_info() @@ -59,10 +72,11 @@ class save_and_reraise_exception(object): def __exit__(self, exc_type, exc_val, exc_tb): if exc_type is not None: - logging.error(_('Original exception being dropped: %s'), - traceback.format_exception(self.type_, - self.value, - self.tb)) + if self.reraise: + logging.error(_LE('Original exception being dropped: %s'), + traceback.format_exception(self.type_, + self.value, + self.tb)) return False if self.reraise: six.reraise(self.type_, self.value, self.tb) @@ -88,8 +102,8 @@ def forever_retry_uncaught_exceptions(infunc): if (cur_time - last_log_time > 60 or this_exc_message != last_exc_message): logging.exception( - _('Unexpected exception occurred %d time(s)... 
' - 'retrying.') % exc_count) + _LE('Unexpected exception occurred %d time(s)... ' + 'retrying.') % exc_count) last_log_time = cur_time last_exc_message = this_exc_message exc_count = 0 diff --git a/taskflow/openstack/common/gettextutils.py b/taskflow/openstack/common/gettextutils.py index f950234b..8aaa24ea 100644 --- a/taskflow/openstack/common/gettextutils.py +++ b/taskflow/openstack/common/gettextutils.py @@ -23,11 +23,11 @@ Usual usage in an openstack.common module: """ import copy +import functools import gettext import locale from logging import handlers import os -import re from babel import localedata import six @@ -35,6 +35,17 @@ import six _localedir = os.environ.get('taskflow'.upper() + '_LOCALEDIR') _t = gettext.translation('taskflow', localedir=_localedir, fallback=True) +# We use separate translation catalogs for each log level, so set up a +# mapping between the log level name and the translator. The domain +# for the log level is project_name + "-log-" + log_level so messages +# for each level end up in their own catalog. +_t_log_levels = dict( + (level, gettext.translation('taskflow' + '-log-' + level, + localedir=_localedir, + fallback=True)) + for level in ['info', 'warning', 'error', 'critical'] +) + _AVAILABLE_LANGUAGES = {} USE_LAZY = False @@ -60,6 +71,28 @@ def _(msg): return _t.ugettext(msg) +def _log_translation(msg, level): + """Build a single translation of a log message + """ + if USE_LAZY: + return Message(msg, domain='taskflow' + '-log-' + level) + else: + translator = _t_log_levels[level] + if six.PY3: + return translator.gettext(msg) + return translator.ugettext(msg) + +# Translators for log levels. +# +# The abbreviated names are meant to reflect the usual use of a short +# name like '_'. The "L" is for "log" and the other letter comes from +# the level. 
+_LI = functools.partial(_log_translation, level='info') +_LW = functools.partial(_log_translation, level='warning') +_LE = functools.partial(_log_translation, level='error') +_LC = functools.partial(_log_translation, level='critical') + + def install(domain, lazy=False): """Install a _() function using the given translation domain. @@ -214,47 +247,22 @@ class Message(six.text_type): if other is None: params = (other,) elif isinstance(other, dict): - params = self._trim_dictionary_parameters(other) + # Merge the dictionaries + # Copy each item in case one does not support deep copy. + params = {} + if isinstance(self.params, dict): + for key, val in self.params.items(): + params[key] = self._copy_param(val) + for key, val in other.items(): + params[key] = self._copy_param(val) else: params = self._copy_param(other) return params - def _trim_dictionary_parameters(self, dict_param): - """Return a dict that only has matching entries in the msgid.""" - # NOTE(luisg): Here we trim down the dictionary passed as parameters - # to avoid carrying a lot of unnecessary weight around in the message - # object, for example if someone passes in Message() % locals() but - # only some params are used, and additionally we prevent errors for - # non-deepcopyable objects by unicoding() them. - - # Look for %(param) keys in msgid; - # Skip %% and deal with the case where % is first character on the line - keys = re.findall('(?:[^%]|^)?%\((\w*)\)[a-z]', self.msgid) - - # If we don't find any %(param) keys but have a %s - if not keys and re.findall('(?:[^%]|^)%[a-z]', self.msgid): - # Apparently the full dictionary is the parameter - params = self._copy_param(dict_param) - else: - params = {} - # Save our existing parameters as defaults to protect - # ourselves from losing values if we are called through an - # (erroneous) chain that builds a valid Message with - # arguments, and then does something like "msg % kwds" - # where kwds is an empty dictionary. 
- src = {} - if isinstance(self.params, dict): - src.update(self.params) - src.update(dict_param) - for key in keys: - params[key] = self._copy_param(src[key]) - - return params - def _copy_param(self, param): try: return copy.deepcopy(param) - except TypeError: + except Exception: # Fallback to casting to unicode this will handle the # python code-like objects that can't be deep-copied return six.text_type(param) diff --git a/taskflow/openstack/common/importutils.py b/taskflow/openstack/common/importutils.py index 4fd9ae2b..8d412cd4 100644 --- a/taskflow/openstack/common/importutils.py +++ b/taskflow/openstack/common/importutils.py @@ -58,6 +58,13 @@ def import_module(import_str): return sys.modules[import_str] +def import_versioned_module(version, submodule=None): + module = 'taskflow.v%s' % version + if submodule: + module = '.'.join((module, submodule)) + return import_module(module) + + def try_import(import_str, default=None): """Try to import a module and if it fails return default.""" try: diff --git a/taskflow/openstack/common/jsonutils.py b/taskflow/openstack/common/jsonutils.py index 559d1caa..fec61bd0 100644 --- a/taskflow/openstack/common/jsonutils.py +++ b/taskflow/openstack/common/jsonutils.py @@ -36,17 +36,9 @@ import functools import inspect import itertools import json -try: - import xmlrpclib -except ImportError: - # NOTE(jaypipes): xmlrpclib was renamed to xmlrpc.client in Python3 - # however the function and object call signatures - # remained the same. This whole try/except block should - # be removed and replaced with a call to six.moves once - # six 1.4.2 is released. 
See http://bit.ly/1bqrVzu - import xmlrpc.client as xmlrpclib import six +import six.moves.xmlrpc_client as xmlrpclib from taskflow.openstack.common import gettextutils from taskflow.openstack.common import importutils From 6752c2e551cc99994abe503f6f0d05a66d2e45f5 Mon Sep 17 00:00:00 2001 From: OpenStack Jenkins Date: Mon, 7 Apr 2014 21:17:26 +0000 Subject: [PATCH 005/188] Updated from global requirements Change-Id: I41f2f679b40830427841f1c5c56b8d95ec49b786 --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 5cde5e15..8e86d9b1 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,5 +1,5 @@ # Packages needed for using this library. -pbr>=0.6,<1.0 +pbr>=0.6,!=0.7,<1.0 anyjson>=0.3.3 iso8601>=0.1.9 # Python 2->3 compatibility library. From f7662d9d2fb1ee112c0e1a1c4ffd5b8196b56717 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Fri, 4 Apr 2014 15:04:33 -0700 Subject: [PATCH 006/188] Allow for only iterating over the most 'fresh' jobs Instead of allowing the backend to determine how fresh are the jobs yielded back (in the zookeeper case the jobs are async populated so freshness depends on the freshness of the watches established) allow the iteration method to support a way to request (if backend supported) that the jobs iterated be as fresh as possible. 
Change-Id: I8a7c9d7d086ad5fa85dc842fb36e3356f781f057 --- taskflow/jobs/backends/impl_zookeeper.py | 34 +++++++++++++++++++----- taskflow/jobs/jobboard.py | 10 ++++++- taskflow/tests/unit/jobs/test_zk_job.py | 11 +++++++- 3 files changed, 46 insertions(+), 9 deletions(-) diff --git a/taskflow/jobs/backends/impl_zookeeper.py b/taskflow/jobs/backends/impl_zookeeper.py index 38b5b990..360714b3 100644 --- a/taskflow/jobs/backends/impl_zookeeper.py +++ b/taskflow/jobs/backends/impl_zookeeper.py @@ -187,7 +187,24 @@ class ZookeeperJobBoard(jobboard.JobBoard): count += 1 return count - def iterjobs(self, only_unclaimed=False): + def _force_refresh(self, delayed=False): + try: + children = self._client.get_children(self.path) + except self._client.handler.timeout_exception as e: + raise excp.JobFailure("Refreshing failure, connection timed out", + e) + except k_exceptions.SessionExpiredError as e: + raise excp.JobFailure("Refreshing failure, session expired", e) + except k_exceptions.NoNodeError: + pass + except k_exceptions.KazooException as e: + raise excp.JobFailure("Refreshing failure, internal error", e) + else: + self._on_job_posting(children, delayed=delayed) + + def iterjobs(self, only_unclaimed=False, ensure_fresh=False): + if ensure_fresh: + self._force_refresh() ok_states = ALL_JOB_STATES if only_unclaimed: ok_states = UNCLAIMED_JOB_STATES @@ -236,7 +253,7 @@ class ZookeeperJobBoard(jobboard.JobBoard): LOG.warn("Internal error fetching job data from path: %s", path, exc_info=True) - def _on_job_posting(self, children): + def _on_job_posting(self, children, delayed=True): LOG.debug("Got children %s under path %s", children, self.path) child_paths = [k_paths.join(self.path, c) for c in children] @@ -259,12 +276,15 @@ class ZookeeperJobBoard(jobboard.JobBoard): if path not in self._known_jobs: # Fire off the request to populate this job asynchronously. 
# - # This method is called from a asynchronous handler so it's - # better to exit from this quickly to allow other - # asynchronous handlers to be executed. + # This method is *usually* called from a asynchronous + # handler so it's better to exit from this quickly to + # allow other asynchronous handlers to be executed. + request = self._client.get_async(path) child_proc = functools.partial(self._process_child, path) - result = self._client.get_async(path) - result.rawlink(child_proc) + if delayed: + request.rawlink(child_proc) + else: + child_proc(request) def _format_job(self, job): posting = { diff --git a/taskflow/jobs/jobboard.py b/taskflow/jobs/jobboard.py index 50af583e..a2833550 100644 --- a/taskflow/jobs/jobboard.py +++ b/taskflow/jobs/jobboard.py @@ -32,9 +32,17 @@ class JobBoard(object): self._name = name @abc.abstractmethod - def iterjobs(self, only_unclaimed=False): + def iterjobs(self, only_unclaimed=False, ensure_fresh=False): """Yields back jobs that are currently on this jobboard (claimed or not claimed). + + :param only_unclaimed: boolean that indicates whether to only iteration + over unclaimed jobs. + :param ensure_fresh: boolean that requests to only iterate over the + most recent jobs available, where the definition of what is recent + is backend specific. It is allowable that a backend may ignore this + value if the backends internal semantics/capabilities can not + support this argument. 
""" @abc.abstractproperty diff --git a/taskflow/tests/unit/jobs/test_zk_job.py b/taskflow/tests/unit/jobs/test_zk_job.py index 37fa87d1..3074c4a6 100644 --- a/taskflow/tests/unit/jobs/test_zk_job.py +++ b/taskflow/tests/unit/jobs/test_zk_job.py @@ -66,6 +66,15 @@ class TestZookeeperJobs(test.TestCase): self.client.flush() self.assertTrue(self.board.connected) + def test_fresh_iter(self): + with connect_close(self.board): + book = p_utils.temporary_log_book() + self.board.post('test', book) + self.client.flush() + + jobs = list(self.board.iterjobs(ensure_fresh=True)) + self.assertEqual(1, len(jobs)) + def test_posting_received_raw(self): book = p_utils.temporary_log_book() @@ -77,7 +86,7 @@ class TestZookeeperJobs(test.TestCase): self.client.flush() self.assertEqual(self.board, posted_job.board) - self.assertTrue(1, self.board.job_count) + self.assertEqual(1, self.board.job_count) self.assertIn(posted_job.uuid, [j.uuid for j in self.board.iterjobs()]) From 59f77aaf773d63ba051046dd46d6c84e34409fed Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Tue, 8 Apr 2014 14:27:11 -0700 Subject: [PATCH 007/188] Fix not found being raised when iterating Instead of re-raising a not found exception just silence those exceptions when iterating (and do not yield that job back in the first place) and then remove the job from the jobboard internals to avoid further iteration. 
Fixes bug 1304562 Change-Id: Ibc6d3ea55dc19e70104bfc801283b8ad919496c4 --- taskflow/jobs/backends/impl_zookeeper.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/taskflow/jobs/backends/impl_zookeeper.py b/taskflow/jobs/backends/impl_zookeeper.py index 38b5b990..d635396d 100644 --- a/taskflow/jobs/backends/impl_zookeeper.py +++ b/taskflow/jobs/backends/impl_zookeeper.py @@ -192,8 +192,8 @@ class ZookeeperJobBoard(jobboard.JobBoard): if only_unclaimed: ok_states = UNCLAIMED_JOB_STATES with self._job_mutate: - known_jobs = list(six.itervalues(self._known_jobs)) - for (job, posting_state) in known_jobs: + known_jobs = list(six.iteritems(self._known_jobs)) + for (path, (job, posting_state)) in known_jobs: if posting_state != _READY: continue try: @@ -202,6 +202,10 @@ class ZookeeperJobBoard(jobboard.JobBoard): except excp.JobFailure as e: LOG.warn("Failed determining the state of job %s" " due to: %s", job.uuid, e) + except excp.NotFound: + # Someone destroyed it while we are iterating. + with self._job_mutate: + self._remove_job(path) def _remove_job(self, path): LOG.debug("Removing job that was at path: %s", path) From 2be76ee7d3e62558f04b1d6981c8cf6781ab3142 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Thu, 10 Apr 2014 18:37:32 -0700 Subject: [PATCH 008/188] Create a periodic worker helper class Create a simple class that can call a set of functions periodically and use this in the case where we run a notification thread instead of handling this internally to the worker executor directly. 
Change-Id: I386e027cf461480799614e20e920b46788f99cab --- taskflow/engines/worker_based/executor.py | 71 +++++++++++++++++------ 1 file changed, 53 insertions(+), 18 deletions(-) diff --git a/taskflow/engines/worker_based/executor.py b/taskflow/engines/worker_based/executor.py index 77c54737..75c28855 100644 --- a/taskflow/engines/worker_based/executor.py +++ b/taskflow/engines/worker_based/executor.py @@ -26,10 +26,46 @@ from taskflow.engines.worker_based import proxy from taskflow import exceptions as exc from taskflow.utils import async_utils from taskflow.utils import misc +from taskflow.utils import reflection LOG = logging.getLogger(__name__) +def _is_alive(thread): + if not thread: + return False + return thread.is_alive() + + +class PeriodicWorker(object): + """Calls a set of functions when activated periodically. + + NOTE(harlowja): the provided timeout object determines the periodicity. + """ + def __init__(self, timeout, functors): + self._timeout = timeout + self._functors = [] + for f in functors: + self._functors.append((f, reflection.get_callable_name(f))) + + def start(self): + while not self._timeout.is_stopped(): + for (f, f_name) in self._functors: + LOG.debug("Calling periodic function '%s'", f_name) + try: + f() + except Exception: + LOG.warn("Failed to call periodic function '%s'", f_name, + exc_info=True) + self._timeout.wait() + + def stop(self): + self._timeout.interrupt() + + def reset(self): + self._timeout.reset() + + class WorkerTaskExecutor(executor.TaskExecutorBase): """Executes tasks on remote workers.""" @@ -41,8 +77,9 @@ class WorkerTaskExecutor(executor.TaskExecutorBase): self._proxy = proxy.Proxy(uuid, exchange, self._on_message, self._on_wait, **kwargs) self._proxy_thread = None - self._notify_thread = None - self._notify_timeout = misc.Timeout(pr.NOTIFY_PERIOD) + self._periodic = PeriodicWorker(misc.Timeout(pr.NOTIFY_PERIOD), + [self._notify_topics]) + self._periodic_thread = None def _make_thread(self, target): thread = 
threading.Thread(target=target) @@ -134,7 +171,7 @@ class WorkerTaskExecutor(executor.TaskExecutorBase): def _submit_task(self, task, task_uuid, action, arguments, progress_callback, timeout=pr.REQUEST_TIMEOUT, **kwargs): - """Submit task request to workers.""" + """Submit task request to a worker.""" request = pr.Request(task, task_uuid, action, arguments, progress_callback, timeout, **kwargs) @@ -168,11 +205,8 @@ class WorkerTaskExecutor(executor.TaskExecutorBase): request.set_result(failure) def _notify_topics(self): - """Cyclically publish notify message to each topic.""" - LOG.debug("Notify thread started.") - while not self._notify_timeout.is_stopped(): - self._proxy.publish(pr.Notify(), self._topics, reply_to=self._uuid) - self._notify_timeout.wait() + """Cyclically called to publish notify message to each topic.""" + self._proxy.publish(pr.Notify(), self._topics, reply_to=self._uuid) def execute_task(self, task, task_uuid, arguments, progress_callback=None): @@ -191,23 +225,24 @@ class WorkerTaskExecutor(executor.TaskExecutorBase): def start(self): """Start proxy thread (and associated topic notification thread).""" - if self._proxy_thread is None: + if not _is_alive(self._proxy_thread): self._proxy_thread = self._make_thread(self._proxy.start) self._proxy_thread.start() self._proxy.wait() - self._notify_timeout.reset() - self._notify_thread = self._make_thread(self._notify_topics) - self._notify_thread.start() + if not _is_alive(self._periodic_thread): + self._periodic.reset() + self._periodic_thread = self._make_thread(self._periodic.start) + self._periodic_thread.start() def stop(self): """Stop proxy thread (and associated topic notification thread), so those threads will be gracefully terminated. 
""" + if self._periodic_thread is not None: + self._periodic.stop() + self._periodic_thread.join() + self._periodic_thread = None if self._proxy_thread is not None: - if self._proxy_thread.is_alive(): - self._notify_timeout.interrupt() - self._notify_thread.join() - self._proxy.stop() - self._proxy_thread.join() - self._notify_thread = None + self._proxy.stop() + self._proxy_thread.join() self._proxy_thread = None From 1930cbad7d4ef0d2f3c462c9a8ee346d75757a56 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Thu, 10 Apr 2014 19:08:07 -0700 Subject: [PATCH 009/188] Move the daemon thread helper function This function seems better suited in the threading_utils module. Change-Id: Iddd438b57973c7c6c26bd7b6239630656530bd1b --- taskflow/engines/worker_based/executor.py | 14 +++----------- taskflow/utils/threading_utils.py | 11 +++++++++++ 2 files changed, 14 insertions(+), 11 deletions(-) diff --git a/taskflow/engines/worker_based/executor.py b/taskflow/engines/worker_based/executor.py index 75c28855..98ff3a8d 100644 --- a/taskflow/engines/worker_based/executor.py +++ b/taskflow/engines/worker_based/executor.py @@ -15,7 +15,6 @@ # under the License. import logging -import threading from kombu import exceptions as kombu_exc @@ -27,6 +26,7 @@ from taskflow import exceptions as exc from taskflow.utils import async_utils from taskflow.utils import misc from taskflow.utils import reflection +from taskflow.utils import threading_utils as tu LOG = logging.getLogger(__name__) @@ -81,14 +81,6 @@ class WorkerTaskExecutor(executor.TaskExecutorBase): [self._notify_topics]) self._periodic_thread = None - def _make_thread(self, target): - thread = threading.Thread(target=target) - # NOTE(skudriashev): When the main thread is terminated unexpectedly - # and thread is still alive - it will prevent main thread from exiting - # unless the daemon property is set to True. 
- thread.daemon = True - return thread - def _on_message(self, data, message): """This method is called on incoming message.""" LOG.debug("Got message: %s", data) @@ -226,12 +218,12 @@ class WorkerTaskExecutor(executor.TaskExecutorBase): def start(self): """Start proxy thread (and associated topic notification thread).""" if not _is_alive(self._proxy_thread): - self._proxy_thread = self._make_thread(self._proxy.start) + self._proxy_thread = tu.daemon_thread(self._proxy.start) self._proxy_thread.start() self._proxy.wait() if not _is_alive(self._periodic_thread): self._periodic.reset() - self._periodic_thread = self._make_thread(self._periodic.start) + self._periodic_thread = tu.daemon_thread(self._periodic.start) self._periodic_thread.start() def stop(self): diff --git a/taskflow/utils/threading_utils.py b/taskflow/utils/threading_utils.py index 0e2d1d05..c669619a 100644 --- a/taskflow/utils/threading_utils.py +++ b/taskflow/utils/threading_utils.py @@ -15,6 +15,7 @@ # under the License. import multiprocessing +import threading import six @@ -34,3 +35,13 @@ def get_optimal_thread_count(): # just setup two threads since it's hard to know what else we # should do in this situation. return 2 + + +def daemon_thread(target, *args, **kwargs): + """Makes a daemon thread that calls the given target when started.""" + thread = threading.Thread(target=target, args=args, kwargs=kwargs) + # NOTE(skudriashev): When the main thread is terminated unexpectedly + # and thread is still alive - it will prevent main thread from exiting + # unless the daemon property is set to True. + thread.daemon = True + return thread From f2c82f09293bde7eb73cb782b3a33dabaa760e33 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Fri, 11 Apr 2014 12:35:01 -0700 Subject: [PATCH 010/188] Doc adjustments - Cleanup some grammar and adjust some wording usage. - Add more docs to utils about what should and should not be used. - Add more engine docs about how each one is used and tips and notes about each. 
- Line length adjustments (might as well keep it somewhat in the normal range of what openstack code expects) Change-Id: Ice6711f00e2b50e0bee777388c0555d79cc6e1b0 --- doc/source/atoms.rst | 10 +++-- doc/source/engines.rst | 66 +++++++++++++++++++++++-------- doc/source/inputs_and_outputs.rst | 40 +++++++++++++++---- doc/source/utils.rst | 27 +++++++++++++ 4 files changed, 114 insertions(+), 29 deletions(-) diff --git a/doc/source/atoms.rst b/doc/source/atoms.rst index a68aae42..22bc97c1 100644 --- a/doc/source/atoms.rst +++ b/doc/source/atoms.rst @@ -3,10 +3,10 @@ Atoms, Tasks and Retries ------------------------ An atom is the smallest unit in taskflow which acts as the base for other -classes. Atoms have a name and a version (if applicable). Atom is expected +classes. Atoms have a name and a version (if applicable). An atom is expected to name desired input values (requirements) and name outputs (provided -values), see :doc:`arguments_and_results` page for complete reference -about it. +values), see :doc:`arguments_and_results` page for a complete reference +about these inputs and outputs. .. automodule:: taskflow.atom @@ -22,7 +22,9 @@ Retry ===== A retry (derived from an atom) is a special unit that handles flow errors, -controlls flow execution and can retry it with another parameters if needed. +controls flow execution and can retry atoms with another parameters if needed. +It is useful to allow for alternate ways of retrying atoms when they fail so +the whole flow can proceed even when a group of atoms fail. .. automodule:: taskflow.retry diff --git a/doc/source/engines.rst b/doc/source/engines.rst index 62a625d1..5c3a3020 100644 --- a/doc/source/engines.rst +++ b/doc/source/engines.rst @@ -5,32 +5,33 @@ Engines Overview ======== -Engines are what **really** runs your tasks and flows. +Engines are what **really** runs your atoms. An *engine* takes a flow structure (described by :doc:`patterns`) and uses it to decide which :doc:`atom ` to run and when. 
-TaskFlow provides different implementation of engines. Some may be easier to -use (ie, require no additional infrastructure setup) and understand, others +TaskFlow provides different implementations of engines. Some may be easier to +use (ie, require no additional infrastructure setup) and understand; others might require more complicated setup but provide better scalability. The idea and *ideal* is that deployers or developers of a service that uses TaskFlow can select an engine that suites their setup best without modifying the code of said service. -Engines might have different capabilities and configuration, but all of them -**must** implement same interface and preserve semantics of patterns (e.g. +Engines usually have different capabilities and configuration, but all of them +**must** implement the same interface and preserve the semantics of patterns (e.g. parts of :py:class:`linear flow ` are run -one after another, in order, even if engine is *capable* run tasks in +one after another, in order, even if engine is *capable* of running tasks in parallel). Creating Engines ================ -All engines are mere classes that implement same interface, and of course it is -possible to import them and create their instances just like with any classes -in Python. But easier (and recommended) way for creating engine is using -engine helpers. All of them are imported into `taskflow.engines` module, so the -typical usage of them might look like:: +All engines are mere classes that implement the same interface, and of course +it is possible to import them and create instances just like with any classes +in Python. But the easier (and recommended) way for creating an engine is using +the engine helper functions. 
All of these functions are imported into the +`taskflow.engines` module namespace, so the typical usage of these functions +might look like:: from taskflow import engines @@ -46,7 +47,7 @@ Engine Configuration ==================== To select which engine to use and pass parameters to an engine you should use -``engine_conf`` parameter any helper factory function accepts. It may be: +the ``engine_conf`` parameter any helper factory function accepts. It may be: * a string, naming engine type; * a dictionary, holding engine type with key ``'engine'`` and possibly @@ -62,6 +63,12 @@ Single-Threaded Engine Runs all tasks on the single thread -- the same thread `engine.run()` is called on. This engine is used by default. +.. tip:: + + If eventlet is used then this engine will not block other threads + from running as eventlet automatically creates a co-routine system (using + greenthreads and monkey patching). See `eventlet `_ + and `greenlet `_ for more details. Parallel Engine --------------- @@ -75,10 +82,17 @@ Additional configuration parameters: * ``executor``: a class that provides ``concurrent.futures.Executor``-like interface; it will be used for scheduling tasks. You can use instances of ``concurrent.futures.ThreadPoolExecutor`` or - ``taskflow.utils.eventlet_utils.GreenExecutor``. Sharing executor between - engine instances provides better scalability. + ``taskflow.utils.eventlet_utils.GreenExecutor`` (which internally uses + `eventlet `_ and greenthread pools). + +.. tip:: + + Sharing executor between engine instances provides better + scalability by reducing thread creation and teardown as well as by reusing + existing pools (which is a good practice in general). .. note:: + Running tasks with ``concurrent.futures.ProcessPoolExecutor`` is not supported now. 
@@ -88,14 +102,32 @@ Worker-Based Engine **Engine type**: ``'worker-based'`` This is engine that schedules tasks to **workers** -- separate processes -dedicated for tasks execution, possibly running on other machines. +dedicated for certain tasks execution, possibly running on other machines, +connected via `amqp `_ (or other supported +`kombu `_ transports). For more information, +please see `wiki page`_ for more details on how the worker based engine +operates. -This engine is under active development and is not recommended for production -use yet. For more information, please see `wiki page`_ for more details. +.. note:: + + This engine is under active development and is experimental but it is + useable and does work but is missing some features (please check the + `blueprint page`_ for known issues and plans) that will make it more + production ready. .. _wiki page: https://wiki.openstack.org/wiki/TaskFlow/Worker-based_Engine +.. _blueprint page: https://blueprints.launchpad.net/taskflow Engine Interface ================ .. automodule:: taskflow.engines.base + +Hierarchy +========= + +.. inheritance-diagram:: + taskflow.engines.base + taskflow.engines.action_engine.engine + taskflow.engines.worker_based.engine + :parts: 1 diff --git a/doc/source/inputs_and_outputs.rst b/doc/source/inputs_and_outputs.rst index 9ded1ec4..2542fe71 100644 --- a/doc/source/inputs_and_outputs.rst +++ b/doc/source/inputs_and_outputs.rst @@ -12,11 +12,21 @@ use :doc:`persistence` directly. Flow Inputs and Outputs ----------------------- -Tasks accept inputs via task arguments and provide outputs via task results (see :doc:`arguments_and_results` for more details). This the standard and recommended way to pass data from one task to another. Of course not every task argument needs to be provided to some other task of a flow, and not every task result should be consumed by every task. 
+Tasks accept inputs via task arguments and provide outputs via task results +(see :doc:`arguments_and_results` for more details). This is the standard and +recommended way to pass data from one task to another. Of course not every task +argument needs to be provided to some other task of a flow, and not every task +result should be consumed by every task. -If some value is required by one or more tasks of a flow, but is not provided by any task, it is considered to be flow input, and **must** be put into the storage before the flow is run. A set of names required by a flow can be retrieved via that flow's ``requires`` property. These names can be used to determine what names may be applicable for placing in storage ahead of time and which names are not applicable. +If some value is required by one or more tasks of a flow, but is not provided +by any task, it is considered to be flow input, and **must** be put into the +storage before the flow is run. A set of names required by a flow can be +retrieved via that flow's ``requires`` property. These names can be used to +determine what names may be applicable for placing in storage ahead of time +and which names are not applicable. -All values provided by tasks of the flow are considered to be flow outputs; the set of names of such values is available via ``provides`` property of the flow. +All values provided by tasks of the flow are considered to be flow outputs; the +set of names of such values is available via ``provides`` property of the flow. .. testsetup:: @@ -52,12 +62,17 @@ As you can see, this flow does not require b, as it is provided by the fist task Engine and Storage ------------------ -The storage layer is how an engine persists flow and task details. For more in-depth design details see :doc:`persistence` and :doc:`storage`. +The storage layer is how an engine persists flow and task details. For more +in-depth design details see :doc:`persistence` and :doc:`storage`. 
Inputs ------ -As mentioned above, if some value is required by one or more tasks of a flow, but is not provided by any task, it is considered to be flow input, and **must** be put into the storage before the flow is run. On failure to do so :py:class:`~taskflow.exceptions.MissingDependencies` is raised by engine: +As mentioned above, if some value is required by one or more tasks of a flow, +but is not provided by any task, it is considered to be flow input, and +**must** be put into the storage before the flow is run. On failure to do +so :py:class:`~taskflow.exceptions.MissingDependencies` is raised by the engine +prior to running: .. doctest:: @@ -80,7 +95,9 @@ As mentioned above, if some value is required by one or more tasks of a flow, bu taskflow.exceptions.MissingDependencies: taskflow.patterns.linear_flow.Flow: cat-dog; 2 requires ['meow', 'woof'] but no other entity produces said requirements -The recommended way to provide flow inputs is to use ``store`` parameter of engine helpers (:py:func:`~taskflow.engines.helpers.run` or :py:func:`~taskflow.engines.helpers.load`): +The recommended way to provide flow inputs is to use the ``store`` parameter +of the engine helpers (:py:func:`~taskflow.engines.helpers.run` or +:py:func:`~taskflow.engines.helpers.load`): .. doctest:: @@ -102,7 +119,10 @@ The recommended way to provide flow inputs is to use ``store`` parameter of engi woof {'meow': 'meow', 'woof': 'woof', 'dog': 'dog'} -You can also directly interact with the engine storage layer to add additional values, also you can't use :py:func:`~taskflow.engines.helpers.run` in this case: +You can also directly interact with the engine storage layer to add +additional values, note that if this route is used you can't use +:py:func:`~taskflow.engines.helpers.run` in this case to run your engine (instead +your must activate the engines run method directly): .. 
doctest:: @@ -118,7 +138,11 @@ You can also directly interact with the engine storage layer to add additional v Outputs ------- -As you can see from examples above, run method returns all flow outputs in a ``dict``. This same data can be fetched via :py:meth:`~taskflow.storage.Storage.fetch_all` method of the storage. You can also get single results using :py:meth:`~taskflow.storage.Storage.fetch_all`. For example: +As you can see from examples above, the run method returns all flow outputs in +a ``dict``. This same data can be fetched via +:py:meth:`~taskflow.storage.Storage.fetch_all` method of the storage. You can +also get single results using :py:meth:`~taskflow.storage.Storage.fetch_all`. For +example: .. doctest:: diff --git a/doc/source/utils.rst b/doc/source/utils.rst index 4880922e..b0bd3815 100644 --- a/doc/source/utils.rst +++ b/doc/source/utils.rst @@ -2,4 +2,31 @@ Utils ----- +There are various helper utils that are part of taskflows internal usage (and +external/public usage of these helpers should be kept to a minimum as these +utility functions may be altered more often in the future). + +External usage +============== + +The following classes and modules are *recommended* for external usage: + .. autoclass:: taskflow.utils.misc.Failure + :members: + +.. autoclass:: taskflow.utils.eventlet_utils.GreenExecutor + :members: + +.. autofunction:: taskflow.utils.graph_utils.pformat + +.. autofunction:: taskflow.utils.graph_utils.export_graph_to_dot + +.. autofunction:: taskflow.utils.persistence_utils.temporary_log_book + +.. autofunction:: taskflow.utils.persistence_utils.temporary_flow_detail + +.. autofunction:: taskflow.utils.persistence_utils.pformat + +.. autofunction:: taskflow.utils.persistence_utils.pformat_flow_detail + +.. 
autofunction:: taskflow.utils.persistence_utils.pformat_atom_detail From b410a019288df41426dc15a7ebf7613d24a23ff8 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Fri, 11 Apr 2014 14:14:07 -0700 Subject: [PATCH 011/188] Allow transient values to be stored in storage Instead of requiring all values to be saved to storage in a persistent manner allow certain values to be saved in memory only (aka transient values) so that tasks can accept things which should not be persisted (resources, passwords, file handles, sockets...) Breaking change: disallowing empty names for atom details when they are ensured for (previously this was not done at the storage level) but at the execution level. This adds it in both places. Change-Id: Ie68abdcf94f33fba5dbc05f03397aea5d82ea88f --- taskflow/storage.py | 41 +++++++++++++++++++++++------ taskflow/tests/unit/test_storage.py | 29 ++++++++++++++++++++ 2 files changed, 62 insertions(+), 8 deletions(-) diff --git a/taskflow/storage.py b/taskflow/storage.py index 14fd3180..35ba7d0e 100644 --- a/taskflow/storage.py +++ b/taskflow/storage.py @@ -51,6 +51,7 @@ class Storage(object): self._backend = backend self._flowdetail = flow_detail self._lock = self._lock_cls() + self._transients = {} # NOTE(imelnikov): failure serialization looses information, # so we cache failures here, in atom name -> failure mapping. @@ -99,6 +100,8 @@ class Storage(object): Returns uuid for the task details corresponding to the task with given name. """ + if not task_name: + raise ValueError("Task name must be non-empty") with self._lock.write_lock(): try: task_id = self._atom_name_to_uuid[task_name] @@ -127,6 +130,8 @@ class Storage(object): Returns uuid for the retry details corresponding to the retry with given name. 
""" + if not retry_name: + raise ValueError("Retry name must be non-empty") with self._lock.write_lock(): try: retry_id = self._atom_name_to_uuid[retry_name] @@ -405,17 +410,21 @@ class Storage(object): if self._reset_atom(ad, state): self._with_connection(self._save_atom_detail, ad) - def inject(self, pairs): + def inject(self, pairs, transient=False): """Add values into storage. This method should be used to put flow parameters (requirements that are not satisfied by any task in the flow) into storage. + + :param: transient save the data in-memory only instead of persisting + the data to backend storage (useful for resource-like objects + or similar objects which should *not* be persisted) """ - with self._lock.write_lock(): + + def save_persistent(): try: - ad = self._atomdetail_by_name( - self.injector_name, - expected_type=logbook.TaskDetail) + ad = self._atomdetail_by_name(self.injector_name, + expected_type=logbook.TaskDetail) except exceptions.NotFound: uuid = uuidutils.generate_uuid() self._create_atom_detail(logbook.TaskDetail, @@ -427,8 +436,21 @@ class Storage(object): else: ad.results.update(pairs) self._with_connection(self._save_atom_detail, ad) - names = six.iterkeys(ad.results) - self._set_result_mapping(self.injector_name, + return (self.injector_name, six.iterkeys(ad.results)) + + def save_transient(): + self._transients.update(pairs) + # NOTE(harlowja): none is not a valid atom name, so that means + # we can use it internally to reference all of our transient + # variables. + return (None, six.iterkeys(self._transients)) + + with self._lock.write_lock(): + if transient: + (atom_name, names) = save_transient() + else: + (atom_name, names) = save_persistent() + self._set_result_mapping(atom_name, dict((name, name) for name in names)) def _set_result_mapping(self, atom_name, mapping): @@ -470,8 +492,11 @@ class Storage(object): raise exceptions.NotFound("Name %r is not mapped" % name) # Return the first one that is found. 
for (atom_name, index) in reversed(indexes): - try: + if not atom_name: + results = self._transients + else: results = self._get(atom_name, only_last=True) + try: return misc.item_from(results, index, name) except exceptions.NotFound: pass diff --git a/taskflow/tests/unit/test_storage.py b/taskflow/tests/unit/test_storage.py index eb088190..001cba97 100644 --- a/taskflow/tests/unit/test_storage.py +++ b/taskflow/tests/unit/test_storage.py @@ -371,6 +371,35 @@ class StorageTestMixin(object): s.ensure_task('my task') self.assertTrue(uuidutils.is_uuid_like(s.get_atom_uuid('my task'))) + def test_transient_storage_fetch_all(self): + s = self._get_storage() + s.inject([("a", "b")], transient=True) + s.inject([("b", "c")]) + + results = s.fetch_all() + self.assertEqual({"a": "b", "b": "c"}, results) + + def test_transient_storage_fetch_mapped(self): + s = self._get_storage() + s.inject([("a", "b")], transient=True) + s.inject([("b", "c")]) + desired = { + 'y': 'a', + 'z': 'b', + } + args = s.fetch_mapped_args(desired) + self.assertEqual({'y': 'b', 'z': 'c'}, args) + + def test_transient_storage_restore(self): + _lb, flow_detail = p_utils.temporary_flow_detail(self.backend) + s = self._get_storage(flow_detail=flow_detail) + s.inject([("a", "b")], transient=True) + s.inject([("b", "c")]) + + s2 = self._get_storage(flow_detail=flow_detail) + results = s2.fetch_all() + self.assertEqual({"b": "c"}, results) + def test_unknown_task_by_name(self): s = self._get_storage() self.assertRaisesRegexp(exceptions.NotFound, From a2351987351dd92acefc3d87b783f8a877aee2e6 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Sat, 12 Apr 2014 23:09:00 -0700 Subject: [PATCH 012/188] More keywords & classifier topics Add more classifier information that is relevant for taskflow to help more people locate what taskflow is all about when they are browsing/searching pypi. 
Change-Id: I0e6f435e980fca194c98b24b346205ecf8f1e7a7 --- setup.cfg | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/setup.cfg b/setup.cfg index 784e3de4..eaa14df0 100644 --- a/setup.cfg +++ b/setup.cfg @@ -10,11 +10,12 @@ keywords = reliable recoverable execution tasks flows workflows jobs persistence states asynchronous parallel threads + dataflow openstack classifier = Development Status :: 4 - Beta Environment :: OpenStack - Intended Audience :: Information Technology Intended Audience :: Developers + Intended Audience :: Information Technology License :: OSI Approved :: Apache Software License Operating System :: POSIX :: Linux Programming Language :: Python @@ -23,6 +24,8 @@ classifier = Programming Language :: Python :: 2.7 Programming Language :: Python :: 3 Programming Language :: Python :: 3.3 + Topic :: Software Development :: Libraries + Topic :: System :: Distributed Computing [global] setup-hooks = From 6b91109dd8faa15e55b67d008e45520664d041ce Mon Sep 17 00:00:00 2001 From: Thomas Bechtold Date: Mon, 14 Apr 2014 19:05:58 +0200 Subject: [PATCH 013/188] Reuse already defined variable The REVERTING variable is already defined as flow state. Reuse it for the task state. 
Change-Id: Ied67118be79e3ac933d5b7006720b72f8dc441b4 --- taskflow/states.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/taskflow/states.py b/taskflow/states.py index 7fea38b0..883bf622 100644 --- a/taskflow/states.py +++ b/taskflow/states.py @@ -36,7 +36,7 @@ RESUMING = 'RESUMING' FAILURE = FAILURE PENDING = PENDING REVERTED = REVERTED -REVERTING = 'REVERTING' +REVERTING = REVERTING SUCCESS = SUCCESS RUNNING = RUNNING RETRYING = 'RETRYING' From 6f5896c8bd16cfca07f6936f65a8dd25c6167d7e Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Wed, 2 Apr 2014 15:59:41 -0700 Subject: [PATCH 014/188] Allow fetching jobboard implementations To provide a standard way to fetch jobboards that doesn't require importing the modules themselves but by using entrypoints instead. Blueprint jobboard-entrypoints Change-Id: I13639c6be78a5d003e50e6cfd452c7b810072006 --- setup.cfg | 3 ++ taskflow/jobs/backends/__init__.py | 32 +++++++++++++- taskflow/jobs/backends/impl_zookeeper.py | 3 +- taskflow/jobs/jobboard.py | 3 +- taskflow/tests/unit/jobs/test_entrypoint.py | 49 +++++++++++++++++++++ 5 files changed, 86 insertions(+), 4 deletions(-) create mode 100644 taskflow/tests/unit/jobs/test_entrypoint.py diff --git a/setup.cfg b/setup.cfg index eaa14df0..bc8f9a32 100644 --- a/setup.cfg +++ b/setup.cfg @@ -36,6 +36,9 @@ packages = taskflow [entry_points] +taskflow.jobboards = + zookeeper = taskflow.jobs.backends.impl_zookeeper:ZookeeperJobBoard + taskflow.persistence = dir = taskflow.persistence.backends.impl_dir:DirBackend file = taskflow.persistence.backends.impl_dir:DirBackend diff --git a/taskflow/jobs/backends/__init__.py b/taskflow/jobs/backends/__init__.py index da9e7d90..f9efc534 100644 --- a/taskflow/jobs/backends/__init__.py +++ b/taskflow/jobs/backends/__init__.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- -# Copyright (C) 2013 Yahoo! Inc. All Rights Reserved. +# Copyright (C) 2014 Yahoo! Inc. All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain @@ -13,3 +13,33 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. + +import logging + +import six +from stevedore import driver + +from taskflow import exceptions as exc + + +# NOTE(harlowja): this is the entrypoint namespace, not the module namespace. +BACKEND_NAMESPACE = 'taskflow.jobboards' + +LOG = logging.getLogger(__name__) + + +def fetch(name, conf, namespace=BACKEND_NAMESPACE, **kwargs): + # NOTE(harlowja): this allows simpler syntax. + if isinstance(conf, six.string_types): + conf = {'board': conf} + + board = conf['board'] + LOG.debug('Looking for %r jobboard driver in %r', board, namespace) + try: + mgr = driver.DriverManager(namespace, board, + invoke_on_load=True, + invoke_args=(name, conf), + invoke_kwds=kwargs) + return mgr.driver + except RuntimeError as e: + raise exc.NotFound("Could not find jobboard %s" % (board), e) diff --git a/taskflow/jobs/backends/impl_zookeeper.py b/taskflow/jobs/backends/impl_zookeeper.py index d635396d..96baadb2 100644 --- a/taskflow/jobs/backends/impl_zookeeper.py +++ b/taskflow/jobs/backends/impl_zookeeper.py @@ -144,8 +144,7 @@ class ZookeeperJob(base_job.Job): class ZookeeperJobBoard(jobboard.JobBoard): def __init__(self, name, conf, client=None): - super(ZookeeperJobBoard, self).__init__(name) - self._conf = conf + super(ZookeeperJobBoard, self).__init__(name, conf) if client is not None: self._client = client self._owned = False diff --git a/taskflow/jobs/jobboard.py b/taskflow/jobs/jobboard.py index 50af583e..c8f54e7c 100644 --- a/taskflow/jobs/jobboard.py +++ b/taskflow/jobs/jobboard.py @@ -28,8 +28,9 @@ class JobBoard(object): capabilities of the underlying jobboard implementation. 
""" - def __init__(self, name): + def __init__(self, name, conf): self._name = name + self._conf = conf @abc.abstractmethod def iterjobs(self, only_unclaimed=False): diff --git a/taskflow/tests/unit/jobs/test_entrypoint.py b/taskflow/tests/unit/jobs/test_entrypoint.py new file mode 100644 index 00000000..17dfa02e --- /dev/null +++ b/taskflow/tests/unit/jobs/test_entrypoint.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- + +# Copyright (C) 2014 Yahoo! Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import contextlib + +from zake import fake_client + +from taskflow.jobs import backends +from taskflow.jobs.backends import impl_zookeeper +from taskflow import test + + +class BackendFetchingTest(test.TestCase): + def test_zk_entry_point_text(self): + conf = 'zookeeper' + with contextlib.closing(backends.fetch('test', conf)) as be: + self.assertIsInstance(be, impl_zookeeper.ZookeeperJobBoard) + + def test_zk_entry_point(self): + conf = { + 'board': 'zookeeper', + } + with contextlib.closing(backends.fetch('test', conf)) as be: + self.assertIsInstance(be, impl_zookeeper.ZookeeperJobBoard) + + def test_zk_entry_point_existing_client(self): + existing_client = fake_client.FakeClient() + conf = { + 'board': 'zookeeper', + } + kwargs = { + 'client': existing_client, + } + with contextlib.closing(backends.fetch('test', conf, **kwargs)) as be: + self.assertIsInstance(be, impl_zookeeper.ZookeeperJobBoard) + self.assertIs(existing_client, be._client) From 3fd758d317cac2799cec981b39c8f95a87ef6b2f Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Tue, 8 Apr 2014 23:02:20 -0700 Subject: [PATCH 015/188] Get persistence backend via kwargs instead of conf The persistence backend that the zookeeper backend uses to load logbooks when they appear should come in via a kwarg instead of coming in via conf (since it's not configuration). 
Change-Id: I300d7681e5884a989549c8b84d29f04e9ea3d060 --- taskflow/jobs/backends/impl_zookeeper.py | 4 ++-- taskflow/tests/unit/jobs/test_zk_job.py | 11 ++++++----- 2 files changed, 8 insertions(+), 7 deletions(-) diff --git a/taskflow/jobs/backends/impl_zookeeper.py b/taskflow/jobs/backends/impl_zookeeper.py index 96baadb2..7cb059ae 100644 --- a/taskflow/jobs/backends/impl_zookeeper.py +++ b/taskflow/jobs/backends/impl_zookeeper.py @@ -143,7 +143,7 @@ class ZookeeperJob(base_job.Job): class ZookeeperJobBoard(jobboard.JobBoard): - def __init__(self, name, conf, client=None): + def __init__(self, name, conf, client=None, persistence=None): super(ZookeeperJobBoard, self).__init__(name, conf) if client is not None: self._client = client @@ -162,7 +162,7 @@ class ZookeeperJobBoard(jobboard.JobBoard): # not currently the full logbook (later when a zookeeper backend # appears we can likely optimize for that backend usage by directly # reading from the path where the data is stored, if we want). - self._persistence = self._conf.get("persistence") + self._persistence = persistence # Misc. 
internal details self._known_jobs = {} self._job_mutate = self._client.handler.rlock_object() diff --git a/taskflow/tests/unit/jobs/test_zk_job.py b/taskflow/tests/unit/jobs/test_zk_job.py index 37fa87d1..2dd6d573 100644 --- a/taskflow/tests/unit/jobs/test_zk_job.py +++ b/taskflow/tests/unit/jobs/test_zk_job.py @@ -44,11 +44,12 @@ def connect_close(*args): a.close() -def create_board(**kwargs): - client = fake_client.FakeClient() - board = impl_zookeeper.ZookeeperJobBoard('test-board', - conf=dict(kwargs), - client=client) +def create_board(client=None, persistence=None): + if not client: + client = fake_client.FakeClient() + board = impl_zookeeper.ZookeeperJobBoard('test-board', {}, + client=client, + persistence=persistence) return (client, board) From 514761e1f1c10fe1aac897a289ba2583060da3ae Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Mon, 7 Apr 2014 17:15:37 -0700 Subject: [PATCH 016/188] Add docs for jobs and jobboards Add initial docs explaining the jobboard and job concept and basic examples for how to use this paradigm. Part of blueprint jobboard-example-code Change-Id: Ia8bc48967e55df6107dba272ae7187b18ba2a16e --- doc/source/index.rst | 1 + doc/source/jobs.rst | 210 +++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 211 insertions(+) create mode 100644 doc/source/jobs.rst diff --git a/doc/source/index.rst b/doc/source/index.rst index a2196f08..1fc28c3a 100644 --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -14,6 +14,7 @@ Contents arguments_and_results patterns engines + jobs inputs_and_outputs notifications storage diff --git a/doc/source/jobs.rst b/doc/source/jobs.rst new file mode 100644 index 00000000..c7978a18 --- /dev/null +++ b/doc/source/jobs.rst @@ -0,0 +1,210 @@ +---- +Jobs +---- + +Overview +======== + +Jobs and jobboards are a **novel** concept that taskflow provides to allow for +automatic ownership transfer of workflows between capable +owners (those owners usually then use :doc:`engines ` to complete the +workflow). 
They provide the necessary semantics to be able to atomically +transfer a job from a producer to a consumer in a reliable and fault tolerant +manner. They are modeled off the concept used to post and acquire work in the +physical world (typically a job listing in a newspaper or online website +serves a similar role). + +**TLDR:** It's similar to a queue, but consumers lock items on the queue when +claiming them, and only remove them from the queue when they're done with the +work. If the consumer fails, the lock is *automatically* released and the item +is back on the queue for further consumption. + +Features +-------- + +- High availability + + - Guarantees workflow forward progress by transfering partially completed work + or work that has not been started to entities which can either resume the + previously partially completed work or begin initial work to ensure that + the workflow as a whole progresses (where progressing implies transitioning + through the workflow :doc:`patterns ` and :doc:`atoms ` + and completing their associated state transitions). + +- Atomic transfer and single ownership + + - Ensures that only one workflow is managed (aka owned) by a single owner at + a time in an atomic manner (including when the workflow is transferred to + a owner that is resuming some other failed owners work). This avoids + contention and ensures a workflow is managed by one and only one entity at + a time. + - *Note:* this does not mean that the owner needs to run the + workflow itself but instead said owner could use an engine that runs the + work in a distributed manner to ensure that the workflow progresses. + +- Separation of workflow construction and execution + + - Jobs can be created with logbooks that contain a specification of the work + to be done by a entity (such as an API server). The job then can be + completed by a entity that is watching that jobboard (not neccasarily the + API server itself). 
This creates a disconnection between work + formation and work completion that is useful for scaling out horizontally. + +- Asynchronous completion + + - When for example a API server posts a job for completion to a + jobboard that API server can return a *tracking* identifier to the user + calling the API service. This *tracking* identifier can be used by the + user to poll for status (similar in concept to a shipping *tracking* + identifier created by fedex or UPS). + +For more information, please see `wiki page`_ for more details. + +Jobs +---- + +A job consists of a unique identifier, name, and a reference to a logbook +which contains the details of the work that has been or should be/will be +completed to finish the work that has been created for that job. + +Jobboards +--------- + +A jobboard is responsible for managing the posting, ownership, and delivery +of jobs. It acts as the location where jobs can be posted, claimed and searched +for; typically by iteration or notification. Jobboards may be backed by +different *capable* implementations (each with potentially differing +configuration) but all jobboards implement the same interface and semantics so +that the backend usage is as transparent as possible. This allows deployers or +developers of a service that uses TaskFlow to select a jobboard implementation +that fits their setup (and there intended usage) best. + +Using Jobboards +=============== + +All engines are mere classes that implement same interface, and of course it is +possible to import them and create their instances just like with any classes +in Python. But the easier (and recommended) way for creating jobboards is by +using the `fetch()` functionality. Using this function the typical creation of +a jobboard (and an example posting of a job) might look like: + +.. code-block:: python + + from taskflow.persistence import backends as persistence_backends + from taskflow.jobs import backends as job_backends + + ... 
+    persistence = persistence_backends.fetch({
+        "connection": "mysql",
+        "user": ...,
+        "password": ...,
+    })
+    book = make_and_save_logbook(persistence)
+    board = job_backends.fetch('my-board', {
+        "board": "zookeeper",
+    }, persistence=persistence)
+    job = board.post("my-first-job", book)
+    ...
+
+Consumption of jobs is similarly achieved by creating a jobboard and using
+the iteration functionality to find and claim jobs (and eventually consume
+them). The typical usage of a jobboard for consumption (and work completion)
+might look like:
+
+.. code-block:: python
+
+    import time
+
+    from taskflow import exceptions as exc
+    from taskflow.persistence import backends as persistence_backends
+    from taskflow.jobs import backends as job_backends
+
+    ...
+    my_name = 'worker-1'
+    coffee_break_time = 60
+    persistence = persistence_backends.fetch({
+        "connection": "mysql",
+        "user": ...,
+        "password": ...,
+    })
+    board = job_backends.fetch('my-board', {
+        "board": "zookeeper",
+    }, persistence=persistence)
+    while True:
+        my_job = None
+        for job in board.iterjobs(only_unclaimed=True):
+            try:
+                board.claim(job, my_name)
+            except exc.UnclaimableJob:
+                pass
+            else:
+                my_job = job
+                break
+        if my_job is not None:
+            try:
+                perform_job(my_job)
+            except Exception:
+                LOG.exception("I failed performing job: %s", my_job)
+            else:
+                # I finished it, now cleanup.
+                board.consume(my_job)
+                persistence.destroy_logbook(my_job.book.uuid)
+        time.sleep(coffee_break_time)
+    ...
+
+
+.. automodule:: taskflow.jobs.backends
+.. automodule:: taskflow.persistence
+.. automodule:: taskflow.persistence.backends
+
+Jobboard Configuration
+======================
+
+Known jobboard types are listed below.
+
+Zookeeper
+---------
+
+**Board type**: ``'zookeeper'``
+
+Uses `zookeeper`_ to provide the jobboard capabilities and semantics by using
+a zookeeper directory, ephemeral, non-ephemeral nodes and watches. 
+ +Additional *kwarg* parameters: + +* ``client``: a class that provides ``kazoo.client.KazooClient``-like + interface; it will be used for zookeeper interactions, sharing clients + between jobboard instances will likely provide better scalability and can + help avoid creating to many open connections to a set of zookeeper servers. +* ``persistence``: a class that provides a :doc:`persistence ` + backend interface; it will be used for loading jobs logbooks for usage at + runtime or for usage before a job is claimed for introspection. + +Additional *configuration* parameters: + +* ``path``: the root zookeeper path to store job information (*defaults* to + ``/taskflow/jobs``) +* ``hosts``: the list of zookeeper hosts to connect to (*defaults* to + ``localhost:2181``); only used if a client is not provided. +* ``timeout``: the timeout used when performing operations with zookeeper; + only used if a client is not provided. +* ``handler``: a class that provides ``kazoo.handlers``-like interface; it will + be used internally by `kazoo`_ to perform asynchronous operations, useful when + your program uses eventlet and you want to instruct kazoo to use an eventlet + compatible handler (such as the `eventlet handler`_). + + +Job Interface +============= + +.. automodule:: taskflow.jobs.job + +Jobboard Interface +================== + +.. automodule:: taskflow.jobs.jobboard + +.. _wiki page: https://wiki.openstack.org/wiki/TaskFlow/Paradigm_shifts#Workflow_ownership_transfer +.. _zookeeper: http://zookeeper.apache.org/ +.. _kazoo: http://kazoo.readthedocs.org/ +.. 
_eventlet handler: https://pypi.python.org/pypi/kazoo-eventlet-handler/ From 963330242f773eaa1dda6eac07b470ac75eabf51 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Thu, 17 Apr 2014 10:45:45 -0700 Subject: [PATCH 017/188] Ensure example abandons job when it fails If a worker can't finish a job and the worker has not crashed it should make sure that it manually abandons the job that it claimed (if it crashes this release will happen automatically). Change-Id: I2d6894281915fd335bffa34c006aed023e88e9fc --- doc/source/jobs.rst | 1 + 1 file changed, 1 insertion(+) diff --git a/doc/source/jobs.rst b/doc/source/jobs.rst index c7978a18..248ce500 100644 --- a/doc/source/jobs.rst +++ b/doc/source/jobs.rst @@ -145,6 +145,7 @@ might look like: perform_job(my_job) except Exception: LOG.exception("I failed performing job: %s", my_job) + board.abandon(my_job, my_name) else: # I finished it, now cleanup. board.consume(my_job) From c9e6bccddcf8a56d1ca47e83b579fcd23d4304f9 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Thu, 17 Apr 2014 23:07:59 -0700 Subject: [PATCH 018/188] Be better at failure tolerance Instead of raising exceptions from a graph action while tasks are still running or if a set of tasks was scheduled and the last task could not be scheduled (for example) just add the failure to the failure list and continue processing what is actively running. This ensures that we don't leave futures in an abandoned state and clean up our executing work which is what all good behaving software should try to do. 
Change-Id: I9256d7afa86f922273586a98b27442b6ba87767c --- .../engines/action_engine/graph_action.py | 67 ++++++++++++------- 1 file changed, 42 insertions(+), 25 deletions(-) diff --git a/taskflow/engines/action_engine/graph_action.py b/taskflow/engines/action_engine/graph_action.py index ae7f9019..b27b69d0 100644 --- a/taskflow/engines/action_engine/graph_action.py +++ b/taskflow/engines/action_engine/graph_action.py @@ -42,29 +42,34 @@ class FutureGraphAction(object): def is_running(self): return self._storage.get_flow_state() == st.RUNNING - def _schedule(self, nodes): - """Schedule nodes for execution. + def _schedule_node(self, node): + """Schedule a single node for execution.""" + if isinstance(node, task.BaseTask): + return self._schedule_task(node) + elif isinstance(node, r.Retry): + return self._schedule_retry(node) + else: + raise TypeError("Unknown how to schedule node %s" % node) - Returns list of futures. - """ + def _schedule(self, nodes): + """Schedule a group of nodes for execution.""" futures = [] for node in nodes: - if isinstance(node, task.BaseTask): - future = self._schedule_task(node) - elif isinstance(node, r.Retry): - future = self._schedule_retry(node) - else: - raise TypeError("Unknown how to schedule node %s" % node) - futures.append(future) - return futures + try: + futures.append(self._schedule_node(node)) + except Exception: + # Immediately stop scheduling future work so that we can + # exit execution early (rather than later) if a single task + # fails to schedule correctly. 
+ return (futures, [misc.Failure()]) + return (futures, []) def execute(self): # Prepare flow to be resumed next_nodes = self._prepare_flow_for_resume() next_nodes.update(self._analyzer.get_next_nodes()) - not_done = self._schedule(next_nodes) + not_done, failures = self._schedule(next_nodes) - failures = [] while not_done: # NOTE(imelnikov): if timeout occurs before any of futures # completes, done list will be empty and we'll just go @@ -72,24 +77,36 @@ class FutureGraphAction(object): done, not_done = self._task_action.wait_for_any( not_done, _WAITING_TIMEOUT) + # Analyze the results and schedule more nodes (unless we had + # failures). If failures occured just continue processing what + # is running (so that we don't leave it abandoned) but do not + # schedule anything new. next_nodes = set() for future in done: - node, event, result = future.result() - if isinstance(node, task.BaseTask): - self._complete_task(node, event, result) - if isinstance(result, misc.Failure): - if event == ex.EXECUTED: - self._process_atom_failure(node, result) + try: + node, event, result = future.result() + if isinstance(node, task.BaseTask): + self._complete_task(node, event, result) + if isinstance(result, misc.Failure): + if event == ex.EXECUTED: + self._process_atom_failure(node, result) + else: + failures.append(result) + except Exception: + failures.append(misc.Failure()) + else: + try: + more_nodes = self._analyzer.get_next_nodes(node) + except Exception: + failures.append(misc.Failure()) else: - failures.append(result) - next_nodes.update(self._analyzer.get_next_nodes(node)) - + next_nodes.update(more_nodes) if next_nodes and not failures and self.is_running(): - not_done.extend(self._schedule(next_nodes)) + more_not_done, failures = self._schedule(next_nodes) + not_done.extend(more_not_done) if failures: misc.Failure.reraise_if_any(failures) - if self._analyzer.get_next_nodes(): return st.SUSPENDED elif self._analyzer.is_success(): From 
3209820b8d37483ce9f37efe1c30c19588e06bf9 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Sat, 19 Apr 2014 22:36:52 -0700 Subject: [PATCH 019/188] Stings -> Strings Fix spelling mistake. Change-Id: Iadbd3663a8f1cc8e95a754a165ef682490736256 --- taskflow/exceptions.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/taskflow/exceptions.py b/taskflow/exceptions.py index f526068e..95e378af 100644 --- a/taskflow/exceptions.py +++ b/taskflow/exceptions.py @@ -154,7 +154,7 @@ class WrappedFailure(Exception): """Check if any of exc_classes caused (part of) the failure. Arguments of this method can be exception types or type names - (stings). If any of wrapped failures were caused by exception + (strings). If any of wrapped failures were caused by exception of given type, the corresponding argument is returned. Else, None is returned. """ From 95981fe4a8441962761a144c4deae67e4b64a82c Mon Sep 17 00:00:00 2001 From: OpenStack Proposal Bot Date: Sun, 20 Apr 2014 09:59:58 +0000 Subject: [PATCH 020/188] Updated from global requirements Change-Id: I4cb4b627a4cecce6c6768357486712711d350d5a --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 8e86d9b1..2b42a00c 100644 --- a/requirements.txt +++ b/requirements.txt @@ -3,7 +3,7 @@ pbr>=0.6,!=0.7,<1.0 anyjson>=0.3.3 iso8601>=0.1.9 # Python 2->3 compatibility library. -six>=1.5.2 +six>=1.6.0 # Very nice graph library networkx>=1.8 Babel>=1.3 From 7277a73f8ceedcd62b3dfe02b7f9c83080829a31 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Sat, 19 Apr 2014 18:33:14 -0700 Subject: [PATCH 021/188] Add persistence docs + adjustments - Increase the level of docs around why the persistence objects exist and what they are used for and how an engine interacts with persistence objects to accomplish the wider goals. 
- Pass backend conf as arg, not kwarg and to match with the other fetch/load functions allow for providing a kwargs which can be used for backend specific params. Change-Id: Ia83e63196bcfcc966f68212f84f79a1e9f18e8bc --- doc/source/index.rst | 1 - doc/source/inputs_and_outputs.rst | 2 +- doc/source/jobs.rst | 14 +- doc/source/persistence.rst | 195 +++++++++++++++++++++- doc/source/storage.rst | 5 - taskflow/jobs/backends/__init__.py | 4 + taskflow/persistence/backends/__init__.py | 8 +- 7 files changed, 204 insertions(+), 25 deletions(-) delete mode 100644 doc/source/storage.rst diff --git a/doc/source/index.rst b/doc/source/index.rst index 1fc28c3a..84075223 100644 --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -17,7 +17,6 @@ Contents jobs inputs_and_outputs notifications - storage persistence exceptions utils diff --git a/doc/source/inputs_and_outputs.rst b/doc/source/inputs_and_outputs.rst index 2542fe71..ee00945f 100644 --- a/doc/source/inputs_and_outputs.rst +++ b/doc/source/inputs_and_outputs.rst @@ -63,7 +63,7 @@ Engine and Storage ------------------ The storage layer is how an engine persists flow and task details. For more -in-depth design details see :doc:`persistence` and :doc:`storage`. +in-depth design details see :doc:`persistence`. Inputs ------ diff --git a/doc/source/jobs.rst b/doc/source/jobs.rst index 248ce500..0bff17c9 100644 --- a/doc/source/jobs.rst +++ b/doc/source/jobs.rst @@ -85,8 +85,11 @@ Using Jobboards All engines are mere classes that implement same interface, and of course it is possible to import them and create their instances just like with any classes in Python. But the easier (and recommended) way for creating jobboards is by -using the `fetch()` functionality. 
Using this function the typical creation of -a jobboard (and an example posting of a job) might look like: +using the :py:meth:`fetch() ` function which uses +entrypoints (internally using `stevedore`_) to fetch and configure your backend + +Using this function the typical creation of a jobboard (and an example posting +of a job) might look like: .. code-block:: python @@ -153,11 +156,6 @@ might look like: time.sleep(coffee_break_time) ... - -.. automodule:: taskflow.jobs.backends -.. automodule:: taskflow.persistence -.. automodule:: taskflow.persistence.backends - Jobboard Configuration ====================== @@ -198,6 +196,7 @@ Additional *configuration* parameters: Job Interface ============= +.. automodule:: taskflow.jobs.backends .. automodule:: taskflow.jobs.job Jobboard Interface @@ -209,3 +208,4 @@ Jobboard Interface .. _zookeeper: http://zookeeper.apache.org/ .. _kazoo: http://kazoo.readthedocs.org/ .. _eventlet handler: https://pypi.python.org/pypi/kazoo-eventlet-handler/ +.. _stevedore: http://stevedore.readthedocs.org/ diff --git a/doc/source/persistence.rst b/doc/source/persistence.rst index 6eccbba4..4981b83c 100644 --- a/doc/source/persistence.rst +++ b/doc/source/persistence.rst @@ -1,17 +1,194 @@ ------------ +=========== Persistence ------------ +=========== -Persistence objects -~~~~~~~~~~~~~~~~~~~ +Overview +======== -.. automodule:: taskflow.persistence.logbook +In order to be able to recieve inputs and create outputs from atoms (or other +engine processes) in a fault-tolerant way, there is a need to be able to place +what atoms output in some kind of location where it can be re-used by other +atoms (or used for other purposes). To accomodate this type of usage taskflow +provides an abstraction (provided by pluggable `stevedore`_ backends) that is +similar in concept to a running programs *memory*. +This abstraction serves the following *major* purposes: -Persistence backends -~~~~~~~~~~~~~~~~~~~~ +* Tracking of what was done (introspection). 
+* Saving *memory* which allows for restarting from the last saved state + which is a critical feature to restart and resume workflows (checkpointing). +* Associating additional metadata with atoms while running (without having those + atoms need to save this data themselves). This makes it possible to add-on + new metadata in the future without having to change the atoms themselves. For + example the following can be saved: + + * Timing information (how long a task took to run). + * User information (who the task ran as). + * When a atom/workflow was ran (and why). + +* Saving historical data (failures, successes, intermediary results...) to allow + for retry atoms to be able to decide if they should should continue vs. stop. +* *Something you create...* + +For more *general* information, please see `wiki page`_. + +.. _stevedore: http://stevedore.readthedocs.org/ +.. _wiki page: https://wiki.openstack.org/wiki/TaskFlow/Persistence + +How it is used +============== + +On :doc:`engine ` construction typically a backend (it can be optional) +will be provided which satisifies the :py:class:`~taskflow.persistence.backends.base.Backend` +abstraction. Along with providing a backend object a :py:class:`~taskflow.persistence.logbook.FlowDetail` +object will also be created and provided (this object will contain the details about +the flow to be ran) to the engine constructor (or associated :py:meth:`load() ` helper functions). +Typically a :py:class:`~taskflow.persistence.logbook.FlowDetail` object is created from +a :py:class:`~taskflow.persistence.logbook.LogBook` object (the book object +acts as a type of container for :py:class:`~taskflow.persistence.logbook.FlowDetail` +and :py:class:`~taskflow.persistence.logbook.AtomDetail` objects). 
+ +**Preparation**: Once an engine starts to run it will create a :py:class:`~taskflow.storage.Storage` +object which will act as the engines interface to the underlying backend storage +objects (it provides helper functions that are commonly used by the engine, +avoiding repeating code when interacting with the provided :py:class:`~taskflow.persistence.logbook.FlowDetail` +and :py:class:`~taskflow.persistence.backends.base.Backend` objects). As an engine +initializes it will extract (or create) :py:class:`~taskflow.persistence.logbook.AtomDetail` +objects for each atom in the workflow the engine will be executing. + +**Execution:** When an engine beings to execute it will examine any previously existing +:py:class:`~taskflow.persistence.logbook.AtomDetail` objects to see if they can be used +for resuming; see `big picture`_ for more details on this subject. For atoms which have not +finished (or did not finish correctly from a previous run) they will begin executing +only after any dependent inputs are ready. This is done by analyzing the execution +graph and looking at predecessor :py:class:`~taskflow.persistence.logbook.AtomDetail` +outputs and states (which may have been persisted in a past run). This will result +in either using there previous information or by running those predecessors and +saving their output to the :py:class:`~taskflow.persistence.logbook.FlowDetail` and +:py:class:`~taskflow.persistence.backends.base.Backend` objects. This execution, analysis +and interaction with the storage objects continues (what is described here is +a simplification of what really happens; which is quite a bit more complex) +until the engine has finished running (at which point the engine will have +succeeded or failed in its attempt to run the workflow). 
+ +**Post-execution:** Typically when an engine is done running the logbook would +be discarded (to avoid creating a stockpile of useless data) and the backend +storage would be told to delete any contents for a given execution. For certain +use-cases though it may be advantageous to retain logbooks and there contents. + +A few scenarios come to mind: + +* Post runtime failure analysis and triage (saving what failed and why). +* Metrics (saving timing information associated with each atom and using it + to perform offline performance analysis, which enables tuning tasks and/or + isolating and fixing slow tasks). +* Data mining logbooks to find trends (in failures for example). +* Saving logbooks for further forensics analysis. +* Exporting logbooks to `hdfs`_ (or other no-sql storage) and running some type + of map-reduce jobs on them. + +.. _hdfs: https://hadoop.apache.org/docs/current/hadoop-project-dist/hadoop-hdfs/HdfsUserGuide.html +.. _big picture: https://wiki.openstack.org/wiki/TaskFlow/Patterns_and_Engines/Persistence#Big_Picture + +.. note:: + + It should be emphasized that logbook is the authoritative, and, preferably, + the **only** (see :doc:`inputs and outputs `) source of + run-time state information (breaking this principle makes it hard/impossible + to restart or resume in any type of automated fashion). When an atom returns + a result, it should be written directly to a logbook. When atom or flow state + changes in any way, logbook is first to know (see :doc:`notifications ` + for how a user may also get notified of those same state changes). The logbook + and a backend and associated storage helper class are responsible to store the actual data. + These components used together specify the persistence mechanism (how data + is saved and where -- memory, database, whatever...) and the persistence policy + (when data is saved -- every time it changes or at some particular moments + or simply never). 
+ +Persistence Configuration +========================= + +To select which persistence backend to use you should use the +:py:meth:`fetch() ` function which uses +entrypoints (internally using `stevedore`_) to fetch and configure your backend. This makes +it simpler than accessing the backend data types directly and provides a common +function from which a backend can be fetched. + +Using this function to fetch a backend might look like: + +.. code-block:: python + + from taskflow.persistence import backends + + ... + persistence = backends.fetch(conf={ + "connection": "mysql", + "user": ..., + "password": ..., + }) + book = make_and_save_logbook(persistence) + ... + +As can be seen from above the ``conf`` parameter acts as a dictionary that +is used to fetch and configure your backend. The restrictions on it are +the following: + +* a dictionary (or dictionary like type), holding backend type with key + ``'connection'`` and possibly type-specific backend parameters as other + keys. + +Known engine types are listed below. + +**Connection**: ``'memory'`` + +Retains all data in local memory (not persisted to reliable storage). Useful +for scenarios where persistence is not required (and also in unit tests). + +**Connection**: ``'dir'`` or ``'file'`` + +Retains all data in a directory & file based structure on local disk. Will be +persisted **locally** in the case of system failure (allowing for resumption +from the same local machine only). Useful for cases where a *more* reliable +persistence is desired along with the simplicity of files and directories (a +concept everyone is familiar with). + +**Connection**: ``'mysql'`` or ``'postgres'`` or ``'sqlite'`` + +Retains all data in an `ACID`_ compliant database using the `sqlalchemy`_ library +for schemas, connections, and database interaction functionality. Useful when +you need a higher level of durability than offered by the previous solutions. 
When +using these connection types it is possible to resume a engine from a peer machine (this +does not apply when using sqlite). + +.. _sqlalchemy: http://www.sqlalchemy.org/docs/ +.. _ACID: https://en.wikipedia.org/wiki/ACID + +**Connection**: ``'zookeeper'`` + +Retains all data in a `zookeeper`_ backend (zookeeper exposes operations on +files and directories, similar to the above ``'dir'`` or ``'file'`` connection +types). Internally the `kazoo`_ library is used to interact with zookeeper +to perform reliable, distributed and atomic operations on the contents of a +logbook represented as znodes. Since zookeeper is also distributed it is also +able to resume a engine from a peer machine (having similar functionality +as the database connection types listed previously). + +.. _zookeeper: http://zookeeper.apache.org +.. _kazoo: http://kazoo.readthedocs.org/ + +Persistence Backend Interfaces +============================== .. automodule:: taskflow.persistence.backends - :undoc-members: - .. automodule:: taskflow.persistence.backends.base +.. automodule:: taskflow.persistence.logbook + +Hierarchy +========= + +.. inheritance-diagram:: + taskflow.persistence.backends.impl_memory + taskflow.persistence.backends.impl_zookeeper + taskflow.persistence.backends.impl_dir + taskflow.persistence.backends.impl_sqlalchemy + :parts: 1 diff --git a/doc/source/storage.rst b/doc/source/storage.rst deleted file mode 100644 index 0e5fc407..00000000 --- a/doc/source/storage.rst +++ /dev/null @@ -1,5 +0,0 @@ -------- -Storage -------- - -.. 
automodule:: taskflow.storage diff --git a/taskflow/jobs/backends/__init__.py b/taskflow/jobs/backends/__init__.py index f9efc534..ad4dc060 100644 --- a/taskflow/jobs/backends/__init__.py +++ b/taskflow/jobs/backends/__init__.py @@ -29,6 +29,10 @@ LOG = logging.getLogger(__name__) def fetch(name, conf, namespace=BACKEND_NAMESPACE, **kwargs): + """Fetch a jobboard backend with the given configuration (and any board + specific kwargs) in the given entrypoint namespace and create it with the + given name. + """ # NOTE(harlowja): this allows simpler syntax. if isinstance(conf, six.string_types): conf = {'board': conf} diff --git a/taskflow/persistence/backends/__init__.py b/taskflow/persistence/backends/__init__.py index 3565bc82..5cf30243 100644 --- a/taskflow/persistence/backends/__init__.py +++ b/taskflow/persistence/backends/__init__.py @@ -32,7 +32,10 @@ SCHEME_REGEX = re.compile(r"^([A-Za-z]{1}[A-Za-z0-9+.-]*):") LOG = logging.getLogger(__name__) -def fetch(conf, namespace=BACKEND_NAMESPACE): +def fetch(conf, namespace=BACKEND_NAMESPACE, **kwargs): + """Fetches a given backend using the given configuration (and any backend + specific kwargs) in the given entrypoint namespace. + """ connection = conf['connection'] match = SCHEME_REGEX.match(connection) @@ -45,7 +48,8 @@ def fetch(conf, namespace=BACKEND_NAMESPACE): try: mgr = driver.DriverManager(namespace, backend_name, invoke_on_load=True, - invoke_kwds={'conf': conf}) + invoke_args=(conf,), + invoke_kwds=kwargs) return mgr.driver except RuntimeError as e: raise exc.NotFound("Could not find backend %s: %s" % (backend_name, e)) From 5ca61f956e588d8a088d78bc9a58b2904acfb416 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Sat, 12 Apr 2014 21:35:16 -0700 Subject: [PATCH 022/188] Add a directed graph type (new types module) Most of the utility graph functions we have can be connected to a directed graph class that itself derives (and adds on to) the networkx base class. 
Doing this allows for functionality that isn't exposed in networkx to be exposed in our subclass (which is a useful pattern to have). It also makes it possible (if ever needed) to replace the networkx usage in taskflow with something else if this ever becomes a major request. Change-Id: I0a825d5637236d7b5dbdbda0d426adb0183d5ba3 --- doc/source/utils.rst | 8 -- taskflow/patterns/graph_flow.py | 22 ++-- taskflow/tests/unit/test_action_engine.py | 6 +- taskflow/tests/unit/test_flattening.py | 45 ++++---- taskflow/types/__init__.py | 0 taskflow/types/graph.py | 122 ++++++++++++++++++++++ taskflow/utils/flow_utils.py | 30 +++--- taskflow/utils/graph_utils.py | 98 ----------------- tools/state_graph.py | 8 +- 9 files changed, 176 insertions(+), 163 deletions(-) create mode 100644 taskflow/types/__init__.py create mode 100644 taskflow/types/graph.py delete mode 100644 taskflow/utils/graph_utils.py diff --git a/doc/source/utils.rst b/doc/source/utils.rst index b0bd3815..b847e07f 100644 --- a/doc/source/utils.rst +++ b/doc/source/utils.rst @@ -17,16 +17,8 @@ The following classes and modules are *recommended* for external usage: .. autoclass:: taskflow.utils.eventlet_utils.GreenExecutor :members: -.. autofunction:: taskflow.utils.graph_utils.pformat - -.. autofunction:: taskflow.utils.graph_utils.export_graph_to_dot - .. autofunction:: taskflow.utils.persistence_utils.temporary_log_book .. autofunction:: taskflow.utils.persistence_utils.temporary_flow_detail .. autofunction:: taskflow.utils.persistence_utils.pformat - -.. autofunction:: taskflow.utils.persistence_utils.pformat_flow_detail - -.. 
autofunction:: taskflow.utils.persistence_utils.pformat_atom_detail diff --git a/taskflow/patterns/graph_flow.py b/taskflow/patterns/graph_flow.py index ccbcf413..68691996 100644 --- a/taskflow/patterns/graph_flow.py +++ b/taskflow/patterns/graph_flow.py @@ -16,12 +16,11 @@ import collections -import networkx as nx from networkx.algorithms import traversal from taskflow import exceptions as exc from taskflow import flow -from taskflow.utils import graph_utils +from taskflow.types import graph as gr class Flow(flow.Flow): @@ -39,7 +38,8 @@ class Flow(flow.Flow): def __init__(self, name, retry=None): super(Flow, self).__init__(name, retry) - self._graph = nx.freeze(nx.DiGraph()) + self._graph = gr.DiGraph() + self._graph.freeze() def link(self, u, v): """Link existing node u as a runtime dependency of existing node v.""" @@ -57,7 +57,7 @@ class Flow(flow.Flow): mutable_graph = False # NOTE(harlowja): Add an edge to a temporary copy and only if that # copy is valid then do we swap with the underlying graph. - attrs = graph_utils.get_edge_attrs(graph, u, v) + attrs = graph.get_edge_data(u, v) if not attrs: attrs = {} if manual: @@ -67,21 +67,22 @@ class Flow(flow.Flow): attrs['reasons'] = set() attrs['reasons'].add(reason) if not mutable_graph: - graph = nx.DiGraph(graph) + graph = gr.DiGraph(graph) graph.add_edge(u, v, **attrs) return graph - def _swap(self, replacement_graph): + def _swap(self, graph): """Validates the replacement graph and then swaps the underlying graph with a frozen version of the replacement graph (this maintains the invariant that the underlying graph is immutable). 
""" - if not nx.is_directed_acyclic_graph(replacement_graph): + if not graph.is_directed_acyclic(): raise exc.DependencyFailure("No path through the items in the" " graph produces an ordering that" " will allow for correct dependency" " resolution") - self._graph = nx.freeze(replacement_graph) + self._graph = graph + self._graph.freeze() def add(self, *items): """Adds a given task/tasks/flow/flows to this flow.""" @@ -109,7 +110,7 @@ class Flow(flow.Flow): # NOTE(harlowja): Add items and edges to a temporary copy of the # underlying graph and only if that is successful added to do we then # swap with the underlying graph. - tmp_graph = nx.DiGraph(self._graph) + tmp_graph = gr.DiGraph(self._graph) for item in items: tmp_graph.add_node(item) update_requirements(item) @@ -237,5 +238,6 @@ class TargetedFlow(Flow): nodes = [self._target] nodes.extend(dst for _src, dst in traversal.dfs_edges(self._graph.reverse(), self._target)) - self._subgraph = nx.freeze(self._graph.subgraph(nodes)) + self._subgraph = self._graph.subgraph(nodes) + self._subgraph.freeze() return self._subgraph diff --git a/taskflow/tests/unit/test_action_engine.py b/taskflow/tests/unit/test_action_engine.py index d2401f43..d711a1c2 100644 --- a/taskflow/tests/unit/test_action_engine.py +++ b/taskflow/tests/unit/test_action_engine.py @@ -15,7 +15,6 @@ # under the License. 
import contextlib -import networkx import testtools import threading @@ -36,6 +35,7 @@ from taskflow import states from taskflow import task from taskflow import test from taskflow.tests import utils +from taskflow.types import graph as gr from taskflow.utils import eventlet_utils as eu from taskflow.utils import misc @@ -466,7 +466,7 @@ class EngineGraphFlowTest(utils.EngineTestBase): engine = self._make_engine(flow) engine.compile() graph = engine.execution_graph - self.assertIsInstance(graph, networkx.DiGraph) + self.assertIsInstance(graph, gr.DiGraph) def test_task_graph_property_for_one_task(self): flow = utils.TaskNoRequiresNoReturns(name='task1') @@ -474,7 +474,7 @@ class EngineGraphFlowTest(utils.EngineTestBase): engine = self._make_engine(flow) engine.compile() graph = engine.execution_graph - self.assertIsInstance(graph, networkx.DiGraph) + self.assertIsInstance(graph, gr.DiGraph) class EngineCheckingTaskTest(utils.EngineTestBase): diff --git a/taskflow/tests/unit/test_flattening.py b/taskflow/tests/unit/test_flattening.py index 9d56c111..600a000a 100644 --- a/taskflow/tests/unit/test_flattening.py +++ b/taskflow/tests/unit/test_flattening.py @@ -16,8 +16,6 @@ import string -import networkx as nx - from taskflow import exceptions as exc from taskflow.patterns import graph_flow as gf from taskflow.patterns import linear_flow as lf @@ -27,7 +25,6 @@ from taskflow import retry from taskflow import test from taskflow.tests import utils as t_utils from taskflow.utils import flow_utils as f_utils -from taskflow.utils import graph_utils as g_utils def _make_many(amount): @@ -66,13 +63,13 @@ class FlattenTest(test.TestCase): g = f_utils.flatten(flo) self.assertEqual(4, len(g)) - order = nx.topological_sort(g) + order = g.topological_sort() self.assertEqual([a, b, c, d], order) self.assertTrue(g.has_edge(c, d)) self.assertEqual(g.get_edge_data(c, d), {'invariant': True}) - self.assertEqual([d], list(g_utils.get_no_successors(g))) - self.assertEqual([a], 
list(g_utils.get_no_predecessors(g))) + self.assertEqual([d], list(g.no_successors_iter())) + self.assertEqual([a], list(g.no_predecessors_iter())) def test_invalid_flatten(self): a, b, c = _make_many(3) @@ -89,9 +86,9 @@ class FlattenTest(test.TestCase): self.assertEqual(4, len(g)) self.assertEqual(0, g.number_of_edges()) self.assertEqual(set([a, b, c, d]), - set(g_utils.get_no_successors(g))) + set(g.no_successors_iter())) self.assertEqual(set([a, b, c, d]), - set(g_utils.get_no_predecessors(g))) + set(g.no_predecessors_iter())) def test_linear_nested_flatten(self): a, b, c, d = _make_many(4) @@ -206,8 +203,8 @@ class FlattenTest(test.TestCase): (b, c, {'manual': True}), (c, d, {'manual': True}), ]) - self.assertItemsEqual([a], g_utils.get_no_predecessors(g)) - self.assertItemsEqual([d], g_utils.get_no_successors(g)) + self.assertItemsEqual([a], g.no_predecessors_iter()) + self.assertItemsEqual([d], g.no_successors_iter()) def test_graph_flatten_dependencies(self): a = t_utils.ProvidesRequiresTask('a', provides=['x'], requires=[]) @@ -219,8 +216,8 @@ class FlattenTest(test.TestCase): self.assertItemsEqual(g.edges(data=True), [ (a, b, {'reasons': set(['x'])}) ]) - self.assertItemsEqual([a], g_utils.get_no_predecessors(g)) - self.assertItemsEqual([b], g_utils.get_no_successors(g)) + self.assertItemsEqual([a], g.no_predecessors_iter()) + self.assertItemsEqual([b], g.no_successors_iter()) def test_graph_flatten_nested_requires(self): a = t_utils.ProvidesRequiresTask('a', provides=['x'], requires=[]) @@ -237,8 +234,8 @@ class FlattenTest(test.TestCase): (a, c, {'reasons': set(['x'])}), (b, c, {'invariant': True}) ]) - self.assertItemsEqual([a, b], g_utils.get_no_predecessors(g)) - self.assertItemsEqual([c], g_utils.get_no_successors(g)) + self.assertItemsEqual([a, b], g.no_predecessors_iter()) + self.assertItemsEqual([c], g.no_successors_iter()) def test_graph_flatten_nested_provides(self): a = t_utils.ProvidesRequiresTask('a', provides=[], requires=['x']) @@ -255,8 
+252,8 @@ class FlattenTest(test.TestCase): (b, c, {'invariant': True}), (b, a, {'reasons': set(['x'])}) ]) - self.assertItemsEqual([b], g_utils.get_no_predecessors(g)) - self.assertItemsEqual([a, c], g_utils.get_no_successors(g)) + self.assertItemsEqual([b], g.no_predecessors_iter()) + self.assertItemsEqual([a, c], g.no_successors_iter()) def test_flatten_checks_for_dups(self): flo = gf.Flow("test").add( @@ -304,8 +301,8 @@ class FlattenTest(test.TestCase): (c1, c2, {'retry': True}) ]) self.assertIs(c1, g.node[c2]['retry']) - self.assertItemsEqual([c1], g_utils.get_no_predecessors(g)) - self.assertItemsEqual([c2], g_utils.get_no_successors(g)) + self.assertItemsEqual([c1], g.no_predecessors_iter()) + self.assertItemsEqual([c2], g.no_successors_iter()) def test_flatten_retry_in_linear_flow_with_tasks(self): c = retry.AlwaysRevert("c") @@ -318,8 +315,8 @@ class FlattenTest(test.TestCase): (c, a, {'retry': True}) ]) - self.assertItemsEqual([c], g_utils.get_no_predecessors(g)) - self.assertItemsEqual([b], g_utils.get_no_successors(g)) + self.assertItemsEqual([c], g.no_predecessors_iter()) + self.assertItemsEqual([b], g.no_successors_iter()) self.assertIs(c, g.node[a]['retry']) self.assertIs(c, g.node[b]['retry']) @@ -334,8 +331,8 @@ class FlattenTest(test.TestCase): (c, b, {'retry': True}) ]) - self.assertItemsEqual([c], g_utils.get_no_predecessors(g)) - self.assertItemsEqual([a, b], g_utils.get_no_successors(g)) + self.assertItemsEqual([c], g.no_predecessors_iter()) + self.assertItemsEqual([a, b], g.no_successors_iter()) self.assertIs(c, g.node[a]['retry']) self.assertIs(c, g.node[b]['retry']) @@ -352,8 +349,8 @@ class FlattenTest(test.TestCase): (b, c, {'manual': True}) ]) - self.assertItemsEqual([r], g_utils.get_no_predecessors(g)) - self.assertItemsEqual([a, c], g_utils.get_no_successors(g)) + self.assertItemsEqual([r], g.no_predecessors_iter()) + self.assertItemsEqual([a, c], g.no_successors_iter()) self.assertIs(r, g.node[a]['retry']) self.assertIs(r, 
g.node[b]['retry']) self.assertIs(r, g.node[c]['retry']) diff --git a/taskflow/types/__init__.py b/taskflow/types/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/taskflow/types/graph.py b/taskflow/types/graph.py new file mode 100644 index 00000000..f6759127 --- /dev/null +++ b/taskflow/types/graph.py @@ -0,0 +1,122 @@ +# -*- coding: utf-8 -*- + +# Copyright (C) 2012 Yahoo! Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import networkx as nx +import six + + +class DiGraph(nx.DiGraph): + """A directed graph subclass with useful utility functions.""" + def __init__(self, data=None, name=''): + super(DiGraph, self).__init__(name=name, data=data) + self.frozen = False + + def freeze(self): + """Freezes the graph so that no more mutations can occur.""" + if not self.frozen: + nx.freeze(self) + return self + + def get_edge_data(self, u, v, default=None): + """Returns a *copy* of the attribute dictionary associated with edges + between (u, v). + + NOTE(harlowja): this differs from the networkx get_edge_data() as that + function does not return a copy (but returns a reference to the actual + edge data). 
+ """ + try: + return dict(self.adj[u][v]) + except KeyError: + return default + + def topological_sort(self): + """Return a list of nodes in this graph in topological sort order.""" + return nx.topological_sort(self) + + def pformat(self): + """Pretty formats your graph into a string representation that includes + details about your graph, including; name, type, frozeness, node count, + nodes, edge count, edges, graph density and graph cycles (if any). + """ + lines = [] + lines.append("Name: %s" % self.name) + lines.append("Type: %s" % type(self).__name__) + lines.append("Frozen: %s" % nx.is_frozen(self)) + lines.append("Nodes: %s" % self.number_of_nodes()) + for n in self.nodes_iter(): + lines.append(" - %s" % n) + lines.append("Edges: %s" % self.number_of_edges()) + for (u, v, e_data) in self.edges_iter(data=True): + if e_data: + lines.append(" %s -> %s (%s)" % (u, v, e_data)) + else: + lines.append(" %s -> %s" % (u, v)) + lines.append("Density: %0.3f" % nx.density(self)) + cycles = list(nx.cycles.recursive_simple_cycles(self)) + lines.append("Cycles: %s" % len(cycles)) + for cycle in cycles: + buf = six.StringIO() + buf.write("%s" % (cycle[0])) + for i in range(1, len(cycle)): + buf.write(" --> %s" % (cycle[i])) + buf.write(" --> %s" % (cycle[0])) + lines.append(" %s" % buf.getvalue()) + return "\n".join(lines) + + def export_to_dot(self): + """Exports the graph to a dot format (requires pydot library).""" + return nx.to_pydot(self).to_string() + + def is_directed_acyclic(self): + """Returns if this graph is a DAG or not.""" + return nx.is_directed_acyclic_graph(self) + + def no_successors_iter(self): + """Returns an iterator for all nodes with no successors.""" + for n in self.nodes_iter(): + if not len(self.successors(n)): + yield n + + def no_predecessors_iter(self): + """Returns an iterator for all nodes with no predecessors.""" + for n in self.nodes_iter(): + if not len(self.predecessors(n)): + yield n + + +def merge_graphs(graphs, allow_overlaps=False): 
+ """Merges a bunch of graphs into a single graph.""" + if not graphs: + return None + graph = graphs[0] + for g in graphs[1:]: + # This should ensure that the nodes to be merged do not already exist + # in the graph that is to be merged into. This could be problematic if + # there are duplicates. + if not allow_overlaps: + # Attempt to induce a subgraph using the to be merged graphs nodes + # and see if any graph results. + overlaps = graph.subgraph(g.nodes_iter()) + if len(overlaps): + raise ValueError("Can not merge graph %s into %s since there " + "are %s overlapping nodes" % (g, graph, + len(overlaps))) + # Keep the target graphs name. + name = graph.name + graph = nx.algorithms.compose(graph, g) + graph.name = name + return graph diff --git a/taskflow/utils/flow_utils.py b/taskflow/utils/flow_utils.py index ec365648..6b54d563 100644 --- a/taskflow/utils/flow_utils.py +++ b/taskflow/utils/flow_utils.py @@ -16,13 +16,11 @@ import logging -import networkx as nx - from taskflow import exceptions from taskflow import flow from taskflow import retry from taskflow import task -from taskflow.utils import graph_utils as gu +from taskflow.types import graph as gr from taskflow.utils import misc @@ -80,7 +78,7 @@ class Flattener(object): graph.add_node(retry) # All graph nodes that have no predecessors should depend on its retry - nodes_to = [n for n in gu.get_no_predecessors(graph) if n != retry] + nodes_to = [n for n in graph.no_predecessors_iter() if n != retry] self._add_new_edges(graph, [retry], nodes_to, RETRY_EDGE_DATA) # Add link to retry for each node of subgraph that hasn't @@ -91,34 +89,37 @@ class Flattener(object): def _flatten_task(self, task): """Flattens a individual task.""" - graph = nx.DiGraph(name=task.name) + graph = gr.DiGraph(name=task.name) graph.add_node(task) return graph def _flatten_flow(self, flow): """Flattens a graph flow.""" - graph = nx.DiGraph(name=flow.name) + graph = gr.DiGraph(name=flow.name) + # Flatten all nodes into a single
subgraph per node. subgraph_map = {} for item in flow: subgraph = self._flatten(item) subgraph_map[item] = subgraph - graph = gu.merge_graphs([graph, subgraph]) + graph = gr.merge_graphs([graph, subgraph]) # Reconnect all node edges to their corresponding subgraphs. for (u, v, attrs) in flow.iter_links(): + u_g = subgraph_map[u] + v_g = subgraph_map[v] if any(attrs.get(k) for k in ('invariant', 'manual', 'retry')): # Connect nodes with no predecessors in v to nodes with # no successors in u (thus maintaining the edge dependency). self._add_new_edges(graph, - gu.get_no_successors(subgraph_map[u]), - gu.get_no_predecessors(subgraph_map[v]), + u_g.no_successors_iter(), + v_g.no_predecessors_iter(), edge_attrs=attrs) else: # This is dependency-only edge, connect corresponding # providers and consumers. - for provider in subgraph_map[u]: - for consumer in subgraph_map[v]: + for provider in u_g: + for consumer in v_g: reasons = provider.provides & consumer.requires if reasons: graph.add_edge(provider, consumer, reasons=reasons) @@ -143,7 +144,7 @@ class Flattener(object): # and not under all cases. if LOG.isEnabledFor(logging.DEBUG): LOG.debug("Translated '%s' into a graph:", item) - for line in gu.pformat(graph).splitlines(): + for line in graph.pformat().splitlines(): # Indent it so that it's slightly offset from the above line. LOG.debug(" %s", line) @@ -168,10 +169,9 @@ class Flattener(object): self._pre_flatten() graph = self._flatten(self._root) self._post_flatten(graph) + self._graph = graph if self._freeze: - self._graph = nx.freeze(graph) - else: - self._graph = graph + self._graph.freeze() return self._graph diff --git a/taskflow/utils/graph_utils.py b/taskflow/utils/graph_utils.py deleted file mode 100644 index 7f18134c..00000000 --- a/taskflow/utils/graph_utils.py +++ /dev/null @@ -1,98 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright (C) 2012 Yahoo! Inc. All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import networkx as nx -import six - - -def get_edge_attrs(graph, u, v): - """Gets the dictionary of edge attributes between u->v (or none).""" - if not graph.has_edge(u, v): - return None - return dict(graph.adj[u][v]) - - -def merge_graphs(graphs, allow_overlaps=False): - if not graphs: - return None - graph = graphs[0] - for g in graphs[1:]: - # This should ensure that the nodes to be merged do not already exist - # in the graph that is to be merged into. This could be problematic if - # there are duplicates. - if not allow_overlaps: - # Attempt to induce a subgraph using the to be merged graphs nodes - # and see if any graph results. - overlaps = graph.subgraph(g.nodes_iter()) - if len(overlaps): - raise ValueError("Can not merge graph %s into %s since there " - "are %s overlapping nodes" (g, graph, - len(overlaps))) - # Keep the target graphs name. 
- name = graph.name - graph = nx.algorithms.compose(graph, g) - graph.name = name - return graph - - -def get_no_successors(graph): - """Returns an iterator for all nodes with no successors.""" - for n in graph.nodes_iter(): - if not len(graph.successors(n)): - yield n - - -def get_no_predecessors(graph): - """Returns an iterator for all nodes with no predecessors.""" - for n in graph.nodes_iter(): - if not len(graph.predecessors(n)): - yield n - - -def pformat(graph): - """Pretty formats your graph into a string representation that includes - details about your graph, including; name, type, frozeness, node count, - nodes, edge count, edges, graph density and graph cycles (if any). - """ - lines = [] - lines.append("Name: %s" % graph.name) - lines.append("Type: %s" % type(graph).__name__) - lines.append("Frozen: %s" % nx.is_frozen(graph)) - lines.append("Nodes: %s" % graph.number_of_nodes()) - for n in graph.nodes_iter(): - lines.append(" - %s" % n) - lines.append("Edges: %s" % graph.number_of_edges()) - for (u, v, e_data) in graph.edges_iter(data=True): - if e_data: - lines.append(" %s -> %s (%s)" % (u, v, e_data)) - else: - lines.append(" %s -> %s" % (u, v)) - lines.append("Density: %0.3f" % nx.density(graph)) - cycles = list(nx.cycles.recursive_simple_cycles(graph)) - lines.append("Cycles: %s" % len(cycles)) - for cycle in cycles: - buf = six.StringIO() - buf.write(str(cycle[0])) - for i in range(1, len(cycle)): - buf.write(" --> %s" % (cycle[i])) - buf.write(" --> %s" % (cycle[0])) - lines.append(" %s" % buf.getvalue()) - return "\n".join(lines) - - -def export_graph_to_dot(graph): - """Exports the graph to a dot format (requires pydot library).""" - return nx.to_pydot(graph).to_string() diff --git a/tools/state_graph.py b/tools/state_graph.py index 4a2587c5..f6a2057d 100644 --- a/tools/state_graph.py +++ b/tools/state_graph.py @@ -11,10 +11,8 @@ import optparse import subprocess import tempfile -import networkx as nx - from taskflow import states -from 
taskflow.utils import graph_utils as gu +from taskflow.types import graph as gr def mini_exec(cmd, ok_codes=(0,)): @@ -31,7 +29,7 @@ def mini_exec(cmd, ok_codes=(0,)): def make_svg(graph, output_filename, output_format): # NOTE(harlowja): requires pydot! - gdot = gu.export_graph_to_dot(graph) + gdot = graph.export_to_dot() if output_format == 'dot': output = gdot elif output_format in ('svg', 'svgz', 'png'): @@ -62,7 +60,7 @@ def main(): if options.filename is None: options.filename = 'states.%s' % options.format - g = nx.DiGraph(name="State transitions") + g = gr.DiGraph(name="State transitions") if not options.tasks: source = states._ALLOWED_FLOW_TRANSITIONS else: From 7d42e5b33cb0a1aacb6609344454c2538d99200c Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Tue, 8 Apr 2014 14:46:56 -0700 Subject: [PATCH 023/188] Use sequencing when posting jobs Retain the posting order by using sequencing when posting jobs to a jobboard (at least in the impl_zookeeper case this is possible). Adjust the interface to recommend that jobboard implementations post in oldest to newest order (we can not easily enforce this since some implementations will likely not be able to do this in every scenario). Bring in a new zake which has sequencing support (since this is required to support this feature). 
Change-Id: I0c475c3c1d16e9bbc9db6ff0ad6b7103c5948a47 --- taskflow/jobs/backends/impl_zookeeper.py | 180 +++++++++++------------ taskflow/jobs/jobboard.py | 5 + taskflow/tests/unit/jobs/test_zk_job.py | 10 +- test-requirements.txt | 2 +- 4 files changed, 92 insertions(+), 105 deletions(-) diff --git a/taskflow/jobs/backends/impl_zookeeper.py b/taskflow/jobs/backends/impl_zookeeper.py index 3f593ece..6d38f77e 100644 --- a/taskflow/jobs/backends/impl_zookeeper.py +++ b/taskflow/jobs/backends/impl_zookeeper.py @@ -28,6 +28,7 @@ from taskflow.jobs import job as base_job from taskflow.jobs import jobboard from taskflow.openstack.common import excutils from taskflow.openstack.common import jsonutils +from taskflow.openstack.common import uuidutils from taskflow.persistence import logbook from taskflow import states from taskflow.utils import kazoo_utils @@ -48,18 +49,6 @@ ALL_JOB_STATES = ( # Transaction support was added in 3.4.0 MIN_ZK_VERSION = (3, 4, 0) -# Used to ensure that watchers don't try to overwrite jobs that are still being -# posted (and may have not been completly posted yet), these jobs should not be -# yield back until they are in the ready state. 
-_READY = 'ready' -_POSTING = 'posting' - - -def _get_paths(base_path, job_uuid): - job_path = k_paths.join(base_path, job_uuid) - lock_path = k_paths.join(base_path, job_uuid, 'lock') - return (job_path, lock_path) - def _check_who(who): if not isinstance(who, six.string_types): @@ -69,7 +58,7 @@ def _check_who(who): class ZookeeperJob(base_job.Job): - def __init__(self, name, board, client, backend, + def __init__(self, name, board, client, backend, path, uuid=None, details=None, book=None, book_data=None): super(ZookeeperJob, self).__init__(name, uuid=uuid, details=details) self._board = board @@ -82,6 +71,16 @@ class ZookeeperJob(base_job.Job): if all((self._book, self._book_data)): raise ValueError("Only one of 'book_data' or 'book'" " can be provided") + self._path = path + self._lock_path = "%s.lock" % (path) + + @property + def lock_path(self): + return self._lock_path + + @property + def path(self): + return self._path @property def board(self): @@ -103,9 +102,8 @@ class ZookeeperJob(base_job.Job): def state(self): owner = self.board.find_owner(self) job_data = {} - job_path, _lock_path = _get_paths(self.board.path, self.uuid) try: - raw_data, _data_stat = self._client.get(job_path) + raw_data, _data_stat = self._client.get(self.path) job_data = misc.decode_json(raw_data) except k_exceptions.NoNodeError: pass @@ -128,6 +126,12 @@ class ZookeeperJob(base_job.Job): return states.UNCLAIMED return states.CLAIMED + def __cmp__(self, other): + return cmp(self.path, other.path) + + def __hash__(self): + return hash(self.path) + @property def book(self): if self._book is None: @@ -170,6 +174,9 @@ class ZookeeperJobBoard(jobboard.JobBoard): self._client.add_listener(self._state_change_listener) self._bad_paths = frozenset([path]) self._job_watcher = None + # Since we use sequenced ids this will be the path that the sequences + # are prefixed with, for example, job0000000001, job0000000002, ... 
+ self._job_base = k_paths.join(path, "job") @property def path(self): @@ -178,13 +185,7 @@ class ZookeeperJobBoard(jobboard.JobBoard): @property def job_count(self): with self._job_mutate: - known_jobs = list(six.itervalues(self._known_jobs)) - count = 0 - for (_job, posting_state) in known_jobs: - if posting_state != _READY: - continue - count += 1 - return count + return len(self._known_jobs) def _force_refresh(self, delayed=False): try: @@ -208,20 +209,18 @@ class ZookeeperJobBoard(jobboard.JobBoard): if only_unclaimed: ok_states = UNCLAIMED_JOB_STATES with self._job_mutate: - known_jobs = list(six.iteritems(self._known_jobs)) - for (path, (job, posting_state)) in known_jobs: - if posting_state != _READY: - continue + known_jobs = list(six.itervalues(self._known_jobs)) + for job in sorted(known_jobs): try: if job.state in ok_states: yield job - except excp.JobFailure as e: - LOG.warn("Failed determining the state of job %s" - " due to: %s", job.uuid, e) + except excp.JobFailure: + LOG.warn("Failed determining the state of job: %s (%s)", + job.uuid, job.path, exc_info=True) except excp.NotFound: # Someone destroyed it while we are iterating. 
with self._job_mutate: - self._remove_job(path) + self._remove_job(job.path) def _remove_job(self, path): LOG.debug("Removing job that was at path: %s", path) @@ -235,11 +234,11 @@ class ZookeeperJobBoard(jobboard.JobBoard): with self._job_mutate: if path not in self._known_jobs: job = ZookeeperJob(job_data['name'], self, - self._client, self._persistence, + self._client, self._persistence, path, uuid=job_data['uuid'], book_data=job_data.get("book"), details=job_data.get("details", {})) - self._known_jobs[path] = (job, _READY) + self._known_jobs[path] = job except (ValueError, TypeError, KeyError): LOG.warn("Incorrectly formatted job data found at path: %s", path, exc_info=True) @@ -263,9 +262,7 @@ class ZookeeperJobBoard(jobboard.JobBoard): # Remove jobs that we know about but which are no longer children with self._job_mutate: removals = set() - for path, (_job, posting_state) in six.iteritems(self._known_jobs): - if posting_state != _READY: - continue + for path, _job in six.iteritems(self._known_jobs): if path not in child_paths: removals.add(path) for path in removals: @@ -289,64 +286,54 @@ class ZookeeperJobBoard(jobboard.JobBoard): else: child_proc(request) - def _format_job(self, job): - posting = { - 'uuid': job.uuid, - 'name': job.name, - } - if job.details is not None: - posting['details'] = job.details - if job.book is not None: - posting['book'] = { - 'name': job.book.name, - 'uuid': job.book.uuid, - } - return misc.binary_encode(jsonutils.dumps(posting)) - def post(self, name, book, details=None): - # Didn't work, clean it up. 
- def try_clean(path): - with self._job_mutate: - self._remove_job(path) + def format_posting(job_uuid): + posting = { + 'uuid': job_uuid, + 'name': name, + } + if details: + posting['details'] = details + else: + posting['details'] = {} + if book is not None: + posting['book'] = { + 'name': book.name, + 'uuid': book.uuid, + } + return posting # NOTE(harlowja): Jobs are not ephemeral, they will persist until they # are consumed (this may change later, but seems safer to do this until # further notice). - job = ZookeeperJob(name, self, - self._client, self._persistence, - book=book, details=details) - job_path, _lock_path = _get_paths(self.path, job.uuid) - # NOTE(harlowja): This avoids the watcher thread from attempting to - # overwrite or delete this job which is not yet ready but is in the - # process of being posted. - with self._job_mutate: - self._known_jobs[job_path] = (job, _POSTING) - with self._wrap(job.uuid, "Posting failure: %s", ensure_known=False): - try: - self._client.create(job_path, value=self._format_job(job)) - with self._job_mutate: - self._known_jobs[job_path] = (job, _READY) - return job - except k_exceptions.NodeExistsException: - try_clean(job_path) - raise excp.Duplicate("Duplicate job %s already posted" - % job.uuid) - except Exception: - with excutils.save_and_reraise_exception(): - try_clean(job_path) + job_uuid = uuidutils.generate_uuid() + with self._wrap(job_uuid, None, + "Posting failure: %s", ensure_known=False): + job_posting = format_posting(job_uuid) + job_posting = misc.binary_encode(jsonutils.dumps(job_posting)) + job_path = self._client.create(self._job_base, + value=job_posting, + sequence=True, + ephemeral=False) + job = ZookeeperJob(name, self, self._client, + self._persistence, job_path, + book=book, details=details, + uuid=job_uuid) + with self._job_mutate: + self._known_jobs[job_path] = job + return job def claim(self, job, who): _check_who(who) - job_path, lock_path = _get_paths(self.path, job.uuid) - with 
self._wrap(job.uuid, "Claiming failure: %s"): + with self._wrap(job.uuid, job.path, "Claiming failure: %s"): # NOTE(harlowja): post as json which will allow for future changes # more easily than a raw string/text. value = jsonutils.dumps({ 'owner': who, }) try: - self._client.create(lock_path, + self._client.create(job.lock_path, value=misc.binary_encode(value), ephemeral=True) except k_exceptions.NodeExistsException: @@ -362,9 +349,14 @@ class ZookeeperJobBoard(jobboard.JobBoard): raise excp.UnclaimableJob(msg) @contextlib.contextmanager - def _wrap(self, job_uuid, fail_msg_tpl="Failure: %s", ensure_known=True): + def _wrap(self, job_uuid, job_path, + fail_msg_tpl="Failure: %s", ensure_known=True): + if job_path: + fail_msg_tpl += " (%s)" % (job_path) if ensure_known: - job_path, _lock_path = _get_paths(self.path, job_uuid) + if not job_path: + raise ValueError("Unable to check if %r is a known path" + % (job_path)) with self._job_mutate: if job_path not in self._known_jobs: fail_msg_tpl += ", unknown job" @@ -385,11 +377,10 @@ class ZookeeperJobBoard(jobboard.JobBoard): raise excp.JobFailure(fail_msg_tpl % (job_uuid), e) def find_owner(self, job): - _job_path, lock_path = _get_paths(self.path, job.uuid) - with self._wrap(job.uuid, "Owner query failure: %s"): + with self._wrap(job.uuid, job.path, "Owner query failure: %s"): try: - self._client.sync(lock_path) - raw_data, _lock_stat = self._client.get(lock_path) + self._client.sync(job.lock_path) + raw_data, _lock_stat = self._client.get(job.lock_path) data = misc.decode_json(raw_data) owner = data.get("owner") except k_exceptions.NoNodeError: @@ -397,16 +388,14 @@ class ZookeeperJobBoard(jobboard.JobBoard): return owner def _get_owner_and_data(self, job): - job_path, lock_path = _get_paths(self.path, job.uuid) - lock_data, lock_stat = self._client.get(lock_path) - job_data, job_stat = self._client.get(job_path) + lock_data, lock_stat = self._client.get(job.lock_path) + job_data, job_stat = 
self._client.get(job.path) return (misc.decode_json(lock_data), lock_stat, misc.decode_json(job_data), job_stat) def consume(self, job, who): _check_who(who) - job_path, lock_path = _get_paths(self.path, job.uuid) - with self._wrap(job.uuid, "Consumption failure: %s"): + with self._wrap(job.uuid, job.path, "Consumption failure: %s"): try: owner_data = self._get_owner_and_data(job) lock_data, lock_stat, data, data_stat = owner_data @@ -418,17 +407,15 @@ class ZookeeperJobBoard(jobboard.JobBoard): raise excp.JobFailure("Can not consume a job %s" " which is not owned by %s" % (job.uuid, who)) - with self._client.transaction() as txn: - txn.delete(lock_path, version=lock_stat.version) - txn.delete(job_path, version=data_stat.version) + txn.delete(job.lock_path, version=lock_stat.version) + txn.delete(job.path, version=data_stat.version) with self._job_mutate: - self._remove_job(job_path) + self._remove_job(job.path) def abandon(self, job, who): _check_who(who) - job_path, lock_path = _get_paths(self.path, job.uuid) - with self._wrap(job.uuid, "Abandonment failure: %s"): + with self._wrap(job.uuid, job.path, "Abandonment failure: %s"): try: owner_data = self._get_owner_and_data(job) lock_data, lock_stat, data, data_stat = owner_data @@ -440,9 +427,8 @@ class ZookeeperJobBoard(jobboard.JobBoard): raise excp.JobFailure("Can not abandon a job %s" " which is not owned by %s" % (job.uuid, who)) - with self._client.transaction() as txn: - txn.delete(lock_path, version=lock_stat.version) + txn.delete(job.lock_path, version=lock_stat.version) def _state_change_listener(self, state): LOG.debug("Kazoo client has changed to state: %s", state) diff --git a/taskflow/jobs/jobboard.py b/taskflow/jobs/jobboard.py index 83e9cb5e..a2a3d29b 100644 --- a/taskflow/jobs/jobboard.py +++ b/taskflow/jobs/jobboard.py @@ -37,6 +37,11 @@ class JobBoard(object): """Yields back jobs that are currently on this jobboard (claimed or not claimed). 
+ NOTE(harlowja): the ordering of this iteration should be by posting + order (oldest to newest) if possible, but it is left up to the backing + implementation to provide the order that best suits it (so don't depend + on it always being oldest to newest). + :param only_unclaimed: boolean that indicates whether to only iteration over unclaimed jobs. :param ensure_fresh: boolean that requests to only iterate over the diff --git a/taskflow/tests/unit/jobs/test_zk_job.py b/taskflow/tests/unit/jobs/test_zk_job.py index 57a7119d..93a074f1 100644 --- a/taskflow/tests/unit/jobs/test_zk_job.py +++ b/taskflow/tests/unit/jobs/test_zk_job.py @@ -102,7 +102,6 @@ class TestZookeeperJobs(test.TestCase): # Check the actual data that was posted. self.assertEqual(1, len(paths)) path_key = list(six.iterkeys(paths))[0] - self.assertIn(posted_job.uuid, path_key) self.assertTrue(len(paths[path_key]['data']) > 0) self.assertDictEqual({ 'uuid': posted_job.uuid, @@ -220,13 +219,10 @@ class TestZookeeperJobs(test.TestCase): self.assertEqual(states.UNCLAIMED, j.state) def test_posting_no_post(self): - - def bad_format(job): - raise UnicodeError("Could not format") - with connect_close(self.board): - with mock.patch.object(self.board, '_format_job', bad_format): - self.assertRaises(UnicodeError, self.board.post, + with mock.patch.object(self.client, 'create') as create_func: + create_func.side_effect = IOError("Unable to post") + self.assertRaises(IOError, self.board.post, 'test', p_utils.temporary_log_book()) self.assertEqual(0, self.board.job_count) diff --git a/test-requirements.txt b/test-requirements.txt index a43c28db..fc0bcba4 100644 --- a/test-requirements.txt +++ b/test-requirements.txt @@ -5,7 +5,7 @@ mock>=1.0 python-subunit>=0.0.18 testrepository>=0.0.18 testtools>=0.9.34 -zake>=0.0.13 +zake>=0.0.15 # docs build jobs sphinx>=1.1.2,<1.2 oslosphinx From 843b48773699f48ee8393e539d524466993a5724 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Thu, 3 Apr 2014 17:49:56 -0700 
Subject: [PATCH 024/188] Allow jobboard event notification Instead of requiring iteration to be able to list new jobs and existing jobs so that they can be claimed, allow for attaching callbacks to the jobboard where instead of requiring iteration those callbacks will be automatically called by the jobboard internally when events are received. Breaking change: renames transition notifier to notifier since it is really not just a notifier for transitions but is a generic notifier of events occurring (and details about those events). This is an internal api so it's not expected that this will cause any issues (it's not expected for external users to be creating instances of this class). Implements bp board-notification Change-Id: I2384d5e335ed9d17e29fec9a78699e3156fa225c --- doc/source/notifications.rst | 10 ++--- taskflow/engines/base.py | 4 +- taskflow/jobs/backends/impl_zookeeper.py | 30 +++++++++++-- taskflow/jobs/jobboard.py | 21 +++++++++ taskflow/listeners/base.py | 4 +- taskflow/listeners/logging.py | 4 +- taskflow/listeners/printing.py | 4 +- taskflow/tests/unit/test_utils.py | 32 +++++++------- taskflow/utils/misc.py | 55 ++++++++++++------------ 9 files changed, 104 insertions(+), 60 deletions(-) diff --git a/doc/source/notifications.rst b/doc/source/notifications.rst index 2bf3e908..8f92317d 100644 --- a/doc/source/notifications.rst +++ b/doc/source/notifications.rst @@ -17,7 +17,7 @@ transitions, which is useful for monitoring, logging, metrics, debugging and plenty of other tasks. To receive these notifications you should register a callback in -:py:class:`~taskflow.utils.misc.TransitionNotifier` provided by engine. +:py:class:`~taskflow.utils.misc.Notifier` provided by engine. Each engine provides two of them: one notifies about flow state changes, and another notifies about changes of tasks.
@@ -30,15 +30,15 @@ Receiving Notifications with Callbacks -------------------------------------- To manage notifications instances of -:py:class:`~taskflow.utils.misc.TransitionNotifier` are used. +:py:class:`~taskflow.utils.misc.Notifier` are used. -.. autoclass:: taskflow.utils.misc.TransitionNotifier +.. autoclass:: taskflow.utils.misc.Notifier Flow Notifications ------------------ To receive notification on flow state changes use -:py:class:`~taskflow.utils.misc.TransitionNotifier` available as +:py:class:`~taskflow.utils.misc.Notifier` available as ``notifier`` property of the engine. A basic example is: .. doctest:: @@ -71,7 +71,7 @@ Task notifications ------------------ To receive notification on task state changes use -:py:class:`~taskflow.utils.misc.TransitionNotifier` available as +:py:class:`~taskflow.utils.misc.Notifier` available as ``task_notifier`` property of the engine. A basic example is: .. doctest:: diff --git a/taskflow/engines/base.py b/taskflow/engines/base.py index e015798a..8a6d42c9 100644 --- a/taskflow/engines/base.py +++ b/taskflow/engines/base.py @@ -35,8 +35,8 @@ class EngineBase(object): else: self._conf = dict(conf) self._storage = None - self.notifier = misc.TransitionNotifier() - self.task_notifier = misc.TransitionNotifier() + self.notifier = misc.Notifier() + self.task_notifier = misc.Notifier() @property def storage(self): diff --git a/taskflow/jobs/backends/impl_zookeeper.py b/taskflow/jobs/backends/impl_zookeeper.py index 6d38f77e..c7be0624 100644 --- a/taskflow/jobs/backends/impl_zookeeper.py +++ b/taskflow/jobs/backends/impl_zookeeper.py @@ -18,6 +18,7 @@ import contextlib import functools import logging +from concurrent import futures from kazoo import exceptions as k_exceptions from kazoo.protocol import paths as k_paths from kazoo.recipe import watchers @@ -146,8 +147,9 @@ class ZookeeperJob(base_job.Job): return self._book -class ZookeeperJobBoard(jobboard.JobBoard): - def __init__(self, name, conf, client=None, 
persistence=None): +class ZookeeperJobBoard(jobboard.NotifyingJobBoard): + def __init__(self, name, conf, + client=None, persistence=None, emit_notifications=True): super(ZookeeperJobBoard, self).__init__(name, conf) if client is not None: self._client = client @@ -177,6 +179,13 @@ class ZookeeperJobBoard(jobboard.JobBoard): # Since we use sequenced ids this will be the path that the sequences # are prefixed with, for example, job0000000001, job0000000002, ... self._job_base = k_paths.join(path, "job") + self._worker = None + self._emit_notifications = bool(emit_notifications) + + def _emit(self, state, details): + # Submit the work to the executor to avoid blocking the kazoo queue. + if self._worker is not None: + self._worker.submit(self.notifier.notify, state, details) @property def path(self): @@ -224,7 +233,12 @@ class ZookeeperJobBoard(jobboard.JobBoard): def _remove_job(self, path): LOG.debug("Removing job that was at path: %s", path) - self._known_jobs.pop(path, None) + job = self._known_jobs.pop(path, None) + if job is not None: + self._emit(jobboard.REMOVAL, + details={ + 'job': job, + }) def _process_child(self, path, request): """Receives the result of a child data fetch request.""" @@ -239,6 +253,10 @@ class ZookeeperJobBoard(jobboard.JobBoard): book_data=job_data.get("book"), details=job_data.get("details", {})) self._known_jobs[path] = job + self._emit(jobboard.POSTED, + details={ + 'job': job, + }) except (ValueError, TypeError, KeyError): LOG.warn("Incorrectly formatted job data found at path: %s", path, exc_info=True) @@ -447,6 +465,10 @@ class ZookeeperJobBoard(jobboard.JobBoard): if self._owned: LOG.debug("Stopping client") kazoo_utils.finalize_client(self._client) + if self._worker is not None: + LOG.debug("Shutting down the notifier") + self._worker.shutdown() + self._worker = None self._clear() LOG.debug("Stopped & cleared local state") @@ -472,6 +494,8 @@ class ZookeeperJobBoard(jobboard.JobBoard): raise excp.JobFailure("Failed to connect to 
zookeeper", e) try: kazoo_utils.check_compatible(self._client, MIN_ZK_VERSION) + if self._worker is None and self._emit_notifications: + self._worker = futures.ThreadPoolExecutor(max_workers=1) self._client.ensure_path(self.path) self._job_watcher = watchers.ChildrenWatch( self._client, diff --git a/taskflow/jobs/jobboard.py b/taskflow/jobs/jobboard.py index a2a3d29b..737251bd 100644 --- a/taskflow/jobs/jobboard.py +++ b/taskflow/jobs/jobboard.py @@ -19,6 +19,8 @@ import abc import six +from taskflow.utils import misc + @six.add_metaclass(abc.ABCMeta) class JobBoard(object): @@ -114,3 +116,22 @@ class JobBoard(object): abandoning a unclaimed job (or a job they do not own) will cause an exception. """ + + +# Jobboard events +POSTED = 'POSTED' # new job is/has been posted +REMOVAL = 'REMOVAL' # existing job is/has been removed + + +class NotifyingJobBoard(JobBoard): + """A jobboard subclass that can notify about jobs being created + and removed, which can remove the repeated usage of iterjobs() to achieve + the same operation. + + NOTE(harlowja): notifications that are emitted *may* be emitted on a + separate dedicated thread when they occur, so ensure that all callbacks + registered are thread safe. 
+ """ + def __init__(self, name, conf): + super(NotifyingJobBoard, self).__init__(name, conf) + self.notifier = misc.Notifier() diff --git a/taskflow/listeners/base.py b/taskflow/listeners/base.py index e8f1674c..352b652a 100644 --- a/taskflow/listeners/base.py +++ b/taskflow/listeners/base.py @@ -46,8 +46,8 @@ class ListenerBase(object): """ def __init__(self, engine, - task_listen_for=(misc.TransitionNotifier.ANY,), - flow_listen_for=(misc.TransitionNotifier.ANY,)): + task_listen_for=(misc.Notifier.ANY,), + flow_listen_for=(misc.Notifier.ANY,)): if not task_listen_for: task_listen_for = [] if not flow_listen_for: diff --git a/taskflow/listeners/logging.py b/taskflow/listeners/logging.py index bcf0cf3d..71bf83f5 100644 --- a/taskflow/listeners/logging.py +++ b/taskflow/listeners/logging.py @@ -33,8 +33,8 @@ class LoggingListener(base.LoggingBase): can also be configured, ``logging.DEBUG`` is used by default. """ def __init__(self, engine, - task_listen_for=(misc.TransitionNotifier.ANY,), - flow_listen_for=(misc.TransitionNotifier.ANY,), + task_listen_for=(misc.Notifier.ANY,), + flow_listen_for=(misc.Notifier.ANY,), log=None, level=logging.DEBUG): super(LoggingListener, self).__init__(engine, diff --git a/taskflow/listeners/printing.py b/taskflow/listeners/printing.py index b8b2cf5d..e9359bf5 100644 --- a/taskflow/listeners/printing.py +++ b/taskflow/listeners/printing.py @@ -26,8 +26,8 @@ from taskflow.utils import misc class PrintingListener(base.LoggingBase): """Writes the task and flow notifications messages to stdout or stderr.""" def __init__(self, engine, - task_listen_for=(misc.TransitionNotifier.ANY,), - flow_listen_for=(misc.TransitionNotifier.ANY,), + task_listen_for=(misc.Notifier.ANY,), + flow_listen_for=(misc.Notifier.ANY,), stderr=False): super(PrintingListener, self).__init__(engine, task_listen_for=task_listen_for, diff --git a/taskflow/tests/unit/test_utils.py b/taskflow/tests/unit/test_utils.py index 1c5c197b..d0bb0695 100644 --- 
a/taskflow/tests/unit/test_utils.py +++ b/taskflow/tests/unit/test_utils.py @@ -149,8 +149,8 @@ class NotifierTest(test.TestCase): def call_me(state, details): call_collector.append((state, details)) - notifier = misc.TransitionNotifier() - notifier.register(misc.TransitionNotifier.ANY, call_me) + notifier = misc.Notifier() + notifier.register(misc.Notifier.ANY, call_me) notifier.notify(states.SUCCESS, {}) notifier.notify(states.SUCCESS, {}) @@ -166,14 +166,14 @@ class NotifierTest(test.TestCase): def call_me_too(self, state, details): pass - notifier = misc.TransitionNotifier() - notifier.register(misc.TransitionNotifier.ANY, call_me) + notifier = misc.Notifier() + notifier.register(misc.Notifier.ANY, call_me) a = A() - notifier.register(misc.TransitionNotifier.ANY, a.call_me_too) + notifier.register(misc.Notifier.ANY, a.call_me_too) self.assertEqual(2, len(notifier)) - notifier.deregister(misc.TransitionNotifier.ANY, call_me) - notifier.deregister(misc.TransitionNotifier.ANY, a.call_me_too) + notifier.deregister(misc.Notifier.ANY, call_me) + notifier.deregister(misc.Notifier.ANY, a.call_me_too) self.assertEqual(0, len(notifier)) def test_notify_reset(self): @@ -181,8 +181,8 @@ class NotifierTest(test.TestCase): def call_me(state, details): pass - notifier = misc.TransitionNotifier() - notifier.register(misc.TransitionNotifier.ANY, call_me) + notifier = misc.Notifier() + notifier.register(misc.Notifier.ANY, call_me) self.assertEqual(1, len(notifier)) notifier.reset() @@ -193,9 +193,9 @@ class NotifierTest(test.TestCase): def call_me(state, details): pass - notifier = misc.TransitionNotifier() + notifier = misc.Notifier() self.assertRaises(KeyError, notifier.register, - misc.TransitionNotifier.ANY, call_me, + misc.Notifier.ANY, call_me, kwargs={'details': 5}) def test_selective_notify(self): @@ -204,21 +204,21 @@ class NotifierTest(test.TestCase): def call_me_on(registered_state, state, details): call_counts[registered_state].append((state, details)) - notifier = 
misc.TransitionNotifier() + notifier = misc.Notifier() notifier.register(states.SUCCESS, functools.partial(call_me_on, states.SUCCESS)) - notifier.register(misc.TransitionNotifier.ANY, + notifier.register(misc.Notifier.ANY, functools.partial(call_me_on, - misc.TransitionNotifier.ANY)) + misc.Notifier.ANY)) self.assertEqual(2, len(notifier)) notifier.notify(states.SUCCESS, {}) - self.assertEqual(1, len(call_counts[misc.TransitionNotifier.ANY])) + self.assertEqual(1, len(call_counts[misc.Notifier.ANY])) self.assertEqual(1, len(call_counts[states.SUCCESS])) notifier.notify(states.FAILURE, {}) - self.assertEqual(2, len(call_counts[misc.TransitionNotifier.ANY])) + self.assertEqual(2, len(call_counts[misc.Notifier.ANY])) self.assertEqual(1, len(call_counts[states.SUCCESS])) self.assertEqual(2, len(call_counts)) diff --git a/taskflow/utils/misc.py b/taskflow/utils/misc.py index 1da28a4c..a2d4a48a 100644 --- a/taskflow/utils/misc.py +++ b/taskflow/utils/misc.py @@ -391,7 +391,7 @@ class StopWatch(object): return self -class TransitionNotifier(object): +class Notifier(object): """A utility helper class that can be used to subscribe to notifications of events occurring as well as allow a entity to post said notifications to subscribers. 
@@ -405,15 +405,14 @@ class TransitionNotifier(object): def __len__(self): """Returns how many callbacks are registered.""" - count = 0 - for (_s, callbacks) in six.iteritems(self._listeners): + for (_event_type, callbacks) in six.iteritems(self._listeners): count += len(callbacks) return count - def is_registered(self, state, callback): + def is_registered(self, event_type, callback): """Check if a callback is registered.""" - listeners = list(self._listeners.get(state, [])) + listeners = list(self._listeners.get(event_type, [])) for (cb, _args, _kwargs) in listeners: if reflection.is_same_callback(cb, callback): return True @@ -423,17 +422,17 @@ class TransitionNotifier(object): """Forget all previously registered callbacks.""" self._listeners.clear() - def notify(self, state, details): - """Notify about state change. + def notify(self, event_type, details): + """Notify about event occurrence. All callbacks registered to receive notifications about given - state will be called. + event type will be called. - :param state: state we moved to - :param details: addition transition details + :param event_type: event type that occured + :param details: addition event details """ listeners = list(self._listeners.get(self.ANY, [])) - for i in self._listeners[state]: + for i in self._listeners[event_type]: if i not in listeners: listeners.append(i) if not listeners: @@ -445,23 +444,23 @@ class TransitionNotifier(object): kwargs = {} kwargs['details'] = details try: - callback(state, *args, **kwargs) + callback(event_type, *args, **kwargs) except Exception: - LOG.warn("Failure calling callback %s to notify about state" - " transition %s, details: %s", - callback, state, details, exc_info=True) + LOG.warn("Failure calling callback %s to notify about event" + " %s, details: %s", callback, event_type, + details, exc_info=True) - def register(self, state, callback, args=None, kwargs=None): - """Register a callback to be called when state is changed. 
+ def register(self, event_type, callback, args=None, kwargs=None): + """Register a callback to be called when event of a given type occurs. Callback will be called with provided ``args`` and ``kwargs`` and - when state is changed to ``state`` (or on any state change if - ``state`` equals to ``TransitionNotifier.ANY``). It will also - get additional keyword argument, ``details``, that will hold - transition details provided to :py:meth:`notify` method. + when event type occurs (or on any event if ``event_type`` equals to + ``Notifier.ANY``). It will also get additional keyword argument, + ``details``, that will hold event details provided to + :py:meth:`notify` method. """ assert six.callable(callback), "Callback must be callable" - if self.is_registered(state, callback): + if self.is_registered(event_type, callback): raise ValueError("Callback %s already registered" % (callback)) if kwargs: for k in self.RESERVED_KEYS: @@ -471,15 +470,15 @@ class TransitionNotifier(object): kwargs = copy.copy(kwargs) if args: args = copy.copy(args) - self._listeners[state].append((callback, args, kwargs)) + self._listeners[event_type].append((callback, args, kwargs)) - def deregister(self, state, callback): - """Remove callback from listening to state ``state``.""" - if state not in self._listeners: + def deregister(self, event_type, callback): + """Remove a single callback from listening to event ``event_type``.""" + if event_type not in self._listeners: return - for i, (cb, args, kwargs) in enumerate(self._listeners[state]): + for i, (cb, args, kwargs) in enumerate(self._listeners[event_type]): if reflection.is_same_callback(cb, callback): - self._listeners[state].pop(i) + self._listeners[event_type].pop(i) break From 8732e4772d7266c9319bd7bada96dc0d13aae4d2 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Wed, 9 Apr 2014 14:25:15 -0700 Subject: [PATCH 025/188] Add last_modified & created_on attributes to jobs In order to be able to allow jobboard object users to know when a 
job was last modified or when the job was created we need to provide accessors to make this possible. These can be used to claim a job after a given period, or for general tracking of which are the oldest jobs... Implements: blueprint job-reference-impl Change-Id: I467bb083d0b143826a44c6aeb6499c483b88fe65 --- taskflow/jobs/backends/impl_zookeeper.py | 62 ++++++++++++++++++++++-- taskflow/jobs/job.py | 10 ++++ taskflow/tests/unit/jobs/test_zk_job.py | 14 ++++++ taskflow/utils/misc.py | 8 +++ 4 files changed, 91 insertions(+), 3 deletions(-) diff --git a/taskflow/jobs/backends/impl_zookeeper.py b/taskflow/jobs/backends/impl_zookeeper.py index c7be0624..b0a53ecc 100644 --- a/taskflow/jobs/backends/impl_zookeeper.py +++ b/taskflow/jobs/backends/impl_zookeeper.py @@ -60,7 +60,8 @@ def _check_who(who): class ZookeeperJob(base_job.Job): def __init__(self, name, board, client, backend, path, - uuid=None, details=None, book=None, book_data=None): + uuid=None, details=None, book=None, book_data=None, + created_on=None): super(ZookeeperJob, self).__init__(name, uuid=uuid, details=details) self._board = board self._book = book @@ -74,6 +75,8 @@ class ZookeeperJob(base_job.Job): " can be provided") self._path = path self._lock_path = "%s.lock" % (path) + self._created_on = created_on + self._node_not_found = False @property def lock_path(self): @@ -83,6 +86,57 @@ class ZookeeperJob(base_job.Job): def path(self): return self._path + def _get_node_attr(self, path, attr_name, trans_func=None): + try: + _data, node_stat = self._client.get(path) + attr = getattr(node_stat, attr_name) + if trans_func is not None: + return trans_func(attr) + else: + return attr + except k_exceptions.NoNodeError as e: + raise excp.NotFound("Can not fetch the %r attribute" + " of job %s (%s), path %s not found" + % (attr_name, self.uuid, self.path, path), e) + except self._client.handler.timeout_exception as e: + raise excp.JobFailure("Can not fetch the %r attribute" + " of job %s (%s), connection timed 
out" + % (attr_name, self.uuid, self.path), e) + except k_exceptions.SessionExpiredError as e: + raise excp.JobFailure("Can not fetch the %r attribute" + " of job %s (%s), session expired" + % (attr_name, self.uuid, self.path), e) + except (AttributeError, k_exceptions.KazooException) as e: + raise excp.JobFailure("Can not fetch the %r attribute" + " of job %s (%s), internal error" % + (attr_name, self.uuid, self.path), e) + + @property + def last_modified(self): + modified_on = None + try: + if not self._node_not_found: + modified_on = self._get_node_attr( + self.path, 'mtime', + trans_func=misc.millis_to_datetime) + except excp.NotFound: + self._node_not_found = True + return modified_on + + @property + def created_on(self): + # This one we can cache (since it won't change after creation). + if self._node_not_found: + return None + if self._created_on is None: + try: + self._created_on = self._get_node_attr( + self.path, 'ctime', + trans_func=misc.millis_to_datetime) + except excp.NotFound: + self._node_not_found = True + return self._created_on + @property def board(self): return self._board @@ -243,15 +297,17 @@ class ZookeeperJobBoard(jobboard.NotifyingJobBoard): def _process_child(self, path, request): """Receives the result of a child data fetch request.""" try: - raw_data, _stat = request.get() + raw_data, node_stat = request.get() job_data = misc.decode_json(raw_data) + created_on = misc.millis_to_datetime(node_stat.ctime) with self._job_mutate: if path not in self._known_jobs: job = ZookeeperJob(job_data['name'], self, self._client, self._persistence, path, uuid=job_data['uuid'], book_data=job_data.get("book"), - details=job_data.get("details", {})) + details=job_data.get("details", {}), + created_on=created_on) self._known_jobs[path] = job self._emit(jobboard.POSTED, details={ diff --git a/taskflow/jobs/job.py b/taskflow/jobs/job.py index fbb98f2d..a0264901 100644 --- a/taskflow/jobs/job.py +++ b/taskflow/jobs/job.py @@ -46,6 +46,16 @@ class Job(object): 
details = {} self._details = details + @abc.abstractproperty + def last_modified(self): + """The datetime the job was last modified.""" + pass + + @abc.abstractproperty + def created_on(self): + """The datetime the job was created on.""" + pass + @abc.abstractproperty def board(self): """The board this job was posted on or was created from.""" diff --git a/taskflow/tests/unit/jobs/test_zk_job.py b/taskflow/tests/unit/jobs/test_zk_job.py index 93a074f1..26a0fec5 100644 --- a/taskflow/tests/unit/jobs/test_zk_job.py +++ b/taskflow/tests/unit/jobs/test_zk_job.py @@ -67,6 +67,20 @@ class TestZookeeperJobs(test.TestCase): self.client.flush() self.assertTrue(self.board.connected) + @mock.patch("taskflow.jobs.backends.impl_zookeeper.misc." + "millis_to_datetime") + def test_posting_dates(self, mock_dt): + epoch = misc.millis_to_datetime(0) + mock_dt.return_value = epoch + + with connect_close(self.board): + j = self.board.post('test', p_utils.temporary_log_book()) + self.client.flush() + self.assertEqual(epoch, j.created_on) + self.assertEqual(epoch, j.last_modified) + + self.assertTrue(mock_dt.called) + def test_fresh_iter(self): with connect_close(self.board): book = p_utils.temporary_log_book() diff --git a/taskflow/utils/misc.py b/taskflow/utils/misc.py index a2d4a48a..ed9a33a7 100644 --- a/taskflow/utils/misc.py +++ b/taskflow/utils/misc.py @@ -18,6 +18,7 @@ import collections import contextlib import copy +import datetime import errno import functools import keyword @@ -106,6 +107,13 @@ def wraps(fn): return wrapper +def millis_to_datetime(milliseconds): + """Converts a given number of milliseconds from the epoch into a datetime + object. + """ + return datetime.datetime.fromtimestamp(float(milliseconds) / 1000) + + def get_version_string(obj): """Gets a object's version as a string. 
From f81cb4ecc5fa14aa7c854f4f91bcf636ecb00c92 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Mon, 21 Apr 2014 16:22:30 -0700 Subject: [PATCH 026/188] Add a job consideration doc Discuss the dual-engine usage consideration and add suggestions around how this can be alleviated when a jobboard is used to claim & perform work. (Removed from gate, zuul lost this change and it log jammed the gate.) Change-Id: I0c2460fc3ff7e9d34b7578d2e967f6bfa489e770 --- doc/source/jobs.rst | 46 +++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 46 insertions(+) diff --git a/doc/source/jobs.rst b/doc/source/jobs.rst index 0bff17c9..470a1f2c 100644 --- a/doc/source/jobs.rst +++ b/doc/source/jobs.rst @@ -192,6 +192,52 @@ Additional *configuration* parameters: your program uses eventlet and you want to instruct kazoo to use an eventlet compatible handler (such as the `eventlet handler`_). +Considerations +============== + +Some usage considerations should be kept in mind when using a jobboard to make sure +it's used in a safe and reliable manner. Eventually we hope to make these +non-issues but for now they are worth mentioning. + +Dual-engine jobs +---------------- + +**What:** Since atoms and engines are not currently `preemptable`_ we can not force +an engine (or the threads/remote workers... it is using to run) to stop working on +an atom (it is generally bad behavior to force code to stop without its consent anyway) if it has +already started working on an atom (short of doing a ``kill -9`` on the running interpreter). +This could cause problems since the points an engine can notice that it no longer owns a +claim are at any :doc:`state ` change that occurs (transitioning to a +new atom or recording a result for example), where upon noticing the claim has +been lost the engine can immediately stop doing further work. 
The effect that this +causes is that when a claim is lost another engine can immediately attempt to acquire +the claim that was previously lost and it *could* begin working on the unfinished tasks +that the later engine may also still be executing (since that engine is not yet +aware that it has lost the claim). + +**TLDR:** not `preemptable`_, possible to become aware of losing a claim +after the fact (at the next state change), another engine could have acquired +the claim by then, therefore both would be *working* on a job. + +**Alleviate by:** + +#. Ensure your atoms are `idempotent`_, this will cause an engine that may be + executing the same atom to be able to continue executing without causing + any conflicts/problems (idempotency guarantees this). +#. On claiming jobs that have been claimed previously enforce a policy that happens + before the jobs workflow begins to execute (possibly prior to an engine beginning + the jobs work) that ensures that any prior work has been rolled back before + continuing rolling forward. For example: + + * Rolling back the last atom/set of atoms that finished. + * Rolling back the last state change that occurred. + +#. Delay claiming partially completed work by adding a wait period (to allow the + previous engine to coalesce) before working on a partially completed job (combine + this with the prior suggestions and dual-engine issues should be avoided). + +.. _idempotent: http://en.wikipedia.org/wiki/Idempotence +.. 
_preemptable: http://en.wikipedia.org/wiki/Preemption_%28computing%29 Job Interface ============= From ef72c4dcdeaf02fe4d5787f9ac650e75d1827dfb Mon Sep 17 00:00:00 2001 From: Stanislav Kudriashev Date: Wed, 23 Apr 2014 23:42:44 +0200 Subject: [PATCH 027/188] Fix documentation spelling errors Change-Id: Ic5a06195cba7c27ff7664fcb8e8b514d7dc31cb7 --- doc/source/engines.rst | 2 +- doc/source/jobs.rst | 22 +++++++++++----------- doc/source/persistence.rst | 6 +++--- 3 files changed, 15 insertions(+), 15 deletions(-) diff --git a/doc/source/engines.rst b/doc/source/engines.rst index 5c3a3020..62d3a1a3 100644 --- a/doc/source/engines.rst +++ b/doc/source/engines.rst @@ -111,7 +111,7 @@ operates. .. note:: This engine is under active development and is experimental but it is - useable and does work but is missing some features (please check the + usable and does work but is missing some features (please check the `blueprint page`_ for known issues and plans) that will make it more production ready. diff --git a/doc/source/jobs.rst b/doc/source/jobs.rst index 0bff17c9..002e60dc 100644 --- a/doc/source/jobs.rst +++ b/doc/source/jobs.rst @@ -24,12 +24,12 @@ Features - High availability - - Guarantees workflow forward progress by transfering partially completed work - or work that has not been started to entities which can either resume the - previously partially completed work or begin initial work to ensure that - the workflow as a whole progresses (where progressing implies transitioning - through the workflow :doc:`patterns ` and :doc:`atoms ` - and completing their associated state transitions). 
+ - Guarantees workflow forward progress by transferring partially complete + work or work that has not been started to entities which can either resume + the previously partially completed work or begin initial work to ensure + that the workflow as a whole progresses (where progressing implies + transitioning through the workflow :doc:`patterns ` and + :doc:`atoms ` and completing their associated state transitions). - Atomic transfer and single ownership @@ -46,7 +46,7 @@ Features - Jobs can be created with logbooks that contain a specification of the work to be done by a entity (such as an API server). The job then can be - completed by a entity that is watching that jobboard (not neccasarily the + completed by a entity that is watching that jobboard (not necessarily the API server itself). This creates a disconnection between work formation and work completion that is useful for scaling out horizontally. @@ -111,7 +111,7 @@ of a job) might look like: Consumption of jobs is similarly achieved by creating a jobboard and using the iteration functionality to find and claim jobs (and eventually consume -them). The typical usage of a joboard for consumption (and work completion) +them). The typical usage of a jobboard for consumption (and work completion) might look like: .. code-block:: python @@ -188,9 +188,9 @@ Additional *configuration* parameters: * ``timeout``: the timeout used when performing operations with zookeeper; only used if a client is not provided. * ``handler``: a class that provides ``kazoo.handlers``-like interface; it will - be used internally by `kazoo`_ to perform asynchronous operations, useful when - your program uses eventlet and you want to instruct kazoo to use an eventlet - compatible handler (such as the `eventlet handler`_). + be used internally by `kazoo`_ to perform asynchronous operations, useful + when your program uses eventlet and you want to instruct kazoo to use an + eventlet compatible handler (such as the `eventlet handler`_). 
Job Interface diff --git a/doc/source/persistence.rst b/doc/source/persistence.rst index 4981b83c..f7fe810c 100644 --- a/doc/source/persistence.rst +++ b/doc/source/persistence.rst @@ -5,10 +5,10 @@ Persistence Overview ======== -In order to be able to recieve inputs and create outputs from atoms (or other +In order to be able to receive inputs and create outputs from atoms (or other engine processes) in a fault-tolerant way, there is a need to be able to place what atoms output in some kind of location where it can be re-used by other -atoms (or used for other purposes). To accomodate this type of usage taskflow +atoms (or used for other purposes). To accommodate this type of usage taskflow provides an abstraction (provided by pluggable `stevedore`_ backends) that is similar in concept to a running programs *memory*. @@ -39,7 +39,7 @@ How it is used ============== On :doc:`engine ` construction typically a backend (it can be optional) -will be provided which satisifies the :py:class:`~taskflow.persistence.backends.base.Backend` +will be provided which satisfies the :py:class:`~taskflow.persistence.backends.base.Backend` abstraction. Along with providing a backend object a :py:class:`~taskflow.persistence.logbook.FlowDetail` object will also be created and provided (this object will contain the details about the flow to be ran) to the engine constructor (or associated :py:meth:`load() ` helper functions). From abd97f71e54515c057e94f7d21aa953faba3f5fc Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Thu, 24 Apr 2014 15:33:24 -0700 Subject: [PATCH 028/188] Add an example that activates a future when a result is ready To allow for an engine to continue to run while at the same time returning from a function when a component of that engine finishes a pattern can be used that ties an engine's listeners to the function return, allowing for both to be used simultaneously. 
Change-Id: Iab49e0c7b233138bc2d02247ab7aa3d99a82cd67 --- taskflow/examples/delayed_return.py | 90 +++++++++++++++++++++++++++++ 1 file changed, 90 insertions(+) create mode 100644 taskflow/examples/delayed_return.py diff --git a/taskflow/examples/delayed_return.py b/taskflow/examples/delayed_return.py new file mode 100644 index 00000000..e77b961c --- /dev/null +++ b/taskflow/examples/delayed_return.py @@ -0,0 +1,90 @@ +# -*- coding: utf-8 -*- + +# Copyright (C) 2014 Yahoo! Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import logging +import os +import sys + +from concurrent import futures + +logging.basicConfig(level=logging.ERROR) + +self_dir = os.path.abspath(os.path.dirname(__file__)) +top_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), + os.pardir, + os.pardir)) +sys.path.insert(0, top_dir) +sys.path.insert(0, self_dir) + +# INTRO: in this example linear_flow we will attach a listener to an engine +# and delay the return from a function until after the result of a task has +# occured in that engine. The engine will continue running (in the background) +# while the function will have returned. 
+ +import taskflow.engines + +from taskflow.listeners import base +from taskflow.patterns import linear_flow as lf +from taskflow import states +from taskflow import task +from taskflow.utils import misc + + +class PokeFutureListener(base.ListenerBase): + def __init__(self, engine, future, task_name): + super(PokeFutureListener, self).__init__( + engine, + task_listen_for=(misc.Notifier.ANY,), + flow_listen_for=[]) + self._future = future + self._task_name = task_name + + def _task_receiver(self, state, details): + if state in (states.SUCCESS, states.FAILURE): + if details.get('task_name') == self._task_name: + if state == states.SUCCESS: + self._future.set_result(details['result']) + else: + failure = details['result'] + self._future.set_exception(failure.exception) + + +class Hi(task.Task): + def execute(self): + # raise IOError("I broken") + return 'hi' + + +class Bye(task.Task): + def execute(self): + return 'bye' + + +def return_from_flow(pool): + wf = lf.Flow("root").add(Hi("hi"), Bye("bye")) + eng = taskflow.engines.load(wf, engine_conf='serial') + f = futures.Future() + watcher = PokeFutureListener(eng, f, 'hi') + watcher.register() + pool.submit(eng.run) + return (eng, f.result()) + + +with futures.ThreadPoolExecutor(1) as pool: + engine, hi_result = return_from_flow(pool) + print(hi_result) + +print(engine.storage.get_flow_state()) From 48b310200a3ac8550327d350594b7865088874e3 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Thu, 24 Apr 2014 17:01:38 -0700 Subject: [PATCH 029/188] Add a persistence backend fetching context manager Allow the persistence backends to be fetched using a new helper method that can be used as a context manager, it will fetch the backend, ensure it's upgraded and upon context manager exit will close the backend automatically. 
Change-Id: I1bf8e43dcce25c02823cca92e3e7ed3ef254a847 --- taskflow/persistence/backends/__init__.py | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/taskflow/persistence/backends/__init__.py b/taskflow/persistence/backends/__init__.py index 5cf30243..29c71d8c 100644 --- a/taskflow/persistence/backends/__init__.py +++ b/taskflow/persistence/backends/__init__.py @@ -14,6 +14,7 @@ # License for the specific language governing permissions and limitations # under the License. +import contextlib import logging import re @@ -53,3 +54,14 @@ def fetch(conf, namespace=BACKEND_NAMESPACE, **kwargs): return mgr.driver except RuntimeError as e: raise exc.NotFound("Could not find backend %s: %s" % (backend_name, e)) + + +@contextlib.contextmanager +def backend(conf, namespace=BACKEND_NAMESPACE, **kwargs): + """Fetches a persistence backend, ensures that it is upgraded and upon + context manager completion closes the backend. + """ + with contextlib.closing(fetch(conf, namespace=namespace, **kwargs)) as be: + with contextlib.closing(be.get_connection()) as conn: + conn.upgrade() + yield be From 54bb34bd4377bc699b7e5b82aa509fcfa2799b08 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Fri, 25 Apr 2014 11:44:48 -0700 Subject: [PATCH 030/188] Skip loading (and failing to load) lock files Ignore lock files (and other hidden files) that zookeeper will notify the jobboard about and avoid reading these as potential jobs (which they are not). 
Fixes bug 1312843 Change-Id: Ifa1ed31e22aed838f9baf883a6faaec00187663a --- taskflow/jobs/backends/impl_zookeeper.py | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/taskflow/jobs/backends/impl_zookeeper.py b/taskflow/jobs/backends/impl_zookeeper.py index b0a53ecc..eb555a36 100644 --- a/taskflow/jobs/backends/impl_zookeeper.py +++ b/taskflow/jobs/backends/impl_zookeeper.py @@ -49,6 +49,8 @@ ALL_JOB_STATES = ( # Transaction support was added in 3.4.0 MIN_ZK_VERSION = (3, 4, 0) +LOCK_POSTFIX = ".lock" +JOB_PREFIX = 'job' def _check_who(who): @@ -74,7 +76,7 @@ class ZookeeperJob(base_job.Job): raise ValueError("Only one of 'book_data' or 'book'" " can be provided") self._path = path - self._lock_path = "%s.lock" % (path) + self._lock_path = path + LOCK_POSTFIX self._created_on = created_on self._node_not_found = False @@ -232,7 +234,7 @@ class ZookeeperJobBoard(jobboard.NotifyingJobBoard): self._job_watcher = None # Since we use sequenced ids this will be the path that the sequences # are prefixed with, for example, job0000000001, job0000000002, ... 
- self._job_base = k_paths.join(path, "job") + self._job_base = k_paths.join(path, JOB_PREFIX) self._worker = None self._emit_notifications = bool(emit_notifications) @@ -331,7 +333,12 @@ class ZookeeperJobBoard(jobboard.NotifyingJobBoard): def _on_job_posting(self, children, delayed=True): LOG.debug("Got children %s under path %s", children, self.path) - child_paths = [k_paths.join(self.path, c) for c in children] + child_paths = [] + for c in children: + if c.endswith(LOCK_POSTFIX) or not c.startswith(JOB_PREFIX): + # Skip lock paths or non-job-paths (these are not valid jobs) + continue + child_paths.append(k_paths.join(self.path, c)) # Remove jobs that we know about but which are no longer children with self._job_mutate: From 50c2d5484d75668f00e2481b77e29653cf8a373e Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Sat, 26 Apr 2014 11:22:09 -0700 Subject: [PATCH 031/188] Docs and cleanups for test_examples runner - Ensure test method name is safe to use - Add docs to explain what the functions are doing - Skip no_test files (as well as utils files) Change-Id: I5d0c9f354f1c5c7be36575a1e2288442a160129b --- taskflow/tests/test_examples.py | 34 +++++++++++++++++++++++---------- 1 file changed, 24 insertions(+), 10 deletions(-) diff --git a/taskflow/tests/test_examples.py b/taskflow/tests/test_examples.py index 50404216..43f8f4d9 100644 --- a/taskflow/tests/test_examples.py +++ b/taskflow/tests/test_examples.py @@ -40,6 +40,9 @@ ROOT_DIR = os.path.abspath( os.path.dirname( os.path.dirname(__file__)))) +UUID_RE = re.compile('XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX' + .replace('X', '[0-9a-f]')) + def root_path(*args): return os.path.join(ROOT_DIR, *args) @@ -71,19 +74,26 @@ def expected_output_path(name): def list_examples(): examples_dir = root_path('taskflow', 'examples') for filename in os.listdir(examples_dir): + path = os.path.join(examples_dir, filename) + if not os.path.isfile(path): + continue name, ext = os.path.splitext(filename) - if ext == ".py" and 'utils' 
not in name.lower(): + if ext != ".py": + continue + bad_endings = [] + for i in ("utils", "no_test"): + if name.endswith(i): + bad_endings.append(True) + if not any(bad_endings): yield name class ExamplesTestCase(taskflow.test.TestCase): - maxDiff = None # sky's the limit - - uuid_re = re.compile('XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX' - .replace('X', '[0-9a-f]')) - @classmethod def update(cls): + """For each example, adds on a test method that the testing framework + will then run. + """ def add_test_method(name, method_name): def test_example(self): self._check_example(name) @@ -91,18 +101,22 @@ class ExamplesTestCase(taskflow.test.TestCase): setattr(cls, method_name, test_example) for name in list_examples(): - add_test_method(name, 'test_%s' % name) + safe_name = str(re.sub("[^a-zA-Z0-9_]+", "_", name)) + if re.match(r"^[_]+$", safe_name): + continue + add_test_method(name, 'test_%s' % safe_name) def _check_example(self, name): + """Runs the example, and checks the output against expected output.""" output = run_example(name) eop = expected_output_path(name) if os.path.isfile(eop): with open(eop) as f: expected_output = f.read() # NOTE(imelnikov): on each run new uuid is generated, so we just - # replace them with some constant string - output = self.uuid_re.sub('', output) - expected_output = self.uuid_re.sub('', expected_output) + # replace them with some constant string + output = UUID_RE.sub('', output) + expected_output = UUID_RE.sub('', expected_output) self.assertEqual(output, expected_output) ExamplesTestCase.update() From 7f6ef479c19baeddd876384c9f8a935f59f9ccbb Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Sat, 26 Apr 2014 13:47:15 -0700 Subject: [PATCH 032/188] Add a resumption strategy doc Move docs from wiki to developer docs and add on and adjust to reflect the current state of things. 
Change-Id: I50ab1ebeb33074d1fbc7493749d0d518b66de69e --- doc/source/index.rst | 1 + doc/source/persistence.rst | 3 +- doc/source/resumption.rst | 156 +++++++++++++++++++++++++++++++++++++ 3 files changed, 158 insertions(+), 2 deletions(-) create mode 100644 doc/source/resumption.rst diff --git a/doc/source/index.rst b/doc/source/index.rst index 84075223..a59575c6 100644 --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -18,6 +18,7 @@ Contents inputs_and_outputs notifications persistence + resumption exceptions utils states diff --git a/doc/source/persistence.rst b/doc/source/persistence.rst index f7fe810c..0a1f84b6 100644 --- a/doc/source/persistence.rst +++ b/doc/source/persistence.rst @@ -58,7 +58,7 @@ objects for each atom in the workflow the engine will be executing. **Execution:** When an engine beings to execute it will examine any previously existing :py:class:`~taskflow.persistence.logbook.AtomDetail` objects to see if they can be used -for resuming; see `big picture`_ for more details on this subject. For atoms which have not +for resuming; see :doc:`resumption ` for more details on this subject. For atoms which have not finished (or did not finish correctly from a previous run) they will begin executing only after any dependent inputs are ready. This is done by analyzing the execution graph and looking at predecessor :py:class:`~taskflow.persistence.logbook.AtomDetail` @@ -88,7 +88,6 @@ A few scenarios come to mind: of map-reduce jobs on them. .. _hdfs: https://hadoop.apache.org/docs/current/hadoop-project-dist/hadoop-hdfs/HdfsUserGuide.html -.. _big picture: https://wiki.openstack.org/wiki/TaskFlow/Patterns_and_Engines/Persistence#Big_Picture .. 
note:: diff --git a/doc/source/resumption.rst b/doc/source/resumption.rst new file mode 100644 index 00000000..b80fa909 --- /dev/null +++ b/doc/source/resumption.rst @@ -0,0 +1,156 @@ +---------- +Resumption +---------- + +Overview +======== + +**Question**: *How can we persist the flow so that it can be resumed, restarted or +rolled-back on engine failure?* + +**Answer:** Since a flow is a set of :doc:`atoms ` and relations between atoms we +need to create a model and corresponding information that allows us to persist +the *right* amount of information to preserve, resume, and rollback a flow on +software or hardware failure. + +To allow for resumption taskflow must be able to re-create the flow and re-connect +the links between atom (and between atoms->atom details and so on) in order to +revert those atoms or resume those atoms in the correct ordering. Taskflow provides +a pattern that can help in automating this process (it does **not** prohibit the user +from creating their own strategies for doing this). + +Factories +========= + +The default provided way is to provide a `factory`_ function which will create (or +recreate your workflow). This function can be provided when loading +a flow and corresponding engine via the provided +:py:meth:`load_from_factory() ` method. This +`factory`_ function is expected to be a function (or ``staticmethod``) which is reimportable (aka +has a well defined name that can be located by the ``__import__`` function in python, this +excludes ``lambda`` style functions and ``instance`` methods). The `factory`_ function +name will be saved into the logbook and it will be imported and called to create the +workflow objects (or recreate it if resumption happens). This allows for the flow +to be recreated if and when that is needed (even on remote machines, as long as the +reimportable name can be located). + +.. 
_factory: https://en.wikipedia.org/wiki/Factory_%28object-oriented_programming%29 + +Names +===== + +When a flow is created it is expected that each atom has a unique name, this +name serves a special purpose in the resumption process (as well as serving +a useful purpose when running, allowing for atom identification in the +:doc:`notification ` process). The reason for having names is that +an atom in a flow needs to be somehow matched with (a potentially) +existing :py:class:`~taskflow.persistence.logbook.AtomDetail` during engine +resumption & subsequent running. + +The match should be: + +* stable if atoms are added or removed +* should not change when service is restarted, upgraded... +* should be the same across all server instances in HA setups + +Names provide this although they do have weaknesses: + +* the names of atoms must be unique in flow +* it becomes hard to change the name of atom since a name change causes other + side-effects + +.. note:: + + Even though these weaknesses names were selected as a *good enough* solution for the above + matching requirements (until something better is invented/created that can satisfy those + same requirements). + +Scenarios +========= + +When new flow is loaded into engine, there is no persisted data +for it yet, so a corresponding :py:class:`~taskflow.persistence.logbook.FlowDetail` object +will be created, as well as a :py:class:`~taskflow.persistence.logbook.AtomDetail` object for +each atom that is contained in it. These will be immediately saved into the persistence backend +that is configured. If no persistence backend is configured, then as expected nothing will be +saved and the atoms and flow will be ran in a non-persistent manner. + +**Subsequent run:** When we resume the flow from a persistent backend (for example, +if the flow was interrupted and engine destroyed to save resources or if the +service was restarted), we need to re-create the flow. 
For that, we will call +the function that was saved on first-time loading that builds the flow for +us (aka; the flow factory function described above) and the engine will run. The +following scenarios explain some expected structural changes and how they can +be accommodated (and what the effect will be when resuming & running). + +Same atoms +---------- + +When the factory function mentioned above returns the exact same the flow and +atoms (no changes are performed). + +**Runtime change:** Nothing should be done -- the engine will re-associate +atoms with :py:class:`~taskflow.persistence.logbook.AtomDetail` objects by name +and then the engine resumes. + +Atom was added +-------------- + +When the factory function mentioned above alters the flow by adding +a new atom in (for example for changing the runtime structure of what was previously +ran in the first run). + +**Runtime change:** By default when the engine resumes it will notice that +a corresponding :py:class:`~taskflow.persistence.logbook.AtomDetail` does not +exist and one will be created and associated. + +Atom was removed +---------------- + +When the factory function mentioned above alters the flow by removing +a new atom in (for example for changing the runtime structure of what was previously +ran in the first run). + +**Runtime change:** Nothing should be done -- flow structure is reloaded from factory +function, and removed atom is not in it -- so, flow will be ran as if it was +not there, and any results it returned if it was completed before will be ignored. + +Atom code was changed +--------------------- + +When the factory function mentioned above alters the flow by deciding that a newer +version of a previously existing atom should be ran (possibly to perform some +kind of upgrade or to fix a bug in a prior atoms code). + +**Factory change:** The atom name & version will have to be altered. The +factory should replace this name where it was being used previously. 
+ +**Runtime change:** This will fall under the same runtime adjustments that exist +when a new atom is added. In the future taskflow could make this easier by +providing a ``upgrade()`` function that can be used to give users the ability +to upgrade atoms before running (manual introspection & modification of a +:py:class:`~taskflow.persistence.logbook.LogBook` can be done before engine loading +and running to accomplish this in the meantime). + +Atom was split in two atoms or merged from two (or more) to one atom +-------------------------------------------------------------------- + +When the factory function mentioned above alters the flow by deciding that a previously +existing atom should be split into N atoms or the factory function decides that N atoms +should be merged in Date: Sat, 26 Apr 2014 18:13:48 -0700 Subject: [PATCH 033/188] Fix spelling error Fix 'ans' -> 'and' in atom detail. Change-Id: I0c33b03d3b475791960f63450acf2e639184ae1f --- taskflow/persistence/logbook.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/taskflow/persistence/logbook.py b/taskflow/persistence/logbook.py index 2de58485..31815a1d 100644 --- a/taskflow/persistence/logbook.py +++ b/taskflow/persistence/logbook.py @@ -361,7 +361,7 @@ class AtomDetail(object): @abc.abstractmethod def reset(self, state): - """Resets detail results ans failures.""" + """Resets detail results and failures.""" class TaskDetail(AtomDetail): From 964a37df9af277fe6edc48f2da47a156ea265e7c Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Sat, 26 Apr 2014 19:30:19 -0700 Subject: [PATCH 034/188] Documentation tune-ups Change-Id: Iac0ddd4948ab364e1905327978bf449b52294388 --- doc/source/engines.rst | 26 +++++++-------- doc/source/jobs.rst | 61 +++++++++++++++--------------------- doc/source/notifications.rst | 2 +- doc/source/persistence.rst | 20 +++++++++--- 4 files changed, 54 insertions(+), 55 deletions(-) diff --git a/doc/source/engines.rst b/doc/source/engines.rst index 
62d3a1a3..359cea6b 100644 --- a/doc/source/engines.rst +++ b/doc/source/engines.rst @@ -23,8 +23,8 @@ parts of :py:class:`linear flow ` are run one after another, in order, even if engine is *capable* of running tasks in parallel). -Creating Engines -================ +Creating +======== All engines are mere classes that implement the same interface, and of course it is possible to import them and create instances just like with any classes @@ -43,8 +43,8 @@ might look like:: .. automodule:: taskflow.engines.helpers -Engine Configuration -==================== +Usage +===== To select which engine to use and pass parameters to an engine you should use the ``engine_conf`` parameter any helper factory function accepts. It may be: @@ -53,10 +53,8 @@ the ``engine_conf`` parameter any helper factory function accepts. It may be: * a dictionary, holding engine type with key ``'engine'`` and possibly type-specific engine parameters. -Known engine types are listed below. - -Single-Threaded Engine ----------------------- +Single-Threaded +--------------- **Engine type**: ``'serial'`` @@ -70,8 +68,8 @@ on. This engine is used by default. greenthreads and monkey patching). See `eventlet `_ and `greenlet `_ for more details. -Parallel Engine ---------------- +Parallel +-------- **Engine type**: ``'parallel'`` @@ -96,8 +94,8 @@ Additional configuration parameters: Running tasks with ``concurrent.futures.ProcessPoolExecutor`` is not supported now. -Worker-Based Engine -------------------- +Worker-Based +------------ **Engine type**: ``'worker-based'`` @@ -118,8 +116,8 @@ operates. .. _wiki page: https://wiki.openstack.org/wiki/TaskFlow/Worker-based_Engine .. _blueprint page: https://blueprints.launchpad.net/taskflow -Engine Interface -================ +Interfaces +========== .. 
automodule:: taskflow.engines.base diff --git a/doc/source/jobs.rst b/doc/source/jobs.rst index 0d021696..374eb3c9 100644 --- a/doc/source/jobs.rst +++ b/doc/source/jobs.rst @@ -19,8 +19,29 @@ claiming them, and only remove them from the queue when they're done with the work. If the consumer fails, the lock is *automatically* released and the item is back on the queue for further consumption. +For more information, please see `wiki page`_ for more details. + +Definitions +=========== + +Jobs + A :py:class:`job ` consists of a unique identifier, name, + and a reference to a :py:class:`logbook ` + which contains the details of the work that has been or should be/will be + completed to finish the work that has been created for that job. + +Jobboards + A :py:class:`jobboard ` is responsible for managing + the posting, ownership, and delivery of jobs. It acts as the location where jobs + can be posted, claimed and searched for; typically by iteration or notification. + Jobboards may be backed by different *capable* implementations (each with potentially differing + configuration) but all jobboards implement the same interface and semantics so + that the backend usage is as transparent as possible. This allows deployers or + developers of a service that uses TaskFlow to select a jobboard implementation + that fits their setup (and there intended usage) best. + Features --------- +======== - High availability @@ -58,29 +79,8 @@ Features user to poll for status (similar in concept to a shipping *tracking* identifier created by fedex or UPS). -For more information, please see `wiki page`_ for more details. - -Jobs ----- - -A job consists of a unique identifier, name, and a reference to a logbook -which contains the details of the work that has been or should be/will be -completed to finish the work that has been created for that job. - -Jobboards ---------- - -A jobboard is responsible for managing the posting, ownership, and delivery -of jobs. 
It acts as the location where jobs can be posted, claimed and searched -for; typically by iteration or notification. Jobboards may be backed by -different *capable* implementations (each with potentially differing -configuration) but all jobboards implement the same interface and semantics so -that the backend usage is as transparent as possible. This allows deployers or -developers of a service that uses TaskFlow to select a jobboard implementation -that fits their setup (and there intended usage) best. - -Using Jobboards -=============== +Usage +===== All engines are mere classes that implement same interface, and of course it is possible to import them and create their instances just like with any classes @@ -156,11 +156,6 @@ might look like: time.sleep(coffee_break_time) ... -Jobboard Configuration -====================== - -Known engine types are listed below. - Zookeeper --------- @@ -239,15 +234,11 @@ the claim by then, therefore both would be *working* on a job. .. _idempotent: http://en.wikipedia.org/wiki/Idempotence .. _preemptable: http://en.wikipedia.org/wiki/Preemption_%28computing%29 -Job Interface -============= +Interfaces +========== .. automodule:: taskflow.jobs.backends .. automodule:: taskflow.jobs.job - -Jobboard Interface -================== - .. automodule:: taskflow.jobs.jobboard .. 
_wiki page: https://wiki.openstack.org/wiki/TaskFlow/Paradigm_shifts#Workflow_ownership_transfer diff --git a/doc/source/notifications.rst b/doc/source/notifications.rst index 8f92317d..88969bdd 100644 --- a/doc/source/notifications.rst +++ b/doc/source/notifications.rst @@ -67,7 +67,7 @@ To receive notification on flow state changes use woof Flow 'cat-dog' transition to state SUCCESS -Task notifications +Task Notifications ------------------ To receive notification on task state changes use diff --git a/doc/source/persistence.rst b/doc/source/persistence.rst index f7fe810c..75d1fab3 100644 --- a/doc/source/persistence.rst +++ b/doc/source/persistence.rst @@ -105,8 +105,8 @@ A few scenarios come to mind: (when data is saved -- every time it changes or at some particular moments or simply never). -Persistence Configuration -========================= +Usage +===== To select which persistence backend to use you should use the :py:meth:`fetch() ` function which uses @@ -137,13 +137,17 @@ the following: ``'connection'`` and possibly type-specific backend parameters as other keys. -Known engine types are listed below. +Memory +------ **Connection**: ``'memory'`` Retains all data in local memory (not persisted to reliable storage). Useful for scenarios where persistence is not required (and also in unit tests). +Files +----- + **Connection**: ``'dir'`` or ``'file'`` Retains all data in a directory & file based structure on local disk. Will be @@ -152,6 +156,9 @@ from the same local machine only). Useful for cases where a *more* reliable persistence is desired along with the simplicity of files and directories (a concept everyone is familiar with). +Sqlalchemy +---------- + **Connection**: ``'mysql'`` or ``'postgres'`` or ``'sqlite'`` Retains all data in a `ACID`_ compliant database using the `sqlalchemy`_ library @@ -163,6 +170,9 @@ does not apply when using sqlite). .. _sqlalchemy: http://www.sqlalchemy.org/docs/ .. 
_ACID: https://en.wikipedia.org/wiki/ACID +Zookeeper +--------- + **Connection**: ``'zookeeper'`` Retains all data in a `zookeeper`_ backend (zookeeper exposes operations on @@ -176,8 +186,8 @@ as the database connection types listed previously). .. _zookeeper: http://zookeeper.apache.org .. _kazoo: http://kazoo.readthedocs.org/ -Persistence Backend Interfaces -============================== +Interfaces +========== .. automodule:: taskflow.persistence.backends .. automodule:: taskflow.persistence.backends.base From 02b9ce410cbbb429f8f42a5c191f24b03ad9c45c Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Sat, 26 Apr 2014 22:04:56 -0700 Subject: [PATCH 035/188] Fix "occured" -> "occurred" Change-Id: I96383a4cd436a6b923a1c951ee41a28f5b73a14b --- taskflow/engines/action_engine/graph_action.py | 2 +- taskflow/utils/misc.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/taskflow/engines/action_engine/graph_action.py b/taskflow/engines/action_engine/graph_action.py index b27b69d0..34a3943c 100644 --- a/taskflow/engines/action_engine/graph_action.py +++ b/taskflow/engines/action_engine/graph_action.py @@ -78,7 +78,7 @@ class FutureGraphAction(object): not_done, _WAITING_TIMEOUT) # Analyze the results and schedule more nodes (unless we had - # failures). If failures occured just continue processing what + # failures). If failures occurred just continue processing what # is running (so that we don't leave it abandoned) but do not # schedule anything new. next_nodes = set() diff --git a/taskflow/utils/misc.py b/taskflow/utils/misc.py index ed9a33a7..0e3a1c3d 100644 --- a/taskflow/utils/misc.py +++ b/taskflow/utils/misc.py @@ -436,7 +436,7 @@ class Notifier(object): All callbacks registered to receive notifications about given event type will be called. 
- :param event_type: event type that occured + :param event_type: event type that occurred :param details: addition event details """ listeners = list(self._listeners.get(self.ANY, [])) From cb2ea424c7fd5637e03fcf9d306a3e1cb9f4acc0 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Thu, 3 Apr 2014 17:49:10 -0700 Subject: [PATCH 036/188] Integrate urlparse for configuration augmentation Allow the uri passed to various entrypoints to itself contain most of the configuration for that entrypoint by using the RFC 3986 uri format to allow for additional parameters to be used in configuring and selecting those entrypoints. Also bring over network_utils from oslo since it provides a more compliant url splitting support. Part of blueprint persistence-uris Change-Id: I8e5d8584d2cac4f9b338fa155f220cdfd52bb7d9 --- openstack-common.conf | 1 + taskflow/engines/helpers.py | 12 ++- taskflow/jobs/backends/__init__.py | 10 +- taskflow/openstack/common/network_utils.py | 108 +++++++++++++++++++++ taskflow/persistence/backends/__init__.py | 20 ++-- taskflow/tests/unit/test_utils.py | 45 +++++++++ taskflow/utils/misc.py | 87 +++++++++++++++++ 7 files changed, 268 insertions(+), 15 deletions(-) create mode 100644 taskflow/openstack/common/network_utils.py diff --git a/openstack-common.conf b/openstack-common.conf index 24d2fc0f..8adc059c 100644 --- a/openstack-common.conf +++ b/openstack-common.conf @@ -6,6 +6,7 @@ module=importutils module=jsonutils module=timeutils module=uuidutils +module=network_utils script=tools/run_cross_tests.sh diff --git a/taskflow/engines/helpers.py b/taskflow/engines/helpers.py index a4435487..876577ff 100644 --- a/taskflow/engines/helpers.py +++ b/taskflow/engines/helpers.py @@ -21,6 +21,7 @@ import stevedore.driver from taskflow.openstack.common import importutils from taskflow.persistence import backends as p_backends +from taskflow.utils import misc from taskflow.utils import persistence_utils as p_utils from taskflow.utils import reflection @@ -80,6 
+81,15 @@ def load(flow, store=None, flow_detail=None, book=None, if isinstance(engine_conf, six.string_types): engine_conf = {'engine': engine_conf} + engine_name = engine_conf['engine'] + try: + pieces = misc.parse_uri(engine_name) + except (TypeError, ValueError): + pass + else: + engine_name = pieces['scheme'] + engine_conf = misc.merge_uri(pieces, engine_conf.copy()) + if isinstance(backend, dict): backend = p_backends.fetch(backend) @@ -88,7 +98,7 @@ def load(flow, store=None, flow_detail=None, book=None, backend=backend) mgr = stevedore.driver.DriverManager( - namespace, engine_conf['engine'], + namespace, engine_name, invoke_on_load=True, invoke_kwds={ 'conf': engine_conf.copy(), diff --git a/taskflow/jobs/backends/__init__.py b/taskflow/jobs/backends/__init__.py index ad4dc060..b720024b 100644 --- a/taskflow/jobs/backends/__init__.py +++ b/taskflow/jobs/backends/__init__.py @@ -20,6 +20,7 @@ import six from stevedore import driver from taskflow import exceptions as exc +from taskflow.utils import misc # NOTE(harlowja): this is the entrypoint namespace, not the module namespace. @@ -33,11 +34,16 @@ def fetch(name, conf, namespace=BACKEND_NAMESPACE, **kwargs): specific kwargs) in the given entrypoint namespace and create it with the given name. """ - # NOTE(harlowja): this allows simpler syntax. if isinstance(conf, six.string_types): conf = {'board': conf} - board = conf['board'] + try: + pieces = misc.parse_uri(board) + except (TypeError, ValueError): + pass + else: + board = pieces['scheme'] + conf = misc.merge_uri(pieces, conf.copy()) LOG.debug('Looking for %r jobboard driver in %r', board, namespace) try: mgr = driver.DriverManager(namespace, board, diff --git a/taskflow/openstack/common/network_utils.py b/taskflow/openstack/common/network_utils.py new file mode 100644 index 00000000..fa812b29 --- /dev/null +++ b/taskflow/openstack/common/network_utils.py @@ -0,0 +1,108 @@ +# Copyright 2012 OpenStack Foundation. +# All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Network-related utilities and helper functions. +""" + +# TODO(jd) Use six.moves once +# https://bitbucket.org/gutworth/six/pull-request/28 +# is merged +try: + import urllib.parse + SplitResult = urllib.parse.SplitResult +except ImportError: + import urlparse + SplitResult = urlparse.SplitResult + +from six.moves.urllib import parse + + +def parse_host_port(address, default_port=None): + """Interpret a string as a host:port pair. + + An IPv6 address MUST be escaped if accompanied by a port, + because otherwise ambiguity ensues: 2001:db8:85a3::8a2e:370:7334 + means both [2001:db8:85a3::8a2e:370:7334] and + [2001:db8:85a3::8a2e:370]:7334. + + >>> parse_host_port('server01:80') + ('server01', 80) + >>> parse_host_port('server01') + ('server01', None) + >>> parse_host_port('server01', default_port=1234) + ('server01', 1234) + >>> parse_host_port('[::1]:80') + ('::1', 80) + >>> parse_host_port('[::1]') + ('::1', None) + >>> parse_host_port('[::1]', default_port=1234) + ('::1', 1234) + >>> parse_host_port('2001:db8:85a3::8a2e:370:7334', default_port=1234) + ('2001:db8:85a3::8a2e:370:7334', 1234) + + """ + if address[0] == '[': + # Escaped ipv6 + _host, _port = address[1:].split(']') + host = _host + if ':' in _port: + port = _port.split(':')[1] + else: + port = default_port + else: + if address.count(':') == 1: + host, port = address.split(':') + else: + # 0 means ipv4, >1 means ipv6. 
+ # We prohibit unescaped ipv6 addresses with port. + host = address + port = default_port + + return (host, None if port is None else int(port)) + + +class ModifiedSplitResult(SplitResult): + """Split results class for urlsplit.""" + + # NOTE(dims): The functions below are needed for Python 2.6.x. + # We can remove these when we drop support for 2.6.x. + @property + def hostname(self): + netloc = self.netloc.split('@', 1)[-1] + host, port = parse_host_port(netloc) + return host + + @property + def port(self): + netloc = self.netloc.split('@', 1)[-1] + host, port = parse_host_port(netloc) + return port + + +def urlsplit(url, scheme='', allow_fragments=True): + """Parse a URL using urlparse.urlsplit(), splitting query and fragments. + This function papers over Python issue9374 when needed. + + The parameters are the same as urlparse.urlsplit. + """ + scheme, netloc, path, query, fragment = parse.urlsplit( + url, scheme, allow_fragments) + if allow_fragments and '#' in path: + path, fragment = path.split('#', 1) + if '?' in path: + path, query = path.split('?', 1) + return ModifiedSplitResult(scheme, netloc, + path, query, fragment) diff --git a/taskflow/persistence/backends/__init__.py b/taskflow/persistence/backends/__init__.py index 5cf30243..f89e60d4 100644 --- a/taskflow/persistence/backends/__init__.py +++ b/taskflow/persistence/backends/__init__.py @@ -15,20 +15,16 @@ # under the License. import logging -import re from stevedore import driver from taskflow import exceptions as exc +from taskflow.utils import misc # NOTE(harlowja): this is the entrypoint namespace, not the module namespace. 
BACKEND_NAMESPACE = 'taskflow.persistence' -# NOTE(imelnikov): regular expression to get scheme from URI, -# see RFC 3986 section 3.1 -SCHEME_REGEX = re.compile(r"^([A-Za-z]{1}[A-Za-z0-9+.-]*):") - LOG = logging.getLogger(__name__) @@ -36,14 +32,14 @@ def fetch(conf, namespace=BACKEND_NAMESPACE, **kwargs): """Fetches a given backend using the given configuration (and any backend specific kwargs) in the given entrypoint namespace. """ - connection = conf['connection'] - - match = SCHEME_REGEX.match(connection) - if match: - backend_name = match.group(1) + backend_name = conf['connection'] + try: + pieces = misc.parse_uri(backend_name) + except (TypeError, ValueError): + pass else: - backend_name = connection - + backend_name = pieces['scheme'] + conf = misc.merge_uri(pieces, conf.copy()) LOG.debug('Looking for %r backend driver in %r', backend_name, namespace) try: mgr = driver.DriverManager(namespace, backend_name, diff --git a/taskflow/tests/unit/test_utils.py b/taskflow/tests/unit/test_utils.py index d0bb0695..fbdf3ec6 100644 --- a/taskflow/tests/unit/test_utils.py +++ b/taskflow/tests/unit/test_utils.py @@ -456,6 +456,51 @@ class StopWatchUtilsTest(test.TestCase): self.assertGreater(0.01, watch.elapsed()) +class UriParseTest(test.TestCase): + def test_parse(self): + url = "zookeeper://192.168.0.1:2181/a/b/?c=d" + parsed = misc.parse_uri(url) + self.assertEqual('zookeeper', parsed.scheme) + self.assertEqual(2181, parsed.port) + self.assertEqual('192.168.0.1', parsed.hostname) + self.assertEqual('', parsed.fragment) + self.assertEqual('/a/b/', parsed.path) + self.assertEqual({'c': 'd'}, parsed.params) + + def test_multi_params(self): + url = "mysql://www.yahoo.com:3306/a/b/?c=d&c=e" + parsed = misc.parse_uri(url, query_duplicates=True) + self.assertEqual({'c': ['d', 'e']}, parsed.params) + + def test_port_provided(self): + url = "rabbitmq://www.yahoo.com:5672" + parsed = misc.parse_uri(url) + self.assertEqual('rabbitmq', parsed.scheme) + 
self.assertEqual('www.yahoo.com', parsed.hostname) + self.assertEqual(5672, parsed.port) + self.assertEqual('', parsed.path) + + def test_ipv6_host(self): + url = "rsync://[2001:db8:0:1]:873" + parsed = misc.parse_uri(url) + self.assertEqual('rsync', parsed.scheme) + self.assertEqual('2001:db8:0:1', parsed.hostname) + self.assertEqual(873, parsed.port) + + def test_user_password(self): + url = "rsync://test:test_pw@www.yahoo.com:873" + parsed = misc.parse_uri(url) + self.assertEqual('test', parsed.username) + self.assertEqual('test_pw', parsed.password) + self.assertEqual('www.yahoo.com', parsed.hostname) + + def test_user(self): + url = "rsync://test@www.yahoo.com:873" + parsed = misc.parse_uri(url) + self.assertEqual('test', parsed.username) + self.assertEqual(None, parsed.password) + + class ExcInfoUtilsTest(test.TestCase): def _make_ex_info(self): diff --git a/taskflow/utils/misc.py b/taskflow/utils/misc.py index ed9a33a7..69606c5b 100644 --- a/taskflow/utils/misc.py +++ b/taskflow/utils/misc.py @@ -24,6 +24,7 @@ import functools import keyword import logging import os +import re import string import sys import threading @@ -31,15 +32,101 @@ import time import traceback import six +from six.moves.urllib import parse as urlparse from taskflow import exceptions as exc from taskflow.openstack.common import jsonutils +from taskflow.openstack.common import network_utils from taskflow.utils import reflection LOG = logging.getLogger(__name__) NUMERIC_TYPES = six.integer_types + (float,) +# NOTE(imelnikov): regular expression to get scheme from URI, +# see RFC 3986 section 3.1 +_SCHEME_REGEX = re.compile(r"^([A-Za-z][A-Za-z0-9+.-]*):") + + +def merge_uri(uri_pieces, conf): + """Merges the username, password, hostname, and query params of a uri into + the given configuration (does not overwrite the configuration keys if they + already exist) and returns the adjusted configuration. + + NOTE(harlowja): does not merge the path, scheme or fragment. 
+ """ + for k in ('username', 'password'): + if not uri_pieces[k]: + continue + conf.setdefault(k, uri_pieces[k]) + hostname = uri_pieces.get('hostname') + if hostname: + port = uri_pieces.get('port') + if port is not None: + hostname += ":%s" % (port) + conf.setdefault('hostname', hostname) + for (k, v) in six.iteritems(uri_pieces['params']): + conf.setdefault(k, v) + return conf + + +def parse_uri(uri, query_duplicates=False): + """Parses a uri into its components and returns a dictionary containing + those components. + """ + # Do some basic validation before continuing... + if not isinstance(uri, six.string_types): + raise TypeError("Can only parse string types to uri data, " + "and not an object of type %s" + % reflection.get_class_name(uri)) + match = _SCHEME_REGEX.match(uri) + if not match: + raise ValueError("Uri %r does not start with a RFC 3986 compliant" + " scheme" % (uri)) + parsed = network_utils.urlsplit(uri) + if parsed.query: + query_params = urlparse.parse_qsl(parsed.query) + if not query_duplicates: + query_params = dict(query_params) + else: + # Retain duplicates in a list for keys which have duplicates, but + # for items which are not duplicated, just associate the key with + # the value. + tmp_query_params = {} + for (k, v) in query_params: + if k in tmp_query_params: + p_v = tmp_query_params[k] + if isinstance(p_v, list): + p_v.append(v) + else: + p_v = [p_v, v] + tmp_query_params[k] = p_v + else: + tmp_query_params[k] = v + query_params = tmp_query_params + else: + query_params = {} + uri_pieces = { + 'scheme': parsed.scheme, + 'username': parsed.username, + 'password': parsed.password, + 'fragment': parsed.fragment, + 'path': parsed.path, + 'params': query_params, + } + for k in ('hostname', 'port'): + try: + uri_pieces[k] = getattr(parsed, k) + except (IndexError, ValueError): + # The underlying network_utils throws when the host string is empty + # which it may be in cases where it is not provided. 
+ # + # NOTE(harlowja): when https://review.openstack.org/#/c/86921/ gets + # merged we can just remove this since that error will no longer + # occur. + uri_pieces[k] = None + return AttrDict(**uri_pieces) + def binary_encode(text, encoding='utf-8'): """Converts a string of into a binary type using given encoding. From 2a0851c2ab88a057a45ce0dbfe99aa21efb42e5e Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Fri, 25 Apr 2014 15:27:51 -0700 Subject: [PATCH 037/188] Load engines with defined args and provided kwargs To match the other fetch() functions, make the engine load and run methods take in kwargs which can be used by a specific engine if needed. Instead of the previous passing engine args by invoke_kwds, use the invoke_args with the specific kwargs being placed in invoke_kwds. Change-Id: I7974ed774909de82a8b48615dfe29836ca8d809d --- taskflow/engines/helpers.py | 24 +++++++++++------------- 1 file changed, 11 insertions(+), 13 deletions(-) diff --git a/taskflow/engines/helpers.py b/taskflow/engines/helpers.py index 876577ff..c70c8f1e 100644 --- a/taskflow/engines/helpers.py +++ b/taskflow/engines/helpers.py @@ -47,7 +47,8 @@ def _fetch_validate_factory(flow_factory): def load(flow, store=None, flow_detail=None, book=None, - engine_conf=None, backend=None, namespace=ENGINES_NAMESPACE): + engine_conf=None, backend=None, namespace=ENGINES_NAMESPACE, + **kwargs): """Load flow into engine. 
This function creates and prepares engine to run the @@ -100,12 +101,8 @@ def load(flow, store=None, flow_detail=None, book=None, mgr = stevedore.driver.DriverManager( namespace, engine_name, invoke_on_load=True, - invoke_kwds={ - 'conf': engine_conf.copy(), - 'flow': flow, - 'flow_detail': flow_detail, - 'backend': backend - }) + invoke_args=(flow, flow_detail, backend, engine_conf), + invoke_kwds=kwargs) engine = mgr.driver if store: engine.storage.inject(store) @@ -113,7 +110,7 @@ def load(flow, store=None, flow_detail=None, book=None, def run(flow, store=None, flow_detail=None, book=None, - engine_conf=None, backend=None, namespace=ENGINES_NAMESPACE): + engine_conf=None, backend=None, namespace=ENGINES_NAMESPACE, **kwargs): """Run the flow. This function load the flow into engine (with 'load' function) @@ -141,7 +138,7 @@ def run(flow, store=None, flow_detail=None, book=None, """ engine = load(flow, store=store, flow_detail=flow_detail, book=book, engine_conf=engine_conf, backend=backend, - namespace=namespace) + namespace=namespace, **kwargs) engine.run() return engine.storage.fetch_all() @@ -187,7 +184,7 @@ def save_factory_details(flow_detail, def load_from_factory(flow_factory, factory_args=None, factory_kwargs=None, store=None, book=None, engine_conf=None, backend=None, - namespace=ENGINES_NAMESPACE): + namespace=ENGINES_NAMESPACE, **kwargs): """Loads a flow from a factory function into an engine. 
Gets flow factory function (or name of it) and creates flow with @@ -220,7 +217,8 @@ def load_from_factory(flow_factory, factory_args=None, factory_kwargs=None, flow_factory, factory_args, factory_kwargs, backend=backend) return load(flow=flow, store=store, flow_detail=flow_detail, book=book, - engine_conf=engine_conf, backend=backend, namespace=namespace) + engine_conf=engine_conf, backend=backend, namespace=namespace, + **kwargs) def flow_from_detail(flow_detail): @@ -249,7 +247,7 @@ def flow_from_detail(flow_detail): def load_from_detail(flow_detail, store=None, engine_conf=None, backend=None, - namespace=ENGINES_NAMESPACE): + namespace=ENGINES_NAMESPACE, **kwargs): """Reload flow previously loaded with load_form_factory function. Gets flow factory name from metadata, calls it to recreate the flow @@ -266,4 +264,4 @@ def load_from_detail(flow_detail, store=None, engine_conf=None, backend=None, flow = flow_from_detail(flow_detail) return load(flow, flow_detail=flow_detail, store=store, engine_conf=engine_conf, backend=backend, - namespace=namespace) + namespace=namespace, **kwargs) From 8800572da240b1f1856898952ce804936dafe49f Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Mon, 28 Apr 2014 20:22:32 -0700 Subject: [PATCH 038/188] Remove persistence wiki page link All of the relevant docs have been moved over to the developer docs and the only thing left on the wiki page is a high-level intro and a link to the developer docs. Change-Id: I1fcc5d34586bad3d92fa8bb5043c55249e1c9bdd --- doc/source/persistence.rst | 3 --- 1 file changed, 3 deletions(-) diff --git a/doc/source/persistence.rst b/doc/source/persistence.rst index 75d1fab3..01adcf22 100644 --- a/doc/source/persistence.rst +++ b/doc/source/persistence.rst @@ -30,10 +30,7 @@ This abstraction serves the following *major* purposes: for retry atoms to be able to decide if they should should continue vs. stop. * *Something you create...* -For more *general* information, please see `wiki page`_. - .. 
_stevedore: http://stevedore.readthedocs.org/ -.. _wiki page: https://wiki.openstack.org/wiki/TaskFlow/Persistence How it is used ============== From 6eb82b6b1a1474ee1cda1c12a61617b8892cbf53 Mon Sep 17 00:00:00 2001 From: Ihar Hrachyshka Date: Tue, 29 Apr 2014 17:21:52 +0200 Subject: [PATCH 039/188] Synced jsonutils from oslo-incubator The sync includes change that drastically enhances performance on Python 2.6 with fresh simplejson library installed. The latest commit in oslo-incubator: - 732bdb6297eb9de81667f7713ebcb1ccc2ee45a7 Change-Id: Ib3dc0b713ed90396919feba018772243b3b9c90f Closes-Bug: 1314129 --- taskflow/openstack/common/jsonutils.py | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/taskflow/openstack/common/jsonutils.py b/taskflow/openstack/common/jsonutils.py index fec61bd0..80b85221 100644 --- a/taskflow/openstack/common/jsonutils.py +++ b/taskflow/openstack/common/jsonutils.py @@ -35,7 +35,17 @@ import datetime import functools import inspect import itertools -import json +import sys + +if sys.version_info < (2, 7): + # On Python <= 2.6, json module is not C boosted, so try to use + # simplejson module if available + try: + import simplejson as json + except ImportError: + import json +else: + import json import six import six.moves.xmlrpc_client as xmlrpclib From 322a9216f30ff215b72a3f206046f12fd1c532aa Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Tue, 29 Apr 2014 16:11:21 -0700 Subject: [PATCH 040/188] Update jobboard docs Add parameter definitions to a few critical methods. 
Change-Id: I34067574ddff4378377cbd0f1870f36fdba64107 --- taskflow/jobs/jobboard.py | 17 ++++++++++++++++- 1 file changed, 16 insertions(+), 1 deletion(-) diff --git a/taskflow/jobs/jobboard.py b/taskflow/jobs/jobboard.py index 737251bd..070b3ea3 100644 --- a/taskflow/jobs/jobboard.py +++ b/taskflow/jobs/jobboard.py @@ -76,7 +76,13 @@ class JobBoard(object): A job that has been consumed can not be reclaimed or reposted by another entity (job postings are immutable). Any entity consuming - a unclaimed job (or a job they do not own) will cause an exception. + a unclaimed job (or a job they do not have a claim on) will cause an + exception. + + :param job: a job on this jobboard that can be consumed (if it does + not exist then a NotFound exception will be raised). + :param who: string that names the entity performing the consumption, + this must be the same name that was used for claiming this job. """ @abc.abstractmethod @@ -103,6 +109,10 @@ class JobBoard(object): will at sometime in the future work on that jobs flows and either fail at completing them (resulting in a reposting) or consume that job from the jobboard (signaling its completion). + + :param job: a job on this jobboard that can be claimed (if it does + not exist then a NotFound exception will be raised). + :param who: string that names the claiming entity. """ @abc.abstractmethod @@ -115,6 +125,11 @@ class JobBoard(object): Only the entity that has claimed that job can abandon a job. Any entity abandoning a unclaimed job (or a job they do not own) will cause an exception. + + :param job: a job on this jobboard that can be abandoned (if it does + not exist then a NotFound exception will be raised). + :param who: string that names the entity performing the abandoning, + this must be the same name that was used for claiming this job. 
""" From 5c6a1d408109ffbf301510e84543e7d097349e5f Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Fri, 25 Apr 2014 15:45:34 -0700 Subject: [PATCH 041/188] Wrap the failure to load in the not found exception Wrap the loading of the engine into a not found exception to match the usage in other entrypoints. This makes the usage of the various entrypoint loading functions more consistent. Change-Id: Id86f7b716c9b3dd76aba411529d2e647ad93a120 --- taskflow/engines/helpers.py | 23 +++++++++++++--------- taskflow/tests/unit/test_engine_helpers.py | 16 +++++++++++++++ 2 files changed, 30 insertions(+), 9 deletions(-) diff --git a/taskflow/engines/helpers.py b/taskflow/engines/helpers.py index c70c8f1e..c9000b80 100644 --- a/taskflow/engines/helpers.py +++ b/taskflow/engines/helpers.py @@ -19,6 +19,7 @@ import contextlib import six import stevedore.driver +from taskflow import exceptions as exc from taskflow.openstack.common import importutils from taskflow.persistence import backends as p_backends from taskflow.utils import misc @@ -98,15 +99,19 @@ def load(flow, store=None, flow_detail=None, book=None, flow_detail = p_utils.create_flow_detail(flow, book=book, backend=backend) - mgr = stevedore.driver.DriverManager( - namespace, engine_name, - invoke_on_load=True, - invoke_args=(flow, flow_detail, backend, engine_conf), - invoke_kwds=kwargs) - engine = mgr.driver - if store: - engine.storage.inject(store) - return engine + try: + mgr = stevedore.driver.DriverManager( + namespace, engine_name, + invoke_on_load=True, + invoke_args=(flow, flow_detail, backend, engine_conf), + invoke_kwds=kwargs) + engine = mgr.driver + except RuntimeError as e: + raise exc.NotFound("Could not find engine %s" % (engine_name), e) + else: + if store: + engine.storage.inject(store) + return engine def run(flow, store=None, flow_detail=None, book=None, diff --git a/taskflow/tests/unit/test_engine_helpers.py b/taskflow/tests/unit/test_engine_helpers.py index 6f3d3386..da0a276b 100644 --- 
a/taskflow/tests/unit/test_engine_helpers.py +++ b/taskflow/tests/unit/test_engine_helpers.py @@ -16,6 +16,8 @@ import mock +from taskflow import exceptions as exc +from taskflow.patterns import linear_flow from taskflow import test from taskflow.tests import utils as test_utils from taskflow.utils import persistence_utils as p_utils @@ -23,6 +25,20 @@ from taskflow.utils import persistence_utils as p_utils import taskflow.engines +class EngineLoadingTestCase(test.TestCase): + def test_default_load(self): + f = linear_flow.Flow('test') + f.add(test_utils.TaskOneReturn("run-1")) + e = taskflow.engines.load(f) + self.assertIsNotNone(e) + + def test_unknown_load(self): + f = linear_flow.Flow('test') + f.add(test_utils.TaskOneReturn("run-1")) + self.assertRaises(exc.NotFound, taskflow.engines.load, f, + engine_conf='not_really_any_engine') + + class FlowFromDetailTestCase(test.TestCase): def test_no_meta(self): _lb, flow_detail = p_utils.temporary_flow_detail() From 0667ae5406af68a21b41917af61282d04ff9d068 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Thu, 24 Apr 2014 16:29:11 -0700 Subject: [PATCH 042/188] Add a jobboard fetching context manager Allow the jobboard backend to be fetched and automatically connected and closed by providing a method that does the fetching, connecting and closing as a context manager. Change-Id: I3b2ea707009f8154f2c68652f101490c09f35c8c --- taskflow/jobs/backends/__init__.py | 12 ++++++++++++ taskflow/jobs/jobboard.py | 12 ++++++++++++ 2 files changed, 24 insertions(+) diff --git a/taskflow/jobs/backends/__init__.py b/taskflow/jobs/backends/__init__.py index ad4dc060..c10a4ef1 100644 --- a/taskflow/jobs/backends/__init__.py +++ b/taskflow/jobs/backends/__init__.py @@ -14,6 +14,7 @@ # License for the specific language governing permissions and limitations # under the License. 
+import contextlib import logging import six @@ -47,3 +48,14 @@ def fetch(name, conf, namespace=BACKEND_NAMESPACE, **kwargs): return mgr.driver except RuntimeError as e: raise exc.NotFound("Could not find jobboard %s" % (board), e) + + +@contextlib.contextmanager +def backend(name, conf, namespace=BACKEND_NAMESPACE, **kwargs): + """Fetches a jobboard backend, connects to it and allows it to be used in + a context manager statement with the jobboard being closed upon completion. + """ + jb = fetch(name, conf, namespace=namespace, **kwargs) + jb.connect() + with contextlib.closing(jb): + yield jb diff --git a/taskflow/jobs/jobboard.py b/taskflow/jobs/jobboard.py index 737251bd..e988a339 100644 --- a/taskflow/jobs/jobboard.py +++ b/taskflow/jobs/jobboard.py @@ -117,6 +117,18 @@ class JobBoard(object): exception. """ + @abc.abstractmethod + def connect(self): + """Opens the connection to any backend system.""" + + @abc.abstractmethod + def close(self): + """Close the connection to any backend system. + + Once closed the jobboard can no longer be used (unless reconnection + occurs). + """ + # Jobboard events POSTED = 'POSTED' # new job is/has been posted From 479b3c84302066d58b2b16e784a3ebb438912631 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Tue, 29 Apr 2014 13:43:53 -0700 Subject: [PATCH 043/188] Move from generator to iterator for iterjobs Instead of yielding back valid jobs, alter the function to return a iterator which can support various capabilities when iterated over. These capabilities can be changed during iteration and are more flexible in allowing new jobboard implementations to change there supported iteration capabilities. 
Change-Id: Ibcd47d881a5c8689b44bc444402f51030649c0be --- taskflow/jobs/backends/impl_zookeeper.py | 89 ++++++++++++++++++------ taskflow/jobs/jobboard.py | 13 +++- taskflow/tests/unit/jobs/test_zk_job.py | 12 ++++ 3 files changed, 91 insertions(+), 23 deletions(-) diff --git a/taskflow/jobs/backends/impl_zookeeper.py b/taskflow/jobs/backends/impl_zookeeper.py index b0a53ecc..b9331404 100644 --- a/taskflow/jobs/backends/impl_zookeeper.py +++ b/taskflow/jobs/backends/impl_zookeeper.py @@ -14,6 +14,7 @@ # License for the specific language governing permissions and limitations # under the License. +import collections import contextlib import functools import logging @@ -201,6 +202,69 @@ class ZookeeperJob(base_job.Job): return self._book +class ZookeeperJobBoardIterator(six.Iterator): + """Iterator over a zookeeper jobboard. + + It supports the following attributes/constructor arguments: + + * ensure_fresh: boolean that requests that during every fetch of a new + set of jobs this will cause the iterator to force the backend to + refresh (ensuring that the jobboard has the most recent job listings). + * only_unclaimed: boolean that indicates whether to only iterate + over unclaimed jobs. 
+ """ + + def __init__(self, board, only_unclaimed=False, ensure_fresh=False): + self._board = board + self._jobs = collections.deque() + self._fetched = False + self.ensure_fresh = ensure_fresh + self.only_unclaimed = only_unclaimed + + @property + def board(self): + return self._board + + def __iter__(self): + return self + + def _fetch_jobs(self): + if self.ensure_fresh: + self._board._force_refresh() + with self._board._job_mutate: + return sorted(six.itervalues(self._board._known_jobs)) + + def _next_job(self): + if self.only_unclaimed: + allowed_states = UNCLAIMED_JOB_STATES + else: + allowed_states = ALL_JOB_STATES + job = None + while self._jobs and job is None: + maybe_job = self._jobs.popleft() + try: + if maybe_job.state in allowed_states: + job = maybe_job + except excp.JobFailure: + LOG.warn("Failed determining the state of job: %s (%s)", + maybe_job.uuid, maybe_job.path, exc_info=True) + except excp.NotFound: + with self._board._job_mutate: + self._board._remove_job(maybe_job.path) + return job + + def __next__(self): + if not self._jobs: + if not self._fetched: + self._jobs.extend(self._fetch_jobs()) + self._fetched = True + job = self._next_job() + if job is None: + raise StopIteration + else: + return job + + class ZookeeperJobBoard(jobboard.NotifyingJobBoard): def __init__(self, name, conf, client=None, persistence=None, emit_notifications=True): @@ -250,7 +314,7 @@ class ZookeeperJobBoard(jobboard.NotifyingJobBoard): with self._job_mutate: return len(self._known_jobs) - def _force_refresh(self, delayed=False): + def _force_refresh(self): try: children = self._client.get_children(self.path) except self._client.handler.timeout_exception as e: @@ -263,27 +327,12 @@ class ZookeeperJobBoard(jobboard.NotifyingJobBoard): except k_exceptions.KazooException as e: raise excp.JobFailure("Refreshing failure, internal error", e) else: - self._on_job_posting(children, delayed=delayed) + self._on_job_posting(children, delayed=False) def iterjobs(self, 
only_unclaimed=False, ensure_fresh=False): - if ensure_fresh: - self._force_refresh() - ok_states = ALL_JOB_STATES - if only_unclaimed: - ok_states = UNCLAIMED_JOB_STATES - with self._job_mutate: - known_jobs = list(six.itervalues(self._known_jobs)) - for job in sorted(known_jobs): - try: - if job.state in ok_states: - yield job - except excp.JobFailure: - LOG.warn("Failed determining the state of job: %s (%s)", - job.uuid, job.path, exc_info=True) - except excp.NotFound: - # Someone destroyed it while we are iterating. - with self._job_mutate: - self._remove_job(job.path) + return ZookeeperJobBoardIterator(self, + only_unclaimed=only_unclaimed, + ensure_fresh=ensure_fresh) def _remove_job(self, path): LOG.debug("Removing job that was at path: %s", path) diff --git a/taskflow/jobs/jobboard.py b/taskflow/jobs/jobboard.py index 737251bd..0aa533ea 100644 --- a/taskflow/jobs/jobboard.py +++ b/taskflow/jobs/jobboard.py @@ -36,14 +36,19 @@ class JobBoard(object): @abc.abstractmethod def iterjobs(self, only_unclaimed=False, ensure_fresh=False): - """Yields back jobs that are currently on this jobboard (claimed - or not claimed). + """Returns an iterator that will provide back jobs that are currently + on this jobboard. NOTE(harlowja): the ordering of this iteration should be by posting order (oldest to newest) if possible, but it is left up to the backing implementation to provide the order that best suits it (so don't depend on it always being oldest to newest). + NOTE(harlowja): the iterator that is returned may support other + attributes which can be used to further customize how iteration can + be accomplished; check with the backends iterator object to determine + what other attributes are supported. + :param only_unclaimed: boolean that indicates whether to only iteration over unclaimed jobs. 
:param ensure_fresh: boolean that requests to only iterate over the @@ -55,7 +60,9 @@ class JobBoard(object): @abc.abstractproperty def job_count(self): - """Returns how many jobs are on this jobboard.""" + """Returns how many jobs are on this jobboard (this count may change as + new jobs appear or are removed). + """ @abc.abstractmethod def find_owner(self, job): diff --git a/taskflow/tests/unit/jobs/test_zk_job.py b/taskflow/tests/unit/jobs/test_zk_job.py index 26a0fec5..9b7c54c4 100644 --- a/taskflow/tests/unit/jobs/test_zk_job.py +++ b/taskflow/tests/unit/jobs/test_zk_job.py @@ -81,6 +81,18 @@ class TestZookeeperJobs(test.TestCase): self.assertTrue(mock_dt.called) + def test_board_iter(self): + with connect_close(self.board): + it = self.board.iterjobs() + self.assertEqual(it.board, self.board) + self.assertFalse(it.only_unclaimed) + self.assertFalse(it.ensure_fresh) + + def test_board_iter_empty(self): + with connect_close(self.board): + jobs_found = list(self.board.iterjobs()) + self.assertEqual([], jobs_found) + def test_fresh_iter(self): with connect_close(self.board): book = p_utils.temporary_log_book() From 472e4996b765bd6e038b588d012fed4ea9ee8265 Mon Sep 17 00:00:00 2001 From: OpenStack Proposal Bot Date: Thu, 1 May 2014 12:43:52 +0000 Subject: [PATCH 044/188] Updated from global requirements Change-Id: Ic79c0367a0902e0a28d44b84174f132d41caa55b --- setup.py | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/setup.py b/setup.py index 70c2b3f3..73637574 100644 --- a/setup.py +++ b/setup.py @@ -17,6 +17,14 @@ # THIS FILE IS MANAGED BY THE GLOBAL REQUIREMENTS REPO - DO NOT EDIT import setuptools +# In python < 2.7.4, a lazy loading of package `pbr` will break +# setuptools if some other modules registered functions in `atexit`. 
+# solution from: http://bugs.python.org/issue15881#msg170215 +try: + import multiprocessing # noqa +except ImportError: + pass + setuptools.setup( setup_requires=['pbr'], pbr=True) From bb64df0b1533f293ad939c14234e9fb00b8f83de Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Mon, 28 Apr 2014 15:05:27 -0700 Subject: [PATCH 045/188] Add docs for the worker based engine (WBE) Begin to rework the twiki docs into a developer doc, moving items from the twiki to rst and adjusting examples to make a unified place where the worker model can be documented. Change-Id: I43ed828be33351b9fb6606317011e7204f61a136 --- doc/source/engines.rst | 18 +- doc/source/img/distributed_flow_rpc.png | Bin 0 -> 68549 bytes doc/source/index.rst | 2 +- doc/source/workers.rst | 328 ++++++++++++++++++++++++ taskflow/engines/worker_based/engine.py | 14 + 5 files changed, 345 insertions(+), 17 deletions(-) create mode 100644 doc/source/img/distributed_flow_rpc.png create mode 100644 doc/source/workers.rst diff --git a/doc/source/engines.rst b/doc/source/engines.rst index 62d3a1a3..3f380c93 100644 --- a/doc/source/engines.rst +++ b/doc/source/engines.rst @@ -101,22 +101,8 @@ Worker-Based Engine **Engine type**: ``'worker-based'`` -This is engine that schedules tasks to **workers** -- separate processes -dedicated for certain tasks execution, possibly running on other machines, -connected via `amqp `_ (or other supported -`kombu `_ transports). For more information, -please see `wiki page`_ for more details on how the worker based engine -operates. - -.. note:: - - This engine is under active development and is experimental but it is - usable and does work but is missing some features (please check the - `blueprint page`_ for known issues and plans) that will make it more - production ready. - -.. _wiki page: https://wiki.openstack.org/wiki/TaskFlow/Worker-based_Engine -.. 
_blueprint page: https://blueprints.launchpad.net/taskflow +For more information, please see :doc:`workers ` for more details on +how the worker based engine operates (and the design decisions behind it). Engine Interface ================ diff --git a/doc/source/img/distributed_flow_rpc.png b/doc/source/img/distributed_flow_rpc.png new file mode 100644 index 0000000000000000000000000000000000000000..e2ad2198463f0fe5a84b03a6f39cdc8a96c0df19 GIT binary patch literal 68549 zcmY&=WmJ`2*EOMl5)#r9B8`A_gVNpI-67p6AdPf)Ntbkr(%l_`bmyV@_VIq+AD<2f zjsxSmcC0Pv!`FfcHv65_&&Ffh-wU|?X!kr2Qqb+z>v;0uDGl$bEg z6ZCI(TTvYN2;xh7UWX6}QLY4$DFr+XN!k?7g z77mtOy@ntpou_IJy#=Aa#gRUdyNpj!l3lGZ(OTxOo5d^US>lR}Q`47BeNRYK^sM}@ zp%poki14;PUxMN&c9mqcrLqMj!1{Q5BSwtDb@eW4bb0gcvHke`rnMYh!k*=E#N-~9 zI=0VS5a}(ZhxPq9d_x3e88#E{2@X;a8im;{G4;Br^ z)BliqH4_PpAq2dUNvAL-A&y=%n^~U3T%R{|A(?S4c@`hCPGL`{iV42Z9H^o+g?#9N(3_B_5BkqfBb4|az=u}k_mUV(?h>hSso zV~L@CHugpDeVUU~HqO8K<+1U?gQ<$RV7~-iP&|S<5Iu#uu(qa#84rt5gS~K&wm)(G zquJWKOgYV9t?F3itBameSs_}LN*XFzFaTo|4?f=vuI`*wSNr?uhjeZi^@qgY0|T_o z3=BuO`Y$ALRo(`E!&5+d%d8=7W@AHyO-Oj>s2aY@>{!#?bMRMV(y7SNKj6pSSW4m{ zkCd7mc%~s)S5}qxhMl!~U3x85G@n3S{-3N9m-LSo$VEpH8YmEc4(_YZtuiKZcHEF1u?*4SO(L2=bV29SdwiUsm!#&slqeo; z7H4%Po#gRnUj#$SgrJjklQQJCvf5Xti=-SoIhapMp(q^O=}#o?WIe|+KlXnIwweAI z8ln=-7+#UjYuioF7ZSE4lJGm-@|}L5YxqN>@f4>AI=^ZHnfjE`Op)mR9VQ)7nfiG6 zE%@opXS9LnWY6Wsa=`4p6(mt!zGWu)?{IKiuT*=VTy%UV#^)*=H1AJ*)2SYRl)d0} zqqB|>eKSyEKj?CHgbAL%@W|~LOQYviIO1YwVp7M?z+g*?{=WR3a3LvEF1ldIGbSB0 z#IG4o!CxT*Az=6552dX1k;i^(=2hR0byQBz3cRnQ-B9?nq5zK0in#7J+z-pul-3&^ zPVa*cQqZ+Q$6?YTF7mo}Uq=E@#vM$g_tYzA*Gm7H zEs!2#*c>rd+l`P6djacKX^0){%N5B?jJ=sbJ~7;VZq|4+>U@E~7Zlz|b?Xxm7V-Bq z4-tlQb6CtJ1C?o9uF3b90j9};NAmfzx6IjykS-)Vf%HQ{Rd{_H8*gTI_Ijd;L6MM% z9x~DQ8wxQBdcz>ljghwO^$nTyc81X}>uF?JnWAwIi5v`%zp->*(H&gTqaeW7*Sq{3 z*q~;NcPlmI_WYWj1mc}_gqkVCncJ>|^E2K~9A68A?RB8twZ$bGS7?}1EjSFUl?phk zg}PodMaaMIyoI-M>MRt?Sz8*^(GZu~oQa05;tSj(YtorqjCdT+l08UCQ8MkzdQadgJ@#>cRuB;OWMFrq~KIMM!a@|EUf2C{nz ziHwo0wA4h%|z 
z2D?*@EQT^P?RbML4qE0|xfa)K*P=v#dz6gX=#}5W@l`$Km0a$j4`IVQ@OP$O%^eIC z54oTKBR%1zRaNjTP3Ll+TCd&^dyhrk`|7yidhvpS=wrbiTLDv$_|Iep?%QISA>XHm z#9pT3u8XMMiFfn4YzVdces3bR>dd-lgaA^$J;sc&MSExuFF0?9k4M>!cuvRZa6^pv z)c$L2JZ=hxQGe+J@e;C>17E>_#|Pg{oUO_wL4dA`&RHY^9-ZyWi+{VZ)=}pc=9#=7 z8BvHhrZ2ci%*&^VpD*P*L*XwZ-*|RjeumR-3`AXMsLw5L#Zt?p;rw-fVl7p|-w1%L zG~9vWNTIu#y-u%4md;00=pd`R_>%?Bg;WAnINvMAE%A9hFiCD939iB7uE?f7sD7fQ zn|B3PQ=JSgXw6pyoq1nhU0aMer6V_fzMNR)+;Wz6;)4dUx705FhITe-lzWx{4#s03 zy_K57X(h`X&)?jdB94_y0xjOBsp?-#_uB+f?is}jEwFbG>+QZrQkW9=S(T&?04Qxd z?8T!k*R%NS6=?aQhX@i!Y3O7YU@!$i8%9wS>P&R{^Z6Q)vo z7*$ae5hdjI3%=$|xRL1i9}4LzYTdU5 zT?iLtF``Mzh6nl+??0l;FBE*EgLO>mZ`6u;%jvdB_Bka8S3+8rtMfC!1djP zaL*-O(zW4d5}5XM{-iEjk!h^?qGcX*j7MP|m)+K5Xu5R`S1pb!Dc7nFL@k9YmhX-O zU#9yrVc>9mV zepmRW=WLA6k79xwnwW*X68PF~zy`y}l01C@Kgjji3tN{YH42+mYX2ux;A&%{oyAO! zsKKqhe}yRR#_+RUxH^XZ?e7@r)F%0du3_F8>y??LVJlUbfreYK8p--3LUzGpZ94Bj zX#biSi4rMw{T>}jH1QP-m_G5zc0fHiDnnANLsv5We&;8hja&!{-sEm3U-->1lU}>e ztD}{NxAc&A<-r&UqWGR#&2Gia?{zxFWOK=WH9DL!Ub+-Bhq|iNyY}5INJIeioDvQ< z(ltYf&J>2Ue;GHgdn3@ARjh|1agwgoi{@y4EPIPW&<{Ot#zVm>{vbSlGF7r1tCZz3 z94q!E3}miMV5@|17$1L&+dcQq8O@IQ-Q8{B*WHP3f&Z3V^fg&KgqAnDTe?j{S|}LM zwuv$svAU#?-A5LxtkGk|c}~5-`MBQeGC5p;w;+v+-7)Z)uNENIySNAk3K)z%c{RB` zN(UU;>DIz+!#$?~kd83CK)z#w2@Z{qj`x)C;6Z`aG$Xbe!;ON>k13m_0EM`YJk8-| zybBMwo>v=X^R3LGHkGy;i4&zF#;M5+Gx}8?pCLzYio9-m3pV-lMMe_`Iz>r+K~N3p zHmeT&2}9ZJ&-;o)G7w+y(ye%@6_z6KP=XNCbk51_y-v+usnegW5QiDRoyKm*rP8+d z-8apnCUE8YD?uIMnraz20R89LDqk5ch*<@7rQa%wAAfq+m(UyIclztY&gG<^U?=H+ zOVh<``(9<{OZfKa_w%ac;A_C*MDwN<=O@#kv1vRwuQd7%+bkAan%fi4^-+ z@~z7FFbl3%MUI0DV1}l$M7VAsSrSU=g!~2V zg3-M$V;A5Hz`nsueV>%~z85`VgXTOLhva*E{mR=nO1aPDyO0Yne`guvW`o__+otrX ztI*M!DgO0$&`r52nd$>bPL;zf zDWGM7tWatgm6bipb(YP1CYYwyH}+LQ-?ujzXT=S1v^j5F?r?mw_+%m%F7=uQjg^)2 zJzC6qO+y*5Fdnai_WKNuNg3nOce8zJXNFLhI*T5OLc$^rz7J>e74cIR!k<+Bh=M8} zg`6-df8-8C{iBHfy@zSb8Q9EM*l;F0vda0GxT#xKFIv2%GB&zt?Ao90b1dUxJt)h3 
zOa6U~Z8Ork-471MT!&dp>SrdIG30?U(n;R;TSG}qF@N^RN`E^UJL~c${ID*?e&3wFR+@x_2jQA-wT#m-`w}!ld@$v7jK0Zg=HgM_W5x2LSjcIW049q-uC{ zWtN8^Ay0mEcLOZQZ{2dMzVOr=dc#ygQc;Q~EAa6pVk6IALJt{$uHV0f9d{r)yp6tU zlz3QVU+4h3Jnx{LZeL$tPu0@WVoX&1mo$g4=nAjLOJ(81O{9hVrTNsgUp8sRt6hEQ zNkoJk)$Fukdawk3@pRQFd)H(+Wg!lUMr3kYQvSHxM%1{2=zLemh;zK!Qrc4pk_Fer z9RtIIH~aXCDR+jf?&p{S!{#cUh7#_U*Gq|-i;}y>Qa&3JQ%#~7!Cp+x^{uI7O4oW% zqtjTM74-Y&M=DX{@SbND5VULdOT`YAou$02?tUgNFRzCwtQo%}qPZ5M+Uiz45}#C- z3>u^0?D19)DwcCB-LWW{p%nJF<0*SV=rq5|)g?@ao}O#*@E95y@xa4Q(EdwiK_!_u zzG+EL!P`P%nT&eB){l=K;&WXAUpci>%5rC+WU=cpzF%Y`1LOg~(bFZ#t>m3CfsZoa z0Bj29laN&qCwSmHS2~Y~ojGkhDeOL+fc}|n#aG|6nsLK!1XsKEnz|c^Bu4F;K3>oP zln)GADaoq*_>-czIwM}X{x8f-Pj`2M2apADKI)Vkb|QraR%X2cdJW&>5`vc7ySLcr z<{CWHZs~*Ms00vD2q6Re3MVsBP$*|khs$a6;lzqrp<%j1___x!iE-YCGGx2L5)Y@x`L>RZu zP7@&&v&nz3+CSw@V**%kH1q0S!q>w9|DAVcm(FP8{K>A{ZDv%IJ#HC_Rz3;WA&YrZZWs6duuTvhPbkgcwp3&;G#v)sZgR@~Xwi#wIZ<0v_bA zY1^bLG_IcmFA=|LgX~eLc$vzH-&>!^0O60+{LwwbfC!S7BFGp7b>@RRG_}+!GeSBA z2?^Lo-I68yte&Lfmov6=Z|KUZMpMEq+4dD{ZH3Obalhdzg(xeJO-GW6NG11dalnqi z`brG~-z3+KhilWh2Rs)9a`ANbi)pLr1~*=5LP|wD1z$y4{6Z0eCU#k7nn*T=|ppXVe)Aaz&o;O($^;5_q1#d z43<)WCk=X53SvKohpmrp_^Ja1ECUp9;#-QGpntZCaG_#_7e!gs`dp5;s_Ixz_ADbJ zamTL5s?$i~Jy82nu(zlW>ybT}z!pPK8TK>S_^w*ZP>mp@3oy2Og6u!fhwOZp&-@uk zx}>+ezRl@P4#&dy2zR0@PI%+8*`MlCoo{@02~rNEk${o1{j=@E0!LX7Kzf*9c%=fB z|NI@{Ex?nWtx~cfwc!f_kPav2Z=BtbnSoIDCmM9d1IX8CJ4cFgH>n{9Be5$I>Nw8T zQzMzYr)TpQ$XEHTCH+s#r%dRX$xf8Zcmp0Xzv^D31GHPY`?7P}Q96A>d0CkcY+1+6 zshMQL={GeI=x|D_v=p2laUxY0$r;$MsMd;qM7SZVG}P3*jEA}~!lv=FO)y*sM4Ufd zaWy0B>wadx`pdR$=bpi(^#iPrKS(pFwrv73@?yHcwR0mx#bb{GhlgqBGN`Pq3`aaX zq~Y7l!h%QWKSPhX_SM53?RkT+KwY)#=ecgHk<~NqUm?M;N=so0#+M`Sm=V;#V?MlW z!w;~FBLKt_)*Q|{NfpBmreAs+f#NZCSV}w|4W@bb3qC-VNB<*tAhH zFfc4mafJh4P+9jmSg5YPmA6#fgoJ2^%RzOb8`!wG9IwWt216SB{*9+5!g{_|Dc4DZ z{ox83Ooz?L2(?lh?N;S0#`E7jJqt5{rIzSA|HVcJmdg6>Tsf|7wllV_Ev@ z8S}?*Hatlu8D4v;%%~W4@MUsGj&X~%B39+ok4R&Vo!btW*x&)W zofk6VWj{}%>gq08Eiht&B%qcf*`NbBJboB@aQf#_hk~SH4q0hSne-2S^HBT1V;iGp 
z-#UL9IJF_TAi4tNmL-9be13v}CSDryUi@F&Xrz9sEb_b)P3Ln5OX9T3?}@`l%{3|| zTO*Q2uXRNSruI6eTK~B-0b9}x4j4(S!Ot?7_E*=KR^?a9Wx9F_n!?6RQ1^I69H>xW z=zg3#)LLr88rTqf72&hz}uj}Q|9Mn{epwj8ZmWr~5PMAUy0ZQOXUe(2;tqalg@ zX~I~l-gD6_O8-f!_YnRb>L~sm1QtsV z(1{?!5dx78a6^mt=-VVP(quU|9m#xQ%^Y~PdVc=YC15~)`6`-pU4j!PnwWpfv=1=f zuNaTyU4~pBsasCjil5iFyUkifh?Wk^ADd-)BuKYug`LF0Au&qT|3H}G!lmxp`m)VD z<^KO8VUOLH_MY29W^LtXh8 z!*E>1@}|X&+BSh)7|o&@bvU|kzdGdVY?|Dj?oCI2oxdU0X0bZ2M-&jh0$yEKw~$ip z@leF5c@@-;Q_j9<(DtZtnX^%NmgB4EP6ZApdZod_^`B=9_r$-osmUbN~rNIdHIu5*>rUaD39@)Nr;ZmNb+vVICS03;{lyBK8VmR4{U zPg8+@$Zb;NGDJlX($?0tCsha0UnEoMvzljfIO0fVd4Hy8Y@wlm9i}v=cs;}?b z{-P@VbZVtpZ$rta@te@z*|lK#D}TKEvCc>)|7e|5lKRuW!-2!!wYHZk6E+=C0CZj> zD=ROj1H6hJ=26DFDc_@>Q^Sfi2Z*})dXgqTBi(K32QmqX>IN}WMjFb%zGj9@fiHGS zjAd-8PhhmXGX%qv>R&z)h#IpufzMhrD4@ZSvaB_8-u-O_tjw8h(}Y*U31L`ci{!76 z*SOgD-Lo6*f08IV;MOMlqZQ)XWf}}bOxv_X_np||*2~I6ZjJ@QO=2bMFJN$#OEr?3 z1nl~Z83W>9W4$mbj5P!f`(Q&cF79X+tT}vH5)MX+d|@Og2GI0o&z?X0cmXAoFgD&= zET(7TyZvN}%SSe!ysl0^=aS`pSLt{WXU5nwGr}+o*%&)+*_oY2l&QD=8EiJX1bfE0 zwfX0Z=N@w?rIxkz3fJw$uCEHEcY~iPKM{-97byuQgIH|IuorJ@Dzb2g-vWn`b-t#Y zfY)*zagr*kD|^WJRc!@$-G=3hOSm6PdolBIbe4dv2GNHWb50>j6xbIwJgE1_& zE<#x@&hO&yT#uDXeXt5gpD2baw?Y0KZX1BoN52NqQa=5${r(g>4mn~0I83?v*KoGL zkVJRl=G3VX?x&#W7IrYH|F;-4K-z-y8Ch57ENyLFX0%qg`b4x)M3=)M1pM0S2PSh(*wA5sNAxODIRe?YxKh3dd zbaJ{k>w6zHaFir~DKg=7_fw7Wk5U8X41tVr+0Sowt4(QgGE5nDqFsn7ObfsR| zjja8*V82gxLr%SRR@DeZkz#V88ScwKcW8Jd^p7(?Hm3dR#oPde0OkjuWE3h)eCER8 z@Bj8$r|BBk$7T;|zI0;1U%Sc6S-g%h7(%=D*MN2W3=;QVhSl@^=##zc`v!fl&8QZCJHK?w%_d>_KA1o{uLrHhU?PK7nCoDI?CA#6D&ldoE=X3f zxLjB)JE&e$IaFe=h~JfuDq;MQ`>wST3Gf$C@1%WY@!uVVz3(eXZLAMdc_YK}Yl>VE zELdr1C?^ujGw3(qde(G0D6_nI)9X6k|M|IG!b zKAziTJPX#C3T92{X7c=V&HYIah~D6F8SD^N9{(LzLOB-T))>4ZS>RBKrQf=%vOXk} zs5f>XC=U*u>>aOB)dCgCeBeJ=xU|=}_DKL^5SXHR@KcC0X~eb3wIU=~8>sTd9=BQI zG8ElFXcI-M{=!ufWp>{ZmiQ6Es?Z^!B30gIk+wKIFF7-q7EH%$koTs?q|(6y>{p>kiUI3p2Fq) z2H=uO|031@^#VX);l9yNsW!1AcGYA5pK@FWKsOLu%PaQ;dP05)E)k3v0Nyutkym}_<*Zg=OKmZ 
z*y@t0Lsy5Mcz>oeycLOT{{HUvPyA>oa5}48d$(8@Z6g;Xqq+wMN^))Kg?vNq#n=wiBnvi%MJ$CNCS>+7H9KT@3*alfp5T+7v)YuF}z zI}4zb?X|sYf$XmZr=6u4!_}Dz?a`}9o||nCB!o6tHCHvu>GRxL-y&S%FQ4r6_pa?i z+%VBTjz02HgMk4YOT2GP&qItp9lb=iKD4G;Ka%{)aQJ<)Y6cnN38^P_v6+`S;%&My z-sv66std^`>VO~1w&3F@o(dOxMZ(0hh8XpRI~asINMsv}hw$Ov%Po#1R;p#&3!7FT zsm82Q2u!z@j6E0$)Iml=-%)80H6r+p_M#dq>|0CW zW~;3Bi3KaZ!mRBbNiK`lVOl?p{}LO@Y6=m$dxYt1$;2gjwbvbpp-0m@QO19nHl>oZ zT4PH{sA{)9v6|GMHX6zuX*oIH5ho&7P}%GKEAMNx`B)kMAGt1#>jy)?Jx$k6|=0j{rkl-^bFsa5=Ds-$PcN1Y$H3 zU#F3Sl-A6W@Opo|n&aHeTL$<|%9kbYP~Qc*y~8eXqdl)Tue$`K0H*anYbM)HjRb#$ zdO}1LD$^{ij-zCNcFQrW8(@^7&9BdS5Up}QR1LqY&}o+)i&|~+`onBQ8LiB?hC8BR zUA-hQD>T=!4Rd$(#`tF#N$gV332fecl?PV#5Vq2fKpuoP|D!`shuh!y;UeTQ@jMfA#P?MhQFz(=Q+hxuid_vd3#U z)_w5()g_=6*6MBjhCb;wgsz|WMX8^(J8~)z-8L+QxNn{HSB8S1J{RcFIMwZ@Eq;8Q zGOtsvI2s1kP#6MusZMJ=1!*T~8S&NXMyNe{;^PCV2nbPl9|y07H#^e%(0SWR$6e*^ z08x2|cpu&SibOmQTJ|s48)&6Jo||L!p05E#j}N;o&!MPg90)yVS1vXhh#z@YRd<_7 zisaH2u;?@H`StgIqQ2K^5tkuI#w7nEN2Y$C=Mb~%`q@raN1&^E@axWBh=1zu>${~O z;fB-U-*(fN*Ox~FR6+s>`o3eI*v6R|Pdp!A@(pKVEl$Vaf@AS3o1^st59kfK-*HGd z73qU^JgV{l8z*|SAZyYjDZ$cHd%1?d?PO6@VYNJfx_2%enuw;ePz^p*wMESr-d%^W)@>OsvuRiQ zZ#I+dbwk%oon$wLHLlzIY~vH)C_8It@o0hI8{xp}kU2@m5|dTTb+W%jJCfN*j9hUO zG|rF}+rOfZIF9UO^~_Q|SM|J_Vp{%iX-#u+pb=3Hc()T#IFHRe;H~d0_7}GzEgS4s ze}D?hcVQAbg_1@Q2u)nasLoo~O}HO()QK~T<@oscj#INSy_K%R(2wCCv_ht1go;1t z^bLE@a&7E18Y16)jWPhF@;Ek&ok**ARIVJ;&TenxQyZtl4j#zEMm_loinoeLr=mc; zLOso!;|>bi#)ZZ7m!WRAxtoLy*1cam4W3011_PTADnmlZ=#4}Er6$ANEQima_rh|f zJf`AKu{{>CBWtBQ=p6U7Vm}C|L-QA|d#okUFh%mkg#D;&9G`m4>GPKyU{u#+3->!p zpAoRTvS-bf-4iO7%tb19I1%Y5O}FZS_REF8L&^_%TJ`3?+|b4E`>CaLy7ndePnGRI z{|I1Aqx}RICOGmgqC{8nIGmuav=$uuqrl+e&@V({?2dnyp1*9yQI2B;Knl7t&@LM{ z_Sv!#4|y7%ubo#5sn^|ez#IjxC{%<>;cl-rk&Zmv;AYm96wg-H?M?qZ3!)Pi6T^BW zhx?j-wdW!k&oI*H$Fz3{cHtSHt@MjG;A5!ba^fbUki^}diIgvtN&dN2w}`2XJ7doF zo*jo$f`-4nT!Yh$X_2GMbZ3qRu)6%)yC1RD*Rb*|J~aie{YtgQo$k<6l3o)UTp{=p zLD}C-0@d?yPwzd^tyod5658%#FYv)q~ ziVgc3air~l3v>oh9Jo+RZF_&M6BR1^Ns3CNsMQQ*>_1$0Aj`nKDKMNK`0Hvf3wh{2 
zULOMBuYU0h6Dlh~@}Uo2Nlv6uYk$8jo3#yaZ0|!VV0?w2COxiioy9K}2E5!9$1#s^ zqTOdoph*q1-jr!R5|$=D1& zUZ|Fg9C$I~{P=H4#LI<5+`3f0VgC+X{BB2%Kr{_Rm2q zRM7?fY+9gs3{*x@JiP5ElgtYZgbQH95@_QXNHV9WX-u0r*%MG%o~TYCO~_8?r`seO z!XIf2J|B5y9-G$-)|(VW_!| z`g@&>U&mal%z!f86@_cJX5t zL)KWKE@EL+8nB%@;yAM6MAEo79lP|e3S{r|V^vFKs&w1zZJVHkg-$;T(9p3kcLJN! z^r>dFG8MU9eTR#+6;{v6W?VW8`=cKSeTgz^+`3~)h0+w=VTJkuo%Y=KJxw*bvYY8b+JLQ(Z$%-mpHBYPDlikTMj--*cDfHE6 zvs5Ya-mbUAZ5SeJ`zKT~yKF6^3fvsB^2RMwE|$H3gA<=4Og6o{;=a9n5dnz8uF)#P z=6a*cuZ!buqDJ$eTa#0;Gh8m4q+q3TMQui(4T3q6>5mvcHdaNu`>$@ywv;Y7lHW@_ z^AA@gcatr!#MWqX=HkwX0^3rUs`D;D@x9Ufg2)?Q+~4sUSi55d84Fk&05}d-2;wb! zZ6#mhUqEw7FPLJr?I`WNz?Em-WyMSFJXMif!vXOq9j?BCa)02R;aBnzO&3#1w<$?i z=t*#zg`uihcE^V1Q0^X)Z@Z{mc_UMZ7asc$8kHZ-JueBb;9Y^*m(+{ruaGKPkR$wVjc zGHsd)m5N3PkVoF1!opuXePz{1eA@}m>$)GddW~-HdQ*@=Pr;pu;TWO%?S{hw+gX8b zK@ap%AR(%>M5gd~oZu}lLh_-X7SH;3igb{dKFIWzK4Pspj^&oeVm^`w%)<&mL(qA( z2Q@=~eE6`1M$&x?I%ZzirLh-Im#0Npt3^1Fv?$+;HTz0pHv*|J*rANmg!ITDbh~fX z3Jjp{?{J`f;2gBlm11p49^=HMS_5xKsuG&D_(9f!mOwIOi)01UGC{H~T)Env(>ye= zUn&bgrEa@^0>`dykYjB$&a8N{3g;b_*T^kj>GU?aiw(=?YpPbQ3Ra)C7MOv+mO=@8 zc-#lU-olJxwPL1uXKpXLAGz6&GK#1wb`}0@N}+Z+(pc`LQNyS8}?mbf+GZEriI zglvO&8Kbd@i1H2=N4zgC3BP4?^Z`COUvE#9c2!=WZ-5(KqgiNc5){Fw^2qP%52D5X z>Bzforlp2N6XZyVDjCOLx6$?DT}#avl7Lfd)VsuR_aEBUMBbbxKSt?Q4DyaM ze}`=E{S~7uj5QUo)6Vs&a^SQ%`L}kvY(7zA5WBcL-+@?~SosGiDQRN9gaj>kAoWTpb@{F5BkS93{I;h9 z05@I|>P_0G-jCbX;m z%#AIX&5^jYC3lDOoj?B?#hC2&M){OW%D7QV-%#^u{df`;e*G+*Nz`J?U7|YCqFzP~ z(zSQ^ZJ#4n+qS>e1n&ZYR;9GBcwem14e~zRSIbYfNT_JC+O_D+G@aQhH`Tf?gNj4R zR7Uy(>lX0QaNF!|4%}6lh0(c zGw=#sp5=w+Ru@x=DXvNchH|35}BOF)TxE{k;jtALx}3m<^b(prALcZ_E{8~ zw{-1cDs&S(>7uP*qfXjV_t`w`)BzgZKaB0leLLU7TZ9NVLE&*ZfzV>4r+jW96jpB_$ zCg&wq2EE1rAz4q;p;zM5yQBkZXY!SQprTcT@nAlHWF)2bvYI zi>cPS6N1q*AxTa8)y9NUfmc!Wq#hqtvvV zDv|9>@3**JVbaOd>u{inT<2!vQhgJjNgwp_#ziKskDQ~pza~Fft=Y>hFN<sM8`bL1u1+NUcEEee)63ntR5rR9$HOyc>%*0wMMnF=vmD+lv;BLphMDLV zOM?CFF8k9WLqjp zSqcKAK%xJg%h>?oj3jS4n|3waQAr~9im0|Pkg_nOg-&EiXSzg4WC^*{CPDoes;Nqz 
zc)4%9JBxk^CR>eB`pr>kROkgU1v|B?fhpAU$_o7;yE*%zkFJE(8? z+)x0an6ER5c7U_Dmvu5M|0@{{^|9Y4S~F#OK81%3#igH(o?86Sgxg~H)gXZiS`fv* zgvBS?LY>YBP-ZOY8L-H4YMOYjRVqy3mnO3+O%tH7dmhjfFA?xvwKOG@47OFrFKi znU#8onss&~e;&jdu^ptA<>Zbn8O!zeHx^S%dP?|YZ{Fy6_yWTTQ(HX-W6@#j zH-62&liTchE_i!torNF}!_FkJ+>{^NR-GQqt;_~T36nH`6q}V>{vCEYFet*X@Wwk*9 zAL33>Uhio6p>6Bc(W*=wq(-Y+ac^-U-r64iGlTqxVS&C~6~!Sh^|5e%((=Uyc`U+J z+05>SII};XXgR8x>2wP5F6j48KjUgs%zd8iwF#krs`#5T^4II|?D92TjB`I{CId6` zfZ<4-Z$z69DBm$Egha`yQdWbV4P-nKPRQnB|U*F{n` z?ds_41rRPpV7Gb@BmU@Y_#}(EQ{UNeg*M9;d0Ke3^(U?Lo6r^zVLhz0b{ zj#^H2y_dT##lGtkcFDVxfrMpH@d4+!RDf!TGNT#WVL!-Z_>JP*cNXn!*ETWgu|urn z)U?cDZ7o;!Gn_vsfj}26nzRRL7cl^>L}L!2^7Iu;`vo-sRrFKmgBfJdt3)wQ2ECPH zcp?sSaIg~7gFaQF20RODfOCS7tG4T^0up-5o0HAY1@ht;uCvB6F)_pkj@&T$7RP#8 zT0E1(Z)QHq!hn*!h0h77^*K*e(LBUba7tw; zIg(CaIlx;==(1R|pLFxAqws+iOUfR@ z{r1BJViHVmg=f~27QSqN)}MEjvz91BbHRhbWo<*ouI)w5UwaPTVOi)Lp}Y zoJq<@+jM;2_+d}R(%~noMEr=T#C>403inH2%%3^lzU1mzH$$Te3Qv4;cvFg)2;hv= z;CXTYQ2+?K%HTV-GR;p9r1U8b(iz5AkIQ$!=c4b{6ByN;p7PFQG0I%fq5=~-E*w%# zV#(Sm`J}X2$3)>xsPlWOOpC@sy1CRbxJ23XV0ZcmjP3 ztrQrDNQx%`{{2?CCuiIekJ*E?myyKqKSwYGFsPf2E`CrW5fF+;92HL=Y8RGjm>-$Q z&7TAHdmvrXcZ`7iWf|lAV`&IZTJ`8|Cm@kf?-ndxZufGNBaieU{^VSlQ*s8Ag(DSf zgiE9sjYczPoSCdw-oq21nW&{~MWDbTo5h(&N$7F$Ag)umbZ64!a?{+r42%oI)yyx# zB4(_pwma2WzZ`!sN0ICt9(;gOb>FGK9g=W zphiHw=RF?k=;3cE6$G2i*5f94dro^x@Bhn5IKR!OluilF{18z{rrTASu)s_~7tkIN-R-IkWRXi}UTe+JK(vrCyF#+Aw(Sm&M4+2A8Tb-5DF$E;XlKm9q z2lZM*zMz*aV^8PpFJ{y#yfiL=qhuQOrhSQ$TfZ9gyMoAcACmIPFYC#b5;& z@8dxwspntdkGa?^e^KJ6YER~+Rne`K3-%cT91moKOaKy5#JoM;MmpfGfhV+hAHKOt zueLzeqS?*qQw6rmSr=c6XYpBo5=YF;KawBr2K6xhCYVl1mNSJfOja3x_2(4<)}~xmGK9fwv@$C!)=SR)$#w$=wb=5_ zmf?cw{oNi%`QT&rm*2dRKl3I$U({<=RUs2X%Krc^v((~A0$D$bul)N-*;qM%`wTb- z&Yr6Fq&w)KbV8%v7j%{oA0`I!i6JOjgOX_~PmXIWuPktn+x5q51vwmP!Pe|^na3Bp4?8;F;(+i|vpN>-FLp*6D*++C0Z=x|1rN+qCC1w6kTcYash zTkpe!REln0D*ugdll=W*+&4Pu{l{l#22_BDVUbhmDre5mjIe(?PQ8ZATiRGGt#>cZ z>^e=c&LsPeH_HrCssz(&qi?K9%((AQD_VRoR~eCSFq 
z(`!7-+m?XRHp0(D>fW^Ext@ljG~hCcq6uH4+hYXlTH4_Acp-`U~ryDCU*R0zue%zVmheJ@%Wo#rcsBZbJRM;zUOWq&v!jyu-!Zj;-_%Bz}t5{xp;l; zvr#kaKXY~jZGB|skf=wBk+^r>B7bPEE6@yWh2Egl+O`M=95Y^urd#-8cxfK2Cki&p#V=Nj9zuPg;9>kv2Az2a44^UcHA9})4OYL4^S&1EseHxqJ)i>j9kQbEro-je<140{d^SMHaU4NVj}ME z9l5HFSj{eDaP)q<5Hf@8E&+*MbFl^Y|y*PuC#$&7T%&#Qvnmpip&rM zX%@5sS=)Dfszh9SZ2JHM%yPwwtXEJ@e!5HscoCo>f;&_k&es%d_>K)ldrQuA!^dB_;PFD3)qO zlWPU`2`4^q#)bQ4P>^5K0{kqF``aT-SM3q0d1(RKpFvHn5L`Y6*LW4$B~c03t%O*z zUiDSfzClI*V2cL>s@MtUmAGKqDqz?wRZGd%bAimhgIsGk1I9I5J=x@J1bG)D<^i$F zy8+C{A?{s?p;umf+%AL5#%NZQt_1%_?6Kc<8~&r}mzAhf%2q<)bSA!>)Cx4WqW z+4Jra>{H8}CHH)0rz$bufNjV0^xV5wsj_ zM8ki2v(OSJ2ll_fyt<~2 zW*!}osl9`f_Xu(Nl+t2?FWJ@0DmEY!A2<{%T(1(h-KIK{LZk;a%`}Hx2qEXMV_~clSZ#`YT6TY&(3g8G^@#I39?ZzeXc=9u^C!-9v&vE2Qk5 zz~^(&?M37VI{1NHg1=^6ad?mqM(JiuvzAmPXB#RgI%S$9ub0p;yXT`_c_?%k`(!$5 z7o|vJ!6Z3R^h$DK1F?3ye9N87)NwO%|K*3rw&m*<*VfIYp&4Y!v%+uHZ9|I{pcWRl z|Gd}omWthO34FEjrE<`a z$dQYJF>&6?(aRgZLP{7Z0Vkl3G+^B1D$ZG5-j5xh$!JUd-oo0JV*s5}PoHiKo&pv# zkU49RbB%vMn=)J>mZVT|A}tbflY3F+zl|M2W5aG>F|TtX}39l=zS!_gDQD9;Cs$)rc>G*>2?HMm6+Mk%h+@ zhoh%_+}vBy22Fx=N%!L5_a0Pe--m-=41m=*R--O{H}VLIp9oIJok#Th;5VCug1h7_ z-d*B=S$$vq;hekg3pUy7!Nc+DFB3?DVg%YDL!Y9Vc`1v-AxAyry<(jVMwn}81K-kZ zs3kDzB`5M32C9($BOeOJBg&6)U!&)8Z$`NY`@bl zu}`U{mdN5)PgEfejOE0Siv8APx3t<#Efgtz_y4H-%fBkSa0?U$q$Q*qq(!6u zySuwXMI@y=R8pk7d(n+_cX#KR>)Ct1f5H3Vocja(SirjHJ?AyA7-L*>`79jpDTqkz zepDH*Ti{wh_5Mv`D^`eRs(o!6|2>$1@c8A{?E<6sdIR}y1q@w$T1p&Jhk! 
zses6l6pI>Nn_-T-FQGg73~7Sp&4?0RKPE(OTZ-pAOt zss?2W0oYl#*X(k^p`PEvG9g(>x0yKunrSrE4yQhsgPj~k;@G9{oNHzozTXF|-#S9O za>Pwlpa%THkzu31@JA>O#j~i72RVV-?9KDD+wzI3{f}(6kGM3KQomR5RaHNCa9wjV z5BW`xtEj5FEOQeqxDY6);NWyGjOTE1Uz1G4MiaySO2!Q%zWLg?OyNREgfIEt%uAF4 zlbp?BINfC{m7l<3$%VP(2oHJbiup8&)#=ii6#V4q3qc8tGQY_T67 zq1#`-sxqwjm5$21J8XVMyCoR06CVo;%d3maJUld125W6g|J~QuR8&-Fq2M2}uo%R3 z7a9M&g1!|wyBjt3f3HL)GJ|6~7Fzt|5vZtQBTc?tZCf|T=BA(fss!bbO!Irt!4XWK z?P0U11pe=vjCbPiKOaA<8ij^#WP3Ey)oYh_>yV|e`N(D--7lmdd)L~Cx3*JEc$~;I z6HJ@Q;OT5)#gAPElro6<&I;=%dNkI>%H8ImO{XvF}uN78f6n zTcW49qH;l7@|f6GT{K#{XdkJfg~% zx(1E5{<&1qa}R=F&XD67UuPloAgngSmC5A=b(jyto;`KKq?V=&EoaDC?`78;j>CrG zV(x90YJPVgf~R&l+o9a!w#qFLz8yTJP;xX}e&-%?xV85D_zTW!;kistui#b=eUG>B zo5P|2E~~!oSL6S=aTi~bJ0%N`Kr6Yvsv+}e>H|(qJmLdvkI;;|TAOg^;afL@=q}1t|LdF*>Tvt5?tx%Dk?yG>WC@>XxeX zJ4gJ*A`$B<9HU~^e`fs%xoKV+HEwpXpqe4SL-@EovU(GNlezTIa=d};N|}Gb<({&= zNT!Zhxf9>+%r`wbcnfk#qaHxq1*uNEIo#tucP2?Qj%QKBzCJU1b9f-Jt3vDK`sQaG zeKd`JtL)k(jr~%C;$&$Oe*k;POfsMGw8QGarA}F8Wv<(OqU-L&_p=d(_)lQEzg|;x z9HXR*dL(|a(U84i)+gAOd{`rejvq8FW;@WH*Y0`YTI1q&2BHYf85TOfW10|T=sTEg z`C3f!ncv-0)T*#hK(H&=M-p?%axQGkPB69Sx3@}%M?m+<(;^6Pzg5hXVQL)om3!wB z@J&j;96yEqd=&V2rez`2?02&%PNTz%h|}bZ)jr=c!0wpBZ3N%WNHA7z@k~Wlm6MZG zMrlbMzRk25y^I4}@TomPedxhz_JZ5`-$}>ZoE&|RiR)Xc*yd$&N>?2|B#aE>!+y8F zn0LWbhdS(*T`+QqZ?#MZq>REtao?h}cAuM_IJYb|M{jU0X^8G}}WaI8=(7<;uT)VFV$pJ`dVEWy*2T^X_V=OR8<53L*Sb&73KYBu2>KJe@xNi;){=`z8^0n4;n zF2Jx!9OC@O9B!`ERqcx)-tq*?Xfq*QUiQNf;y)8Tdil=v=)7I{b0TOwB3Q({?4iEd zX^wfg3okUwYh9KfOc(mGCz2_^pPBtnlu#7 zp79(t>aWaHXQuCi17;K3riPC)_q)5LOrGOA{nAqNa`UhQXjuk$Z#-0DWgDEx@+~rs z?=`o_S+-sVJ+SOT+@@Gk`cgOLWt3#u{j@78bG@s#|3y@V$~&Ba2{gqOm6nR}d=$8M zw=)6l`h#gaQ9wO3{>LvV_*K~h+!vB}l0GI{`C!4?MeMIG18E^h7Z;b&uo;|)9N-1D zz!A3U*X_vgc`sTZhYgTpgTMl`+KFPs2G|jzYZLgFSwygs4d>weFvk!xzeqnbTQe*S zSKgXxOw}afh}Pc`j~hAYY)OqKO*x`jcy0HPWtafi%82Y-)eyI6*rDDtHk?sY`B`KohIZpLW_~pmN0qj2B~ImCu$>p(D~ihGf&XJW#&%! 
z-icJ^X2J%OiN`qJ`W`8@`HEO6D(cT@7t`%9Sw}P}rRlZ3xci|ocx>NV5Bf*M0EKH0 zB+;q;r1@HhvER?%>NGCO>O|x*uWIelwv^;JK=g3DpHe(`llSH=pevqR)o-VspW;8y zk5(Op`|{PdyP$ntMppLcpIXqs)1=(#UF}A-!Ko&+pw(i?)rsU`b`r{OJ71sG?ib>8 z(%7kRV~VCO`S)K;2Yw8Fj&vHLr&`hX{8~)dIBBeaTWlEC`zf=o94-9i!E_O)cv_eI z)6O8jE%V7?v;Ogb8NzcIdk_)Fek;BZA>@yN`np$D@$x~8iu zx#hLPat&;AL)BV>wpw{PLmqFMDHa}CPagAfnR_ROl4QC`hxFsE{V@jJh@R7O8z+U# zv$ti#X^>CpSif2R>AMCZM?_B)P2_`fr^;&qq72ueo}negn>l2`Ik@E)rG)zlY#rZ_ zHtl^wmAQi>j@jLGQfv;2La;P%h>b?Q4a2a+xwxlvFq(p_VCI6P_}GwdR>|*+?QQpy zvBV{adN|N`EZ{LnIQ}k=@I|S&xltDo!7#A}qXb4lP7k^!tGDU$U~Q#0Dww8-Hbu#h zIr?2wPn%4FO3%uKCKMr)-g!ihFL$itm$|Ze**cNqq&9vjWEFtPWB2~RdUZG#l2Oe3 z`|e`3*<-1FPEHSwhB3{!+j3)Hp4z}b{1&7~n4S50|QKaT>;fVoB z;KCC#@O*pN`Bnvu{UZS%iBs%xXDi~$oP&lF!?qL%H(Je}80ZEi!OD?yO=|`e&xk`O zf7Gc5+&%2kDfh%_03`?9hJTE2#)Qg5vY;RAz*Z@|p;%TeO0tNA-q*8wIXqpId#b?P zq88Jgf_hus#_u*7rR)~F^q^*aYa5epC08z8RG`3s4=iZf{VP_5JHlbZ)n>gMxkNgj z90(E-grgakf-Sty6G#6_P=G@tWVhn6Kf>b+IAUsN#AmsaDF*7C_ysp%9*8EtQ!uy;UC#lgjCv z^D9N~rfW(OZW|Z6HJT!Vp*))+@4w5Zek~-S7%TIO^Mu@G^tJYjc@5F@0|XIye`>!e z(bQk(Yw;3Ld z(PO-EQ{uL6)&LFz_I+TCn6g5#SOaWY#N~LvH=&JFw4O=3LaC54x_)G3e^@#qBZv*e zPql8I);tDFCZ|?`zk6|t?76!_O~D+U-ECxdaM7Ogqg0#Y9I^?Iskee2wOBi{$phyX z4Vd4NhHJ%W`))i@BhMn69|n+x@QKYbj+kFV-kkquSc~w)F11DDuMWQ&HJotoc(D0B45(amhP&Z(+IBf5klV9>+IL*(kt_nEgbnZr5z zc7XFfpA?t{&Nhdc?1ON7(-N5cl{M{5Fz?&nUfkcTwux*hkblkh;LaP=u|GA_0WnLv zbs}MCKREgB^z~NplIDi~JQ0pLPSph`pvW{iu1&n%tqOU4VaIEG(jI#Dm#dE0;{6(1 z%?w8^s)ik+&JQ6dGF;6hxVZc@&*a+Jq^DRdLI~jB$US@NEjrCr%?QXqqLTXz-WLGay$sD zO!a{-&F*ROZ1ARuBMK$X`U~R`!mbdSu22tY)q!RME~bZvn$lSt@$Fqjz!3360|3?@ zK+cbNZ5?fMi#mvj);>}6=T^XW;fH^^AdXSF|9q6N<$O7kEN_X+h8UlL1E$&Z)GN^Q z*poWya8xo~NdG%;m=;7ekQbu7L>cCwbu+5*>cn>$J-bZozPWGC;q`+3R1`vR-=ZzI znCEwUl=dRx!H_G9DKoDuw_qaPSb0ZP!d*2?vImPq?j^jSzso;id!U~%?LD9XLCK|@9v5|fSNe@bE2#M zKkF6!vCqd>2fZ3b@@vN^Xh*Y&I<@u_MK+9ytc1mJ(&XBBm>%ZJL=vsE5BH>>pegX%(q-{1F*>=gE zFM}+W({J#@pXsaLdeb5^qMKsPbj*d*IkZmYt=fNIei*OIBoVMjT>f@!v~{yO>Dw)8 z;Bh$2*b~6l4uU_7X1QZTaH6ph$fU?#T~iocxUa<87LJsLS4jQJQph|X 
zYwiJAw_`GL=iTGtC>87b=!Ha!pKKNn(m8p#>^VBT4>nVu%xD61MtGNL*tp{3EGU%y zo-QWxY)V7pq`_R}-U`@v2d99l=jw=iZbB(NlCQnI^ZvAGx>x9X`>l%wDC@r0Do2u* z5=bkh4vqR}gh_b(0>Ss(XgOL&UeRO+DPwo;XUfv#S1~mQ%qb+L+*zH^$-?}B)~xp* zE@>UUGE45gZ_dLVsHL~~n4xm8QtLxFRf@yFz~Je9{{bAgB*i?p1`P8lLub2BL?vR@ zXJLWyL#u3&uK^v1%Ck4O+EAzXxXbr7E7s!f()N8Rti^1tjm|q%9b z)OtfISd~RB>KpyJS(w88@Q<9#nL;U#-A=&O>Et$gYAvPZuFw=yMh_;^6*Rj8&rn`b z@e6uFyK2;f zUb~tvl+o$*W~!vW>h#5hIg3Op&xIM7^%U-BPAPvj;Bk2HQ&%Z*YZkuc;W842&BYsY zf}6kI*r#AZR2*1%zCJDs=!XUI{_o%7T|qt@#uzXPPXP+_s79kpI2F~2o#{&Sgtv8j zIX{F{M_NN}Ul_h~;t*i?*E8f#GCI{W)UwpKlLHR&NDNu9yFA6nd*|q5lzM8R88YNL zNT(^=HOUQ$XP{t_b2MaK*sDC2ks{Zom)k0{L{ryp-m;x7iuN8F z{DsY4o_AO`bSMS$9T5=OR6L?neJpo5=g%jSap~O)4n$nBTP_!NeX%_(0fgU*`&W8E zheZoqv}$urRz0^%#`In#2yPXOwm7Zn(eT-qf_hdnO_5$1+i$(toQAQX7{$y;sBEQY zwLkSi*4w6uY=cJc$ZWGLST?%{7{8zD$cn^|Ec5P1+uOYcnj!&sFG8A&%5vF&yGBs` zn4-c2QbK@RFa_fW>KWyoGP~?4U1aYyT_O6^d;`sdAH#Q%){TE#>0Q&lZCx>Ax15i= z7%H)Yxj0;sq7U9+@Zcqo6BKl!6$z@Fv2?*XhBGA0Q^))X>b95lf?|6gs3Qoi;5i$8 z7Y?RL2INU##c0>v&G*^e%x;NQJ;(#V-n3Ykday4H;&r9t7UQiX>8WXdy0QP zk&&O7RY3$@6B4;ETV=f!?QrqJho}37%aFz`?TXfN)}re8PCbYWd(Ho;FlnAE zcc$W&vAm|nxVwX$Dfu!cI1w%fIMZA2eJU`Ad!*5%>Y{A^0c63 zu1NUqq`zBgCN`P=epl2M4W#hCqwCD#!5U5={P@&5{0!Kh%E?VZ^poc`7V}7*@(KdS zJBVDJ@p;e%4>)PO(Wr^f^GuZf74}UKro2^uF!~#TJkN`=%m}5hj>$PAdbT!XX(__m z$B6q0Hwc%K9JNXFl@xO9tSz;zQxBKVC=4|XkpnzcA(2S$z1fU<_Gz6-a-OzQmfE(o z!UTad(X|dKwTDiXEqgGkheEM}^Brn!EmlPpulR2& zlvX`Ssn71iiMa@gQ}FWh&1ck-l249mjFNpmyPui2C+>7PWMyTI*5_DZlli@}QFC6M zU;BIqXj8qD@b_#po0o^@OlQnGT)CR7Kb+*sp_%D|bX>BKh3`*KdlONu-cZ=Z6cKae zi5@iA6|?1rzbK0XWgg(5r+w=^x=@Od+2`)H+)m95QFJQZX}=>|#sQU|25Zmio_@Q~`smd0la2mJZaN7l zlqzgXz3p0UlXxBG+2vI~0sSw$!+j!1=m9h)Q^>(GtHfI8+vzUA;}Z#hZ3>@y+6R10 zF)IuD=3Pk%)H0h^J%nuALY22@SVqf74zQL}y~x^xCE_vGfFfcu@x?zU)3}>Ez6E=^4Zcfpcpi8mzvh3{%zp-nH(qLbi<< z846deORuM-#8gP;VO}}xe3h>rf{H|y!#qsjaSQ*4H(3TT=>OT)KT!Q}*=+Oxw=kH| zB8G5=OrzV_|7(QD#H^UTCile2m2Jp2-L<(_FCEF4SX5FX=Y!*$kEZ+QaP&M|*Z~hu zNSpnMDg?i%a?B>CVUVBi=EZLx!c1qJ0cfnwf&mZg*9Z?Z=z;WNrpfl1b{T%QSg 
zT5TVPU~DL^=VB1iTSUM-EUc|7EOy~l;W8iDv+(irJFk0VCmFAyE}k>S8V%&b4%GSW z*MQL5^;XR>SEpwyCoiwu{8k$;6ne>s;04_Nx$UgQW&YKh&SiVW(m zXkN=)al2b?Isl3K81&yiY`kwJT4%C``|_)2)^q6j<}q6QdLc}A<`+YESkzncBWNN} ztW(QhcASk0B|Ri)C>s3sd0=?DxeeVvp1a<4!B5AbF1j!}8-;^GX=`(zBN-lFH3LJ4 zLBfIg>(h0wX>qV^r1>J@g5!H=D6XzD2@cM}n=eN4F==uVzz&!%^@~v2Vm?c&5mHfM z=Dl5c3cddLGSoY?hac`f=#^ox{#z`sPgenz;h$Fxpau*jYPR%)0N%X@?6_%oJkYA? z(?HVmBWq-2R&iH%wGf$iQ#b&%00f~VbMkih=^7q5-buR|QcPvm4j#nyHp zPiM5^)1|8q^_7}hI;w>g^u_ZY?I2h*BU2%Kib2)!f27q0Bdk_NT_Wcbn8Cpz>!E0U zZl4$tDi+7W>Jxnkoc!jQPJKd^S0*X35d(58P7r)mz#B!4@HyS9gHta;UIBpp-OuF@ zg%wtYBebxpRx`htK}os}Dq|RMJ4!GH?g9{D1fCAAkiPyC6qFcyNMLuQf|a^GZa%w?&(x zs~=ToVM9b>BmHvr5(r{?%rs``u52-LuaPKDmqyvQHs^UA3@6+4zmynsjyg>om$Y(! z@&7EE_kQF$6eRAz*NPlGVw`HMX*RG#q?X>ia6B0Gzw_{#Fg7R><-W{YXgaQ|ryZ#0 z#B#NLBF(VMrN{8h|v30t5)9~~vkT$|#$E>Ku#^$@d4Tzd9$ z2^y_dKE_=dhvI&ad}})vZ!D>bBB&GS%lqh}ZT*%cIB{(4-}6cTd9IcAHo_kb&s2Xh z?N@G;<4sH3Mfej{!!Mx-@ZCSgkF|dUsS#a~UY}mWOJVLQW^50P?rgrDEZ&1|0x7f| zrDz`X!THdJSF%7zsIq;_!Bu_4}R1>kd{D|7#+vc3@Xh1Msea`_+}ItnNz#m zv;%HonrH;m3GmTb|MY+&hK*~n810T;m#cTynvumU*OHEUV zfyP#&MWM==csUEtl9DTGALz93KGM#<3m4`PZjNLhk$69Syz#xnSu8-QyqiTMjJxeX zU-g^D4xVz)DH@TTQ}>#E4gYo$#{EXM6Xkrxe=7fot1G8Ax%V{}>`&FD!P1{kk+4qW z{@Z6lPloK`NS|Jmq6L&ZDJp_yoPu~AV`Ary8a-|QFd*;Vb)WDh`m6sR)X_s9pNmXrf- z%mYQ0Cds=Vuk2G$)m+e7SNbg+z?#D<1~C(n*Ib#pj+dd$%diFi@D{5(ZA>g*oP}6# z2N|$eYxW&A7{Qda*F7dU3BvEZ9E1rmv}T^bpNTh0a|?RrjB@D|&a_5ZL0>6z5+qd} zNYPJ*Sn(q~7%mW)EwEC>=0j;I5%)I$<4@}`a&jvbyPT^}8{=x;lm|pt<-&Doz^wLy z{Y(>jjXN!}emSVmF`Yo!kO;5P{jVABcm7bNB@s0hs*c1NK>wcea2n%c6#xVSh=}%! 
zuSG)xk0e~QyHZq84v~N)Ql6jp+;2qf!fOn!nSq_Y^1~)yIM$^k&GH}nFN6~Squb>nq?Dp& z>P)l)8#r@PE5vG%$n z&dZmg{v#EG$Cn|1CH?`NkJH`^<<;?GAEcU*Xs#B=Cn{^2z=R6n2b7k{)^sAd$_QJ# zD%Qat0M@X5Gg8%;cF1gheET*Yn6#xG{F~j>+QZ)TzTZbrm8@oW78D>D03WfkoJ|BG zQq}%k59xTQOwfn18{rO6{z_o%f9iL=pYp3oA znt#@(sfR_g${iO6epHn=r{&MdzTXnlRyOz*bF}{cCr-HX2`Fv#P z6Xmluo*G5(fTdYbDV<48s4uj*an8?Da_)<|qG^tvNQ7h%DEw8#fRMxs4m?r+mfO3@(}2&nMJ;8P zncjC@W4d-w;vXC2$s>o%lqhBt?NGS-|VPB zx})2lU~y8w0Cgng@9*n<8Rixqeb&-xXKQe4Dr(Hn7B(-u0|@h#!G{Rcz;pXO&NlkJ z1l#l@n3y)MlOsrJ07x*I(*;T1$;ojza9K$<(-lwp$>hMO&QP7fOXzVa)<0M`OX0s8 z0MCB|XvpjVFcKR@o^PsZbJHk4c*(P7wk`#ymA$5*he;W>&7$K+H2~(lu#)cK{f>1+{pQ9 zXU{<<1(VM7{<34?1+rHj4B)rp(}K2NsNS=?aqndk{Ckt9&Y8hh{Ws-XUI2cKka}T- zMNM%4=ZMT?lU=lzhMFLd6a$u4& zI@o6@`t`^ln*};1+N^V=#VW|K{F-mM1{&^-o2%;}0pO zRoVA|)m9+k`~qm~v!*0Ws0DhTlD6T_^>*$*9ep7^cJW|*U!1DX^;cuw__m*0htKiN z{+{O4D3Jm&^rque4Mr<$drUaE$X0ri zpYp}F7{80lTi-z%Dg#j#$SP@xENw|me5fO2AzYH@y$U~Nm=f0qb^BK5XxP)XIgwT0 zO!m*zbl`ivb_os+jR@<%5}}utep}Zteig~X&N3MsY0AP!8wJmZB&{PwY9m`7gb zM|x~Wf~TtCRDnNPM+-9BH)0^_p6zm-a`P_0W zG~LoO28%;^g})4+32%-BwK`I!$U>ZNcC1Lr4itkTUXY4-r`ic#S31rjy4z|4%wXcv zYChF47ErR(#WXYQZuEXNFh)Vc=v+R|mBz1GXmU#%ZthX}H}Khsn)|&v3tUoX8_Bhn z1&&s}K4i!HVgVFDESN7phuq>=zMmP^57<@Kd-3gaeX6hbdsTmPK`oSp)XJC6)eZ*Fz4XW6(}PI80r= z23xHRKT(FMe(CW_4F2MmaoVR$vGO$@G;;2g_TO|m3rV0_I7)b%=HLU|B|vcEPIQEv z*vI%p)=kG;)tf**_1iRZi^T6W)&i#oaBPug^dEo4T{r{l7DOg(_wiuv?V>4cWJ3k|WeFq$6_f3o+5!gNWi(dgi zM9BRYX^8k1_*bizxr9fBj@*ollWU*8g)`bp##0oqMFcfQ4&{8|z(#ak3&&MK(OB2Q zW=o)R(}$)$fL^O<6CnV)l$$zb3L!@Ijjq{B8XCH#Yjz+t7$oDzM)L8g`T8v_N%`(V zbqxpm#qmdtpGvy5-0N$WV+&dphqRaI18`T$H>SN+X1H1D1xyYH@SirCxXY99u5 zXwoJA>(CC0Q=?-KO(7fXpZn8nCI=rvz-MIwyvB9sRKFzv15j96TrN~lw(5|0iY2~X z9z}41H>$9(zP0xudb$-h^zM?wkl2Ct)EoQJyQYu*qTpyc$7o zO`G2H$rWY-e9GOmD?M!r5L$TdM<(qqZz^K=H|DQQtx6TeEFdCGsjGXc|CKq*5ZroA zWjf8$pZGR`*0S#=xt>}Y`=O8ildBu|H#V^-lYUpL+1OEF&6iov3&m*@wn3tU3M-uwc^(qMb z3t?3{D*2id?6v;sQswRtbSM8yHT~nIjK?14XQZaFB7)yrLnf0=Uh%~`NfmDU=KT0$ z^$foNoOMQ~agF*Hbv6>P8bC@Ei$Za^-6$+*3Ci!O;~VlGi*(e<^Y0J4(k>_CEvJlM 
zsXS$m|GZc^zE&xrxIToPU_xN^R)=T%hWXTTy(Be_F*JC83nYxddME$fQUSKb*>^BJ z6VGZo?_Ct-@Q_aJ_pqo+%jWU=5ke_tZ3VyQBosZ~v^LsxSw;EH$i(Avo8!foyEF0{ zC@72atw9qzlO-rY@{oyQ6PK$4!4APiY1j{^_0CmxR>ZsoKxGHCAOokG8{c(gmj-EQ zB)dePPBAe;0u-Nzi@m_|py;)R05vVb)=g(L=_&}jg$JE4_v zI9M?;G|Tm$4KDYteRV+u*KT=g@M2p^yxcRguo(azXNTLJ$p?e1!t$L8ID+u5+D3o_Co24xduaR~+s>n!;jjt()uox~EcwcFAX(jHSB=2bVZ? za|Z7H`+Bc?p6WlT5P%n(qp2n_|1Qs>pmf=;aDDK!Uusc2zh0_D)?F*nrv=87Xg?!1 zkBDBNSwcFZ@wWhnI!i{BcYdK%qBq-ATsfwxODCMASs{g+kw9l zeGCkwB4^Ye$D9BjEcYzpVCJQJkXwXbTS`lFb{K>ck=1}^W5alN<%!d_fSP0c)wy0I z@1osnpnk({`Jj1KT9cw8&0l!_#Hwwv{L5UGZ^m{}ty$X-<(pR3w4s6Dzt(5!y@I?B zQcB2Dp_F8X4~8@N2Gp;70yP##Ya`(N4h94CANO^`yJ*`%t{ zBrIfkunpvx=qH1(bV>RBQbA@g$7|IQ_8n(vAPuNU-5NdepL1l>=UWzq)<1$Zf2W#1 z4${K&hRDZ4r`xLMz=?(ke%z5nh%yKg!%9qud-=ULAi0nhgo0W`WMpbk-I&Wt;pF;*6L zuY8%{;bi1FlF6@B-n4T*qIGe*J!@a8dcNg5S-HI|Qj)5#pN@>O+IG7By4A|-1mma)Ug6d9ewZFB)pgATaDXez%QNKN&ukOcCI2x{{BaJ~J*Z`*exgqqsprewi~*%3(*^Uy(YGICh|qh9H`NAy|tbd87R*(>e&@i-t) z;E{-akB95uZVzTamf4SM`qiy+ldV?j-@kTvjo`4hhga7Q(!q#UuE*tXnq6yc@)b|l z;#pM@v$nH%k`Y8o0?LuvN5qvTx_}T;R^6!lWx`f z{QLdX5#@_zn=G@vZCh6CBH@ua^!eM>ndncK!(QGSKw0n1Oc@h&-0~i%(m?*;e9)zQ zz2N@Il@uqBIp40xdyC<>%Ws_?bN(fbrD}zro<1TqE_X*>ulzX#tUA^`?d4zOmD3ph z*?Zlmwi3JV((cLnyiW1FQrh~oR{H!Hy&>wL;~P4lnV%&T`z$i(Ro7U_{yI>AOn}XC z?-NJ`B&iLSZ95YORYfg&w!J1+)}@t~eO8V`du8xTb)@ehkznfM%nW>kGK!1qC1nLb zo$B3kOBdc-flZM*|54{a=Xy)K(mkDG^7X8RE^r?=Hq)TO zH6E6xFwYIZU6E0i^PK} zn~6Z+kD=w-3YN z@3LxQn6~`TpZoIw={2wGPJB)lKsSHVB^d9B)xEJ+5u1b*im!Y&ts%?9jvo zf%D!F^st=BewT><#?YF5ePgHB?KiiqciY$>ec|wOU6^x$-Sx5s`RPRF6fha^4)apt z##B7rVOkOUAkNExQeJqzs*l6+xe0N1Y-wQ749@aqs4pjOZ^ z10NEiQuB#nj@#ZARdH{co{~N0qsz9r;Ne9GnP8~0O!mx&bq3;`IX`?YYUeRmS> zFp%^)v(s$>v~~4J38XmgN6X4PnZjIW=atB*#P=x;YfSpJ!)F0v+QgCfSGi423%`Js zOjbxrWM;eT-Q(_UZ|@k6bZAkGf!%gN!WTIq`Zr*QT>*cP7y^`rDI)Ygfc1<6cL?AB z*``bhLr21VDymbUOLU_4NuxObWg)qxM9~rWGi&A2x%Wj4+5c$)d<3c;6pD%Ux3bNE zF#(zzt70uqSGjf}&rI0YM#JnbT`V^4c(h!P8k);D8!|&D7RcclG-@Va2-jY4Xf+xk z(aOz?=WXuS@9C=%c0YMz0l@kXsf3%i2%AwGOh-p2%4~Dmm13&te8wB7c+pg*)_xEJ 
zEk}F4#?|H@>PQ$KNIy-UK{z^?lLWj8O1i+mg|N-_rRG}@%M<-i5ez5x`Ft5 zyxhwLQHk4)Gkvo;kv^PCw zGExG$+q(YJ*HhJ`a??$^#J>xsEu}!A0hbqGiDBq`soXgiMrJU){rHnptIqa?3qhLs zdRaLGBowoe-Rk0Vboi(7z8HNyopP!-@jriWsSM|S0`iG9R)cX_>R4sInR!l&8%F{E z3oMYjz_(ieBlp(9K}2ht@BGX#kjQ>qIl^>Qq*F{q10>2~W3vfq?) z=iFNnP_ph!Hu0M*`At*JXo}k4cWw_F*=Z*8yOuMkM?8@UXevzjiXM`18{hoZnjNMw zwKJ2PS?fm2;`}C=6=3hr0&+aG|==8^$#)S^d587mxqZ?>L=?oHa4`24ES#`2ND`86hS7RwB(VISlVjOt`oB?&#JZH|H_V-D_ik1BG}1}>~x5X zjErv^8@Kr&-+}0pQ{>?X5ruzmcQPT1UUL|D1#cjsbC}15QC3h#2S18@C<0S-L)@0h za1TG;YcT0v*a5s=LMD{F{s?-S{wDLqcu`9^vb;r2Tk9@d*-JGO1vt#VGzEz;E|B0* zAr!CG)IbGhKt)8c;{>Lm5?zyA9dOWp?mf3=ufVnqe^_Y%_{Tmvi%KuQAio)r?v+14 zNw*qEet>W!$(SPDJv=oflu#8KYMKk{EGZ#z_C04GYNQeps4t*5`Jc;621J+txn#&y zpe?8WU9sN)_w>Kl2Ic?z{+wu`>5?7>5Oz>e3*5m})}Il{jXscTQz6K)lK|>p^{4Ck+MIfRJ=ut>G_bAzwptIB^r|J`EQ%*Yy%2@z~ydK=EBrrXJvQ%kfwTU-uzS% zkNSRteRBI0<-s`dk3Y!{@$65A#;->Jwly zL+&0{82)XaA@7|Fdhngb?YUriIYTucLD0{W&Cl`*0U17D0J{_dmDk>VXMUa6K98FI zNe5uu@IrfkmyBfjhlI1U>qdYPdhMSSsxCD6rM=Dk@2Q~uxCmh&`|`geeo^K;gpU`x zQHWdRLQ4Au=TuKMQ1NoODzJ@Gu`~DdYleU{EqTE^k4lt;s8YS^qF} zC&U~H8<*wl$3g`KFHmzZr4&{SZ>GQ4zqIJ6GK(OYmHtYFS$usT$M1f|#Im`t^R)C$ z`G7C#zY+;mJjZfs+h}!FL9Zqv`o2IU=^L-%nW<8`-x+wgP|$qM@B?&;v)k-yFYPa+ zo=zmfzHy8}N(6LVP7>!e5y$) zdoBnpXKj~W+Qgsp6iw_jp9)4KCH`A9wR5}xDTUM_(1%JWYDbi>BzrwHP4JOi#9)fPj7lAlhwx+xFhmm)y)%8#|vQXpS*5A?N?* zomWN~00ChJ)I|>@TxzSK8*z6+OhTk1Tg-&Djz9ChCXz}@NO4%sas8>b8~CXTjD{xQ z;{ceU6YG|>k-H3}6j48@?upG$>Gi}myh?d63E*{9Ah$%eVFar0@QipJ9{V=2S>jn!^b-Ij}FkrDPDw21)^^RdxdO-@kw{i*Svk?;zgfTzBM zFUzw54}?yq&>KHPeaP2fpgqWA)l~!yn&-EFx}p7=1T0c6tHxER(oCRpMSd}4Xa`s( zXxs-@h$`mi&rX*b&JNxHObsq6)N(%*J_PgPn6pxXCK?wD>bh6AIMe=({TUmkcuH92 zHWuF|Du+K^cxLvqdKQUEe98jW55P=0=th+TXA5|GYjjRDrlP)=kbus1ugU*7L8$wa zI0IS%aBcwfiV?~Iw)sC74JIIP|M%*9|39ws{{w$clLZK6RZ6I{#)bQD)Mwv8U;Y0a zh=2u{=XZ_#PB$r3P6-~63>w=Y#1LN_17xR z;y_b>yhD~W0PX1%bd=gIFfV_J82$eiC=et+t2&K;x+^v|1i#qMc4qoXTP|$*MTGQ6s-;~Va+itaOjd1}?u#TV=h%*^0+E??8mRsFRXyE9wL-*@zISp{uw zOxiUuz)?85mgQR{B{uD7#T4xDsh7-(1B@=7Q6~(1=+1`{TZZt*^srFV&ZNeA?5w~@ 
zMRwwUrSCy0r**JCx$6R>kX(ayea5o2@`jr`Qc>p$+kb@*1Rh7g2lu!~7Jh11V?CgT zI@kZM4Uwpf2b4f(7LWHo6m?pD>a;@XHI>DlxU^dPN|0JYGxVOMa|R=emu(A@`vsrM zNneBvzvjQi05aJ%;BWwIh@#7p380bH9m8la9Dj@`KsQEAeB)i&F~m#`h9;QCs!e?V ziiDNHgD`2;e+S%E#qGIPd|4$ejVtRmYZu%r!lp=VZi5_CeUQ3>m%y%EQ@y}L75I8O?|2yChe1YBA*ow)F$$jR}N>nl)|G2a-8YFzL z>gcf3At)k8@N3Y97X>{{ZV0FWZLEbDzT3zMSA1vQw{SYRA?l#UC$_s^`ALC{cCvm~ z`aOqyR>F1i6DsQQtA%@N{2{WTgv!R7O$pLC5n{2e>NFY}a@g1GyKks_nA-*>B8#?h z_~OfA4U`WJp9W({g)Uy0&@&^xrgcJZ4`PO`b9N_4n;wFDEo4mD$3U-D>B?adWw-gE zL#`xKxoxqSr5RC?N|;)n{3c(lBaz;fS}-h7e1Gd6E3w3C?G8JDL z?R`x^-Z6QV^P@X>qB7SfX(>Jb2Bhkrc13x%3k^xZ9fEh8TU!aqye2vRk4MKfu-QR2 zWWSw^;U4^8>O6Pf!q^zE!Ok?(VJ~?xx7I3c}^GCzOT{2pqYOb<=qI+@Y zI`#h$_LX5(cTcn+3Q8kNHxhz?bccX+cc*lBmmndfbfMg&DZQV55lk=iU}fk#*=_FACgB}yih|>c|C#||tRbN&mjm+P7%MqL(m6KF zrECRe%)!PDA=PERmYi-WmivOQ2Xxe)(Z!nlIix# zJIg+M^JZbSr%b-1lVWwToKX8fqnNO4u5aw?$d9mE*224*OSHRA9&(TJBttD~Lbd9I zWSVqh{Jb-@!5qh-9c)$mKO7Nxn^;SbGa=Oms66Ac$Jsd#DP%C>Whl|WrgX!|Xyf#Pw!T~-8}n+gJjE(1 zd?ZD=mnQ;G=_>mS5qBX}YcU-4ddb)oC1s!_?<6i!IU~OaX-`?gCBIvKxN|4(NZ_A` z(E2h|g&!D}AQn(ex-(~l2R3e2eo^SDuA08TlKMbNjw2V-N0--9VV4HTvgvy~UuN{W zWH!b6PU#8H4KJBH(Q4?u)D-n*Na?GbEdZN3k4{;^KHQ+7 zIn6B9rtAdcM%A4)hZW$X%O!RweaWAal7G9i2K#B9MI?mzk&Qfp*9~2P` zXrHo!F>Q~Jom>#Ab>MWLv1*v4Yz+l0q(!2g69opF)%&=(p>=hDvduV05-lU(58PP_Aq)HvQ!v^e#94ya-c z)*d$xB%Tn1bwI^o0WJB4(YqB+>xRh6Lx*jBW6yvApPB5CUpbtdDz5Q7De%VL{>;oV zUcB*u4UPMq?iU^IF7+u{ePkw+poIBPN6*#rI7+=6_+wY31iuD#&E~l(iKjM6uzf3M zu3UN%8A<@vC+$YH`aA$hXOv(m&@jFP)2_n=H?XpT;$ad9}On6}Vkl`zr$0ayy%13y3dgI-8Rm z$mULx78lN>*VkF*1MZ3CmW#=2OC%CzR7hBohB;4EQ5`oUGy7BsT*_0963{H%@3iz~ zFBYz+(J>PCVsE7S;*Z>?UT>b{vhsTruthJ5Obb8IT$_SR@4+@X9=Uq+=J8u>4lY<( ztic58zF0q+Cn$N3J;W;aZOQ!ESq5N;Q|5sn36WiS>K5r;&6Abj8@-9d8Y@h-y{_oW z@sUoNb*iJoj^W}SJ%FZuXZUqwSVWXLUL2U`^hpf6U3;7sO+}$L7H01F*qlT^8i-2* zM=yJYxCvk`;PTuvDqf|rWW^{}RCxSZsqoi!#-(EKg{NE%%-aN8b)?ndk3N2;kiO8i zx6>cI<1nt7C)8K!kI?mp0sw_2mb3?lVlqYd;&HLtQy$(I1_c>OhA`O43OeUD2M_s1 zG2hzfVHQV!>%}BkAnd&r{iC+SWqiwMcP4fV)i;)5Ur+4;`|TftRrvC{`W~kGNWqYp 
z%FNPUhClG_&vjKa_7q?jfZXWr^wGruG|{rEUtJz+&vUH|B;7rLZI zHoNB!COI{hysb&!Mj^iiw!8-H9NEHSw$`C!^yI^}d1lD;Uk6!(eUZT*X(_n*WvI<~ z>q7`_Rbxspdhk}5rw(fxU|__a(hTCt9>2Sa?sB)+Iskh20yy5d<2Kp3B8M7mT;c;} zGSKlELiq0YmhTVN_eZ8;wS37T-UQewAj02Lt@JZ@+Pc1784N$ZaYafRhzkm)q>`&g z`G)k81b~D`9dI&C)GZ zUKd7@5R7y^PfX)Y!DY2OKC_Up?Gts8gB$@3S&7^q@628J!P60On40Eo0wB-3TUMK} zv!Qa(?#*jt!eOkoh!dAPMZsft{eB*@XK%bGF3v06$hCOB$3E@2;(WUOjvRjy#k3$X zLxQy^R*D;w0$BWR4(<3s1pp6;AglpEAKtvVhYSv|(~#^~%N4Z%ZK;kj52M#cUUh!p zVfpK;GSiNc0oN3|;{dcAI9*wpV@!u9bp$9t2Q}Cmf{W%`#-zQrmPWp%NzsI=T`5#l zkX`hUS9`j6>+>e112ROsI1hOfZ?Yt{gVaB}j9Bn9;85tc#RoE59|R2En^Bd-cv|hM zmqlf5M13)klvNrWnldINySgbg8H50expmEJaSQQeGk{z?c(+=47Z&*{jK7{5P`sX* z4!i_V`NQ%+EIH>NqkMVZ1eHHG_b0#(b(j^q;`86+gV@jc8DNKC^Q_CyKbEZ`YPgoU#(X)_%`yBUi}{|<`3*Kgs$?!$C2|E!i8Dk6zP^FNF;I|@*xb5`?K+k0-zo83ic%f5B|a*-kEQF zIJLwS89Bv^i1`Op0YBES2Z0sv&wlbnyLe(MuP7_vknkQ~0jLxa)$o;kYX-$ove?m{ zLt%#@v}fcH8h45f0uNE}J0X48RuDjtWV;LPW8Ptn-a_%yPN>)S z7!P<%_Shub)1Q5_3T3ocI}yR(4*xtp6^gLnyWh`PJiza_AFRsQF@6fG>yKZyWWkLmdk8XQ|}yDNbM zs1gGeF3#A`+^YG!!-bgYq9&91{P&MEO&9CUSP2y$69B9$R8{HDtQ50dfoI;0RURBtq7wgS)1{!l3KD*a%N`8EX6VxL$~4i;TZolVuu5wm zR=5yC2n;j*37`OIn5obC3bw3HZ}O!>c=nQ|#dCzMajQG6BXN`(kSh2kepz*4%Tv;+gHoBiy~0wOcHrMM4v<||;H9FFu9Fh;x{)qo2H zK_(iDEd~gcm~k+LxCp=OZet#P_P}B-IsBk`@d8BiU_OZez;oY9>VWjVN1a~>sq^gx zId}=IE+yr4aNzsY)vd{7+}Szy2T;k0-CNqG3-B6A3p$p zhRy5n9#F%{V|<~4Q8{(zK~vlUO<(O1HYX@aRmUR z_5qyJFEr_Mm2$!9ULOAbBT2-fwxBJDsdwwx(C`~@i>k4_^D2~9H;jkrDAs@gfN|HX z(H3)2QTH>szt3S_@_!yKa16jWh#A(O@Dk7o%DkJoIb{G}0rYj0OXua$MQW?pVnZp} zArwRsAq%8uE$INBxp$y%i@7H3`WB_5!GM>`-BnM%^VTP|!5`_+A3!_^@gsjY3ljKk zhX5VH=gneM|HO%ab-vGmYpZ+aYa>gWo_viBroV5A2h%ySm)5UCJYu0nlZzwB4bJ?7%q% z3+ZP+;a&Czu~l0>eFr=@V>E8Gz>Sl?zGmF3xL|t?A`&IIH0f3@Ur=XTU-yLHS_Ewe z_|edz0&eYp?u9ASrC(=H771`oDp^V&_~FcQz$=0P6qVFDbO7n7x+vlG%8>v%%Cy}4 zV)+9U;H`iIT^~OX$c_H%Go`;5Lt+$x1#37j%NaPVmtz4hl$J_07iH>LokcGF z-b{V%hG;QR>mx8*>)d4m;KH9@JqCj4ueTvN93Z67CWHyBCrAUYJpjB&+Q2B+=WUG4Ju1mS~j9b1-&#&*WBv;L6)BxIq({guzWMzPrNoc&OIo~>)96vd6 
zq+@PUsZzFpzW!p11*8W-2~{yyp%{hQfBmTLSFi*>5-uH~KuDFNoee$~_`FO5U|no( zhp&M@!2R|S7{?m0CGu_yXtJmvHLes8XO1QRbtMU8;wgP0p8@4X=-#*(q}G5Btd|J| z>#M-SrC|4=ZY_t(_&04Ym=*9q(AH4J3&JFUEe>W8638^WU+!}&f@&}?-R+w z#)sy9#U9CF^lT2z+g?-HWgOnZMFJccooq2C2chMhMda3;oMJ#Xh@3!wVA zj$4uf^^MweJ!;Z_9$mf5wL^F~*8{@7}mDboGodt@@I#_|TxaqeXCMvmwnZb-&!! zcjo^ZE=(TwjZjTa2(tckwaV+}JM6oJFu70sfdLW!@tcgK4N#OZj*>Nh*y%c)ohHf0%CvdK|mWJ5H8v)CDuB z!Fx6N!pYNv)smSqcK>@`s|w3IXebgRsW06zShp>_@J`ZJr=G(G7ZK9f=u^Q+C)(>% zJrxtxc2)+ArRr{$OSN5NNSxOB>m+Y!1z!;TH}PEA3+jE|)q@Y5A8n>c9!_HpI3pdH zeUHNN#^~;7Z>VuxolhU*dT$Mik+=h~mcqqNTB6|9?LUbx5!QWJEQLXlw#u_6rg{k` z_3x!zqn~Zo>t;1#cuzZpL(?L!BC^>R4c&9cjhPs0QRY%d#hWT+@bF#ao)cDAdmJr? zgVucZ$|CPz3#-yNXz?tx(ikn{>(BqlW{UT>EVB376sI12#Kqa#B%}}Jqcc=ahi~-G z%4zho2UgoXHB}kz^kW7dvA}0Bota5?ucSM|P>zs~#aX?R|4mS2Gge8t}zv_oHv=SORL2e-<2?uPzN248ANQaJgsr)#Mpue$3w*a zEy?Y}#fDp3$4Rr1d=H{m+LpgB-*Vn_HY4z@9n#eupPAeu-joh_NW@hIqSN3o+T913 z-|e~;YUh)@rR%VhQv1I%aC&@hI5QIt-h<80(I~(-K7HdnfqKvR0y3viym+FpI&)ts z!DPR4JYazQg-C$Y%mvorMX8D_!L#SVBRG*h4`uwPWS~#v=rfw`J1=Qk^~e zkamq#Jl$zXwt!Y^J3Q0LE{RO{iPIyjUvx&gCcTZh!@W;uEUL<0?Gl;4NAp-f7uH@s z)5#?nGil2NPyH3X1i02@^EYb7|Aad!KfYxdpjRt3)52j<;MNO{SR&bKM3_7 z7(ifNn1Qbtby#NUsBWQ7{#wM~Ab)mGxQdRhuBN=s?TYr286SN0-@7r;mIN5%u*}9l z(Ec38ornQ?^IhG7z|X+d2R0j~&W-c0|H3ChIKVY70br_n+7&U6j*qD|L7ELA^(Cyo zToRFm42mEtLEh047m#_umUcjj;YG(x`Xt0o$p3BynaS&Wjw*T&3_O9IhtbNK-RTeR zLH#^fbJz?YEEk8DC+3?EDZzvN)!|K^;Bz-UQc&rLfK5C5-_Jc?nI`A+;-qF2c%`nf zi1nIBi%R6BZZ6e|!kU+}WRwocHsSTtA@VhM=h+^j`SS(m#?nn{BJ@93cw9>I-de3* zsbiOelieh$S{VFX>Y{E=3P(i@M^*Iqyz!rPH}!f?B_ zJJMg;jougHENs&bSst;L3i;-i3bdPhf>L;r#81OqS_SK0wG)xq*d{$!ASidXxazB- zqB2_Uu??>&?b5Psd(y^kYiAkZMxA&+U$fbJM)yTC3x0}D@prK+lE!WW_h?P{u$-yT zql!-9VRb-e6}t6{K=HvIaO)74!lCyhdf_sBR#x{}tMrn7?}f%aXYuy(y93gv;chpc zt%slez{DpUEGSWm!A!Z35(&PLE1YDo*&urD)Ts-wsbtb-wJ(e5Z%nT1IJ|)c`1gLw z^d`-VwORZT9xi8YrqNt{t(XmCIKC}PI(Centz+Zz|HW&nFZi2FedLzw9LQvcTWxiW zr@zp$8kM@gDM9<#BO<#PUteb&TUQqQj}?)j)M8=qr00fYTtMMQ9n!IHx?^qaUNX13 
zJbLcEHHwPiisxE9WWSg=FyhJbh|8>$$%p^NtHaB(Tegwco%vAHq~tJtwgJ#1d!$q2WK`rDi+9f_KBqkKJnKez}Lnk zOHK_YCN*38P{)p`G;PQ$(cMG=25?QJha=^nq19t|WnGC3wN*ek=kPs)#ApzX9QOml zvzUBc-=R-Be+0gUis-y6efiRhuP%u1`IUOHv5#{UE$@p~;zK9d`mbZ$LaZzUY&s!0 zQhyep+Psr@T>~aI(_wi7CM_giWJj~5P}dKe?jE_eZu6o0GGI>HiC-s9KDNB}5-!*_ zc`+a#K3Iyq=5q0bZwKA;7t!@Y!Q2p*}@jRc+*#tOrna zO6zkj0VU<4w|CiCLx#cE@;#S zN9=}bi!VcC#!w{w0Su;Ng>p0@FbU0J>Y1$Z753avJF#PNj4p^2VOfCH=w*){$4J|e zWDh@0yT=px0SF4T)qRkATi-Yx3qTl$=4dRF4lLS)*IC4sTUc{r*a5ZUfGW=5V#DWK zuO2er7zkORJ%n=5E>Rg&rPE^T?(zJs(ccKqXPwC62MfElc^KxBC3f0@b|vrcb|vz- z3b%%gM?_2iQy|FgdTYhAO-OR2#l_R-D~1#E2phCLR55gwx9INhE9OFDD0A8N=sMFM zt0n$SsW3~@Se5p$hPmnO49vzm#r||ItwhWsJ@PRA^sTDCX$E2SheAY{&f<$p7_Fa> zsQD)uguR(tYrHDz9SewsKOUnW*Sl)jm=c{i!Qvt-4j1LS(#B3~7WfeC-s~&HHf5&t zZ!ZuolZ~YjM98SsO2|%gT9p>3o5Fq)x2w|lW)A>I=n`dXPI1$9iL@38=I;5}KtH~n zU0n)-@1DyjcLefND^MEDXN$FlBQMc0w0Ne^7H!HtB+>u$c5+);ejle-6k~xqU2eW) z58Nu`;;mQMA43595aITWDwRdgRTY25rX(~(PGyTxGzWL8+hISzgd-PDNCx~UgwSX% z-}?m@;1a_u8Lfr{0e6>G=LNTqk5^|)A6htoc)0L>@tTgB5ylP9K<}HIQ}D~Z*;n48 z6&UgrQ)Wr*4qutYw_ZNZHV1G^FZiAY z#r5g5KYXCMaaOs2JQKswJhcxfWchs-B!IK0=~}CJd7Bu z8gLnv=7c?M*ywqCb22TD(ZMF+_U?dBlil4ilMiW0fPe@JEP7{KTZA9J`uQEmp>jT@ zD)^wj?FOt+a41Qz;&$@S`6Vvm-Xer@*2=3GD-1PflNQt*X60#{dGaLpmAj1ZpwEDN ziYE1)G|T}ne{#Xyt}pH=)Zo9(s)Uy(#PsATLQ!*d*zcL%n-(7-AyBn|gUiU4AhsMO z`9SmGn%smJT#EHe3PUUoo2p0JQD-4VpEh)ISPYMIv*;x z*q_`#329egy6O+t3QXb3BVPkMpRkzpSs{X261D4y?J78!GL4Ms7pZXM6yy=(fr39( z64#bu5nNjSA$6~=Nr2|?4F-c=0!KfKyXr1?QoQPo z$FSzbTTUpw=?|nPM^X=NzYjg&Du^G=Ua9#(gz^)ofkceo{$ME%mw%Ft+U!3)axEtf zZmdhk#!&6em7;-9%DZoWa);yQ53tTD^$r*Q_S5w~S9X?Jd*7fdWAY8A*^RTk%D3y% z_$xPB(z_RS+fu~oK0bny@n5?JBvRh3S()vv3-tLTsi>(~Q`(>;PSJSj4h^=C&Mmf& za*VZ)-u?iDPmzc)%i&x4yC@}+3z_;AjTb{usZnUAA3UyqU`!@7Flq* zbyN(quIZ0cI58OmQ`g#$%*LZoG}OCiOdt#ake6Lr`oqTqMzuu`9w$^|xN{j3RA!vC z{|i3$__y$-Q4R4C|1uaXIG}pLSwgkoeA3{45HxC zN>7J1mSUP3I|u9h!88;ek_R~){WjN=YIVt5xGLT8`pu1{z8DH$R7k+}7}msDy|SW^ zh#jo7KHo;zu*(cp8+92hTi3BT9|WRRy&?<*aq)!a@$WX{+l(HoVo*{9oO%jNd0_|+ 
z;4bVQZ{4Ju6K62@b$UzomJ79EH4U7>UE#450nQ|}Qc{Hp9B;i1&UjpimvUX%)SQHN zXy1GvW6(nqtHG0hc-5U5BKg{%VMtsu%0^R8@;NQ3Y!`Z74lG0XYeRcMll0fn`@%aM zjM_*t1aD>vWx~4=vL?ImqA>7uH#IcpWz|xi!^pxy57-c;C4G$%*wtV0W2lyRF4mv? z=e5Cbfx~3d-FoHo{ELsnf~qLK^)~r|!;^0jvhCJy9e{kKiuQim?&Ql~qUy!Qv>h0G zAPC1urp<|Kr=h5niwBA=%G^^51t+&wHK*N z*}Kyxuh>gW#bUYC=i+L*xv{4@%MQyFKkIs%pH=?XJg3w>si^$Cax0dtNe7C>heX!J zTn#`A-yMF}Z{HmbiAjAtf%9hQHo#^vd=2=Q3dd38_HF7p@EPuPWU!&}*wk5{gDEMJ z)hp97Qwg1pFv|4%{V`XgF!ItrJxqk$7XuAuT#oV7Nv>X9hB-df!3wUEOky+-d+A8 zD{)HZa9B?v9T2C1*hKvi3=DOY7gGRh zDtFz}0?|Lk2lr<{5->CypmHJk$K8HI#1jG$*v{OoB(M{Ga}_I&rUzIssimcr6T?Ci zS;B;1l^~LbYo(bGSln~t>5z+lOmMRgQ8UfGq-BeofgO0l4)B%34o(?^X0hoswB$@5 zf6O5xpLU>b8iFnR5uEr3%_by=;;)Qk#g2uLJSq^fl-4TVb?4IO9>vh#GiwqzK0(*(nQCF=o{P_l( z=Ne3~rh1F$QyPoV(Ki4WBOU!f=Wby9JUw@Ppkxth2vaYw(gPeNl-LXA&d#eV*}8Ju zom!wUWHJOGV-54SFdM!F@*4%&j%N)yF#3DW_xhP;+CNUZ{c?LC@?i=oi2aB^xbw%; zNZ%qMb=MVB2fTKQ^x#Gi&Y1&ka-rl)&HG`&WOuF@ooX~dFF>y{!E|$S|FnB@)YCW6 zI@liRc@*B(qW{CwExF$R&oa{=;A;(T+bGM64Y_JaX9>j?koca`h+M`3Z8yhj>0>Z3 zH0q)tiy4|NUE+NS<3kWOb>{s%jbQ}A0LQrf3W{1*?Z>{5$>1saF{~JCES`ikGvZZ4 zvBc*cDTOJWnlsrhGH~VE^RY=CqX-5Q^%)y;rR1YU{u)Hq%m9H3@z^D-B;PJ@c1Wh4=JG?OD18D#&PPlNG8>$6qFr` zX*=2Gr4y5o=*@NQVP7|tFTMNW5S#@*Q6I#-IZ}A~-7&@*xGZg8cGKKtUwi}}7Hn4= ztF_R0q4tL`py0vz!4s zTXm>eS%Iar2P=vvdt~1(S*l(E@^=KFP8r1vCJycQX4bjYyqxwY_+ns)`j#ER`%<%h z&o}w;lZCa}Ik{a-aV&*D1iwy>kktMCcxzkgtq{B-Y2@wYVRwO?AO+~s?GBXW`?{g_ z7+w_Gd-nufa02}Lq_V*lvFzj1o52C+CMkSRHaq`d%%RoQod+($Vx%cN2?T5n@qR@~3Y3Fs78j?9| ze)qIEw;`Rcokh-6uZRO@?kAa2*L#j|9Gwq*JIHVHVFl<-@jpCUn*ed>dQ+M|L+GT3 ze?j;ppTkrAS&H8nQvY$;Fyo9-3eG#47uji*T&~5M*EdYlCE7WLp??o`+U~FNLhJqO zbBM4V5L2VB)%g&QkN&JU+G>eb8%acSb=Ja}RY0t;PLNgSS07TO-t9TW&E0EU2D z9&Mr6L*wv6>5|l!xw=f^u#t$*o*j$e5#agK-q$Jkd8iF6`7^h3U9E|232gi>z+gf8 zzB6nGLHTuqW4vceHBhz9+mU$)@Vh!t;C5r@6-#~(bef%St7~XTC>g?DB3EsQ5<`N| zm$EFHXBx}olD`-_YV6g;7(9mgxlcrCF zX1#9NHs?#4(v*Ix&e}rC-CkX4IG{u0kR-8wH{p@1C5Wa!buhKHbw@_W|4wduhL^M( zWlFpbcp{v%);l1vEz6Oy{o+mj>K5_S6B`O3(=EnFNP=Q7KsxDnE1G_tyh>W?b!+>0 
zB8c?_Z?l*x)gV&SZ=iCPgq}DrT847X+WdJ3gvQ?b1Ro&iWvRWJpSFMJIIIhj1rZc} ze>rr)S^En3nb|opgTS1#@RJv~)_|pJs|(eB!d?}RBK~abKrj zD~k)6q?7iYxLO)O7>+$e7GbAMmuyL2J@rFfBP9PTccGj1J({7w`qAQ*y5RdVxflEj zcr1z=kO}^1-J#;b@bUz)XJYtY>C_K(Q{+Dx9Td;~m3e9t0npsrH;|@M5Z(8$8ruy? zjkNV-SM@`ZIJ2zf=l!%=Y{DA*9C>*EWT2d%L%1gaB?!8J3+{?1^!fqb9+d-2W@)A5 z^>}dmEWq28EQV8j5+DzYfj!oVItvy}KhU5D+-NY7qlDfZXf^HRS#cST$t zZfq)Rbpq33DbftP5LbM8I7v9KyblGuqDchGyC2avxKcw7CX>aC$Wm3I)iy_#XmD4D z8xp%VVAB9e$Yip)!V|at;l6t;BKxS4L5=m|S{5L)MEA-Mx$YPsoc?M$fr9X%!&((U zqWg>+_+Or2sl_{^tWi&%w%&e^F-7Ia;)pBt1*}jGjh`zAsNieZ>TLSY0KM+18_)Zq z-2&@vr>U$~eI&6lAZo5shqV`*zoIc6?d!I#%3{T#pEHoPY zt3f3n==Ewx=v{~1O5(4F2?q!LdFADbcO1yeF^?Uc#rgpDo?qd#{*J^{t$BdbPOj~k zrFcdSa119_lfu|aqf;5A2?wo=coodh^*!3vOMAtOQd39FpYRFB9`jv0!xPL@ftR+d zm)PO>vBndWqYhU}kYx*v&-yNlefQNYslpENg;7d{-=2A-FmVj=52*CrWsgloqK@&{N~b8Fam{~g0Vp6T(vHzL z&k~xY3c7|s@-w}~3(Ut{vtszxp;Y5y8o6cyE4j(hkR0^jOz9F+k#@3ocFEb4mv*|B z4DQgqbK`_3Vyq8?p%zFh%~o30^{xj42I6bFw+0_^ky&Rw{{tT(4h9eL_=@i%RunB&iZ^R1 zVVpLJ^{Z!V-grbZi%0HhaHY3aDMFND2xyDTX8n|ya(byLe9uQdA<>wNMJ)%8W&EN+ zMNE3;Uoh{2<)oA5DrM8E8wG75@10=!Fs1A@-R=xE59QFry-UBlORj=sozme_6$TZS z>*YmV@|-xOnbQd#uu~ln->E2%(JZcQ#KPp!<-mMZWm*OPl<|ugvDVa7$h%aYWmHyzWhnRU%ijl%_x~ zkeljMkeyS(&l{i6*VsY1yq`1jjADt8<^|5}JS5w~{J;RQ`K9ZktiJh1^L`Tt$26eG z2hP{V5F8|ocJ#-B9BXIIXZ0oXEsD3)9l>uajVDk}0B-{B?ZvL95jS2@RjWHt_`E?# z;s^eH0YBfEve6HK-R+O_Mlj28<46OW9XIN?u^Y5(tBkKEzPzs=Hi#7Y9`0aA67ClD zy9#^+!(Z?5N`&CxOvpdc?mhInJERUGUApi;aKdB34lt=~zd26%eUK`vmS6cykI+_v z#WXNdr_>x*ESAV>rSQ|W4RqE;pRvw|EA<_TEzhgW6G|G-6npytTXjj#Q# zjZ{_C&>HSO8sco~k9Q($tF*Wi13i|3Zr9GEPU(PSoT8Kr!AK$vq^%%@19mxHzlMLH z?94uSKqWZe>4D^CL9Dx&(CsG0Zp`9&vJ!}+Ndfe#Rikt?XJ>M$@QZ-r2g=@ofq_4l zAnW4Iz;@W3*(ULHR#Q6hRFO%^Wi=n!#a>3G5uCkr z>4TR&xp9J@f}?@zg^Ln5pI(9qS?)Jsg5g?YXf&&*`HOKb9LY=`Y)4v@3>=+S|rKIOnC2{I?;HJ zxi3APlwd30u64>NKj1?EQC0}sXps=?A2qO5z&HY-5G4nDSP?(luUF%FwDJ_-n2%3D zRZE+ps%Xl$oRJ@n=Xc972)?J(S62!zZIK93PgjtiHfwvJmzThYqg)Lq6hj~+beN?l zBFHlMffz+C_(BN)eyF9kWXtnbumpYg~cmsa7<~N(Bf2{q{ON$Qz0PabLBCB%*QI?o}!q 
z3;>^#BP>6!ZX!UU%3XVNH0t8AQPjmQ;b3d64?{o|(%XKVs1r_!_w4dgM!qd@xx{>; z`Z8Ug={MCw;4&X_j0sP9Hch0r)8~)Ss+tKM3y;z(-|dEsS@fc0B`Q&_e7dQ*2M$^v zY||Kan4jVvtWbcE|GrC(0rEV=hb3`5;%`em_0(@7+e+n)?GMIx2YuMFK+GDG7n(h!zUCPu09~vWzjV_yM}0TTUa(~%A+5f}gx%KqZX3$VsQ1ASyDi0Y?S;+bs3 zR2=c4B7xfzfR00yG)Q&GSO&d*yX$zCi+;6W$p(;s5b@}hXs*yeU~}SmM0=a}R9|U4 z0OSko^WVa^rN53=P@w;y&qM&1^#?FJJM!@CJng*zf?!{Hi)PWe@PJI_poW@$U(${W z8?guP<^J~3`Y&dYD6)n_I|zMD;BJe+H))7koLWezQ&@!s2%6-2_nX1624dR^AmJtJ z8;TrdxygW+btge^rEb+In<25>%{3its-nBAG5DTaNZe)K_x0ssN1cxaKyypgo(9(ygI=EvvZ;Vo;V5c zQj^J<_-mT0vpL{jcd!;B!GG_ksA6Fm88@?%J#UmVEq1#6T~PO8%{QO{JtKdr1=N)v z7MZbbK_XLk)}KxkRLj)#YTE$+L15*I&SLtkXwG4(#=fj%rzcE+B53(_`!_qI>-8&* zgqx~tgN7j>x=dP?7~Bn+2TJQXEs&QNKl+2z<<64RM^NsOncn?iWr*W;gNniPJJ-Z~ zB73pJLIYB*G^6C92&h2{v;$&{52d~jo}v+MeXgepVbEB3^6Qz-Oo z*p~%9VW!^Yum*kW9=pq*O}*EA8Dz#;bxnXhF>Xxl2LhKtq<#ntMqc+Ul!1TFvkOldwQ_rz zJ!`bS5J5?q>!^kHU|RXpQpbX-rP%DU!9Y){c7Rj`^y83j7q|( z#0Xi%nBevSa+F53B`qeG&B;h-qC)y~j;V7KVmdT~doe zk*e~QyT{tvjBgI^-KBD};x%%*3QE?Dt-`ZAh}`_)j}}({Nb}kpMleungY;2?n`_Km zcshvXR|iU3B9bEo@ZsRNaGBDoeSNvS-|||Ef+`2g0#9S@d|Fy4mS^kojHd`(ctuSU zC{OB$4Y+xGv@bY-&H&FJ|JyhhXDsA-b?#@zNj_bl-^G!#*&Zo_5A+hY6Sj7f^Q_)F z%+_(~w48wq4re?eaNjjsAM$d{@^!^~p`FnuJv{ZSi(Ph?BkPtUviXqMB#?e2O6}N~ zi`C0^-KE<{nWO@dzbgKVwbZ(-+VY=Wa;&WWm}1NC6KTevj-Yx#Q6Tzbm4*)vNwPA` zhjxsQJ6I?)oI*w8tEPY6fmYccNw(z!cYhcAH&2{`3i^c}+Qq@Cg+@rbE2xF{Sm7CG zEWf^-k=AImk2ZBwbZI|madxTawoV4^#dwdG??5r{d#d^XkkPtVchNO4ku{y)Y0>QN zA4dYkp9?)hkOpqh$w=U;vG#lGgXVDg4I`ws%8xEu+$gJW3xp+n*MbHSOd#jg!<-n; zuKNz44-34AkYs`=L=n4EVZ>v&;KPY2WsIY;0WoASbzmPMkurZAfY9i}Gmf6_M+kj5A6Qk92nQz1v-v{= z)0$mdrih^4yKrs$M5I!B^v~p!)DP#!rJ$KK-0Jt4h0A0easR3KIP&?*C2_0*Bbp0# z(V1a~yw6xx?PaK9je&|`!SChZ_lu@RMYp6W8K7a7z}X8_xq`BNQAkgy0q8Id3|rLP zg3Po^i=J2>6d5;ZFsOnD9mcd(Qs^kkJlfpJuC!8Fhw7j#IeSR!vy`P)p0!ce#uYYq zMy#i*#6AIdwG$ixU~OAT0owl4b>)LVPxNvMeu^6RA!tUHvLpcQ@5Lj-0b#$1LA4LP zi#Aw4KcEAZ(nt1)v`CaG0e689a`2&j?2|US7gJ4;C|YUD3b?RUnS$!k4!E?Cr?GrZ zqhbv)fy~hrh3?NzgS0M4to0V!<3Aw+5~wqTXb_O<&9%-_D}x;^lvqYa)oK&;(UTM7 
zfdLgJ&&Cs6R$g9?>lgJZ<_2x|c*Ou*S0@gt>q6Slcw;WZ7i@i&aZa+6cn?T4TiT?HLodQYmI?d$==qaV_m3o5Na zr#}H&CjZW(C9b)7A*3myNSi5yC5JVz-^7XHGU*{`z z2Psmq92oNrtiod(IISFyhJ+C?Mpl!ZN~_9rhgaLbY;TnxXNF^e1F2H zQj++h^)gwz!uW<}rU@Ga zP&h-JLI5_W%+9lzYdS|Dn!acaEIShK3~cEfYkM}$;UL2960x6zXF5C2YoDxkdC63$ z9!of!Ov*}!qK=D;AtIAF>O}~`o$y~XD8Ikx^x=n=r6gidKviQR ze@1?MNBRzQ-uP{h-*~3vNbB21F1T5;U2P1q9|f3d#DKj;Y>P7RiLxBI#5;c3rNAsBhXh>})!nVfeXUERnm( zC4a=Egh6w=a>b&MlM_QP`S|?V-0^>6(G&95E3Y2uvFZdM*R{Y{5Mo=@$d^Z1#o#K+|l}{PTSH#wH8dMQiH((Js*r{ zF^sJ^UbA<|t}lp~X)-%>#09_Lxrf1}OG{HQne3-E^YYNY;JmyhB4I#|rfNdUsUD=i zS2v1_f0$-1nYV?ytTJ1vkbVkqP)8=(zm-%`SLO%J9TWZeBqZylZY`&3Xm4zhRO<09 zechtt_WU2%(dcLRzYsyk=xce3NJ{DZj^N6ZwO4&)6&}QV&Mg)Zos3i_@|;>+T(GN? zn1&QabFR`SI%^Xlm2q|uDRS{K@$m4P*PwH;4wofuY~HFpPq9F6p0mpIV!^|6dszQE zi5<#2tVnG&UGS@_+4R~psqjjRMIuK_KaaLHSCq()l;|wv-HFPo&QpBOABnL6FV?%> zx-AaEA?u`;&@Ch-LqqWx4YyfrNvOmnW(pK2_4EyOb&!ddw)Z16C$hG8sj)wJj{Csm zYBq5cFE_r*lYoW#vz!@_2D?EK?m|rV{wJX$35*@j1_xABocQKvg*rtA@sN8dDmz@x zD|^?I6^##VCaHs+TI*wzlwf{k^iV%{TF8{dZt2}nb0D9^S_XI|8TdQwH%ss<);iN?<>uB1tlML*No7_)z1ZJjcuo^9?p(yzKHgiT597J#4 zxFc!a*6QC^zGZ7Ps&l<#+%DEx=&>p_oP876N%Dm^D$U@|s?1>eKq47BEjZ#kHea26@AMdOQ+GfiJ&a*qzjKE zq(Vt2_4Z)4d}8i2W0#-V!lLZU*gJ>A=nSrh`|mGIY|lYr* znkKuGBVz+LBPlQw^Tvx^?9Od%zfR}Lt8iN++|o0mqdPR?TEMC`ULMFO?vE-EuKc>S z6@QmYrv+>8!RfL^@_#ye%do1Vu5EPDtbtw`)bz)>@ zzF$Z}thBogf(89}JHS0qk&c&D`z9eV5lfvTDf{<*b5=RQGu!rp6W6H3*c+WqGx||U zPeoX#0SL2|M;FC2K8;gZgq znJMVF`l@{Wy~+E+Gu7CtTD@YeuEV+yatlaSAI@7$1?}1?*A*`$AH_D?UsaqU8OBfe zUS_o4!Ts8?yi}!h7lPeR(Xxt-2}7;z)G?klHlo-Z4s;pu3cZSU5ui32_3YRx;QwX4 zyNbH5r?_#arg3ufOFga&`EkyHkayzb$2)721-C=_$C8qaX~HxRJX}_p8P{X7X3OqM zrT33z>N%Zxf!HLeo$d_4W0oxQ$gG#|f6dg*k7UTj}Jd zAdUYScOYBkjtSEll?dmPLo25pRwv#XDIiYiiFnzQ`NGEL?snaWz>7y>rzom4O?^jM z6I3VYmvUxJzz+Bl8AV~LB;rP-i@a{B)qE&vjDhjg!MYVyLxbw@tY`ljXj8B|KUM4Y zYiBw1d3lX=gu{-tARScbagGFr$>r1oH|pF>AjPE2>A&M(`Z^-Onf`=DH^Uf#&u(X!*4X-s0Ue7bO;d5PL$nW*vS$Vcf$t6=V7fg7iQ z)uZO8cds;^a9Q3aEWf~TJ+b==a;Jh5TP!_DxNb8Quk(?g7QE%x`|ZsnYNh`j9YDeH 
zpBF{tw>tWm`=4UqZ6pltn2jlze)6Y&c(Bql%WO4P`HNj)wuN$(g~22=fXjW?TA;^w zo|$<_rA7%IGv}UvrARm`s=;9}ae}Cu;94lTugm=REX9+pI@=B9pxLv%QIyJGB~tHn zQiOisyX{ci&Q-35AyZim4ejG$w;yECWxqF(C{fNMTrTi=YU__Y+1QB6ZJfg@2+rvi%Q8Xnb23!@q+3 z_F;PpHNHf8$-^a1$ZV%lF|{5wo?gtrFPYb4n(Hd|>Z{f%;pdBtjCiPAjF~~#WZ)6q zCFg#H?U9O3FkbiC^X-EiInD&ikwxF%a|2O~KTXnW?{2Rsn||fp-q6SPa(!CvCZP2M z`R=4d_dy73>n*=!(o+d}N#r5FfVlG%!1Wkdg(+!C*R;(}M_)o;h=bCqFItgsQ-su> zU#;fo(@;@^BKGfPE9# zP4nZ4dWo?CO8IawO49){rrLa^sB7|J?QDV7mvcPVxc78NM*g*Wlhfm<*7d({k7j9mj99h-ZmTU@7_2*g@9dceO(m%n zun79^LNo1e#-YuLUVuFO=p9Wpky)KjCq!^s*qd-IzK?!OHK>zyu7u2 z2fDr4IJzDZ(ubcdVl34kkhXsnBdC&%tnAk63#o;x#jT%QyxeWkJF|$!`)@vuBJ&B` zw=U9ph^qrSHJIq@Fc{l=ub7S9#`-46!Sxx zjv&w9u6wa5sb>H1p5O4nk%>oYO}N6MqZ2!enhPcH#ctthq1HTGoJQ3`lyA3nBx;sL zOLsjkPt9~1DP~7+3b|TySg#e)JIcqE$<(ZnvijIZN@g*28Yuj27+YRr*uGlU6{bAD zE2nw(%=yEN!vhNU8QML8O`LUa)qOoY;lDSlCs(FdGkg=4Z*Fd^FhxB5X{i`^GzX?7 zVaF8T+|K6hcm>i<#_Mdh4*LKQ1EiYb_Y|dQG9@<_rlh3OZK2Wl`t^bBjSzvV3LQxh zAdnGhJG1ecCwZ>3HSp&)$hne|56CG)szPzW)6HKEZ!WEOr^+=qO~l`>Jv-cZ?Ueq! zFzqvs&5y|Sn$*L2$4F6U8xBVBg08=M6{sX@YfM8bn?IYMm$*8&?Oi)}cJ_1(Ob~@p zQ;p=R<{CNc#K|Y20ztTj5e9#1HTmlzPyeimRMg7S1Z&}i`GJOgiV)Jy{->+feBC3X zoEW(*ZRVGA;fMai2Xru47r--;f*H*$squ)B++5WlEGt$3^Q}Wn=Qdl`DcHDDZbsNE zwrHaEWiN-VEPn>st6d)3`ye!e`_~#?MptDY53&g&mZ#4kuR;k|n~exa#N!&3woyNjmW=@ax2UvPIwOn;mY!1`h`2W6_2j(a%;~hK2_O z8|Y(uA848XXLTfqAM^J{#w z6xbyyWME}j;@H#u-@lrL}UmDS5ct91+!c8

=3_t8>4YNCL=vRA%a;G)F1Xz*38>NW}U|95FJ<^?nAY90q1 zIx3O@^nkqf`XkjORe#)=Kloom0U)p4rtcf8h;dx@%4y;W84(vfhLP@QfdlQEmUYpstgI~Mr61}z z?sOT13>nLEcVzc{EQwg{{C8=`61g;0d(2%UN9l(Y_9P)^giuw7-M@~&Az5U%}|^rwjN3?Fv3 zC#DH+WNSYDD(US8FH>M(Ajq>B?-qYZVz`h=2&kfg??(Qrxx+Hp)TCOgfZrU?Pfi8# zEKG6ag2^cW+fVDwPMU_nj(NtI3q+JiEN~eZs898Oi=M7Gd{vyEv8EPY)EeqcY0bj*CG0B(jhxB(1iX@CUa$!y zd+oMHmcnk`-6Mo2)S}3cPAJ^KSh>Exx4+{gSpW&b6aDZ5oqkN*lZK%0HiacaY`FKu z)Mv0i1*&KF5}w+L03gxPiPayGb?jZm#=Y?107P9Gc$vja)4es4H5bo0bW#gL;1KwL zcb#_g7`E-qZ6|{q(Cvc^Ao{Fp6?Kzktaee!vJ?*>0n_b;bp|IiR=lhy0 zFZxdxYy(2VUn%r^8Rus1pW;aJ%@#~Ge997vFKFhiGeDR>98er2NJrIy*)SLbCCc{)3Xr|3}C-f7O_sRFUV=6ne^`tdn#OOd|!v8$PgS@!si^LNF*TelvX z)Acva9`mYG>5eziVVNByHzNAf)|)3M57(?mxxFPcMyht`{QP?|fHR%1E|N^Rwv@C{ zwUpDR*~bx$d*&j)zz1SKwwR6EK9rIYHjGQPb`P4W@)WbHRjv;T4e`|whk&42`XeO& z;3^xt%X<;?Q_;lO$}l{=wpf|-Nz&!A z*70pO=(dK+!N-pBqK8fVthAdEbg!Xqs0q|)m2f-m4MhKW&*+87MM1RD_u8E<5NAtQ zR}{BW8~A_&k{|IyP`8rZOA}T({@}EW#pUjeTNAMq5eL+mIF2rFwZf^#$EG&{lHN8U zBuEfC9xYDtM+dFh_$kUL%;U4RUSmBO*;j6xHHPs%-aI<(?Hp)F*)d+1cBCmHYyR9c zayIs2S9UH(Hj8J={-f!t7v#&_1ovp0BFL9L{T6x+Qbg?VOKw_P$1axlOQ_?6u)1!J z+1T_eDYYG45cq~5wWi@cWD!y5(=C0PBEi#+bfW(~?0^UUU!VxXwd`ioEQ zRg=5s2d;KWazT3CyTElv7H2gn44#@^JmMX^lT#NeCB4gV!4NtKy{+( zh=#M9d$cuEUzPWdlV06R^-}%5X1%-VC;B{fUr#|jZLA8!0F@Xqa!X7bCU!G=y$({j zre7)gaodN8s65r$nzov-HgxSbOkHy=sb0VYiy)tk7RNB&Y1z+(KNp>r`Y@1-3Qu9@6M3Q)qKK$&~-Md0mYWrY9P9Db2qfI@~&>74lprcE#6W3 z|I+Lv;X5Nh6PMnI8pe}l(=VN@8wVbDUFU#sMa;$E}0)oZdzuG3H-bX-3{r+)XkHjtjSu^@YDCWmtIj}W!S@LpTsz0+%) zvDXr9lUy#*honng<|G-iN8J`g&nLHdnFlnhDEdv65DXWem^84O_kH_Nz_{$@Pr%o> zKDHtINyA#_=8!`s_eNZtmH*#D8ikWx)H3ij4X^%P*e|a(5&|7hQTL^fY{|T6pkVqE z_J*paD6w}H=?x3O8!{nNBmiq5qkDI4JCw~;rWLBz@~d0v1*5U9(~aCw-HK59@ZP=; zc_Z+>TY4_>KQJjl4M=mC&QE0BeQ95wwIz1?J=Yoew!Fh5IY33%%`DW}oj3<>M)ag39XKaEgE z?Z5#++0hUDzlWy*I+%LeI4)?C+ zKE;Hwr{Zu3JhY=`A$)Oi{&!@*|7^Ny7k4e!%TMuX8#`l!*!9ScR?LjNjD%+Sj!}tG z{&Oih{3_3Q`FN#3SHTIlQglEvq*5EI=?OxnxR@SPVtrxif)>wTdmdl95CB~|OvNE2 zsn+iL^RfwoYG1$Lvgvl>5A`-XNhG|N)(Ai8Ut5~s<}CVMjZ^5jBiW=?J@QSKaB6z0 
zb;5641~2irmk_9YdOK~0qzwQVqQ%ik)r7hu(LVY%9dUonTYMR zSEM^4>eN8T`}HGZM&u|Vd`yDgP)33E?{HVcIJh|cAgR1F4_#jtcKLQd#%@`koZbfF zl{svp%iq|r`}mX^ZeH7f0--y9q`1GIw87eS7$#PjmnK{LS3aDPT2ETA`ho*JOSai_ z%7E~L*4xzNgZGQF55Hy8aWnacz32irI|kjGcQG8{a-66r&9%GIf4VRE?(xlqryL6W zI2RNSZn_QR@Y$Yxqx?gt%4K;f1W*JC`dw@o2!L7~kqZU=?u%N5q7~Lk(#JFMKO(@g z5s12=Lveog%z_HV)XESE;A-C9K7MXIAt*$guESu&;cap6%d!)=x|qK$!iV32`i8X3 z2UMuFy`8FKuBC&^QQ(o$*L8TO(Nl{#Ts%r%%$KlEP^*W_RFYXdL6QqLOaJ)XYHQxU zllfcT|TCKk>p72DzcJhCJAmrT^H`y6&^gpcmn5WwU8Nt=S z=j&m|fB7-~>u3LD!#~29NA>?uR-l3Zf01(b=-%vt(CI5J~Oi0CD@0sjf?^EaE<{SUv`iSYaxPct+ zu`fFp4}Sw1^u*EXs0c9}e2X+PB_#!ZI^X=xH*ygU>n=wOMuPaCwPju_qm0*A!z~sM z#$6q@Q)JmO$VzFfr*D7x%^qyM&8YQiH^9MWDeG;IRt!_y0Lx!|bg5ouX&hB0^S`o;|lZ>Ji} z{|*Hza7VOJ_Wi2YDxvV>7gHvyC;S2e%#p9AB&@bZ^KRy7h606D8OLYwxq9`fbYbl(oej4 z9h&n2Y2sj}E*aVnS=nUxn}N|BeK5)Fe?$4|o+OPhu?Tn6QlxB-(>8mI-~PK^cVL+mgpnWO^8ftHHQ>$z8JtKCS_onqjGe2@lI4DQektL(Y0LT0~=Gu|7n1_*}>kV$&AkPKUtjG+QPDp2glsN*2N{`(Gp{Uux8K8rl+01;iz6zP<*iHsYTgpbh*rz zXb(;W;}#vk&pU|a+Eu9mU$WQFXb}JhCI;``*{5A-$pn7*=b05Bc1q8Ih5-%c@CgHy zyd_HvvLe-cvw1=tr~?N=<*RJAkRxbG&D?0+nobL?T)IqQt3>X9rwwopiG$}M>-MWi zX@`<-*Xp+_q4wc5uY&QTc7<;P4==TpmCNFbB^qGk0J;fC5G;B2LKti|{ka9)MMOTE zeSGfyAJb#@sY%DyK6cmY=$WN+S`2LLPrI(C+{*+tXQ*DhJ+Uo~X2`%pI7Zd~#q8$U z7zh%w?(S}9D7;KcrsqFb0iF?fq;93=kw4#L$r(5BwvC)4+lD6}H|FJy7hs@O=h zJ%x*6XW2+R)H2MiAV8{#=NM{e6gV#}U(ilX>5Yg3$Oe zJS>Kv#@1^Z5*a@F&o;Scaxd8y>g1@c$ybHKpXtaL1ElB9-s7SY+bT@~kHr`wC{L{W z6P|H9eGBO?*V8?6*fkk+VxG(pm2jhyv9QpGI$CFnHn0^Z&AK+*5)AU|jS?l09qN9YSgw?d5jcO2l^rzAf zL&L&okp#O)K%>k91iGkZnX)f!I_jl9Jxk`)xyX8^hUu^4?fd<{vGON@cc$;YexALz ze|$xFdO}@Qqty0ZX=$nDjtmJ7AA|=#HcKW&(^kR3#vWQ-U2C1}S2Y$c;&^hpfiHcZ zW_a_%L$cvGBJ=TPUP^zsUdUu~D&%#2eZ9~T!zAw{(eR2D;U|wkUxxB=R&iYtm-ez= zvOglB&Vu=KektF*JajP(epjU+*QTHf*P=AzM=f>@gQ+CSs5I!G1___9ag-WG`3ofb3()Q(M+L+(OkFAva0<&A%@A z_Bpz7`~DN1VsR4(-b<2zW9opVo0rSLLxD8|)wIu+dYH_so7?HYwsD}RqI+3D!g^Kn z{XXoX($dnNISX>*AA-aHQfA^yBHYLF%F#ky?bE?Mr8pSEQ_V~erd{K4A`4IZ9stP1 zM69W++bPqdh)bm0Yel=C$W3D{ch@-jrORYHLidKKTe<(&MAPACOi@I 
zR?{}(*r%o&ps5AO6kL7Aj}&!6r5C)dpit}UJh4Z($)QSO*Hl2Ewe=s}|3?tmkTc{b z%idp!;(orMNp~MZQk{RlsMG44kdS1UclC~{#RBjSxyV0?U6PP~w7VeJ zbMt-k$G{IZ{^j1z30!v38>;yhdx`Ig^9HJeiq<`4XYqIi$=b%eUw2z9w6jzif5RYi zN`5PJrIsTKv4p&BjirMd@psQM|r*_tRGRANy}*nm@#sU>7}H8-RDx^@vS zOe_hKaeT63Yj1u-UqO?N;$`Pl(fJbditkh3##fWPpuSgy!0E@>r|;g;KVB5P#Xas> zjkJrDB|m3gwa6P5J`+zqO$7(t@fjKf*$t9s*xcM`Xiq8YwqSUYgjdYT^XN*Z+ z#kd(IM@5YeB>dQRFwAWGur#HZg^|tjgx_XL0N#Wj1cJxtAn9V`S0RpA;+rA;U?Rc4 z7Z`B^df{_(I#pJ)NUXKHKV80=GmPmnH{Q=jX2+Mrs(KL1SPZzHOf7g)&ilyg(~`)d zHq^Ir@b>-~(7@rYX1zb$BE1`_NVdn{uaS_H<(p#6I+}xa1qxGdD#!Dm;N!KM9JzLZ*J^jBTw0ZBuS%Vxvxdb zND#GKQDlJzKrcl?QHcK0q0sfMpe7`aWME^KW~|dUsfS1R1pGCeW+n?fQCpHdg%(GY zCBVFbU^cTJ^JT~&zd(yBP8Bd^a|a^qxa}OVMoa%b8>JMH?M|v(G^QPD!?(;NR(H8fXRT#<9GsQ%viNG63NR zRO-@3=5sjqeO~0qV2y!~ue%t5dVdv5SNqr#n!mbd-!ZpJS?w_bwz=HwrzDl|Uo3x} z3)WPiMQ5t+h*DkcVc^_gAC$DuH|geDlEa~vaY0gRns`n#0#I%p6s zbXc*&3{a^j;?j8fn{Fw!g&DN?2rAEAa0a?r{sEq~0AIbPEVtSs8U$*_8Dn3F%xA`4 zk0A7dBU;*i+r(%I6p5E%tstja@86TyH!%-5iJ3E;UZ;|_te zh3~;f1gRk55?|had&sn({aR}I-{Qi-j!wMxJxUiHP0orc|YnZf^{>+AkI*~pQzxAgIa@jzNbh^j|6A%Kbsd1tLR$TzWaa8bK0WKc@Y zaPt9MNz)e^dlcX@5t_=8)~JMW@HvzC@)%?OS~zWHXBMoeCKgN=%p-#u6BeJI%ZQnJf|& zgyz_Ej2bah51{cEjl1SL_147rlMD!wQ7mewvhYbK{K~my-^fMm{O=RfbY=#IrmV>R zC2k|}w!sZdW2hE#1EJ?!#CkBbQ*T4X z9(FyI+&GB53sBFA{Yd7cwXU3hA^G8Rvc40_GVT16?a<)>t_N#}dw|}sJ(9|q< zt`*Y9QEukde#3!828rMHUlLEX*zpcO1V(67!!cb1>H=xC0{$BSp_A^+UBVCRAK5C6 zCvjLxkIZoX+4VD^Ea%O4q=zZYN})(jND1n3ldu1m*5q=2#GIDrb9(m)>V;A`(No(9 z6LG`S9ar8mfzSmz#ch6F=97itNGXvhSghexvEoxg-GjBaVDoH+>hOqIBtKi>-{t)B z)5q%?DQ#fMj(%wbNkEsKrm)y2iBl!Tt~PVCwBUFVo*3d0P;PTj&-N8tB)kQJp`1Gt zPb@+Z_Mt3H;>}j6d3;#qU2mV`bz{tErLZ^*_V5kJij|LWW73~3vHRFc%f$l??W`*j zr!P?SZu&g|HUfcVmXVdtTbHlA4;$yGxWDjf^-+BYsv`+{V*D+Ot_3>M_~h;G9=Kk^ zJ=V&7f@=LhJSTkKwO_;HHzP+wH~?4~_&b#+-MG6qPY*AD)+#3~@`}8*mDPyIc|qv9 zQjU#}72*;)mERK|yqfF~fTAb z#V4aMGBT#=5D=V@agW?3z!4E&fZ1aC))!4IRLU^J#KS^*;vCHNPK8|hd-w~$FJ}Zl zd-p_y=+zZ}TQd)UtWmA+`uvZ+X6k<)YD!KTs8j@V5xO!GylDO#Nl8n2MDk@dRU1IH 
z)#kCljMzOS2pb#Q$w&6XzpKKgk9o!Ic(`$WBDap**XWuBu$rW?*IdyqJt%p!)Uk(#))^4JUgSkvJJJhRWxQ zgt5IG`oAh%3HN4TNo(^Vu+HU z=H09zsG~L>KV!h7wR_~%1c`nTbH7Aj@Q?n%-cJW+!jGiD4GP5oFEqK_=f3>E&_={W-bNR1_c;9B7xhcQTPz|W5JEUhp3b?8bGvb;xRj}M16iM!M+As%9#TO- zojc2Jv#jYi7yxrAGd#n0+qYTP5LHSJsbxmiL=(S0xYJrG`rSM{0LY*R4{)2TKgPyf zCMy{!E3;Up%D0Ey48U(>^us@iM+`r3WsGJ{vd2P%;PDLbueia)XnnzCG{^yoj5Znn zq^_uxoZ1E@3)`*=A;nHws5#0Hm1V0gRNI6e=Z1d!ea<>iq@Nq-4D{#$At=C8ECZiI zKfAs*Gt6M3@X8WJ(~2hegP2DSE;DHjbQr9Z>Cxe3Wq&|($RM0T>z8;4sH(Kh2Xg1t zQUWnQ+F*&{bb0f3OB^R-J`voVm6^gnLk-P1-u~$w=n`ug)aL?4=C43R8~*+>TJd9B zV-RO=2F<_?gnYRu^?Css?_#|%p_1Sx3t}TTavny|7;O*B9p2xa32x6>9EH$MI<2-8?y3dajpE}qoGS#kygtu2ir8D zz&jeUvbtt4+hF4Soxc>HR4Yfn`R&s~Q8R?~=sdT7F>nLNiGnv;|TZ6imEXNlTESkJFPKo#bMfo5jz*iS2eE_f5RxnIS_CT1oLRuH$!yUX}!M_Zc5 z8Eq#qi5z5e{^K)HA9-KMdj%H?5{hf}C0%-``-=&GKaja0k(Hzfs9Q(=^30x{Ron1* z<*hZj2=1eArY66Y7`R@gS$M%pzKeG`FWHPEZS~-SSlXZ1UZ2s8Nd4Kv)=mc zygll9YInWYQJvcX3z01v)CzQ^11zk-{lOQaynGV<))l{{bhtqJWwC#l|{uj9b577JV`srExkaC=?iBn`$ z(AC)(qTmOdkv^txA|g2{~E~B0HFHtusQQepiWST z2rg-Aj=uLfU8nwOb#V~6@rbiv( zE+GiGxrvYd0@we=u0bW{GLmQg2nTXP(S%mzS9!{dEd2kkBqarr&+_gLjnd-D^(t}; zm5?{$#JsA;1O*z$;mk9I_RYSB-+!r?WcafxhfypD21A#pk6%9Yr?b$yP<4=hSNC~o zep*?boCi)95?mV340yoceY8z>M7$hp142PHRsW0=V0%G^H&v@B6!Ow_AlNf?5|nMc z6!?Uvnzc~?fo=bmr0b8Sk@j5=t0@z)7eH2S@yIEdYd32 zZ!SoS3a7rDJB`o8rDmiEZ6Rk6Kx82>B}zU^db5T6m#h4Wx}7@T4iA4(khZvIt^6;k{s)S0l4Uh%Wn|)2Kb~;$hx9j zM~9>Wv2A_b{!$Or2>r!TibLJ%IH1_<3qPga2mnQyX8YuC)$FWD8yZfpcXW=qRaGlf z9CacM9BB!@rTF6~De^*9=F_@*29eZIddmp%d-ui0gj&U-h z52%A6ILND`26{obHtPJ0-T7F5@*yCDb@53?2lCcv${hGYs^<&NFLWOrGk7z*aec00 z1*;R#+iCUWH10~1NxsQnT{FJt?{Od%}1%XHggd#Ca0v!?0 zUxRa$e?-Nl9K)j0OM%cQjDC5472Ci4@r~7%_VnGYv0gB@uJTd>N3ZhCfXt+W)8XGP za46DF*`f&6sfOf+xo8aGVSg&TRRV1J-rrY zU~t(9`SfvVM;=lEZ5WUHrKPa21@A@Rqf9;cm$hu`_ transports). + +.. 
note:: + + This engine is under active development and is experimental but it is + usable and does work but is missing some features (please check the + `blueprint page`_ for known issues and plans) that will make it more + production ready. + +.. _blueprint page: https://blueprints.launchpad.net/taskflow?searchtext=wbe + +Terminology +----------- + +Client + Code or program or service that uses this library to define flows and + run them via engines. + +Transport + protocol + Mechanism (and `protocol`_ on top of that mechanism) used to pass information + between the client and worker (for example amqp as a transport and a json + encoded message format as the protocol). + +Executor + Part of the worker-based engine and is used to publish task requests, so + these requests can be accepted and processed by remote workers. + +Worker + Workers are started on remote hosts and has list of tasks it can perform (on + request). Workers accept and process task requests that are published by an + executor. Several requests can be processed simultaneously in separate + threads. For example, an `executor`_ can be passed to the worker and + configured to run in as many threads (green or not) as desired. + +Proxy + Executors interact with workers via a proxy. The proxy maintains the underlying + transport and publishes messages (and invokes callbacks on message reception). + +Requirements +------------ + +* **Transparent:** it should work as ad-hoc replacement for existing + *(local)* engines with minimal, if any refactoring (e.g. it should be + possible to run the same flows on it without changing client code if + everything is set up and configured properly). +* **Transport-agnostic:** the means of transport should be abstracted so that + we can use `oslo.messaging`_, `gearmand`_, `amqp`_, `zookeeper`_, `marconi`_, + `websockets`_ or anything else that allows for passing information between a + client and a worker. +* **Simple:** it should be simple to write and deploy. 
+* **Non-uniformity:** it should support non-uniform workers which allows + different workers to execute different sets of atoms depending on the workers + published capabilities. + +.. _marconi: https://wiki.openstack.org/wiki/Marconi +.. _zookeeper: http://zookeeper.org/ +.. _gearmand: http://gearman.org/ +.. _oslo.messaging: https://wiki.openstack.org/wiki/Oslo/Messaging +.. _websockets: http://en.wikipedia.org/wiki/WebSocket +.. _amqp: http://www.amqp.org/ +.. _executor: https://docs.python.org/dev/library/concurrent.futures.html#executor-objects +.. _protocol: http://en.wikipedia.org/wiki/Communications_protocol + +Use-cases +--------- + +* `Glance`_ + + * Image tasks *(long-running)* + + * Convert, import/export & more... + +* `Heat`_ + + * Engine work distribution + +* `Rally`_ + + * Load generation + +* *Your use-case here* + +.. _Heat: https://wiki.openstack.org/wiki/Heat +.. _Rally: https://wiki.openstack.org/wiki/Rally +.. _Glance: https://wiki.openstack.org/wiki/Glance + +Design +====== + +There are two communication sides, the *executor* and *worker* that communicate +using a proxy component. The proxy is designed to accept/publish messages +from/into a named exchange. + +High level architecture +----------------------- + +.. image:: img/distributed_flow_rpc.png + :height: 275px + :align: right + +Executor and worker communication +--------------------------------- + +Let's consider how communication between an executor and a worker happens. +First of all an engine resolves all atoms dependencies and schedules atoms that +can be performed at the moment. This uses the same scheduling and dependency +resolution logic that is used for every other engine type. Then the atoms which +can be executed immediately (ones that are dependent on outputs of other tasks +will be executed when that output is ready) are executed by the worker-based +engine executor in the following manner: + +1. The executor initiates task execution/reversion using a proxy object. +2. 
:py:class:`~taskflow.engines.worker_based.proxy.Proxy` publishes task + request (format is described below) into a named exchange using a routing + key that is used to deliver request to particular workers topic. The executor + then waits for the task requests to be accepted and confirmed by workers. If + the executor doesn't get a task confirmation from workers within the given + timeout the task is considered as timed-out and a timeout exception is + raised. +3. A worker receives a request message and starts a new thread for processing it. + + 1. The worker dispatches the request (gets desired endpoint that actually + executes the task). + 2. If dispatched succeeded then the worker sends a confirmation response + to the executor otherwise the worker sends a failed response along with + a serialized :py:class:`failure ` object + that contains what has failed (and why). + 3. The worker executes the task and once it is finished sends the result + back to the originating executor (every time a task progress event is + triggered it sends progress notification to the executor where it is + handled by the engine, dispatching to listeners and so-on). + +4. The executor gets the task request confirmation from the worker and the task + request state changes from the ``PENDING`` to the ``RUNNING`` state. Once + a task request is in the ``RUNNING`` state it can't be timed-out (considering + that task execution process may take unpredictable time). +5. The executor gets the task execution result from the worker and passes it + back to the executor and worker-based engine to finish task processing (this + repeats for subsequent tasks). + +.. 
note:: + + :py:class:`~taskflow.utils.misc.Failure` objects are not json-serializable + (they contain references to tracebacks which are not serializable), so they + are converted to dicts before sending and converted from dicts after + receiving on both executor & worker sides (this translation is lossy since + the traceback won't be fully retained). + +Executor request format +~~~~~~~~~~~~~~~~~~~~~~~ + +* **task** - full task name to be performed +* **action** - task action to be performed (e.g. execute, revert) +* **arguments** - arguments the task action to be called with +* **result** - task execution result (result or + :py:class:`~taskflow.utils.misc.Failure`) *[passed to revert only]* + +Additionally, the following parameters are added to the request message: + +* **reply_to** - executor named exchange workers will send responses back to +* **correlation_id** - executor request id (since there can be multiple request + being processed simultaneously) + +**Example:** + +.. code:: json + + { + "action": "execute", + "arguments": { + "joe_number": 444 + }, + "task": "tasks.CallJoe" + } + +Worker response format +~~~~~~~~~~~~~~~~~~~~~~ + +When **running:** + +.. code:: json + + { + "status": "RUNNING" + } + +When **progressing:** + +.. code:: json + + { + "event_data": , + "progress": , + "state": "PROGRESS" + } + +When **succeeded:** + +.. code:: json + + { + "event": , + "result": , + "state": "SUCCESS" + } + +When **failed:** + +.. code:: json + + { + "event": , + "result": , + "state": "FAILURE" + } + +Usage +===== + + +Workers +------- + +To use the worker based engine a set of workers must first be established on +remote machines. These workers must be provided a list of task objects, task +names, modules names (or entrypoints that can be examined for valid tasks) they +can respond to (this is done so that arbitrary code execution is not possible). 
+ +For complete parameters and object usage please visit +:py:class:`~taskflow.engines.worker_based.worker.Worker`. + +**Example:** + +.. code:: python + + from taskflow.engines.worker_based import worker as w + + config = { + 'url': 'amqp://guest:guest@localhost:5672//', + 'exchange': 'test-exchange', + 'topic': 'test-tasks', + 'tasks': ['tasks:TestTask1', 'tasks:TestTask2'], + } + worker = w.Worker(**config) + worker.run() + +Engines +------- + +To use the worker based engine a flow must be constructed (which contains tasks +that are visible on remote machines) and the specific worker based engine +entrypoint must be selected. Certain configuration options must also be +provided so that the transport backend can be configured and initialized +correctly. Otherwise the usage should be mostly transparent (and is nearly +identical to using any other engine type). + +For complete parameters and object usage please see +:py:class:`~taskflow.engines.worker_based.engine.WorkerBasedActionEngine`. + +**Example with amqp transport:** + +.. code:: python + + engine_conf = { + 'engine': 'worker-based', + 'url': 'amqp://guest:guest@localhost:5672//', + 'exchange': 'test-exchange', + 'topics': ['topic1', 'topic2'], + } + flow = lf.Flow('simple-linear').add(...) + eng = taskflow.engines.load(flow, engine_conf=engine_conf) + eng.run() + +**Example with filesystem transport:** + +.. code:: python + + engine_conf = { + 'engine': 'worker-based', + 'exchange': 'test-exchange', + 'topics': ['topic1', 'topic2'], + 'transport': 'filesystem', + 'transport_options': { + 'data_folder_in': '/tmp/test', + 'data_folder_out': '/tmp/test', + }, + } + flow = lf.Flow('simple-linear').add(...) + eng = taskflow.engines.load(flow, engine_conf=engine_conf) + eng.run() + +Limitations +=========== + +* Atoms inside a flow must receive and accept parameters only from the ways + defined in :doc:`persistence`. 
In other words, the task that is created when + a workflow is constructed will not be the same task that is executed on a + remote worker (and any internal state not passed via the + :doc:`inputs_and_outputs` mechanism can not be transferred). This means + resource objects (database handles, file descriptors, sockets, ...) can **not** + be directly sent across to remote workers (instead the configuration that + defines how to fetch/create these objects must be instead). +* Worker-based engines will in the future be able to run lightweight tasks + locally to avoid transport overhead for very simple tasks (currently it will + run even lightweight tasks remotely, which may be non-performant). +* Fault detection, currently when a worker acknowledges a task the engine will + wait for the task result indefinitely (a task could take a very long time to + finish). In the future there needs to be a way to limit the duration of a + remote workers execution (and track there liveness) and possibly spawn + the task on a secondary worker if a timeout is reached (aka the first worker + has died or has stopped responding). + +Interfaces +========== + +.. automodule:: taskflow.engines.worker_based.worker +.. automodule:: taskflow.engines.worker_based.engine +.. automodule:: taskflow.engines.worker_based.proxy diff --git a/taskflow/engines/worker_based/engine.py b/taskflow/engines/worker_based/engine.py index af178cb6..0c7702e1 100644 --- a/taskflow/engines/worker_based/engine.py +++ b/taskflow/engines/worker_based/engine.py @@ -20,6 +20,20 @@ from taskflow import storage as t_storage class WorkerBasedActionEngine(engine.ActionEngine): + """Worker based action engine. 
+ + Specific backend configuration: + + :param exchange: broker exchange exchange name in which executor / worker + communication is performed + :param url: broker connection url (see format in kombu documentation) + :param topics: list of workers topics to communicate with (this will also + be learned by listening to the notifications that workers + emit). + :keyword transport: transport to be used (e.g. amqp, memory, etc.) + :keyword transport_options: transport specific options + """ + _storage_cls = t_storage.SingleThreadedStorage def _task_executor_cls(self): From c6c57cd14e8009eaca58423f4b252927efc81786 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Fri, 2 May 2014 13:40:01 -0700 Subject: [PATCH 046/188] Medium-level docs on engines Describe why engines exist and also describe at a somewhat lower-level how an action engine goes through its various stages when executing and what each stages high-level goal is (and how it is performed). Change-Id: I79c4b90047826fb2c9f33da75044a9cb42cfe47d --- doc/source/engines.rst | 196 +++++++++++++++++++++++++++++++++++++ doc/source/persistence.rst | 13 +-- doc/source/utils.rst | 5 + 3 files changed, 208 insertions(+), 6 deletions(-) diff --git a/doc/source/engines.rst b/doc/source/engines.rst index 359cea6b..9bb4daa1 100644 --- a/doc/source/engines.rst +++ b/doc/source/engines.rst @@ -23,9 +23,84 @@ parts of :py:class:`linear flow ` are run one after another, in order, even if engine is *capable* of running tasks in parallel). +Why they exist +-------------- + +An engine being the core component which actually makes your flows progress is +likely a new concept for many programmers so let's describe how it operates in +more depth and some of the reasoning behind why it exists. This will hopefully +make it more clear on there value add to the TaskFlow library user. + +First though let us discuss something most are familiar already with; the difference +between `declarative`_ and `imperative`_ programming models. 
The imperative model
+involves establishing statements that accomplish a program's action (likely using
+conditionals and such other language features to do this). This kind of program embeds
+the *how* to accomplish a goal while also defining *what* the goal actually is (and the state
+of this is maintained in memory or on the stack while these statements execute). In contrast
+there is the declarative model which instead of combining the *how* to accomplish a goal
+alongside the *what* is to be accomplished splits these two into only declaring what
+the intended goal is and not the *how*. In TaskFlow terminology the *what* is the structure
+of your flows and the tasks and other atoms you have inside those flows, but the *how*
+is not defined (the line becomes blurred since tasks themselves contain imperative
+code, but for now consider a task as more of a *pure* function that executes, reverts and may
+require inputs and provide outputs). This is where engines get involved; they do
+the execution of the *what* defined via :doc:`atoms `, tasks, flows and
+the relationships defined therein and execute these in a well-defined
+manner (and the engine is responsible for *most* of the state manipulation
+instead).
+
+This mix of imperative and declarative (with a stronger emphasis on the
+declarative model) allows for the following functionality to be possible:
+
+* Enhancing reliability: Decoupling of state alterations from what should be accomplished
+  allows for a *natural* way of resuming by allowing the engine to track the current state
+  and know at which point a flow is in and how to get back into that state when
+  resumption occurs.
+* Enhancing scalability: When an engine is responsible for executing your desired work
+  it becomes possible to alter the *how* in the future by creating new types of execution
+  backends (for example the worker model which does not execute locally). 
Without the decoupling + of the *what* and the *how* it is not possible to provide such a feature (since by the very + nature of that coupling this kind of functionality is inherently hard to provide). +* Enhancing consistency: Since the engine is responsible for executing atoms and the + associated workflow, it can be one (if not the only) of the primary entities + that is working to keep the execution model in a consistent state. Coupled with atoms + which *should* be immutable and have have limited (if any) internal state the + ability to reason about and obtain consistency can be vastly improved. + + * With future features around locking (using `tooz`_ to help) engines can also + help ensure that resources being accessed by tasks are reliably obtained and + mutated on. This will help ensure that other processes, threads, or other types + of entities are also not executing tasks that manipulate those same resources (further + increasing consistency). + +Of course these kind of features can come with some drawbacks: + +* The downside of decoupling the *how* and the *what* is that the imperative model + where functions control & manipulate state must start to be shifted away from + (and this is likely a mindset change for programmers used to the imperative + model). We have worked to make this less of a concern by creating and + encouraging the usage of :doc:`persistence `, to help make it possible + to have some level of provided state transfer mechanism. +* Depending on how much imperative code exists (and state inside that code) there + can be *significant* rework of that code and converting or refactoring it to these new concepts. + We have tried to help here by allowing you to have tasks that internally use regular python + code (and internally can be written in an imperative style) as well as by providing examples + and these developer docs; helping this process be as seamless as possible. 
+* Another one of the downsides of decoupling the *what* from the *how* is that it may become + harder to use traditional techniques to debug failures (especially if remote workers are + involved). We try to help here by making it easy to track, monitor and introspect + the actions & state changes that are occurring inside an engine (see + :doc:`notifications ` for how to use some of these capabilities). + +.. _declarative: http://en.wikipedia.org/wiki/Declarative_programming +.. _imperative: http://en.wikipedia.org/wiki/Imperative_programming +.. _tooz: https://github.com/stackforge/tooz + Creating ======== +.. _creating engines: + All engines are mere classes that implement the same interface, and of course it is possible to import them and create instances just like with any classes in Python. But the easier (and recommended) way for creating an engine is using @@ -116,10 +191,131 @@ operates. .. _wiki page: https://wiki.openstack.org/wiki/TaskFlow/Worker-based_Engine .. _blueprint page: https://blueprints.launchpad.net/taskflow +How they run +============ + +To provide a peek into the general process that a engine goes through when +running lets break it apart a little and describe what one of the engine types +does while executing (for this we will look into the +:py:class:`~taskflow.engines.action_engine.engine.ActionEngine` engine type). + +Creation +-------- + +The first thing that occurs is that the user creates an engine for a given +flow, providing a flow detail (where results will be saved into a provided +:doc:`persistence ` backend). This is typically accomplished via +the methods described above in `creating engines`_. The engine at this point now will +have references to your flow and backends and other internal variables are +setup. + +Compiling +--------- + +During this stage the flow will be converted into an internal graph representation +using a flow :py:func:`~taskflow.utils.flow_utils.flatten` function. 
This function
+converts the flow objects and contained atoms into a `networkx` directed graph that
+contains the equivalent atoms defined in the flow and any nested flows & atoms as
+well as the constraints that are created by the application of the different flow
+patterns. This graph is then what will be analyzed & traversed during the engine's
+execution. At this point a few helper objects are also created and saved to
+internal engine variables (these objects help in execution of atoms, analyzing
+the graph and performing other internal engine activities).
+
+Preparation
+-----------
+
+This stage starts by setting up the storage needed for all atoms in the
+previously created graph, ensuring that corresponding
+:py:class:`~taskflow.persistence.logbook.AtomDetail` (or subclass of) objects
+are created for each node in the graph. Once this is done final validation occurs
+on the requirements that are needed to start execution and what storage provides.
+If there are any atom or flow requirements not satisfied then execution will not be
+allowed to continue.
+
+Execution
+---------
+
+The graph (and helper objects) previously created are now used for guiding further
+execution. The flow is put into the ``RUNNING`` :doc:`state ` and a
+:py:class:`~taskflow.engines.action_engine.graph_action.FutureGraphAction`
+object starts to take over and begins going through the stages listed below.
+
+Resumption
+^^^^^^^^^^
+
+One of the first stages is to analyze the :doc:`state ` of the tasks in the graph,
+determining which ones have failed, which ones were previously running and
+determining what the intention of that task should now be (typically an
+intention can be that it should ``REVERT``, or that it should ``EXECUTE`` or
+that it should be ``IGNORED``). 
This intention is determined by analyzing the
+current state of the task; which is determined by looking at the state in the task
+detail object for that task and analyzing edges of the graph for things like
+retry atom which can influence what a task's intention should be (this is aided
+by the usage of the :py:class:`~taskflow.engines.action_engine.graph_analyzer.GraphAnalyzer`
+helper object which was designed to provide helper methods for this analysis). Once
+these intentions are determined and associated with each task (the intention is
+also stored in the :py:class:`~taskflow.persistence.logbook.AtomDetail` object) the
+scheduling stage starts.
+
+Scheduling
+^^^^^^^^^^
+
+This stage selects which atoms are eligible to run (looking at their intention,
+checking if predecessor atoms have run and so-on, again using the
+:py:class:`~taskflow.engines.action_engine.graph_analyzer.GraphAnalyzer` helper
+object) and submits those atoms to a previously provided compatible
+`executor`_ for asynchronous execution. This executor will return a `future`_ object
+for each atom submitted; all of which are collected into a list of not done
+futures. This will end the initial round of scheduling and at this point the
+engine enters the waiting stage.
+
+Waiting
+^^^^^^^
+
+In this stage the engine waits for any of the future objects previously submitted
+to complete. Once one of the future objects completes (or fails) that atom's result
+will be examined and persisted to the persistence backend (saved into the
+corresponding :py:class:`~taskflow.persistence.logbook.AtomDetail` object) and
+the state of the atom is changed. At this point what happens falls into two categories,
+one for if that atom failed and one for if it did not. If the atom failed it may
+be set to a new intention such as ``RETRY`` or ``REVERT`` (other atoms that were
+predecessors of this failing atom may also have their intention altered). 
Once this +intention adjustment has happened a new round of scheduling occurs and this process +repeats until the engine succeeds or fails (if the process running the engine +dies the above stages will be restarted and resuming will occur). + +.. note:: + + If the engine is suspended while the engine is going through the above + stages this will stop any further scheduling stages from occurring and + all currently executing atoms will be allowed to finish (and there results + will be saved). + +Finishing +--------- + +At this point the :py:class:`~taskflow.engines.action_engine.graph_action.FutureGraphAction` +has now finished successfully, failed, or the execution was suspended. Depending +on which one of these occurs will cause the flow to enter a new state (typically one +of ``FAILURE``, ``SUSPENDED``, ``SUCCESS`` or ``REVERTED``). :doc:`Notifications ` +will be sent out about this final state change (other state changes also send out notifications) +and any failures that occurred will be reraised (the failure objects are wrapped +exceptions). If no failures have occurred then the engine will have finished and +if so desired the :doc:`persistence ` can be used to cleanup any +details that were saved for this execution. + +.. _future: https://docs.python.org/dev/library/concurrent.futures.html#future-objects +.. _executor: https://docs.python.org/dev/library/concurrent.futures.html#concurrent.futures.Executor +.. _networkx: https://networkx.github.io/ + Interfaces ========== .. automodule:: taskflow.engines.base +.. automodule:: taskflow.engines.action_engine.engine +.. automodule:: taskflow.engines.action_engine.graph_action +.. automodule:: taskflow.engines.action_engine.graph_analyzer Hierarchy ========= diff --git a/doc/source/persistence.rst b/doc/source/persistence.rst index ae2f118f..6065e4f4 100644 --- a/doc/source/persistence.rst +++ b/doc/source/persistence.rst @@ -53,12 +53,13 @@ and :py:class:`~taskflow.persistence.backends.base.Backend` objects). 
As an engi initializes it will extract (or create) :py:class:`~taskflow.persistence.logbook.AtomDetail` objects for each atom in the workflow the engine will be executing. -**Execution:** When an engine beings to execute it will examine any previously existing -:py:class:`~taskflow.persistence.logbook.AtomDetail` objects to see if they can be used -for resuming; see :doc:`resumption ` for more details on this subject. For atoms which have not -finished (or did not finish correctly from a previous run) they will begin executing -only after any dependent inputs are ready. This is done by analyzing the execution -graph and looking at predecessor :py:class:`~taskflow.persistence.logbook.AtomDetail` +**Execution:** When an engine beings to execute (see :doc:`engine ` for more +of the details about how an engine goes about this process) it will examine any +previously existing :py:class:`~taskflow.persistence.logbook.AtomDetail` objects to +see if they can be used for resuming; see :doc:`resumption ` for more details +on this subject. For atoms which have not finished (or did not finish correctly from a +previous run) they will begin executing only after any dependent inputs are ready. This +is done by analyzing the execution graph and looking at predecessor :py:class:`~taskflow.persistence.logbook.AtomDetail` outputs and states (which may have been persisted in a past run). This will result in either using there previous information or by running those predecessors and saving their output to the :py:class:`~taskflow.persistence.logbook.FlowDetail` and diff --git a/doc/source/utils.rst b/doc/source/utils.rst index b847e07f..9ac1a77f 100644 --- a/doc/source/utils.rst +++ b/doc/source/utils.rst @@ -22,3 +22,8 @@ The following classes and modules are *recommended* for external usage: .. autofunction:: taskflow.utils.persistence_utils.temporary_flow_detail .. autofunction:: taskflow.utils.persistence_utils.pformat + +Internal usage +============== + +.. 
automodule:: taskflow.utils.flow_utils From 7d441555ef350b8c0a0efa5875865d6e9e59c7e2 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Sat, 3 May 2014 23:55:25 -0700 Subject: [PATCH 047/188] Adjust doc linking Instead of linking to other topics (which then uses the name of those topics as the link name) define a name that makes more sense for the inline usage and retain the link to the document using wording that fits the surronding text. Also adjust the futures/executor links to point to the external links documenting these features. Change-Id: I5a89e2f747dfec2505947f25c124b157271c07cf --- doc/source/arguments_and_results.rst | 12 ++++++------ doc/source/atoms.rst | 4 ++-- doc/source/engines.rst | 25 +++++++++++++------------ doc/source/index.rst | 7 +++++-- doc/source/inputs_and_outputs.rst | 19 ++++++++++--------- doc/source/notifications.rst | 2 +- doc/source/workers.rst | 15 ++++++++------- 7 files changed, 45 insertions(+), 39 deletions(-) diff --git a/doc/source/arguments_and_results.rst b/doc/source/arguments_and_results.rst index ab1cc5fe..9128c95d 100644 --- a/doc/source/arguments_and_results.rst +++ b/doc/source/arguments_and_results.rst @@ -9,12 +9,12 @@ Atom Arguments and Results .. |retry.revert| replace:: :py:meth:`~taskflow.retry.Retry.revert` In taskflow, all flow and task state goes to (potentially persistent) storage. -That includes all the information that atoms (e.g. tasks) in the flow need when -they are executed, and all the information task produces (via serializable task -results). A developer who implements tasks or flows can specify what arguments -a task accepts and what result it returns in several ways. This document will -help you understand what those ways are and how to use those ways to accomplish -your desired TaskFlow usage pattern. +That includes all the information that :doc:`atoms ` (e.g. tasks) in the +flow need when they are executed, and all the information task produces (via +serializable task results). 
A developer who implements tasks or flows can specify +what arguments a task accepts and what result it returns in several ways. This +document will help you understand what those ways are and how to use those ways +to accomplish your desired usage pattern. .. glossary:: diff --git a/doc/source/atoms.rst b/doc/source/atoms.rst index 22bc97c1..98d1ba70 100644 --- a/doc/source/atoms.rst +++ b/doc/source/atoms.rst @@ -5,8 +5,8 @@ Atoms, Tasks and Retries An atom is the smallest unit in taskflow which acts as the base for other classes. Atoms have a name and a version (if applicable). An atom is expected to name desired input values (requirements) and name outputs (provided -values), see :doc:`arguments_and_results` page for a complete reference -about these inputs and outputs. +values), see the :doc:`arguments and results ` page for +a complete reference about these inputs and outputs. .. automodule:: taskflow.atom diff --git a/doc/source/engines.rst b/doc/source/engines.rst index 95278a5b..f9e2ad90 100644 --- a/doc/source/engines.rst +++ b/doc/source/engines.rst @@ -7,8 +7,8 @@ Overview Engines are what **really** runs your atoms. -An *engine* takes a flow structure (described by :doc:`patterns`) and uses it to -decide which :doc:`atom ` to run and when. +An *engine* takes a flow structure (described by :doc:`patterns `) and +uses it to decide which :doc:`atom ` to run and when. TaskFlow provides different implementations of engines. Some may be easier to use (ie, require no additional infrastructure setup) and understand; others @@ -152,11 +152,11 @@ Parallel engine schedules tasks onto different threads to run them in parallel. Additional configuration parameters: -* ``executor``: a class that provides ``concurrent.futures.Executor``-like +* ``executor``: a object that implements a :pep:`3148` compatible `executor`_ interface; it will be used for scheduling tasks. 
You can use instances - of ``concurrent.futures.ThreadPoolExecutor`` or - ``taskflow.utils.eventlet_utils.GreenExecutor`` (which internally uses - `eventlet `_ and greenthread pools). + of a `thread pool executor`_ or a + :py:class:`green executor ` + (which internally uses `eventlet `_ and greenthread pools). .. tip:: @@ -166,8 +166,7 @@ Additional configuration parameters: .. note:: - Running tasks with ``concurrent.futures.ProcessPoolExecutor`` is not - supported now. + Running tasks with a `process pool executor`_ is not currently supported. Worker-Based ------------ @@ -291,10 +290,6 @@ exceptions). If no failures have occurred then the engine will have finished and if so desired the :doc:`persistence ` can be used to cleanup any details that were saved for this execution. -.. _future: https://docs.python.org/dev/library/concurrent.futures.html#future-objects -.. _executor: https://docs.python.org/dev/library/concurrent.futures.html#concurrent.futures.Executor -.. _networkx: https://networkx.github.io/ - Interfaces ========== @@ -311,3 +306,9 @@ Hierarchy taskflow.engines.action_engine.engine taskflow.engines.worker_based.engine :parts: 1 + +.. _future: https://docs.python.org/dev/library/concurrent.futures.html#future-objects +.. _executor: https://docs.python.org/dev/library/concurrent.futures.html#concurrent.futures.Executor +.. _networkx: https://networkx.github.io/ +.. _thread pool executor: https://docs.python.org/dev/library/concurrent.futures.html#threadpoolexecutor +.. _process pool executor: https://docs.python.org/dev/library/concurrent.futures.html#processpoolexecutor diff --git a/doc/source/index.rst b/doc/source/index.rst index 41aaaa84..a0e869fc 100644 --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -1,9 +1,12 @@ TaskFlow ======== -TaskFlow is a Python library for OpenStack that helps make task execution easy, consistent, and reliable. 
+*TaskFlow is a Python library for OpenStack that helps make task execution +easy, consistent, and reliable.* -TaskFlow documentation is hosted on wiki: https://wiki.openstack.org/wiki/TaskFlow +.. note:: + + Additional documentation is also hosted on wiki: https://wiki.openstack.org/wiki/TaskFlow Contents ======== diff --git a/doc/source/inputs_and_outputs.rst b/doc/source/inputs_and_outputs.rst index ee00945f..26171e9a 100644 --- a/doc/source/inputs_and_outputs.rst +++ b/doc/source/inputs_and_outputs.rst @@ -4,19 +4,20 @@ Inputs and Outputs In TaskFlow there are multiple ways to provide inputs for your tasks and flows and get information from them. This document describes one of them, that -involves task arguments and results. There are also :doc:`notifications`, which -allow you to get notified when task or flow changed state. You may also opt to -use :doc:`persistence` directly. +involves task arguments and results. There are also +:doc:`notifications `, which allow you to get notified when task +or flow changed state. You may also opt to use the :doc:`persistence ` +layer itself directly. ----------------------- Flow Inputs and Outputs ----------------------- Tasks accept inputs via task arguments and provide outputs via task results -(see :doc:`arguments_and_results` for more details). This is the standard and -recommended way to pass data from one task to another. Of course not every task -argument needs to be provided to some other task of a flow, and not every task -result should be consumed by every task. +(see :doc:`arguments and results ` for more details). This +is the standard and recommended way to pass data from one task to another. Of +course not every task argument needs to be provided to some other task of a +flow, and not every task result should be consumed by every task. 
If some value is required by one or more tasks of a flow, but is not provided by any task, it is considered to be flow input, and **must** be put into the @@ -62,8 +63,8 @@ As you can see, this flow does not require b, as it is provided by the fist task Engine and Storage ------------------ -The storage layer is how an engine persists flow and task details. For more -in-depth design details see :doc:`persistence`. +The storage layer is how an engine persists flow and task details (for more +in-depth details see :doc:`persistence `). Inputs ------ diff --git a/doc/source/notifications.rst b/doc/source/notifications.rst index 88969bdd..327792d4 100644 --- a/doc/source/notifications.rst +++ b/doc/source/notifications.rst @@ -21,7 +21,7 @@ To receive these notifications you should register a callback in Each engine provides two of them: one notifies about flow state changes, and another notifies about changes of tasks. -TaskFlow also has a set of predefined :ref:`listeners`, and provides +TaskFlow also has a set of predefined :ref:`listeners `, and provides means to write your own listeners, which can be more convenient than using raw callbacks. diff --git a/doc/source/workers.rst b/doc/source/workers.rst index 4db89d57..01787712 100644 --- a/doc/source/workers.rst +++ b/doc/source/workers.rst @@ -303,13 +303,14 @@ Limitations =========== * Atoms inside a flow must receive and accept parameters only from the ways - defined in :doc:`persistence`. In other words, the task that is created when - a workflow is constructed will not be the same task that is executed on a - remote worker (and any internal state not passed via the - :doc:`inputs_and_outputs` mechanism can not be transferred). This means - resource objects (database handles, file descriptors, sockets, ...) can **not** - be directly sent across to remote workers (instead the configuration that - defines how to fetch/create these objects must be instead). + defined in :doc:`persistence `. 
In other words, the task + that is created when a workflow is constructed will not be the same task that + is executed on a remote worker (and any internal state not passed via the + :doc:`input and output ` mechanism can not be + transferred). This means resource objects (database handles, file + descriptors, sockets, ...) can **not** be directly sent across to remote + workers (instead the configuration that defines how to fetch/create these + objects must be instead). * Worker-based engines will in the future be able to run lightweight tasks locally to avoid transport overhead for very simple tasks (currently it will run even lightweight tasks remotely, which may be non-performant). From 7d771fbdba075491fba064ca3a1b3bbcda23a591 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Sun, 4 May 2014 00:09:05 -0700 Subject: [PATCH 048/188] Tweaks to object hiearchy diagrams Change-Id: Ib2ff6e548c9e31cdaf5d8606e7e9babe20d9cb5d --- doc/source/patterns.rst | 10 ++++++++++ doc/source/persistence.rst | 2 +- 2 files changed, 11 insertions(+), 1 deletion(-) diff --git a/doc/source/patterns.rst b/doc/source/patterns.rst index 4b6ff832..8c8eb410 100644 --- a/doc/source/patterns.rst +++ b/doc/source/patterns.rst @@ -21,3 +21,13 @@ Graph flow ~~~~~~~~~~ .. automodule:: taskflow.patterns.graph_flow + +Hierarchy +~~~~~~~~~ + +.. 
inheritance-diagram:: + taskflow.flow + taskflow.patterns.linear_flow + taskflow.patterns.unordered_flow + taskflow.patterns.graph_flow + :parts: 2 diff --git a/doc/source/persistence.rst b/doc/source/persistence.rst index 6065e4f4..8cfe91d1 100644 --- a/doc/source/persistence.rst +++ b/doc/source/persistence.rst @@ -198,4 +198,4 @@ Hierarchy taskflow.persistence.backends.impl_zookeeper taskflow.persistence.backends.impl_dir taskflow.persistence.backends.impl_sqlalchemy - :parts: 1 + :parts: 2 From c379885f2eb5f5795cae282fe8d8bec8fc6b371e Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Mon, 28 Apr 2014 16:16:17 -0700 Subject: [PATCH 049/188] Allow the WBE to use a preexisting executor A worker task executor is a good candidate for reuse since it maintains worker knowledge that is valuable to be retained across engine runs (tasks on which workers for example). In order for it to be reused we need a way for the WBE to be able to receive and reuse a previously existing executor. Change-Id: Ia9a8f4c544b74e12e2cbd6bd941945da1111499c --- doc/source/workers.rst | 7 +++++++ taskflow/engines/worker_based/engine.py | 21 +++++++++++---------- 2 files changed, 18 insertions(+), 10 deletions(-) diff --git a/doc/source/workers.rst b/doc/source/workers.rst index 01787712..83b92c6d 100644 --- a/doc/source/workers.rst +++ b/doc/source/workers.rst @@ -299,6 +299,12 @@ For complete parameters and object usage please see eng = taskflow.engines.load(flow, engine_conf=engine_conf) eng.run() +Additional supported keyword arguments: + +* ``executor``: a class that provides a + :py:class:`~taskflow.engines.worker_based.executor.WorkerTaskExecutor` + interface; it will be used for executing, reverting and waiting for remote tasks. + Limitations =========== @@ -327,3 +333,4 @@ Interfaces .. automodule:: taskflow.engines.worker_based.worker .. automodule:: taskflow.engines.worker_based.engine .. automodule:: taskflow.engines.worker_based.proxy +.. 
automodule:: taskflow.engines.worker_based.executor diff --git a/taskflow/engines/worker_based/engine.py b/taskflow/engines/worker_based/engine.py index 0c7702e1..a552222c 100644 --- a/taskflow/engines/worker_based/engine.py +++ b/taskflow/engines/worker_based/engine.py @@ -37,16 +37,17 @@ class WorkerBasedActionEngine(engine.ActionEngine): _storage_cls = t_storage.SingleThreadedStorage def _task_executor_cls(self): - return executor.WorkerTaskExecutor(**self._executor_config) + if self._executor is not None: + return self._executor + return executor.WorkerTaskExecutor( + uuid=self._flow_detail.uuid, + url=self._conf.get('url'), + exchange=self._conf.get('exchange', 'default'), + topics=self._conf.get('topics', []), + transport=self._conf.get('transport'), + transport_options=self._conf.get('transport_options')) - def __init__(self, flow, flow_detail, backend, conf): - self._executor_config = { - 'uuid': flow_detail.uuid, - 'url': conf.get('url'), - 'exchange': conf.get('exchange', 'default'), - 'topics': conf.get('topics', []), - 'transport': conf.get('transport'), - 'transport_options': conf.get('transport_options') - } + def __init__(self, flow, flow_detail, backend, conf, **kwargs): super(WorkerBasedActionEngine, self).__init__( flow, flow_detail, backend, conf) + self._executor = kwargs.get('executor') From 8ebce5b027046477a276111f20f9bc6dd135cc59 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Mon, 28 Apr 2014 16:16:17 -0700 Subject: [PATCH 050/188] Pass executor via kwargs instead of config Breaking change: moves from taking an executor from configuration (it really is not configuration) and instead takes that executor instead from the additional kwargs which can be provided to a engine-specific type. 
Change-Id: I475f33a63ebd08f6c20a16534423c8bc3502fa3f --- doc/source/engines.rst | 4 ++-- taskflow/engines/action_engine/engine.py | 4 ++-- taskflow/engines/helpers.py | 4 ++-- taskflow/tests/unit/test_action_engine.py | 6 +++--- taskflow/tests/unit/test_arguments_passing.py | 6 +++--- taskflow/tests/unit/test_retries.py | 6 +++--- taskflow/tests/unit/test_suspend_flow.py | 12 ++++++------ 7 files changed, 21 insertions(+), 21 deletions(-) diff --git a/doc/source/engines.rst b/doc/source/engines.rst index f9e2ad90..631d3d53 100644 --- a/doc/source/engines.rst +++ b/doc/source/engines.rst @@ -126,7 +126,7 @@ the ``engine_conf`` parameter any helper factory function accepts. It may be: * a string, naming engine type; * a dictionary, holding engine type with key ``'engine'`` and possibly - type-specific engine parameters. + type-specific engine configuration parameters. Single-Threaded --------------- @@ -150,7 +150,7 @@ Parallel Parallel engine schedules tasks onto different threads to run them in parallel. -Additional configuration parameters: +Additional supported keyword arguments: * ``executor``: a object that implements a :pep:`3148` compatible `executor`_ interface; it will be used for scheduling tasks. 
You can use instances diff --git a/taskflow/engines/action_engine/engine.py b/taskflow/engines/action_engine/engine.py index 4f3d85c1..beb6536f 100644 --- a/taskflow/engines/action_engine/engine.py +++ b/taskflow/engines/action_engine/engine.py @@ -204,7 +204,7 @@ class MultiThreadedActionEngine(ActionEngine): def _task_executor_cls(self): return executor.ParallelTaskExecutor(self._executor) - def __init__(self, flow, flow_detail, backend, conf): + def __init__(self, flow, flow_detail, backend, conf, **kwargs): super(MultiThreadedActionEngine, self).__init__( flow, flow_detail, backend, conf) - self._executor = conf.get('executor', None) + self._executor = kwargs.get('executor') diff --git a/taskflow/engines/helpers.py b/taskflow/engines/helpers.py index c70c8f1e..2aeddabc 100644 --- a/taskflow/engines/helpers.py +++ b/taskflow/engines/helpers.py @@ -57,7 +57,7 @@ def load(flow, store=None, flow_detail=None, book=None, Which engine to load is specified in 'engine_conf' parameter. It can be a string that names engine type or a dictionary which holds engine type (with 'engine' key) and additional engine-specific - configuration (for example, executor for multithreaded engine). + configuration. Which storage backend to use is defined by backend parameter. It can be backend itself, or a dictionary that is passed to @@ -119,7 +119,7 @@ def run(flow, store=None, flow_detail=None, book=None, Which engine to load is specified in 'engine_conf' parameter. It can be a string that names engine type or a dictionary which holds engine type (with 'engine' key) and additional engine-specific - configuration (for example, executor for multithreaded engine). + configuration. Which storage backend to use is defined by backend parameter. 
It can be backend itself, or a dictionary that is passed to diff --git a/taskflow/tests/unit/test_action_engine.py b/taskflow/tests/unit/test_action_engine.py index d711a1c2..b6c6c894 100644 --- a/taskflow/tests/unit/test_action_engine.py +++ b/taskflow/tests/unit/test_action_engine.py @@ -529,11 +529,11 @@ class MultiThreadedEngineTest(EngineTaskTest, EngineCheckingTaskTest, test.TestCase): def _make_engine(self, flow, flow_detail=None, executor=None): - engine_conf = dict(engine='parallel', - executor=executor) + engine_conf = dict(engine='parallel') return taskflow.engines.load(flow, flow_detail=flow_detail, engine_conf=engine_conf, - backend=self.backend) + backend=self.backend, + executor=executor) def test_correct_load(self): engine = self._make_engine(utils.TaskNoRequiresNoReturns) diff --git a/taskflow/tests/unit/test_arguments_passing.py b/taskflow/tests/unit/test_arguments_passing.py index 0a038bd1..4e8d5bb6 100644 --- a/taskflow/tests/unit/test_arguments_passing.py +++ b/taskflow/tests/unit/test_arguments_passing.py @@ -133,8 +133,8 @@ class SingleThreadedEngineTest(ArgumentsPassingTest, class MultiThreadedEngineTest(ArgumentsPassingTest, test.TestCase): def _make_engine(self, flow, flow_detail=None, executor=None): - engine_conf = dict(engine='parallel', - executor=executor) + engine_conf = dict(engine='parallel') return taskflow.engines.load(flow, flow_detail=flow_detail, engine_conf=engine_conf, - backend=self.backend) + backend=self.backend, + executor=executor) diff --git a/taskflow/tests/unit/test_retries.py b/taskflow/tests/unit/test_retries.py index d9c60903..6953b376 100644 --- a/taskflow/tests/unit/test_retries.py +++ b/taskflow/tests/unit/test_retries.py @@ -768,8 +768,8 @@ class MultiThreadedEngineTest(RetryTest, RetryParallelExecutionTest, test.TestCase): def _make_engine(self, flow, flow_detail=None, executor=None): - engine_conf = dict(engine='parallel', - executor=executor) + engine_conf = dict(engine='parallel') return 
taskflow.engines.load(flow, flow_detail=flow_detail, engine_conf=engine_conf, - backend=self.backend) + backend=self.backend, + executor=executor) diff --git a/taskflow/tests/unit/test_suspend_flow.py b/taskflow/tests/unit/test_suspend_flow.py index eeda19f0..bb953449 100644 --- a/taskflow/tests/unit/test_suspend_flow.py +++ b/taskflow/tests/unit/test_suspend_flow.py @@ -175,11 +175,11 @@ class SingleThreadedEngineTest(SuspendFlowTest, class MultiThreadedEngineTest(SuspendFlowTest, test.TestCase): def _make_engine(self, flow, flow_detail=None, executor=None): - engine_conf = dict(engine='parallel', - executor=executor) + engine_conf = dict(engine='parallel') return taskflow.engines.load(flow, flow_detail=flow_detail, engine_conf=engine_conf, - backend=self.backend) + backend=self.backend, + executor=executor) @testtools.skipIf(not eu.EVENTLET_AVAILABLE, 'eventlet is not available') @@ -189,8 +189,8 @@ class ParallelEngineWithEventletTest(SuspendFlowTest, def _make_engine(self, flow, flow_detail=None, executor=None): if executor is None: executor = eu.GreenExecutor() - engine_conf = dict(engine='parallel', - executor=executor) + engine_conf = dict(engine='parallel') return taskflow.engines.load(flow, flow_detail=flow_detail, engine_conf=engine_conf, - backend=self.backend) + backend=self.backend, + executor=executor) From 50696ac555087d2ffecc5fc3f39e3f4cda89103c Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Sun, 4 May 2014 21:29:45 -0700 Subject: [PATCH 051/188] Engine _cls postfix is not correct Some of the time these attributes are types but other times they are functions, to avoid the confusion of naming these with a '_cls' postfix move to having a '_factory' postfix since these attributes generate other objects (which is what factories do). 
Change-Id: I73edd0c794223d719fbfbd0608c985cb335c8c26 --- taskflow/engines/action_engine/engine.py | 40 ++++++++++++------------ taskflow/engines/base.py | 7 +++-- taskflow/engines/worker_based/engine.py | 4 +-- 3 files changed, 26 insertions(+), 25 deletions(-) diff --git a/taskflow/engines/action_engine/engine.py b/taskflow/engines/action_engine/engine.py index beb6536f..a8cf14ed 100644 --- a/taskflow/engines/action_engine/engine.py +++ b/taskflow/engines/action_engine/engine.py @@ -48,11 +48,11 @@ class ActionEngine(base.EngineBase): reversion to commence. See the valid states in the states module to learn more about what other states the tasks & flow being ran can go through. """ - _graph_action_cls = graph_action.FutureGraphAction - _graph_analyzer_cls = graph_analyzer.GraphAnalyzer - _task_action_cls = task_action.TaskAction - _task_executor_cls = executor.SerialTaskExecutor - _retry_action_cls = retry_action.RetryAction + _graph_action_factory = graph_action.FutureGraphAction + _graph_analyzer_factory = graph_analyzer.GraphAnalyzer + _task_action_factory = task_action.TaskAction + _task_executor_factory = executor.SerialTaskExecutor + _retry_action_factory = retry_action.RetryAction def __init__(self, flow, flow_detail, backend, conf): super(ActionEngine, self).__init__(flow, flow_detail, backend, conf) @@ -173,35 +173,35 @@ class ActionEngine(base.EngineBase): execution_graph = flow_utils.flatten(self._flow) if execution_graph.number_of_nodes() == 0: raise exc.Empty("Flow %s is empty." 
% self._flow.name) - self._analyzer = self._graph_analyzer_cls(execution_graph, - self.storage) + self._analyzer = self._graph_analyzer_factory(execution_graph, + self.storage) if self._task_executor is None: - self._task_executor = self._task_executor_cls() + self._task_executor = self._task_executor_factory() if self._task_action is None: - self._task_action = self._task_action_cls(self.storage, - self._task_executor, - self.task_notifier) + self._task_action = self._task_action_factory(self.storage, + self._task_executor, + self.task_notifier) if self._retry_action is None: - self._retry_action = self._retry_action_cls(self.storage, - self.task_notifier) - self._root = self._graph_action_cls(self._analyzer, - self.storage, - self._task_action, - self._retry_action) + self._retry_action = self._retry_action_factory(self.storage, + self.task_notifier) + self._root = self._graph_action_factory(self._analyzer, + self.storage, + self._task_action, + self._retry_action) self._compiled = True return class SingleThreadedActionEngine(ActionEngine): """Engine that runs tasks in serial manner.""" - _storage_cls = t_storage.SingleThreadedStorage + _storage_factory = t_storage.SingleThreadedStorage class MultiThreadedActionEngine(ActionEngine): """Engine that runs tasks in parallel manner.""" - _storage_cls = t_storage.MultiThreadedStorage + _storage_factory = t_storage.MultiThreadedStorage - def _task_executor_cls(self): + def _task_executor_factory(self): return executor.ParallelTaskExecutor(self._executor) def __init__(self, flow, flow_detail, backend, conf, **kwargs): diff --git a/taskflow/engines/base.py b/taskflow/engines/base.py index 8a6d42c9..402aaee5 100644 --- a/taskflow/engines/base.py +++ b/taskflow/engines/base.py @@ -42,12 +42,13 @@ class EngineBase(object): def storage(self): """The storage unit for this flow.""" if self._storage is None: - self._storage = self._storage_cls(self._flow_detail, self._backend) + self._storage = 
self._storage_factory(self._flow_detail, + self._backend) return self._storage @abc.abstractproperty - def _storage_cls(self): - """Storage class that will be used to generate storage objects.""" + def _storage_factory(self): + """Storage factory that will be used to generate storage objects.""" @abc.abstractmethod def compile(self): diff --git a/taskflow/engines/worker_based/engine.py b/taskflow/engines/worker_based/engine.py index a552222c..e92e73f8 100644 --- a/taskflow/engines/worker_based/engine.py +++ b/taskflow/engines/worker_based/engine.py @@ -34,9 +34,9 @@ class WorkerBasedActionEngine(engine.ActionEngine): :keyword transport_options: transport specific options """ - _storage_cls = t_storage.SingleThreadedStorage + _storage_factory = t_storage.SingleThreadedStorage - def _task_executor_cls(self): + def _task_executor_factory(self): if self._executor is not None: return self._executor return executor.WorkerTaskExecutor( From c03cd3f85e9df113ef10833eaedfc846adde45f6 Mon Sep 17 00:00:00 2001 From: Cyril Roelandt Date: Wed, 30 Apr 2014 00:38:50 +0200 Subject: [PATCH 052/188] Add an example for the job board feature Change-Id: I9479ca20fa9ed1217a56314c1551d394f5f1ecb5 Co-Authored-By:Dan Krause --- taskflow/examples/job_board_no_test.py | 171 +++++++++++++++++++++++++ 1 file changed, 171 insertions(+) create mode 100644 taskflow/examples/job_board_no_test.py diff --git a/taskflow/examples/job_board_no_test.py b/taskflow/examples/job_board_no_test.py new file mode 100644 index 00000000..d37c96a7 --- /dev/null +++ b/taskflow/examples/job_board_no_test.py @@ -0,0 +1,171 @@ +# -*- encoding: utf-8 -*- +# +# Copyright © 2013 eNovance +# +# Authors: Dan Krause +# Cyril Roelandt +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +# This example shows how to use the job board feature. +# +# Let's start by creating some jobs: +# $ python job_board_no_test.py create my-board my-job '{}' +# $ python job_board_no_test.py create my-board my-job '{"foo": "bar"}' +# $ python job_board_no_test.py create my-board my-job '{"foo": "baz"}' +# $ python job_board_no_test.py create my-board my-job '{"foo": "barbaz"}' +# +# Make sure they were registered: +# $ python job_board_no_test.py list my-board +# 7277181a-1f83-473d-8233-f361615bae9e - {} +# 84a396e8-d02e-450d-8566-d93cb68550c0 - {u'foo': u'bar'} +# 4d355d6a-2c72-44a2-a558-19ae52e8ae2c - {u'foo': u'baz'} +# cd9aae2c-fd64-416d-8ba0-426fa8e3d59c - {u'foo': u'barbaz'} +# +# Perform one job: +# $ python job_board_no_test.py consume my-board \ +# 84a396e8-d02e-450d-8566-d93cb68550c0 +# Performing job 84a396e8-d02e-450d-8566-d93cb68550c0 with args \ +# {u'foo': u'bar'} +# $ python job_board_no_test.py list my-board +# 7277181a-1f83-473d-8233-f361615bae9e - {} +# 4d355d6a-2c72-44a2-a558-19ae52e8ae2c - {u'foo': u'baz'} +# cd9aae2c-fd64-416d-8ba0-426fa8e3d59c - {u'foo': u'barbaz'} +# +# Delete a job: +# $ python job_board_no_test.py delete my-board \ +# cd9aae2c-fd64-416d-8ba0-426fa8e3d59c +# $ python job_board_no_test.py list my-board +# 7277181a-1f83-473d-8233-f361615bae9e - {} +# 4d355d6a-2c72-44a2-a558-19ae52e8ae2c - {u'foo': u'baz'} +# +# Delete all the remaining jobs +# $ python job_board_no_test.py clear my-board +# $ python job_board_no_test.py list my-board +# $ + +import argparse +import contextlib +import json +import os +import sys 
+import tempfile + +import taskflow.jobs.backends as job_backends +from taskflow.persistence import logbook + +import example_utils # noqa + + +@contextlib.contextmanager +def jobboard(*args, **kwargs): + jb = job_backends.fetch(*args, **kwargs) + jb.connect() + yield jb + jb.close() + + +conf = { + 'board': 'zookeeper', + 'hosts': ['127.0.0.1:2181'] +} + + +def consume_job(args): + def perform_job(job): + print("Performing job %s with args %s" % (job.uuid, job.details)) + + with jobboard(args.board_name, conf) as jb: + for job in jb.iterjobs(ensure_fresh=True): + if job.uuid == args.job_uuid: + jb.claim(job, "test-client") + perform_job(job) + jb.consume(job, "test-client") + + +def clear_jobs(args): + with jobboard(args.board_name, conf) as jb: + for job in jb.iterjobs(ensure_fresh=True): + jb.claim(job, "test-client") + jb.consume(job, "test-client") + + +def create_job(args): + store = json.loads(args.details) + book = logbook.LogBook(args.job_name) + if example_utils.SQLALCHEMY_AVAILABLE: + persist_path = os.path.join(tempfile.gettempdir(), "persisting.db") + backend_uri = "sqlite:///%s" % (persist_path) + else: + persist_path = os.path.join(tempfile.gettempdir(), "persisting") + backend_uri = "file:///%s" % (persist_path) + with example_utils.get_backend(backend_uri) as backend: + backend.get_connection().save_logbook(book) + with jobboard(args.board_name, conf, persistence=backend) as jb: + jb.post(args.job_name, book, details=store) + + +def list_jobs(args): + with jobboard(args.board_name, conf) as jb: + for job in jb.iterjobs(ensure_fresh=True): + print("%s - %s" % (job.uuid, job.details)) + + +def delete_job(args): + with jobboard(args.board_name, conf) as jb: + for job in jb.iterjobs(ensure_fresh=True): + if job.uuid == args.job_uuid: + jb.claim(job, "test-client") + jb.consume(job, "test-client") + + +def main(argv): + parser = argparse.ArgumentParser() + subparsers = parser.add_subparsers(title='subcommands', + description='valid subcommands', + 
help='additional help') + + # Consume command + parser_consume = subparsers.add_parser('consume') + parser_consume.add_argument('board_name') + parser_consume.add_argument('job_uuid') + parser_consume.set_defaults(func=consume_job) + + # Clear command + parser_consume = subparsers.add_parser('clear') + parser_consume.add_argument('board_name') + parser_consume.set_defaults(func=clear_jobs) + + # Create command + parser_create = subparsers.add_parser('create') + parser_create.add_argument('board_name') + parser_create.add_argument('job_name') + parser_create.add_argument('details') + parser_create.set_defaults(func=create_job) + + # Delete command + parser_delete = subparsers.add_parser('delete') + parser_delete.add_argument('board_name') + parser_delete.add_argument('job_uuid') + parser_delete.set_defaults(func=delete_job) + + # List command + parser_list = subparsers.add_parser('list') + parser_list.add_argument('board_name') + parser_list.set_defaults(func=list_jobs) + + args = parser.parse_args(argv) + args.func(args) + +if __name__ == '__main__': + main(sys.argv[1:]) From 2a23558dcc8ac97c868267e71e754a53d573ce2a Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Mon, 5 May 2014 12:06:49 -0700 Subject: [PATCH 053/188] Add a cachedproperty descriptor It is pretty common that we create a property method that checks if an instance variable is none, then creates it and then sets the instance property (and then never gets called to create it again, unless property is reset to none). Apply this new property descriptor to a few places that are redoing this same action over (don't repeat yourself principle in action). 
Change-Id: If6c20c5a2d145c51af3f3db56a4746eca8601253 --- taskflow/engines/action_engine/engine.py | 25 +++++++++--------- taskflow/engines/base.py | 8 ++---- taskflow/utils/misc.py | 32 ++++++++++++++++++++++++ 3 files changed, 47 insertions(+), 18 deletions(-) diff --git a/taskflow/engines/action_engine/engine.py b/taskflow/engines/action_engine/engine.py index a8cf14ed..d9b4a5a3 100644 --- a/taskflow/engines/action_engine/engine.py +++ b/taskflow/engines/action_engine/engine.py @@ -61,9 +61,6 @@ class ActionEngine(base.EngineBase): self._compiled = False self._lock = threading.RLock() self._state_lock = threading.RLock() - self._task_executor = None - self._task_action = None - self._retry_action = None self._storage_ensured = False def __str__(self): @@ -166,6 +163,19 @@ class ActionEngine(base.EngineBase): self._root.reset_all() self._change_state(states.PENDING) + @misc.cachedproperty + def _retry_action(self): + return self._retry_action_factory(self.storage, self.task_notifier) + + @misc.cachedproperty + def _task_executor(self): + return self._task_executor_factory() + + @misc.cachedproperty + def _task_action(self): + return self._task_action_factory(self.storage, self._task_executor, + self.task_notifier) + @lock_utils.locked def compile(self): if self._compiled: @@ -175,15 +185,6 @@ class ActionEngine(base.EngineBase): raise exc.Empty("Flow %s is empty." 
% self._flow.name) self._analyzer = self._graph_analyzer_factory(execution_graph, self.storage) - if self._task_executor is None: - self._task_executor = self._task_executor_factory() - if self._task_action is None: - self._task_action = self._task_action_factory(self.storage, - self._task_executor, - self.task_notifier) - if self._retry_action is None: - self._retry_action = self._retry_action_factory(self.storage, - self.task_notifier) self._root = self._graph_action_factory(self._analyzer, self.storage, self._task_action, diff --git a/taskflow/engines/base.py b/taskflow/engines/base.py index 402aaee5..eb8d76ee 100644 --- a/taskflow/engines/base.py +++ b/taskflow/engines/base.py @@ -34,17 +34,13 @@ class EngineBase(object): self._conf = {} else: self._conf = dict(conf) - self._storage = None self.notifier = misc.Notifier() self.task_notifier = misc.Notifier() - @property + @misc.cachedproperty def storage(self): """The storage unit for this flow.""" - if self._storage is None: - self._storage = self._storage_factory(self._flow_detail, - self._backend) - return self._storage + return self._storage_factory(self._flow_detail, self._backend) @abc.abstractproperty def _storage_factory(self): diff --git a/taskflow/utils/misc.py b/taskflow/utils/misc.py index 0a592689..10ba522f 100644 --- a/taskflow/utils/misc.py +++ b/taskflow/utils/misc.py @@ -21,6 +21,7 @@ import copy import datetime import errno import functools +import inspect import keyword import logging import os @@ -174,6 +175,37 @@ def decode_json(raw_data, root_types=(dict,)): return data +class cachedproperty(object): + """Descriptor that can be placed on instance methods to translate + those methods into properties that will be cached in the instance (avoiding + repeated creation checking logic to do the equivalent). 
+ """ + def __init__(self, wrapped): + # If a name is provided (as an argument) then this will be the string + # to place the cached attribute under if not then it will be the + # function itself to be wrapped into a property. + if inspect.isfunction(wrapped): + self._wrapped = wrapped + self._wrapped_attr = "_%s" % (wrapped.__name__) + else: + self._wrapped_attr = wrapped + self._wrapped = None + + def __call__(self, fget): + # If __init__ received a string then this will be the function to be + # wrapped as a property (if __init__ got a function then this will not + # be called). + self._wrapped = fget + return self + + def __get__(self, source, owner): + try: + return getattr(source, self._wrapped_attr) + except AttributeError: + setattr(source, self._wrapped_attr, self._wrapped(source)) + return getattr(source, self._wrapped_attr) + + def wallclock(): # NOTE(harlowja): made into a function so that this can be easily mocked # out if we want to alter time related functionality (for testing From 35529ff37d6663f1eb53ff724345f06b46ba618b Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Wed, 30 Apr 2014 18:41:33 -0700 Subject: [PATCH 054/188] Add a new wait() method that waits for jobs to arrive Using the new iterator object add a new wait method that will wait until jobs have arrived before giving back an iterator that can be used to analyze those jobs. It also supports a timeout which can be used to avoid waiting for a very long time. 
Change-Id: I3d53120948d3d466ebc921a8be0a66b78732f09b --- taskflow/jobs/backends/impl_zookeeper.py | 117 +++++++++++++++-------- taskflow/jobs/jobboard.py | 15 +++ taskflow/tests/unit/jobs/test_zk_job.py | 32 +++++++ taskflow/utils/misc.py | 10 ++ 4 files changed, 135 insertions(+), 39 deletions(-) diff --git a/taskflow/jobs/backends/impl_zookeeper.py b/taskflow/jobs/backends/impl_zookeeper.py index b9331404..6b80f99b 100644 --- a/taskflow/jobs/backends/impl_zookeeper.py +++ b/taskflow/jobs/backends/impl_zookeeper.py @@ -18,6 +18,7 @@ import collections import contextlib import functools import logging +import threading from concurrent import futures from kazoo import exceptions as k_exceptions @@ -228,12 +229,6 @@ class ZookeeperJobBoardIterator(six.Iterator): def __iter__(self): return self - def _fetch_jobs(self): - if self.ensure_fresh: - self._board._force_refresh() - with self._board._job_mutate: - return sorted(six.itervalues(self._board._known_jobs)) - def _next_job(self): if self.only_unclaimed: allowed_states = UNCLAIMED_JOB_STATES @@ -249,14 +244,14 @@ class ZookeeperJobBoardIterator(six.Iterator): LOG.warn("Failed determining the state of job: %s (%s)", maybe_job.uuid, maybe_job.path, exc_info=True) except excp.NotFound: - with self._board._job_mutate: - self._board._remove_job(maybe_job.path) + self._board._remove_job(maybe_job.path) return job def __next__(self): if not self._jobs: if not self._fetched: - self._jobs.extend(self._fetch_jobs()) + jobs = self._board._fetch_jobs(ensure_fresh=self.ensure_fresh) + self._jobs.extend(jobs) self._fetched = True job = self._next_job() if job is None: @@ -289,8 +284,9 @@ class ZookeeperJobBoard(jobboard.NotifyingJobBoard): self._persistence = persistence # Misc. 
internal details self._known_jobs = {} - self._job_mutate = self._client.handler.rlock_object() - self._open_close_lock = self._client.handler.rlock_object() + self._job_lock = threading.RLock() + self._job_cond = threading.Condition(self._job_lock) + self._open_close_lock = threading.RLock() self._client.add_listener(self._state_change_listener) self._bad_paths = frozenset([path]) self._job_watcher = None @@ -311,9 +307,15 @@ class ZookeeperJobBoard(jobboard.NotifyingJobBoard): @property def job_count(self): - with self._job_mutate: + with self._job_lock: return len(self._known_jobs) + def _fetch_jobs(self, ensure_fresh=False): + if ensure_fresh: + self._force_refresh() + with self._job_lock: + return sorted(six.itervalues(self._known_jobs)) + def _force_refresh(self): try: children = self._client.get_children(self.path) @@ -336,32 +338,18 @@ class ZookeeperJobBoard(jobboard.NotifyingJobBoard): def _remove_job(self, path): LOG.debug("Removing job that was at path: %s", path) - job = self._known_jobs.pop(path, None) + with self._job_lock: + job = self._known_jobs.pop(path, None) if job is not None: - self._emit(jobboard.REMOVAL, - details={ - 'job': job, - }) + self._emit(jobboard.REMOVAL, details={'job': job}) def _process_child(self, path, request): """Receives the result of a child data fetch request.""" + job = None try: raw_data, node_stat = request.get() job_data = misc.decode_json(raw_data) created_on = misc.millis_to_datetime(node_stat.ctime) - with self._job_mutate: - if path not in self._known_jobs: - job = ZookeeperJob(job_data['name'], self, - self._client, self._persistence, path, - uuid=job_data['uuid'], - book_data=job_data.get("book"), - details=job_data.get("details", {}), - created_on=created_on) - self._known_jobs[path] = job - self._emit(jobboard.POSTED, - details={ - 'job': job, - }) except (ValueError, TypeError, KeyError): LOG.warn("Incorrectly formatted job data found at path: %s", path, exc_info=True) @@ -377,13 +365,29 @@ class 
ZookeeperJobBoard(jobboard.NotifyingJobBoard): except k_exceptions.KazooException: LOG.warn("Internal error fetching job data from path: %s", path, exc_info=True) + else: + self._job_cond.acquire() + try: + if path not in self._known_jobs: + job = ZookeeperJob(job_data['name'], self, + self._client, self._persistence, path, + uuid=job_data['uuid'], + book_data=job_data.get("book"), + details=job_data.get("details", {}), + created_on=created_on) + self._known_jobs[path] = job + self._job_cond.notify_all() + finally: + self._job_cond.release() + if job is not None: + self._emit(jobboard.POSTED, details={'job': job}) def _on_job_posting(self, children, delayed=True): LOG.debug("Got children %s under path %s", children, self.path) child_paths = [k_paths.join(self.path, c) for c in children] # Remove jobs that we know about but which are no longer children - with self._job_mutate: + with self._job_lock: removals = set() for path, _job in six.iteritems(self._known_jobs): if path not in child_paths: @@ -395,7 +399,7 @@ class ZookeeperJobBoard(jobboard.NotifyingJobBoard): for path in child_paths: if path in self._bad_paths: continue - with self._job_mutate: + with self._job_lock: if path not in self._known_jobs: # Fire off the request to populate this job asynchronously. 
# @@ -443,8 +447,13 @@ class ZookeeperJobBoard(jobboard.NotifyingJobBoard): self._persistence, job_path, book=book, details=details, uuid=job_uuid) - with self._job_mutate: + self._job_cond.acquire() + try: self._known_jobs[job_path] = job + self._job_cond.notify_all() + finally: + self._job_cond.release() + self._emit(jobboard.POSTED, details={'job': job}) return job def claim(self, job, who): @@ -480,7 +489,7 @@ class ZookeeperJobBoard(jobboard.NotifyingJobBoard): if not job_path: raise ValueError("Unable to check if %r is a known path" % (job_path)) - with self._job_mutate: + with self._job_lock: if job_path not in self._known_jobs: fail_msg_tpl += ", unknown job" raise excp.NotFound(fail_msg_tpl % (job_uuid)) @@ -533,8 +542,7 @@ class ZookeeperJobBoard(jobboard.NotifyingJobBoard): with self._client.transaction() as txn: txn.delete(job.lock_path, version=lock_stat.version) txn.delete(job.path, version=data_stat.version) - with self._job_mutate: - self._remove_job(job.path) + self._remove_job(job.path) def abandon(self, job, who): _check_who(who) @@ -557,9 +565,40 @@ class ZookeeperJobBoard(jobboard.NotifyingJobBoard): LOG.debug("Kazoo client has changed to state: %s", state) def _clear(self): - with self._job_mutate: - self._known_jobs = {} - self._job_watcher = None + with self._job_lock: + self._known_jobs.clear() + self._job_watcher = None + + def wait(self, timeout=None): + # Wait until timeout expires (or forever) for jobs to appear. 
+ watch = None + if timeout is not None: + watch = misc.StopWatch(duration=float(timeout)) + watch.start() + self._job_cond.acquire() + try: + while True: + if not self._known_jobs: + if watch and watch.expired(): + raise excp.NotFound("Expired waiting for jobs to" + " arrive; waited %s seconds" + % watch.elapsed()) + # This is done since the given timeout can not be provided + # to the condition variable, since we can not ensure that + # when we acquire the condition that there will actually + # be jobs (especially if we are spuriously awaken), so we + # must recalculate the amount of time we really have left. + timeout = None + if watch is not None: + timeout = watch.leftover() + self._job_cond.wait(timeout) + else: + it = ZookeeperJobBoardIterator(self) + it._jobs.extend(self._fetch_jobs()) + it._fetched = True + return it + finally: + self._job_cond.release() @property def connected(self): diff --git a/taskflow/jobs/jobboard.py b/taskflow/jobs/jobboard.py index 0aa533ea..662a3232 100644 --- a/taskflow/jobs/jobboard.py +++ b/taskflow/jobs/jobboard.py @@ -58,6 +58,21 @@ class JobBoard(object): support this argument. """ + @abc.abstractmethod + def wait(self, timeout=None): + """Waits a given amount of time for job/s to be posted, when jobs are + found then an iterator will be returned that contains the jobs at + the given point in time. + + NOTE(harlowja): since a jobboard can be mutated on by multiple external + entities at the *same* time the iterator that can be returned *may* + still be empty due to other entities removing those jobs after the + iterator has been created (be aware of this when using it). + + :param timeout: float that indicates how long to wait for a job to + appear (if None then waits forever). 
+ """ + @abc.abstractproperty def job_count(self): """Returns how many jobs are on this jobboard (this count may change as diff --git a/taskflow/tests/unit/jobs/test_zk_job.py b/taskflow/tests/unit/jobs/test_zk_job.py index 9b7c54c4..9154994d 100644 --- a/taskflow/tests/unit/jobs/test_zk_job.py +++ b/taskflow/tests/unit/jobs/test_zk_job.py @@ -16,6 +16,8 @@ import contextlib import mock +import threading +import time import six @@ -102,6 +104,36 @@ class TestZookeeperJobs(test.TestCase): jobs = list(self.board.iterjobs(ensure_fresh=True)) self.assertEqual(1, len(jobs)) + def test_wait_timeout(self): + with connect_close(self.board): + self.assertRaises(excp.NotFound, self.board.wait, timeout=0.1) + + def test_wait_arrival(self): + ev = threading.Event() + jobs = [] + + def poster(wait_post=0.2): + ev.wait() # wait until the waiter is active + time.sleep(wait_post) + self.board.post('test', p_utils.temporary_log_book()) + + def waiter(): + ev.set() + it = self.board.wait() + jobs.extend(it) + + with connect_close(self.board): + t1 = threading.Thread(target=poster) + t1.daemon = True + t1.start() + t2 = threading.Thread(target=waiter) + t2.daemon = True + t2.start() + for t in (t1, t2): + t.join() + + self.assertEqual(1, len(jobs)) + def test_posting_received_raw(self): book = p_utils.temporary_log_book() diff --git a/taskflow/utils/misc.py b/taskflow/utils/misc.py index 0e3a1c3d..6c20fe12 100644 --- a/taskflow/utils/misc.py +++ b/taskflow/utils/misc.py @@ -373,6 +373,16 @@ class StopWatch(object): # NOTE(harlowja): don't silence the exception. 
return False + def leftover(self): + if self._duration is None: + raise RuntimeError("Can not get the leftover time of a watch that" + " has no duration") + if self._state != self._STARTED: + raise RuntimeError("Can not get the leftover time of a stopwatch" + " that has not been started") + end_time = self._started_at + self._duration + return max(0.0, end_time - wallclock()) + def expired(self): if self._duration is None: return False From 0fafe1b7ecaed638dbfd5de0bfe70995310d2d7b Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Fri, 2 May 2014 15:41:33 -0700 Subject: [PATCH 055/188] Allow the watcher to re-register if the session is lost In cases where the zookeeper cluster loses a member and the client was connected to that member it will reconnect to another member automatically. When this happens we should also make sure that the watcher also correctly is kept active (by switching from allow_session_lost=False to allow_session_lost=True). If we don't do this we will not get notified of any new jobs being added, other jobs being removed by other jobboard entities. 
Fixes bug 1315564 Change-Id: Ia87f606f4d0beb7ebe532d1b3a31e06a58c3ac7e --- taskflow/jobs/backends/impl_zookeeper.py | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/taskflow/jobs/backends/impl_zookeeper.py b/taskflow/jobs/backends/impl_zookeeper.py index eb555a36..4055fe03 100644 --- a/taskflow/jobs/backends/impl_zookeeper.py +++ b/taskflow/jobs/backends/impl_zookeeper.py @@ -560,11 +560,12 @@ class ZookeeperJobBoard(jobboard.NotifyingJobBoard): if self._worker is None and self._emit_notifications: self._worker = futures.ThreadPoolExecutor(max_workers=1) self._client.ensure_path(self.path) - self._job_watcher = watchers.ChildrenWatch( - self._client, - self.path, - func=self._on_job_posting, - allow_session_lost=False) + if self._job_watcher is None: + self._job_watcher = watchers.ChildrenWatch( + self._client, + self.path, + func=self._on_job_posting, + allow_session_lost=True) except excp.IncompatibleVersion: with excutils.save_and_reraise_exception(): try_clean() From 095650653d53d4a2f431bea20fabc440263d38e7 Mon Sep 17 00:00:00 2001 From: "Ivan A. Melnikov" Date: Tue, 6 May 2014 15:26:46 +0400 Subject: [PATCH 056/188] Put provides and requires code to basic Flow Code that calculates provides and requires for flow is almost identical for all patterns, so this change makes it completely identical and puts it to the base class. Other patterns are still allowed to override these properties for sake of customization or optimization. 
Change-Id: I6e875e863047b5287ec727fc9a491f252f144ecf --- taskflow/flow.py | 49 ++++++++++++++++++----------- taskflow/patterns/graph_flow.py | 22 ++----------- taskflow/patterns/linear_flow.py | 19 ----------- taskflow/patterns/unordered_flow.py | 33 ++++++------------- 4 files changed, 43 insertions(+), 80 deletions(-) diff --git a/taskflow/flow.py b/taskflow/flow.py index 98b3c49f..26d2dcfa 100644 --- a/taskflow/flow.py +++ b/taskflow/flow.py @@ -43,16 +43,10 @@ class Flow(object): def __init__(self, name, retry=None): self._name = six.text_type(name) self._retry = retry - # If retry doesn't have a name, + # NOTE(akarpinska): if retry doesn't have a name, # the name of its owner will be assigned - if self._retry: - self._retry_provides = self.retry.provides - self._retry_requires = self.retry.requires - if not self._retry.name: + if self._retry and self._retry.name is None: self._retry.set_name(self.name + "_retry") - else: - self._retry_provides = set() - self._retry_requires = set() @property def name(self): @@ -66,6 +60,10 @@ class Flow(object): """ return self._retry + @abc.abstractmethod + def add(self, *items): + """Adds a given item/items to this flow.""" + @abc.abstractmethod def __len__(self): """Returns how many items are in this flow.""" @@ -90,14 +88,29 @@ class Flow(object): lines.append("%s" % (len(self))) return "; ".join(lines) - @abc.abstractmethod - def add(self, *items): - """Adds a given item/items to this flow.""" - - @abc.abstractproperty - def requires(self): - """Browse argument requirement names this flow requires to run.""" - - @abc.abstractproperty + @property def provides(self): - """Browse argument names provided by the flow.""" + """Set of result names provided by the flow. + + Includes names of all the outputs provided by atoms of this flow. 
+ """ + provides = set() + if self._retry: + provides.update(self._retry.provides) + for subflow in self: + provides.update(subflow.provides) + return provides + + @property + def requires(self): + """Set of argument names required by the flow. + + Includes names of all the inputs required by atoms of this + flow, but not provided within the flow itself. + """ + requires = set() + if self._retry: + requires.update(self._retry.requires) + for subflow in self: + requires.update(subflow.requires) + return requires - self.provides diff --git a/taskflow/patterns/graph_flow.py b/taskflow/patterns/graph_flow.py index 68691996..0ed74c75 100644 --- a/taskflow/patterns/graph_flow.py +++ b/taskflow/patterns/graph_flow.py @@ -104,8 +104,8 @@ class Flow(flow.Flow): if self.retry: update_requirements(self.retry) - provided.update(dict((k, - self.retry) for k in self._retry_provides)) + provided.update(dict((k, self.retry) + for k in self.retry.provides)) # NOTE(harlowja): Add items and edges to a temporary copy of the # underlying graph and only if that is successful added to do we then @@ -123,7 +123,7 @@ class Flow(flow.Flow): % dict(item=item.name, flow=provided[value].name, value=value)) - if value in self._retry_requires: + if self.retry and value in self.retry.requires: raise exc.DependencyFailure( "Flows retry controller %(retry)s requires %(value)s " "but item %(item)s being added to the flow produces " @@ -167,22 +167,6 @@ class Flow(flow.Flow): for (u, v, e_data) in self._get_subgraph().edges_iter(data=True): yield (u, v, e_data) - @property - def provides(self): - provides = set() - provides.update(self._retry_provides) - for subflow in self: - provides.update(subflow.provides) - return provides - - @property - def requires(self): - requires = set() - requires.update(self._retry_requires) - for subflow in self: - requires.update(subflow.requires) - return requires - self.provides - class TargetedFlow(Flow): """Graph flow with a target. 
diff --git a/taskflow/patterns/linear_flow.py b/taskflow/patterns/linear_flow.py index d7cbb549..48b4d3cb 100644 --- a/taskflow/patterns/linear_flow.py +++ b/taskflow/patterns/linear_flow.py @@ -78,22 +78,3 @@ class Flow(flow.Flow): for src, dst in zip(self._children[:-1], self._children[1:]): yield (src, dst, _LINK_METADATA.copy()) - - @property - def provides(self): - provides = set() - provides.update(self._retry_provides) - for subflow in self._children: - provides.update(subflow.provides) - return provides - - @property - def requires(self): - requires = set() - provides = set() - requires.update(self._retry_requires) - provides.update(self._retry_provides) - for subflow in self._children: - requires.update(subflow.requires - provides) - provides.update(subflow.provides) - return requires diff --git a/taskflow/patterns/unordered_flow.py b/taskflow/patterns/unordered_flow.py index 2890e80e..a8377960 100644 --- a/taskflow/patterns/unordered_flow.py +++ b/taskflow/patterns/unordered_flow.py @@ -41,12 +41,9 @@ class Flow(flow.Flow): if not items: return self - # NOTE(harlowja): check that items to be added are actually - # independent. 
- provides = set() - for subflow in self: - provides.update(subflow.provides) - + # check that items don't provide anything that other + # part of flow provides or requires + provides = self.provides old_requires = self.requires for item in items: item_provides = item.provides @@ -57,7 +54,7 @@ class Flow(flow.Flow): "by other item(s) of unordered flow %(flow)s" % dict(item=item.name, flow=self.name, oo=sorted(bad_provs))) - same_provides = (provides | self._retry_provides) & item.provides + same_provides = provides & item.provides if same_provides: raise exceptions.DependencyFailure( "%(item)s provides %(value)s but is already being" @@ -67,6 +64,11 @@ class Flow(flow.Flow): value=sorted(same_provides))) provides |= item.provides + # check that items don't require anything other children provides + if self.retry: + # NOTE(imelnikov): it is allowed to depend on value provided + # by retry controller of the flow + provides -= self.retry.provides for item in items: bad_reqs = provides & item.requires if bad_reqs: @@ -79,23 +81,6 @@ class Flow(flow.Flow): self._children.update(items) return self - @property - def provides(self): - provides = set() - provides.update(self._retry_provides) - for subflow in self: - provides.update(subflow.provides) - return provides - - @property - def requires(self): - requires = set() - for subflow in self: - requires.update(subflow.requires) - requires.update(self._retry_requires) - requires -= self._retry_provides - return requires - def __len__(self): return len(self._children) From 53dcbd4d97ff1b83c4db030571bfa88750dcb06a Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Wed, 9 Apr 2014 15:28:43 -0700 Subject: [PATCH 057/188] Implement run iterations Instead of blocking the caller when they call run() allow there to be a new api run_iter() that will yield back the engine state transitions while running. This allows for a engine user to do alternate work while an engine is running (and come back to yield on there own time). 
Implements blueprint iterable-execution Change-Id: Ibb48c6c5618c97c59a6ab170dab5233ed47e5554 --- taskflow/engines/action_engine/engine.py | 60 ++++++++++++++----- .../engines/action_engine/graph_action.py | 50 ++++++++++++---- taskflow/states.py | 5 ++ taskflow/tests/unit/test_action_engine.py | 44 ++++++++++++++ taskflow/tests/utils.py | 2 +- taskflow/utils/lock_utils.py | 11 ++++ 6 files changed, 145 insertions(+), 27 deletions(-) diff --git a/taskflow/engines/action_engine/engine.py b/taskflow/engines/action_engine/engine.py index a8cf14ed..ef1e4d75 100644 --- a/taskflow/engines/action_engine/engine.py +++ b/taskflow/engines/action_engine/engine.py @@ -86,29 +86,61 @@ class ActionEngine(base.EngineBase): g = self._analyzer.execution_graph return g - @lock_utils.locked def run(self): - """Runs the flow in the engine to completion.""" + with lock_utils.try_lock(self._lock) as was_locked: + if not was_locked: + raise exc.ExecutionFailure("Engine currently locked, please" + " try again later") + for _state in self.run_iter(): + pass + + def run_iter(self, timeout=None): + """Runs the engine using iteration (or die trying). + + :param timeout: timeout to wait for any tasks to complete (this timeout + will be used during the waiting period that occurs after the + waiting state is yielded when unfinished tasks are being waited + for). + + Instead of running to completion in a blocking manner, this will + return a generator which will yield back the various states that the + engine is going through (and can be used to run multiple engines at + once using a generator per engine). the iterator returned also + responds to the send() method from pep-0342 and will attempt to suspend + itself if a truthy value is sent in (the suspend may be delayed until + all active tasks have finished). 
+ + NOTE(harlowja): using the run_iter method will **not** retain the + engine lock while executing so the user should ensure that there is + only one entity using a returned engine iterator (one per engine) at a + given time. + """ self.compile() self.prepare() self._task_executor.start() + state = None try: - self._run() - finally: - self._task_executor.stop() - - def _run(self): - self._change_state(states.RUNNING) - try: - state = self._root.execute() + self._change_state(states.RUNNING) + for state in self._root.execute_iter(timeout=timeout): + try: + try_suspend = yield state + except GeneratorExit: + break + else: + if try_suspend: + self.suspend() except Exception: with excutils.save_and_reraise_exception(): self._change_state(states.FAILURE) else: - self._change_state(state) - if state != states.SUSPENDED and state != states.SUCCESS: - failures = self.storage.get_failures() - misc.Failure.reraise_if_any(failures.values()) + ignorable_states = getattr(self._root, 'ignorable_states', []) + if state and state not in ignorable_states: + self._change_state(state) + if state != states.SUSPENDED and state != states.SUCCESS: + failures = self.storage.get_failures() + misc.Failure.reraise_if_any(failures.values()) + finally: + self._task_executor.stop() def _change_state(self, state): with self._state_lock: diff --git a/taskflow/engines/action_engine/graph_action.py b/taskflow/engines/action_engine/graph_action.py index 34a3943c..691d3b55 100644 --- a/taskflow/engines/action_engine/graph_action.py +++ b/taskflow/engines/action_engine/graph_action.py @@ -33,6 +33,11 @@ class FutureGraphAction(object): in parallel, this enables parallel flow run and reversion. """ + # Informational states this action yields while running, not useful to + # have the engine record but useful to provide to end-users when doing + # execution iterations. 
+ ignorable_states = (st.SCHEDULING, st.WAITING, st.RESUMING, st.ANALYZING) + def __init__(self, analyzer, storage, task_action, retry_action): self._analyzer = analyzer self._storage = storage @@ -64,23 +69,41 @@ class FutureGraphAction(object): return (futures, [misc.Failure()]) return (futures, []) - def execute(self): + def execute_iter(self, timeout=None): + if timeout is None: + timeout = _WAITING_TIMEOUT + # Prepare flow to be resumed + yield st.RESUMING next_nodes = self._prepare_flow_for_resume() next_nodes.update(self._analyzer.get_next_nodes()) - not_done, failures = self._schedule(next_nodes) + # Schedule nodes to be worked on + yield st.SCHEDULING + if self.is_running(): + not_done, failures = self._schedule(next_nodes) + else: + not_done, failures = ([], []) + + # Run! + # + # At this point we need to ensure we wait for all active nodes to + # finish running (even if we are asked to suspend) since we can not + # preempt those tasks (maybe in the future we will be better able to do + # this). while not_done: - # NOTE(imelnikov): if timeout occurs before any of futures - # completes, done list will be empty and we'll just go - # for next iteration. - done, not_done = self._task_action.wait_for_any( - not_done, _WAITING_TIMEOUT) + yield st.WAITING + + # TODO(harlowja): maybe we should start doing 'yield from' this + # call sometime in the future, or equivalent that will work in + # py2 and py3. + done, not_done = self._task_action.wait_for_any(not_done, timeout) # Analyze the results and schedule more nodes (unless we had # failures). If failures occurred just continue processing what # is running (so that we don't leave it abandoned) but do not # schedule anything new. 
+ yield st.ANALYZING next_nodes = set() for future in done: try: @@ -102,17 +125,20 @@ class FutureGraphAction(object): else: next_nodes.update(more_nodes) if next_nodes and not failures and self.is_running(): - more_not_done, failures = self._schedule(next_nodes) - not_done.extend(more_not_done) + yield st.SCHEDULING + # Recheck incase someone suspended it. + if self.is_running(): + more_not_done, failures = self._schedule(next_nodes) + not_done.extend(more_not_done) if failures: misc.Failure.reraise_if_any(failures) if self._analyzer.get_next_nodes(): - return st.SUSPENDED + yield st.SUSPENDED elif self._analyzer.is_success(): - return st.SUCCESS + yield st.SUCCESS else: - return st.REVERTED + yield st.REVERTED def _schedule_task(self, task): """Schedules the given task for revert or execute depending diff --git a/taskflow/states.py b/taskflow/states.py index 883bf622..963e4f64 100644 --- a/taskflow/states.py +++ b/taskflow/states.py @@ -48,6 +48,11 @@ REVERT = 'REVERT' RETRY = 'RETRY' INTENTIONS = [EXECUTE, IGNORE, REVERT, RETRY] +# Additional engine states +SCHEDULING = 'SCHEDULING' +WAITING = 'WAITING' +ANALYZING = 'ANALYZING' + ## Flow state transitions # See: http://docs.openstack.org/developer/taskflow/states.html diff --git a/taskflow/tests/unit/test_action_engine.py b/taskflow/tests/unit/test_action_engine.py index b6c6c894..fa9f3de5 100644 --- a/taskflow/tests/unit/test_action_engine.py +++ b/taskflow/tests/unit/test_action_engine.py @@ -140,6 +140,50 @@ class EngineLinearFlowTest(utils.EngineTestBase): self.assertEqual(self.values, ['task1', 'task2']) self.assertEqual(len(flow), 2) + def test_sequential_flow_two_tasks_iter(self): + flow = lf.Flow('flow-2').add( + utils.SaveOrderTask(name='task1'), + utils.SaveOrderTask(name='task2') + ) + e = self._make_engine(flow) + gathered_states = list(e.run_iter()) + self.assertTrue(len(gathered_states) > 0) + self.assertEqual(self.values, ['task1', 'task2']) + self.assertEqual(len(flow), 2) + + def 
test_sequential_flow_iter_suspend_resume(self): + flow = lf.Flow('flow-2').add( + utils.SaveOrderTask(name='task1'), + utils.SaveOrderTask(name='task2') + ) + _lb, fd = p_utils.temporary_flow_detail(self.backend) + e = self._make_engine(flow, flow_detail=fd) + it = e.run_iter() + gathered_states = [] + suspend_it = None + while True: + try: + s = it.send(suspend_it) + gathered_states.append(s) + if s == states.WAITING: + # Stop it before task2 runs/starts. + suspend_it = True + except StopIteration: + break + self.assertTrue(len(gathered_states) > 0) + self.assertEqual(self.values, ['task1']) + self.assertEqual(states.SUSPENDED, e.storage.get_flow_state()) + + # Attempt to resume it and see what runs now... + # + # NOTE(harlowja): Clear all the values, but don't reset the reference. + while len(self.values): + self.values.pop() + gathered_states = list(e.run_iter()) + self.assertTrue(len(gathered_states) > 0) + self.assertEqual(self.values, ['task2']) + self.assertEqual(states.SUCCESS, e.storage.get_flow_state()) + def test_revert_removes_data(self): flow = lf.Flow('revert-removes').add( utils.TaskOneReturn(provides='one'), diff --git a/taskflow/tests/utils.py b/taskflow/tests/utils.py index 20fc3758..d8793215 100644 --- a/taskflow/tests/utils.py +++ b/taskflow/tests/utils.py @@ -258,7 +258,7 @@ class EngineTestBase(object): conn.clear_all() super(EngineTestBase, self).tearDown() - def _make_engine(self, flow, flow_detail=None): + def _make_engine(self, flow, **kwargs): raise NotImplementedError() diff --git a/taskflow/utils/lock_utils.py b/taskflow/utils/lock_utils.py index 90cd2cce..7f6a91bf 100644 --- a/taskflow/utils/lock_utils.py +++ b/taskflow/utils/lock_utils.py @@ -36,6 +36,17 @@ from taskflow.utils import threading_utils as tu LOG = logging.getLogger(__name__) +@contextlib.contextmanager +def try_lock(lock): + """Attempts to acquire a lock, and autoreleases if acquisition occurred.""" + was_locked = lock.acquire(blocking=False) + try: + yield was_locked + 
finally: + if was_locked: + lock.release() + + def locked(*args, **kwargs): """A decorator that looks for a given attribute (typically a lock or a list of locks) and before executing the decorated function uses the given lock From 6c7e7576dc8e24d73c5bb3cf63e0541ad0aa213b Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Wed, 7 May 2014 17:35:28 -0700 Subject: [PATCH 058/188] Add an example which uses the run iteration functionality Create an example which can run many engines at once using many engines, each with its own iterator that can be used in a single loop to cause that engine to progress to its next state. Change-Id: I8c6ca19a752f4ced77fd86727f17ecad8c8e72c8 --- taskflow/examples/run_by_iter.out.txt | 106 ++++++++++++++++++++++++++ taskflow/examples/run_by_iter.py | 94 +++++++++++++++++++++++ 2 files changed, 200 insertions(+) create mode 100644 taskflow/examples/run_by_iter.out.txt create mode 100644 taskflow/examples/run_by_iter.py diff --git a/taskflow/examples/run_by_iter.out.txt b/taskflow/examples/run_by_iter.out.txt new file mode 100644 index 00000000..6e12eb40 --- /dev/null +++ b/taskflow/examples/run_by_iter.out.txt @@ -0,0 +1,106 @@ +RESUMING +SCHEDULING +A +WAITING +ANALYZING +SCHEDULING +B +WAITING +ANALYZING +SCHEDULING +C +WAITING +ANALYZING +SCHEDULING +D +WAITING +ANALYZING +SCHEDULING +E +WAITING +ANALYZING +SCHEDULING +F +WAITING +ANALYZING +SCHEDULING +G +WAITING +ANALYZING +SCHEDULING +H +WAITING +ANALYZING +SCHEDULING +I +WAITING +ANALYZING +SCHEDULING +J +WAITING +ANALYZING +SCHEDULING +K +WAITING +ANALYZING +SCHEDULING +L +WAITING +ANALYZING +SCHEDULING +M +WAITING +ANALYZING +SCHEDULING +N +WAITING +ANALYZING +SCHEDULING +O +WAITING +ANALYZING +SCHEDULING +P +WAITING +ANALYZING +SCHEDULING +Q +WAITING +ANALYZING +SCHEDULING +R +WAITING +ANALYZING +SCHEDULING +S +WAITING +ANALYZING +SCHEDULING +T +WAITING +ANALYZING +SCHEDULING +U +WAITING +ANALYZING +SCHEDULING +V +WAITING +ANALYZING +SCHEDULING +W +WAITING +ANALYZING +SCHEDULING +X 
+WAITING +ANALYZING +SCHEDULING +Y +WAITING +ANALYZING +SCHEDULING +Z +WAITING +ANALYZING +SUCCESS diff --git a/taskflow/examples/run_by_iter.py b/taskflow/examples/run_by_iter.py new file mode 100644 index 00000000..0a7761b7 --- /dev/null +++ b/taskflow/examples/run_by_iter.py @@ -0,0 +1,94 @@ +# -*- coding: utf-8 -*- + +# Copyright (C) 2014 Yahoo! Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import logging +import os +import sys + +import six + +logging.basicConfig(level=logging.ERROR) + +self_dir = os.path.abspath(os.path.dirname(__file__)) +top_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), + os.pardir, + os.pardir)) +sys.path.insert(0, top_dir) +sys.path.insert(0, self_dir) + + +from taskflow.engines.action_engine import engine +from taskflow.patterns import linear_flow as lf +from taskflow.persistence.backends import impl_memory +from taskflow import task +from taskflow.utils import persistence_utils + + +# INTRO: This examples shows how to run a set of engines at the same time, each +# running in different engines using a single thread of control to iterate over +# each engine (which causes that engine to advanced to its next state during +# each iteration). 
+ + +class EchoTask(task.Task): + def execute(self, value): + print(value) + return chr(ord(value) + 1) + + +def make_alphabet_flow(i): + f = lf.Flow("alphabet_%s" % (i)) + start_value = 'A' + end_value = 'Z' + curr_value = start_value + while ord(curr_value) <= ord(end_value): + next_value = chr(ord(curr_value) + 1) + if curr_value != end_value: + f.add(EchoTask(name="echoer_%s" % curr_value, + rebind={'value': curr_value}, + provides=next_value)) + else: + f.add(EchoTask(name="echoer_%s" % curr_value, + rebind={'value': curr_value})) + curr_value = next_value + return f + + +# Adjust this number to change how many engines/flows run at once. +flow_count = 1 +flows = [] +for i in range(0, flow_count): + f = make_alphabet_flow(i + 1) + flows.append(make_alphabet_flow(i + 1)) +be = impl_memory.MemoryBackend({}) +book = persistence_utils.temporary_log_book(be) +engines = [] +for f in flows: + fd = persistence_utils.create_flow_detail(f, book, be) + e = engine.SingleThreadedActionEngine(f, fd, be, {}) + e.compile() + e.storage.inject({'A': 'A'}) + e.prepare() + engines.append(e) +engine_iters = [] +for e in engines: + engine_iters.append(e.run_iter()) +while engine_iters: + for it in list(engine_iters): + try: + print(six.next(it)) + except StopIteration: + engine_iters.remove(it) From f986a82731b4402f6130365a13db711e4b779ef9 Mon Sep 17 00:00:00 2001 From: "Ivan A. Melnikov" Date: Wed, 7 May 2014 15:21:44 +0400 Subject: [PATCH 059/188] Check documentation for simple style requirements What is checked: - lines should not be longer than 79 characters, also there are few exceptions; - no tabulation for indentation; - no trailing whitespace. 
Change-Id: Id5b5dff380460d1fb4ffeeb23f00864e7fc158ed --- doc/source/arguments_and_results.rst | 55 +++--- doc/source/engines.rst | 246 ++++++++++++++------------- doc/source/index.rst | 3 +- doc/source/inputs_and_outputs.rst | 35 ++-- doc/source/jobs.rst | 80 ++++----- doc/source/notifications.rst | 4 +- doc/source/persistence.rst | 118 +++++++------ doc/source/resumption.rst | 151 ++++++++-------- doc/source/states.rst | 95 ++++++++--- doc/source/workers.rst | 29 ++-- tools/check_doc.py | 114 +++++++++++++ tox-tmpl.ini | 1 + tox.ini | 1 + 13 files changed, 570 insertions(+), 362 deletions(-) create mode 100644 tools/check_doc.py diff --git a/doc/source/arguments_and_results.rst b/doc/source/arguments_and_results.rst index 9128c95d..a74d8225 100644 --- a/doc/source/arguments_and_results.rst +++ b/doc/source/arguments_and_results.rst @@ -11,10 +11,10 @@ Atom Arguments and Results In taskflow, all flow and task state goes to (potentially persistent) storage. That includes all the information that :doc:`atoms ` (e.g. tasks) in the flow need when they are executed, and all the information task produces (via -serializable task results). A developer who implements tasks or flows can specify -what arguments a task accepts and what result it returns in several ways. This -document will help you understand what those ways are and how to use those ways -to accomplish your desired usage pattern. +serializable task results). A developer who implements tasks or flows can +specify what arguments a task accepts and what result it returns in several +ways. This document will help you understand what those ways are and how to use +those ways to accomplish your desired usage pattern. .. glossary:: @@ -191,11 +191,11 @@ Results Specification ===================== In python, function results are not named, so we can not infer what a task -returns. 
This is important since the complete task result (what the |task.execute| -method returns) is saved in (potentially persistent) storage, and it is -typically (but not always) desirable to make those results accessible to other -tasks. To accomplish this the task specifies names of those values via its -``provides`` task constructor parameter or other method (see below). +returns. This is important since the complete task result (what the +|task.execute| method returns) is saved in (potentially persistent) storage, +and it is typically (but not always) desirable to make those results accessible +to other tasks. To accomplish this the task specifies names of those values via +its ``provides`` task constructor parameter or other method (see below). Returning One Value ------------------- @@ -267,7 +267,8 @@ Another option is to return several values as a dictionary (aka a ``dict``). 'pieces': 'PIECEs' } -TaskFlow expects that a dict will be returned if ``provides`` argument is a ``set``: +TaskFlow expects that a dict will be returned if ``provides`` argument is a +``set``: :: @@ -314,15 +315,15 @@ Of course, the flow author can override this to change names if needed: BitsAndPiecesTask(provides=('b', 'p')) -or to change structure -- e.g. this instance will make whole tuple accessible to -other tasks by name 'bnp': +or to change structure -- e.g. this instance will make whole tuple accessible +to other tasks by name 'bnp': :: BitsAndPiecesTask(provides='bnp') -or the flow author may want to return default behavior and hide the results of the -task from other tasks in the flow (e.g. to avoid naming conflicts): +or the flow author may want to return default behavior and hide the results of +the task from other tasks in the flow (e.g. 
to avoid naming conflicts): :: @@ -339,7 +340,8 @@ For ``result`` value, two cases are possible: * if task is being reverted because it failed (an exception was raised from its |task.execute| method), ``result`` value is instance of - :py:class:`taskflow.utils.misc.Failure` object that holds exception information; + :py:class:`taskflow.utils.misc.Failure` object that holds exception + information; * if task is being reverted because some other task failed, and this task finished successfully, ``result`` value is task result fetched from storage: @@ -360,7 +362,8 @@ To determine if task failed you can check whether ``result`` is instance of def revert(self, result, spam, eggs): if isinstance(result, misc.Failure): - print("This task failed, exception: %s" % result.exception_str) + print("This task failed, exception: %s" + % result.exception_str) else: print("do_something returned %r" % result) @@ -372,9 +375,10 @@ representation of result. Retry Arguments =============== -A Retry controller works with arguments in the same way as a Task. But it has an additional parameter 'history' that is -a list of tuples. Each tuple contains a result of the previous Retry run and a table where a key is a failed task and a value -is a :py:class:`taskflow.utils.misc.Failure`. +A Retry controller works with arguments in the same way as a Task. But it has +an additional parameter 'history' that is a list of tuples. Each tuple contains +a result of the previous Retry run and a table where a key is a failed task and +a value is a :py:class:`taskflow.utils.misc.Failure`. Consider the following Retry:: @@ -393,15 +397,18 @@ Consider the following Retry:: def revert(self, history, *args, **kwargs): print history -Imagine the following Retry had returned a value '5' and then some task 'A' failed with some exception. 
-In this case ``on_failure`` method will receive the following history:: +Imagine the following Retry had returned a value '5' and then some task 'A' +failed with some exception. In this case ``on_failure`` method will receive +the following history:: [('5', {'A': misc.Failure()})] -Then the |retry.execute| method will be called again and it'll receive the same history. +Then the |retry.execute| method will be called again and it'll receive the same +history. -If the |retry.execute| method raises an exception, the |retry.revert| method of Retry will be called and :py:class:`taskflow.utils.misc.Failure` object will be present -in the history instead of Retry result:: +If the |retry.execute| method raises an exception, the |retry.revert| method of +Retry will be called and :py:class:`taskflow.utils.misc.Failure` object will be +present in the history instead of Retry result:: [('5', {'A': misc.Failure()}), (misc.Failure(), {})] diff --git a/doc/source/engines.rst b/doc/source/engines.rst index 631d3d53..2deb8fa1 100644 --- a/doc/source/engines.rst +++ b/doc/source/engines.rst @@ -7,8 +7,8 @@ Overview Engines are what **really** runs your atoms. -An *engine* takes a flow structure (described by :doc:`patterns `) and -uses it to decide which :doc:`atom ` to run and when. +An *engine* takes a flow structure (described by :doc:`patterns `) +and uses it to decide which :doc:`atom ` to run and when. TaskFlow provides different implementations of engines. Some may be easier to use (ie, require no additional infrastructure setup) and understand; others @@ -18,10 +18,10 @@ select an engine that suites their setup best without modifying the code of said service. Engines usually have different capabilities and configuration, but all of them -**must** implement the same interface and preserve the semantics of patterns (e.g. -parts of :py:class:`linear flow ` are run -one after another, in order, even if engine is *capable* of running tasks in -parallel). 
+**must** implement the same interface and preserve the semantics of patterns +(e.g. parts of :py:class:`linear flow ` +are run one after another, in order, even if engine is *capable* of running +tasks in parallel). Why they exist -------------- @@ -31,66 +31,71 @@ likely a new concept for many programmers so let's describe how it operates in more depth and some of the reasoning behind why it exists. This will hopefully make it more clear on there value add to the TaskFlow library user. -First though let us discuss something most are familiar already with; the difference -between `declarative`_ and `imperative`_ programming models. The imperative model -involves establishing statements that accomplish a programs action (likely using -conditionals and such other language features to do this). This kind of program embeds -the *how* to accomplish a goal while also defining *what* the goal actually is (and the state -of this is maintained in memory or on the stack while these statements execute). In contrast -there is the the declarative model which instead of combining the *how* to accomplish a goal -along side the *what* is to be accomplished splits these two into only declaring what -the intended goal is and not the *how*. In TaskFlow terminology the *what* is the structure -of your flows and the tasks and other atoms you have inside those flows, but the *how* -is not defined (the line becomes blurred since tasks themselves contain imperative -code, but for now consider a task as more of a *pure* function that executes, reverts and may -require inputs and provide outputs). This is where engines get involved; they do -the execution of the *what* defined via :doc:`atoms `, tasks, flows and -the relationships defined there-in and execute these in a well-defined -manner (and the engine is responsible for *most* of the state manipulation -instead). 
+First though let us discuss something most are familiar already with; the +difference between `declarative`_ and `imperative`_ programming models. The +imperative model involves establishing statements that accomplish a programs +action (likely using conditionals and such other language features to do this). +This kind of program embeds the *how* to accomplish a goal while also defining +*what* the goal actually is (and the state of this is maintained in memory or +on the stack while these statements execute). In contrast there is the the +declarative model which instead of combining the *how* to accomplish a goal +along side the *what* is to be accomplished splits these two into only +declaring what the intended goal is and not the *how*. In TaskFlow terminology +the *what* is the structure of your flows and the tasks and other atoms you +have inside those flows, but the *how* is not defined (the line becomes blurred +since tasks themselves contain imperative code, but for now consider a task as +more of a *pure* function that executes, reverts and may require inputs and +provide outputs). This is where engines get involved; they do the execution of +the *what* defined via :doc:`atoms `, tasks, flows and the relationships +defined there-in and execute these in a well-defined manner (and the engine is +responsible for *most* of the state manipulation instead). This mix of imperative and declarative (with a stronger emphasis on the declarative model) allows for the following functionality to be possible: -* Enhancing reliability: Decoupling of state alterations from what should be accomplished - allows for a *natural* way of resuming by allowing the engine to track the current state - and know at which point a flow is in and how to get back into that state when - resumption occurs. 
-* Enhancing scalability: When a engine is responsible for executing your desired work - it becomes possible to alter the *how* in the future by creating new types of execution - backends (for example the worker model which does not execute locally). Without the decoupling - of the *what* and the *how* it is not possible to provide such a feature (since by the very - nature of that coupling this kind of functionality is inherently hard to provide). -* Enhancing consistency: Since the engine is responsible for executing atoms and the - associated workflow, it can be one (if not the only) of the primary entities - that is working to keep the execution model in a consistent state. Coupled with atoms - which *should* be immutable and have have limited (if any) internal state the - ability to reason about and obtain consistency can be vastly improved. +* Enhancing reliability: Decoupling of state alterations from what should be + accomplished allows for a *natural* way of resuming by allowing the engine to + track the current state and know at which point a flow is in and how to get + back into that state when resumption occurs. +* Enhancing scalability: When a engine is responsible for executing your + desired work it becomes possible to alter the *how* in the future by creating + new types of execution backends (for example the worker model which does not + execute locally). Without the decoupling of the *what* and the *how* it is + not possible to provide such a feature (since by the very nature of that + coupling this kind of functionality is inherently hard to provide). +* Enhancing consistency: Since the engine is responsible for executing atoms + and the associated workflow, it can be one (if not the only) of the primary + entities that is working to keep the execution model in a consistent state. 
+ Coupled with atoms which *should* be immutable and have have limited (if any) + internal state the ability to reason about and obtain consistency can be + vastly improved. - * With future features around locking (using `tooz`_ to help) engines can also - help ensure that resources being accessed by tasks are reliably obtained and - mutated on. This will help ensure that other processes, threads, or other types - of entities are also not executing tasks that manipulate those same resources (further - increasing consistency). + * With future features around locking (using `tooz`_ to help) engines can + also help ensure that resources being accessed by tasks are reliably + obtained and mutated on. This will help ensure that other processes, + threads, or other types of entities are also not executing tasks that + manipulate those same resources (further increasing consistency). Of course these kind of features can come with some drawbacks: -* The downside of decoupling the *how* and the *what* is that the imperative model - where functions control & manipulate state must start to be shifted away from - (and this is likely a mindset change for programmers used to the imperative - model). We have worked to make this less of a concern by creating and - encouraging the usage of :doc:`persistence `, to help make it possible - to have some level of provided state transfer mechanism. -* Depending on how much imperative code exists (and state inside that code) there - can be *significant* rework of that code and converting or refactoring it to these new concepts. - We have tried to help here by allowing you to have tasks that internally use regular python - code (and internally can be written in an imperative style) as well as by providing examples - and these developer docs; helping this process be as seamless as possible. 
-* Another one of the downsides of decoupling the *what* from the *how* is that it may become - harder to use traditional techniques to debug failures (especially if remote workers are - involved). We try to help here by making it easy to track, monitor and introspect - the actions & state changes that are occurring inside an engine (see - :doc:`notifications ` for how to use some of these capabilities). +* The downside of decoupling the *how* and the *what* is that the imperative + model where functions control & manipulate state must start to be shifted + away from (and this is likely a mindset change for programmers used to the + imperative model). We have worked to make this less of a concern by creating + and encouraging the usage of :doc:`persistence `, to help make + it possible to have some level of provided state transfer mechanism. +* Depending on how much imperative code exists (and state inside that code) + there can be *significant* rework of that code and converting or refactoring + it to these new concepts. We have tried to help here by allowing you to have + tasks that internally use regular python code (and internally can be written + in an imperative style) as well as by providing examples and these developer + docs; helping this process be as seamless as possible. +* Another one of the downsides of decoupling the *what* from the *how* is that + it may become harder to use traditional techniques to debug failures + (especially if remote workers are involved). We try to help here by making it + easy to track, monitor and introspect the actions & state changes that are + occurring inside an engine (see :doc:`notifications ` for how + to use some of these capabilities). .. _declarative: http://en.wikipedia.org/wiki/Declarative_programming .. _imperative: http://en.wikipedia.org/wiki/Imperative_programming @@ -112,7 +117,8 @@ might look like:: ... 
flow = make_flow() - engine = engines.load(flow, engine_conf=my_conf, backend=my_persistence_conf) + engine = engines.load(flow, engine_conf=my_conf, + backend=my_persistence_conf) engine.run @@ -153,10 +159,10 @@ Parallel engine schedules tasks onto different threads to run them in parallel. Additional supported keyword arguments: * ``executor``: a object that implements a :pep:`3148` compatible `executor`_ - interface; it will be used for scheduling tasks. You can use instances - of a `thread pool executor`_ or a - :py:class:`green executor ` - (which internally uses `eventlet `_ and greenthread pools). + interface; it will be used for scheduling tasks. You can use instances of a + `thread pool executor`_ or a :py:class:`green executor + ` (which internally uses + `eventlet `_ and greenthread pools). .. tip:: @@ -190,22 +196,23 @@ Creation The first thing that occurs is that the user creates an engine for a given flow, providing a flow detail (where results will be saved into a provided :doc:`persistence ` backend). This is typically accomplished via -the methods described above in `creating engines`_. The engine at this point now will -have references to your flow and backends and other internal variables are -setup. +the methods described above in `creating engines`_. The engine at this point +now will have references to your flow and backends and other internal variables +are setup. Compiling --------- -During this stage the flow will be converted into an internal graph representation -using a flow :py:func:`~taskflow.utils.flow_utils.flatten` function. This function -converts the flow objects and contained atoms into a `networkx`_ directed graph that -contains the equivalent atoms defined in the flow and any nested flows & atoms as -well as the constraints that are created by the application of the different flow -patterns. This graph is then what will be analyzed & traversed during the engines -execution. 
At this point a few helper object are also created and saved to -internal engine variables (these object help in execution of atoms, analyzing -the graph and performing other internal engine activities). +During this stage the flow will be converted into an internal graph +representation using a flow :py:func:`~taskflow.utils.flow_utils.flatten` +function. This function converts the flow objects and contained atoms into a +`networkx`_ directed graph that contains the equivalent atoms defined in the +flow and any nested flows & atoms as well as the constraints that are created +by the application of the different flow patterns. This graph is then what will +be analyzed & traversed during the engines execution. At this point a few +helper object are also created and saved to internal engine variables (these +object help in execution of atoms, analyzing the graph and performing other +internal engine activities). Preparation ----------- @@ -213,35 +220,37 @@ Preparation This stage starts by setting up the storage needed for all atoms in the previously created graph, ensuring that corresponding :py:class:`~taskflow.persistence.logbook.AtomDetail` (or subclass of) objects -are created for each node in the graph. Once this is done final validation occurs -on the requirements that are needed to start execution and what storage provides. -If there is any atom or flow requirements not satisfied then execution will not be -allowed to continue. +are created for each node in the graph. Once this is done final validation +occurs on the requirements that are needed to start execution and what storage +provides. If there is any atom or flow requirements not satisfied then +execution will not be allowed to continue. Execution --------- -The graph (and helper objects) previously created are now used for guiding further -execution. The flow is put into the ``RUNNING`` :doc:`state ` and a +The graph (and helper objects) previously created are now used for guiding +further execution. 
The flow is put into the ``RUNNING`` :doc:`state ` +and a :py:class:`~taskflow.engines.action_engine.graph_action.FutureGraphAction` object starts to take over and begins going through the stages listed below. Resumption ^^^^^^^^^^ -One of the first stages is to analyze the :doc:`state ` of the tasks in the graph, -determining which ones have failed, which one were previously running and -determining what the intention of that task should now be (typically an -intention can be that it should ``REVERT``, or that it should ``EXECUTE`` or -that it should be ``IGNORED``). This intention is determined by analyzing the -current state of the task; which is determined by looking at the state in the task -detail object for that task and analyzing edges of the graph for things like -retry atom which can influence what a tasks intention should be (this is aided -by the usage of the :py:class:`~taskflow.engines.action_engine.graph_analyzer.GraphAnalyzer` -helper object which was designed to provide helper methods for this analysis). Once +One of the first stages is to analyze the :doc:`state ` of the tasks in +the graph, determining which ones have failed, which one were previously +running and determining what the intention of that task should now be +(typically an intention can be that it should ``REVERT``, or that it should +``EXECUTE`` or that it should be ``IGNORED``). This intention is determined by +analyzing the current state of the task; which is determined by looking at the +state in the task detail object for that task and analyzing edges of the graph +for things like retry atom which can influence what a tasks intention should be +(this is aided by the usage of the +:py:class:`~taskflow.engines.action_engine.graph_analyzer.GraphAnalyzer` helper +object which was designed to provide helper methods for this analysis). 
Once these intentions are determined and associated with each task (the intention is -also stored in the :py:class:`~taskflow.persistence.logbook.AtomDetail` object) the -scheduling stage starts. +also stored in the :py:class:`~taskflow.persistence.logbook.AtomDetail` object) +the scheduling stage starts. Scheduling ^^^^^^^^^^ @@ -250,25 +259,26 @@ This stage selects which atoms are eligible to run (looking at there intention, checking if predecessor atoms have ran and so-on, again using the :py:class:`~taskflow.engines.action_engine.graph_analyzer.GraphAnalyzer` helper object) and submits those atoms to a previously provided compatible -`executor`_ for asynchronous execution. This executor will return a `future`_ object -for each atom submitted; all of which are collected into a list of not done -futures. This will end the initial round of scheduling and at this point the -engine enters the waiting stage. +`executor`_ for asynchronous execution. This executor will return a `future`_ +object for each atom submitted; all of which are collected into a list of not +done futures. This will end the initial round of scheduling and at this point +the engine enters the waiting stage. Waiting ^^^^^^^ -In this stage the engine waits for any of the future objects previously submitted -to complete. Once one of the future objects completes (or fails) that atoms result -will be examined and persisted to the persistence backend (saved into the -corresponding :py:class:`~taskflow.persistence.logbook.AtomDetail` object) and -the state of the atom is changed. At this point what happens falls into two categories, -one for if that atom failed and one for if it did not. If the atom failed it may -be set to a new intention such as ``RETRY`` or ``REVERT`` (other atoms that were -predecessors of this failing atom may also have there intention altered). 
Once this -intention adjustment has happened a new round of scheduling occurs and this process -repeats until the engine succeeds or fails (if the process running the engine -dies the above stages will be restarted and resuming will occur). +In this stage the engine waits for any of the future objects previously +submitted to complete. Once one of the future objects completes (or fails) that +atoms result will be examined and persisted to the persistence backend (saved +into the corresponding :py:class:`~taskflow.persistence.logbook.AtomDetail` +object) and the state of the atom is changed. At this point what happens falls +into two categories, one for if that atom failed and one for if it did not. If +the atom failed it may be set to a new intention such as ``RETRY`` or +``REVERT`` (other atoms that were predecessors of this failing atom may also +have there intention altered). Once this intention adjustment has happened a +new round of scheduling occurs and this process repeats until the engine +succeeds or fails (if the process running the engine dies the above stages will +be restarted and resuming will occur). .. note:: @@ -280,15 +290,17 @@ dies the above stages will be restarted and resuming will occur). Finishing --------- -At this point the :py:class:`~taskflow.engines.action_engine.graph_action.FutureGraphAction` -has now finished successfully, failed, or the execution was suspended. Depending -on which one of these occurs will cause the flow to enter a new state (typically one -of ``FAILURE``, ``SUSPENDED``, ``SUCCESS`` or ``REVERTED``). :doc:`Notifications ` -will be sent out about this final state change (other state changes also send out notifications) -and any failures that occurred will be reraised (the failure objects are wrapped -exceptions). If no failures have occurred then the engine will have finished and -if so desired the :doc:`persistence ` can be used to cleanup any -details that were saved for this execution. 
+At this point the +:py:class:`~taskflow.engines.action_engine.graph_action.FutureGraphAction` has +now finished successfully, failed, or the execution was suspended. Depending on +which one of these occurs will cause the flow to enter a new state (typically +one of ``FAILURE``, ``SUSPENDED``, ``SUCCESS`` or ``REVERTED``). +:doc:`Notifications ` will be sent out about this final state +change (other state changes also send out notifications) and any failures that +occurred will be reraised (the failure objects are wrapped exceptions). If no +failures have occurred then the engine will have finished and if so desired the +:doc:`persistence ` can be used to cleanup any details that were +saved for this execution. Interfaces ========== diff --git a/doc/source/index.rst b/doc/source/index.rst index a0e869fc..387980e4 100644 --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -6,7 +6,8 @@ easy, consistent, and reliable.* .. note:: - Additional documentation is also hosted on wiki: https://wiki.openstack.org/wiki/TaskFlow + Additional documentation is also hosted on wiki: + https://wiki.openstack.org/wiki/TaskFlow Contents ======== diff --git a/doc/source/inputs_and_outputs.rst b/doc/source/inputs_and_outputs.rst index 26171e9a..d16105c9 100644 --- a/doc/source/inputs_and_outputs.rst +++ b/doc/source/inputs_and_outputs.rst @@ -4,19 +4,19 @@ Inputs and Outputs In TaskFlow there are multiple ways to provide inputs for your tasks and flows and get information from them. This document describes one of them, that -involves task arguments and results. There are also -:doc:`notifications `, which allow you to get notified when task -or flow changed state. You may also opt to use the :doc:`persistence ` -layer itself directly. +involves task arguments and results. There are also :doc:`notifications +`, which allow you to get notified when task or flow changed +state. You may also opt to use the :doc:`persistence ` layer +itself directly. 
----------------------- Flow Inputs and Outputs ----------------------- Tasks accept inputs via task arguments and provide outputs via task results -(see :doc:`arguments and results ` for more details). This -is the standard and recommended way to pass data from one task to another. Of -course not every task argument needs to be provided to some other task of a +(see :doc:`arguments and results ` for more details). +This is the standard and recommended way to pass data from one task to another. +Of course not every task argument needs to be provided to some other task of a flow, and not every task result should be consumed by every task. If some value is required by one or more tasks of a flow, but is not provided @@ -54,10 +54,12 @@ For example: .. make vim syntax highlighter happy** -As you can see, this flow does not require b, as it is provided by the fist task. +As you can see, this flow does not require b, as it is provided by the fist +task. .. note:: - There is no difference between processing of Task and Retry inputs and outputs. + There is no difference between processing of Task and Retry inputs + and outputs. ------------------ Engine and Storage @@ -93,7 +95,8 @@ prior to running: >>> engines.run(flo) Traceback (most recent call last): ... 
- taskflow.exceptions.MissingDependencies: taskflow.patterns.linear_flow.Flow: cat-dog; + taskflow.exceptions.MissingDependencies: + taskflow.patterns.linear_flow.Flow: cat-dog; 2 requires ['meow', 'woof'] but no other entity produces said requirements The recommended way to provide flow inputs is to use the ``store`` parameter @@ -120,10 +123,10 @@ of the engine helpers (:py:func:`~taskflow.engines.helpers.run` or woof {'meow': 'meow', 'woof': 'woof', 'dog': 'dog'} -You can also directly interact with the engine storage layer to add -additional values, note that if this route is used you can't use -:py:func:`~taskflow.engines.helpers.run` in this case to run your engine (instead -your must activate the engines run method directly): +You can also directly interact with the engine storage layer to add additional +values, note that if this route is used you can't use +:py:func:`~taskflow.engines.helpers.run` in this case to run your engine +(instead your must activate the engines run method directly): .. doctest:: @@ -142,8 +145,8 @@ Outputs As you can see from examples above, the run method returns all flow outputs in a ``dict``. This same data can be fetched via :py:meth:`~taskflow.storage.Storage.fetch_all` method of the storage. You can -also get single results using :py:meth:`~taskflow.storage.Storage.fetch_all`. For -example: +also get single results using :py:meth:`~taskflow.storage.Storage.fetch_all`. +For example: .. doctest:: diff --git a/doc/source/jobs.rst b/doc/source/jobs.rst index 374eb3c9..85ddd517 100644 --- a/doc/source/jobs.rst +++ b/doc/source/jobs.rst @@ -6,13 +6,13 @@ Overview ======== Jobs and jobboards are a **novel** concept that taskflow provides to allow for -automatic ownership transfer of workflows between capable -owners (those owners usually then use :doc:`engines ` to complete the -workflow). 
They provide the necessary semantics to be able to atomically -transfer a job from a producer to a consumer in a reliable and fault tolerant -manner. They are modeled off the concept used to post and acquire work in the -physical world (typically a job listing in a newspaper or online website -serves a similar role). +automatic ownership transfer of workflows between capable owners (those owners +usually then use :doc:`engines ` to complete the workflow). They +provide the necessary semantics to be able to atomically transfer a job from a +producer to a consumer in a reliable and fault tolerant manner. They are +modeled off the concept used to post and acquire work in the physical world +(typically a job listing in a newspaper or online website serves a similar +role). **TLDR:** It's similar to a queue, but consumers lock items on the queue when claiming them, and only remove them from the queue when they're done with the @@ -25,20 +25,22 @@ Definitions =========== Jobs - A :py:class:`job ` consists of a unique identifier, name, - and a reference to a :py:class:`logbook ` - which contains the details of the work that has been or should be/will be - completed to finish the work that has been created for that job. + A :py:class:`job ` consists of a unique identifier, + name, and a reference to a :py:class:`logbook + ` which contains the details of the + work that has been or should be/will be completed to finish the work that has + been created for that job. Jobboards - A :py:class:`jobboard ` is responsible for managing - the posting, ownership, and delivery of jobs. It acts as the location where jobs - can be posted, claimed and searched for; typically by iteration or notification. - Jobboards may be backed by different *capable* implementations (each with potentially differing - configuration) but all jobboards implement the same interface and semantics so - that the backend usage is as transparent as possible. 
This allows deployers or - developers of a service that uses TaskFlow to select a jobboard implementation - that fits their setup (and there intended usage) best. + A :py:class:`jobboard ` is responsible for + managing the posting, ownership, and delivery of jobs. It acts as the + location where jobs can be posted, claimed and searched for; typically by + iteration or notification. Jobboards may be backed by different *capable* + implementations (each with potentially differing configuration) but all + jobboards implement the same interface and semantics so that the backend + usage is as transparent as possible. This allows deployers or developers of a + service that uses TaskFlow to select a jobboard implementation that fits + their setup (and there intended usage) best. Features ======== @@ -197,18 +199,19 @@ non-issues but for now they are worth mentioning. Dual-engine jobs ---------------- -**What:** Since atoms and engines are not currently `preemptable`_ we can not force -a engine (or the threads/remote workers... it is using to run) to stop working on -an atom (it is general bad behavior to force code to stop without its consent anyway) if it has -already started working on an atom (short of doing a ``kill -9`` on the running interpreter). -This could cause problems since the points an engine can notice that it no longer owns a -claim is at any :doc:`state ` change that occurs (transitioning to a -new atom or recording a result for example), where upon noticing the claim has -been lost the engine can immediately stop doing further work. The effect that this -causes is that when a claim is lost another engine can immediately attempt to acquire -the claim that was previously lost and it *could* begin working on the unfinished tasks -that the later engine may also still be executing (since that engine is not yet -aware that it has lost the claim). 
+**What:** Since atoms and engines are not currently `preemptable`_ we can not +force a engine (or the threads/remote workers... it is using to run) to stop +working on an atom (it is general bad behavior to force code to stop without +its consent anyway) if it has already started working on an atom (short of +doing a ``kill -9`` on the running interpreter). This could cause problems +since the points an engine can notice that it no longer owns a claim is at any +:doc:`state ` change that occurs (transitioning to a new atom or +recording a result for example), where upon noticing the claim has been lost +the engine can immediately stop doing further work. The effect that this causes +is that when a claim is lost another engine can immediately attempt to acquire +the claim that was previously lost and it *could* begin working on the +unfinished tasks that the later engine may also still be executing (since that +engine is not yet aware that it has lost the claim). **TLDR:** not `preemptable`_, possible to become aware of losing a claim after the fact (at the next state change), another engine could have acquired @@ -219,17 +222,18 @@ the claim by then, therefore both would be *working* on a job. #. Ensure your atoms are `idempotent`_, this will cause an engine that may be executing the same atom to be able to continue executing without causing any conflicts/problems (idempotency guarantees this). -#. On claiming jobs that have been claimed previously enforce a policy that happens - before the jobs workflow begins to execute (possibly prior to an engine beginning - the jobs work) that ensures that any prior work has been rolled back before - continuing rolling forward. For example: +#. On claiming jobs that have been claimed previously enforce a policy that + happens before the jobs workflow begins to execute (possibly prior to an + engine beginning the jobs work) that ensures that any prior work has been + rolled back before continuing rolling forward. 
For example: * Rolling back the last atom/set of atoms that finished. * Rolling back the last state change that occurred. -#. Delay claiming partially completed work by adding a wait period (to allow the - previous engine to coalesce) before working on a partially completed job (combine - this with the prior suggestions and dual-engine issues should be avoided). +#. Delay claiming partially completed work by adding a wait period (to allow + the previous engine to coalesce) before working on a partially completed job + (combine this with the prior suggestions and dual-engine issues should be + avoided). .. _idempotent: http://en.wikipedia.org/wiki/Idempotence .. _preemptable: http://en.wikipedia.org/wiki/Preemption_%28computing%29 diff --git a/doc/source/notifications.rst b/doc/source/notifications.rst index 327792d4..f477e396 100644 --- a/doc/source/notifications.rst +++ b/doc/source/notifications.rst @@ -21,8 +21,8 @@ To receive these notifications you should register a callback in Each engine provides two of them: one notifies about flow state changes, and another notifies about changes of tasks. -TaskFlow also has a set of predefined :ref:`listeners `, and provides -means to write your own listeners, which can be more convenient than +TaskFlow also has a set of predefined :ref:`listeners `, and +provides means to write your own listeners, which can be more convenient than using raw callbacks. -------------------------------------- diff --git a/doc/source/persistence.rst b/doc/source/persistence.rst index 8cfe91d1..5ad12ef9 100644 --- a/doc/source/persistence.rst +++ b/doc/source/persistence.rst @@ -17,17 +17,18 @@ This abstraction serves the following *major* purposes: * Tracking of what was done (introspection). * Saving *memory* which allows for restarting from the last saved state which is a critical feature to restart and resume workflows (checkpointing). 
-* Associating additional metadata with atoms while running (without having those - atoms need to save this data themselves). This makes it possible to add-on - new metadata in the future without having to change the atoms themselves. For - example the following can be saved: +* Associating additional metadata with atoms while running (without having + those atoms need to save this data themselves). This makes it possible to + add-on new metadata in the future without having to change the atoms + themselves. For example the following can be saved: * Timing information (how long a task took to run). * User information (who the task ran as). * When a atom/workflow was ran (and why). -* Saving historical data (failures, successes, intermediary results...) to allow - for retry atoms to be able to decide if they should should continue vs. stop. +* Saving historical data (failures, successes, intermediary results...) + to allow for retry atoms to be able to decide if they should should continue + vs. stop. * *Something you create...* .. _stevedore: http://stevedore.readthedocs.org/ @@ -35,39 +36,47 @@ This abstraction serves the following *major* purposes: How it is used ============== -On :doc:`engine ` construction typically a backend (it can be optional) -will be provided which satisfies the :py:class:`~taskflow.persistence.backends.base.Backend` -abstraction. Along with providing a backend object a :py:class:`~taskflow.persistence.logbook.FlowDetail` -object will also be created and provided (this object will contain the details about -the flow to be ran) to the engine constructor (or associated :py:meth:`load() ` helper functions). 
-Typically a :py:class:`~taskflow.persistence.logbook.FlowDetail` object is created from -a :py:class:`~taskflow.persistence.logbook.LogBook` object (the book object -acts as a type of container for :py:class:`~taskflow.persistence.logbook.FlowDetail` +On :doc:`engine ` construction typically a backend (it can be +optional) will be provided which satisfies the +:py:class:`~taskflow.persistence.backends.base.Backend` abstraction. Along with +providing a backend object a +:py:class:`~taskflow.persistence.logbook.FlowDetail` object will also be +created and provided (this object will contain the details about the flow to be +ran) to the engine constructor (or associated :py:meth:`load() +` helper functions). Typically a +:py:class:`~taskflow.persistence.logbook.FlowDetail` object is created from a +:py:class:`~taskflow.persistence.logbook.LogBook` object (the book object acts +as a type of container for :py:class:`~taskflow.persistence.logbook.FlowDetail` and :py:class:`~taskflow.persistence.logbook.AtomDetail` objects). -**Preparation**: Once an engine starts to run it will create a :py:class:`~taskflow.storage.Storage` -object which will act as the engines interface to the underlying backend storage -objects (it provides helper functions that are commonly used by the engine, -avoiding repeating code when interacting with the provided :py:class:`~taskflow.persistence.logbook.FlowDetail` -and :py:class:`~taskflow.persistence.backends.base.Backend` objects). As an engine -initializes it will extract (or create) :py:class:`~taskflow.persistence.logbook.AtomDetail` -objects for each atom in the workflow the engine will be executing. 
+**Preparation**: Once an engine starts to run it will create a +:py:class:`~taskflow.storage.Storage` object which will act as the engines +interface to the underlying backend storage objects (it provides helper +functions that are commonly used by the engine, avoiding repeating code when +interacting with the provided +:py:class:`~taskflow.persistence.logbook.FlowDetail` and +:py:class:`~taskflow.persistence.backends.base.Backend` objects). As an engine +initializes it will extract (or create) +:py:class:`~taskflow.persistence.logbook.AtomDetail` objects for each atom in +the workflow the engine will be executing. -**Execution:** When an engine beings to execute (see :doc:`engine ` for more -of the details about how an engine goes about this process) it will examine any -previously existing :py:class:`~taskflow.persistence.logbook.AtomDetail` objects to -see if they can be used for resuming; see :doc:`resumption ` for more details -on this subject. For atoms which have not finished (or did not finish correctly from a -previous run) they will begin executing only after any dependent inputs are ready. This -is done by analyzing the execution graph and looking at predecessor :py:class:`~taskflow.persistence.logbook.AtomDetail` -outputs and states (which may have been persisted in a past run). This will result -in either using there previous information or by running those predecessors and -saving their output to the :py:class:`~taskflow.persistence.logbook.FlowDetail` and -:py:class:`~taskflow.persistence.backends.base.Backend` objects. This execution, analysis -and interaction with the storage objects continues (what is described here is -a simplification of what really happens; which is quite a bit more complex) -until the engine has finished running (at which point the engine will have -succeeded or failed in its attempt to run the workflow). 
+**Execution:** When an engine beings to execute (see :doc:`engine ` +for more of the details about how an engine goes about this process) it will +examine any previously existing +:py:class:`~taskflow.persistence.logbook.AtomDetail` objects to see if they can +be used for resuming; see :doc:`resumption ` for more details on +this subject. For atoms which have not finished (or did not finish correctly +from a previous run) they will begin executing only after any dependent inputs +are ready. This is done by analyzing the execution graph and looking at +predecessor :py:class:`~taskflow.persistence.logbook.AtomDetail` outputs and +states (which may have been persisted in a past run). This will result in +either using there previous information or by running those predecessors and +saving their output to the :py:class:`~taskflow.persistence.logbook.FlowDetail` +and :py:class:`~taskflow.persistence.backends.base.Backend` objects. This +execution, analysis and interaction with the storage objects continues (what is +described here is a simplification of what really happens; which is quite a bit +more complex) until the engine has finished running (at which point the engine +will have succeeded or failed in its attempt to run the workflow). **Post-execution:** Typically when an engine is done running the logbook would be discarded (to avoid creating a stockpile of useless data) and the backend @@ -91,23 +100,24 @@ A few scenarios come to mind: It should be emphasized that logbook is the authoritative, and, preferably, the **only** (see :doc:`inputs and outputs `) source of - run-time state information (breaking this principle makes it hard/impossible - to restart or resume in any type of automated fashion). When an atom returns - a result, it should be written directly to a logbook. When atom or flow state - changes in any way, logbook is first to know (see :doc:`notifications ` - for how a user may also get notified of those same state changes). 
The logbook - and a backend and associated storage helper class are responsible to store the actual data. - These components used together specify the persistence mechanism (how data - is saved and where -- memory, database, whatever...) and the persistence policy - (when data is saved -- every time it changes or at some particular moments - or simply never). + run-time state information (breaking this principle makes it + hard/impossible to restart or resume in any type of automated fashion). + When an atom returns a result, it should be written directly to a logbook. + When atom or flow state changes in any way, logbook is first to know (see + :doc:`notifications ` for how a user may also get notified + of those same state changes). The logbook and a backend and associated + storage helper class are responsible to store the actual data. These + components used together specify the persistence mechanism (how data is + saved and where -- memory, database, whatever...) and the persistence + policy (when data is saved -- every time it changes or at some particular + moments or simply never). Usage ===== -To select which persistence backend to use you should use the -:py:meth:`fetch() ` function which uses -entrypoints (internally using `stevedore`_) to fetch and configure your backend. This makes +To select which persistence backend to use you should use the :py:meth:`fetch() +` function which uses entrypoints +(internally using `stevedore`_) to fetch and configure your backend. This makes it simpler than accessing the backend data types directly and provides a common function from which a backend can be fetched. @@ -158,11 +168,11 @@ Sqlalchemy **Connection**: ``'mysql'`` or ``'postgres'`` or ``'sqlite'`` -Retains all data in a `ACID`_ compliant database using the `sqlalchemy`_ library -for schemas, connections, and database interaction functionality. Useful when -you need a higher level of durability than offered by the previous solutions. 
When -using these connection types it is possible to resume a engine from a peer machine (this -does not apply when using sqlite). +Retains all data in a `ACID`_ compliant database using the `sqlalchemy`_ +library for schemas, connections, and database interaction functionality. +Useful when you need a higher level of durability than offered by the previous +solutions. When using these connection types it is possible to resume a engine +from a peer machine (this does not apply when using sqlite). .. _sqlalchemy: http://www.sqlalchemy.org/docs/ .. _ACID: https://en.wikipedia.org/wiki/ACID diff --git a/doc/source/resumption.rst b/doc/source/resumption.rst index b80fa909..cc6e9eec 100644 --- a/doc/source/resumption.rst +++ b/doc/source/resumption.rst @@ -5,34 +5,34 @@ Resumption Overview ======== -**Question**: *How can we persist the flow so that it can be resumed, restarted or -rolled-back on engine failure?* +**Question**: *How can we persist the flow so that it can be resumed, restarted +or rolled-back on engine failure?* -**Answer:** Since a flow is a set of :doc:`atoms ` and relations between atoms we -need to create a model and corresponding information that allows us to persist -the *right* amount of information to preserve, resume, and rollback a flow on -software or hardware failure. +**Answer:** Since a flow is a set of :doc:`atoms ` and relations between +atoms we need to create a model and corresponding information that allows us to +persist the *right* amount of information to preserve, resume, and rollback a +flow on software or hardware failure. -To allow for resumption taskflow must be able to re-create the flow and re-connect -the links between atom (and between atoms->atom details and so on) in order to -revert those atoms or resume those atoms in the correct ordering. Taskflow provides -a pattern that can help in automating this process (it does **not** prohibit the user -from creating their own strategies for doing this). 
+To allow for resumption taskflow must be able to re-create the flow and +re-connect the links between atom (and between atoms->atom details and so on) +in order to revert those atoms or resume those atoms in the correct ordering. +Taskflow provides a pattern that can help in automating this process (it does +**not** prohibit the user from creating their own strategies for doing this). Factories ========= -The default provided way is to provide a `factory`_ function which will create (or -recreate your workflow). This function can be provided when loading -a flow and corresponding engine via the provided -:py:meth:`load_from_factory() ` method. This -`factory`_ function is expected to be a function (or ``staticmethod``) which is reimportable (aka -has a well defined name that can be located by the ``__import__`` function in python, this -excludes ``lambda`` style functions and ``instance`` methods). The `factory`_ function -name will be saved into the logbook and it will be imported and called to create the -workflow objects (or recreate it if resumption happens). This allows for the flow -to be recreated if and when that is needed (even on remote machines, as long as the -reimportable name can be located). +The default provided way is to provide a `factory`_ function which will create +(or recreate your workflow). This function can be provided when loading a flow +and corresponding engine via the provided :py:meth:`load_from_factory() +` method. This `factory`_ function +is expected to be a function (or ``staticmethod``) which is reimportable (aka +has a well defined name that can be located by the ``__import__`` function in +python, this excludes ``lambda`` style functions and ``instance`` methods). The +`factory`_ function name will be saved into the logbook and it will be imported +and called to create the workflow objects (or recreate it if resumption +happens). 
This allows for the flow to be recreated if and when that is needed +(even on remote machines, as long as the reimportable name can be located). .. _factory: https://en.wikipedia.org/wiki/Factory_%28object-oriented_programming%29 @@ -40,10 +40,10 @@ Names ===== When a flow is created it is expected that each atom has a unique name, this -name serves a special purpose in the resumption process (as well as serving -a useful purpose when running, allowing for atom identification in the -:doc:`notification ` process). The reason for having names is that -an atom in a flow needs to be somehow matched with (a potentially) +name serves a special purpose in the resumption process (as well as serving a +useful purpose when running, allowing for atom identification in the +:doc:`notification ` process). The reason for having names is +that an atom in a flow needs to be somehow matched with (a potentially) existing :py:class:`~taskflow.persistence.logbook.AtomDetail` during engine resumption & subsequent running. @@ -61,27 +61,29 @@ Names provide this although they do have weaknesses: .. note:: - Even though these weaknesses names were selected as a *good enough* solution for the above - matching requirements (until something better is invented/created that can satisfy those - same requirements). + Even though these weaknesses names were selected as a *good enough* + solution for the above matching requirements (until something better is + invented/created that can satisfy those same requirements). Scenarios ========= -When new flow is loaded into engine, there is no persisted data -for it yet, so a corresponding :py:class:`~taskflow.persistence.logbook.FlowDetail` object -will be created, as well as a :py:class:`~taskflow.persistence.logbook.AtomDetail` object for -each atom that is contained in it. These will be immediately saved into the persistence backend -that is configured. 
If no persistence backend is configured, then as expected nothing will be -saved and the atoms and flow will be ran in a non-persistent manner. +When new flow is loaded into engine, there is no persisted data for it yet, so +a corresponding :py:class:`~taskflow.persistence.logbook.FlowDetail` object +will be created, as well as a +:py:class:`~taskflow.persistence.logbook.AtomDetail` object for each atom that +is contained in it. These will be immediately saved into the persistence +backend that is configured. If no persistence backend is configured, then as +expected nothing will be saved and the atoms and flow will be ran in a +non-persistent manner. -**Subsequent run:** When we resume the flow from a persistent backend (for example, -if the flow was interrupted and engine destroyed to save resources or if the -service was restarted), we need to re-create the flow. For that, we will call -the function that was saved on first-time loading that builds the flow for -us (aka; the flow factory function described above) and the engine will run. The -following scenarios explain some expected structural changes and how they can -be accommodated (and what the effect will be when resuming & running). +**Subsequent run:** When we resume the flow from a persistent backend (for +example, if the flow was interrupted and engine destroyed to save resources or +if the service was restarted), we need to re-create the flow. For that, we will +call the function that was saved on first-time loading that builds the flow for +us (aka; the flow factory function described above) and the engine will run. +The following scenarios explain some expected structural changes and how they +can be accommodated (and what the effect will be when resuming & running). Same atoms ---------- @@ -96,61 +98,64 @@ and then the engine resumes. 
Atom was added -------------- -When the factory function mentioned above alters the flow by adding -a new atom in (for example for changing the runtime structure of what was previously -ran in the first run). +When the factory function mentioned above alters the flow by adding a new atom +in (for example for changing the runtime structure of what was previously ran +in the first run). -**Runtime change:** By default when the engine resumes it will notice that -a corresponding :py:class:`~taskflow.persistence.logbook.AtomDetail` does not +**Runtime change:** By default when the engine resumes it will notice that a +corresponding :py:class:`~taskflow.persistence.logbook.AtomDetail` does not exist and one will be created and associated. Atom was removed ---------------- -When the factory function mentioned above alters the flow by removing -a new atom in (for example for changing the runtime structure of what was previously +When the factory function mentioned above alters the flow by removing a new +atom in (for example for changing the runtime structure of what was previously ran in the first run). -**Runtime change:** Nothing should be done -- flow structure is reloaded from factory -function, and removed atom is not in it -- so, flow will be ran as if it was -not there, and any results it returned if it was completed before will be ignored. +**Runtime change:** Nothing should be done -- flow structure is reloaded from +factory function, and removed atom is not in it -- so, flow will be ran as if +it was not there, and any results it returned if it was completed before will +be ignored. Atom code was changed --------------------- -When the factory function mentioned above alters the flow by deciding that a newer -version of a previously existing atom should be ran (possibly to perform some -kind of upgrade or to fix a bug in a prior atoms code). 
+When the factory function mentioned above alters the flow by deciding that a +newer version of a previously existing atom should be ran (possibly to perform +some kind of upgrade or to fix a bug in a prior atoms code). **Factory change:** The atom name & version will have to be altered. The factory should replace this name where it was being used previously. -**Runtime change:** This will fall under the same runtime adjustments that exist -when a new atom is added. In the future taskflow could make this easier by -providing a ``upgrade()`` function that can be used to give users the ability -to upgrade atoms before running (manual introspection & modification of a -:py:class:`~taskflow.persistence.logbook.LogBook` can be done before engine loading -and running to accomplish this in the meantime). +**Runtime change:** This will fall under the same runtime adjustments that +exist when a new atom is added. In the future taskflow could make this easier +by providing a ``upgrade()`` function that can be used to give users the +ability to upgrade atoms before running (manual introspection & modification of +a :py:class:`~taskflow.persistence.logbook.LogBook` can be done before engine +loading and running to accomplish this in the meantime). Atom was split in two atoms or merged from two (or more) to one atom -------------------------------------------------------------------- -When the factory function mentioned above alters the flow by deciding that a previously -existing atom should be split into N atoms or the factory function decides that N atoms -should be merged in `_ transports). +connected via `amqp`_ (or other supported `kombu +`_ transports). .. note:: @@ -43,8 +43,9 @@ Worker configured to run in as many threads (green or not) as desired. Proxy - Executors interact with workers via a proxy. The proxy maintains the underlying - transport and publishes messages (and invokes callbacks on message reception). + Executors interact with workers via a proxy. 
The proxy maintains the + underlying transport and publishes messages (and invokes callbacks on message + reception). Requirements ------------ @@ -122,12 +123,13 @@ engine executor in the following manner: 1. The executor initiates task execution/reversion using a proxy object. 2. :py:class:`~taskflow.engines.worker_based.proxy.Proxy` publishes task request (format is described below) into a named exchange using a routing - key that is used to deliver request to particular workers topic. The executor - then waits for the task requests to be accepted and confirmed by workers. If - the executor doesn't get a task confirmation from workers within the given - timeout the task is considered as timed-out and a timeout exception is - raised. -3. A worker receives a request message and starts a new thread for processing it. + key that is used to deliver request to particular workers topic. The + executor then waits for the task requests to be accepted and confirmed by + workers. If the executor doesn't get a task confirmation from workers within + the given timeout the task is considered as timed-out and a timeout + exception is raised. +3. A worker receives a request message and starts a new thread for processing + it. 1. The worker dispatches the request (gets desired endpoint that actually executes the task). @@ -141,8 +143,8 @@ engine executor in the following manner: handled by the engine, dispatching to listeners and so-on). 4. The executor gets the task request confirmation from the worker and the task - request state changes from the ``PENDING`` to the ``RUNNING`` state. Once - a task request is in the ``RUNNING`` state it can't be timed-out (considering + request state changes from the ``PENDING`` to the ``RUNNING`` state. Once a + task request is in the ``RUNNING`` state it can't be timed-out (considering that task execution process may take unpredictable time). 5. 
The executor gets the task execution result from the worker and passes it back to the executor and worker-based engine to finish task processing (this @@ -303,7 +305,8 @@ Additional supported keyword arguments: * ``executor``: a class that provides a :py:class:`~taskflow.engines.worker_based.executor.WorkerTaskExecutor` - interface; it will be used for executing, reverting and waiting for remote tasks. + interface; it will be used for executing, reverting and waiting for remote + tasks. Limitations =========== diff --git a/tools/check_doc.py b/tools/check_doc.py new file mode 100644 index 00000000..04c70dc2 --- /dev/null +++ b/tools/check_doc.py @@ -0,0 +1,114 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +# Copyright (C) 2014 Ivan Melnikov +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + + +"""Check documentation for simple style requirements. + +What is checked: + - lines should not be longer than 79 characters + - exception: line with no whitespace except maybe in the beginning + - exception: line that starts with '..' 
-- longer directives are allowed, + including footnotes + - no tabulation for indentation + - no trailing whitespace +""" + +import fnmatch +import os +import re +import sys + + +FILE_PATTERNS = ['*.rst', '*.txt'] +MAX_LINE_LENGTH = 79 +TRAILING_WHITESPACE_REGEX = re.compile('\s$') +STARTING_WHITESPACE_REGEX = re.compile('^(\s+)') + + +def check_max_length(line): + if len(line) > MAX_LINE_LENGTH: + stripped = line.strip() + if not any(( + line.startswith('..'), # this is directive + stripped.startswith('>>>'), # this is doctest + stripped.startswith('...'), # and this + stripped.startswith('taskflow.'), + ' ' not in stripped # line can't be split + )): + yield ('D001', 'Line too long') + + +def check_trailing_whitespace(line): + if TRAILING_WHITESPACE_REGEX.search(line): + yield ('D002', 'Trailing whitespace') + + +def check_indentation_no_tab(line): + match = STARTING_WHITESPACE_REGEX.search(line) + if match: + spaces = match.group(1) + if '\t' in spaces: + yield ('D003', 'Tabulation used for indentation') + + +LINE_CHECKS = (check_max_length, + check_trailing_whitespace, + check_indentation_no_tab) + + +def check_lines(lines): + for idx, line in enumerate(lines, 1): + line = line.rstrip('\n') + for check in LINE_CHECKS: + for code, message in check(line): + yield idx, code, message + + +def check_files(filenames): + for fn in filenames: + with open(fn) as f: + for line_num, code, message in check_lines(f): + yield fn, line_num, code, message + + +def find_files(pathes, patterns): + for path in pathes: + if os.path.isfile(path): + yield path + elif os.path.isdir(path): + for root, dirnames, filenames in os.walk(path): + for filename in filenames: + if any(fnmatch.fnmatch(filename, pattern) + for pattern in patterns): + yield os.path.join(root, filename) + else: + print('Invalid path: %s' % path) + + +def main(): + ok = True + if len(sys.argv) > 1: + dirs = sys.argv[1:] + else: + dirs = ['.'] + for error in check_files(find_files(dirs, FILE_PATTERNS)): + ok = False 
+ print('%s:%s: %s %s' % error) + sys.exit(0 if ok else 1) + +if __name__ == '__main__': + main() diff --git a/tox-tmpl.ini b/tox-tmpl.ini index 3b4c6cac..96df6f62 100644 --- a/tox-tmpl.ini +++ b/tox-tmpl.ini @@ -54,6 +54,7 @@ deps = -r{toxinidir}/requirements.txt commands = python setup.py testr --slowest --testr-args='{posargs}' sphinx-build -b doctest doc/source doc/build + python tools/check_doc.py doc/source [testenv:py33] deps = {[testenv]deps} diff --git a/tox.ini b/tox.ini index 08666fe0..4768beaa 100644 --- a/tox.ini +++ b/tox.ini @@ -81,6 +81,7 @@ deps = -r{toxinidir}/requirements.txt commands = python setup.py testr --slowest --testr-args='{posargs}' sphinx-build -b doctest doc/source doc/build + python tools/check_doc.py doc/source [testenv:py33] deps = {[testenv]deps} From 4422644de3c6c498c85acc63298cd55e8b46f486 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Mon, 5 May 2014 15:30:28 -0700 Subject: [PATCH 060/188] Allow command and connection retry configuration When kazoo needs to retry commands or retry connections there is a specific set of configuration that can be provided to adjust these; allow that configuration to be passed to the client configuration method. 
Change-Id: I9c57e81ae3f82c7358ddabc7804a28fee7734c93 --- taskflow/utils/kazoo_utils.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/taskflow/utils/kazoo_utils.py b/taskflow/utils/kazoo_utils.py index a2b5390c..8ca8bf52 100644 --- a/taskflow/utils/kazoo_utils.py +++ b/taskflow/utils/kazoo_utils.py @@ -75,10 +75,16 @@ def check_compatible(client, min_version=None, max_version=None): def make_client(conf): """Creates a kazoo client given a configuration dictionary.""" + # See: http://kazoo.readthedocs.org/en/latest/api/client.html client_kwargs = { 'read_only': bool(conf.get('read_only')), 'randomize_hosts': bool(conf.get('randomize_hosts')), } + # See: http://kazoo.readthedocs.org/en/latest/api/retry.html + if 'command_retry' in conf: + client_kwargs['command_retry'] = conf['command_retry'] + if 'connection_retry' in conf: + client_kwargs['connection_retry'] = conf['connection_retry'] hosts = _parse_hosts(conf.get("hosts", "localhost:2181")) if not hosts or not isinstance(hosts, six.string_types): raise TypeError("Invalid hosts format, expected " From 449d221d822d2d68a32b81700d798a5fb6aa0584 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Fri, 9 May 2014 11:05:29 -0700 Subject: [PATCH 061/188] Remove the _clear method and do not reset the job_watcher The _clear method is only used in one place (in close) so the benefit of it being a function is not really valueable so move its logic to the close method itself. Also fix that the job_watcher does not need to be reset since it can now survive session lose/disconnection, setting it to none is not useful anymore and actually creates more watchers that provide the same data (which is bad). 
Change-Id: I243ec0551a341df74f478b0161ffd559a75278fe --- taskflow/jobs/backends/impl_zookeeper.py | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/taskflow/jobs/backends/impl_zookeeper.py b/taskflow/jobs/backends/impl_zookeeper.py index 8fa0c484..4de2091d 100644 --- a/taskflow/jobs/backends/impl_zookeeper.py +++ b/taskflow/jobs/backends/impl_zookeeper.py @@ -571,11 +571,6 @@ class ZookeeperJobBoard(jobboard.NotifyingJobBoard): def _state_change_listener(self, state): LOG.debug("Kazoo client has changed to state: %s", state) - def _clear(self): - with self._job_lock: - self._known_jobs.clear() - self._job_watcher = None - def wait(self, timeout=None): # Wait until timeout expires (or forever) for jobs to appear. watch = None @@ -620,7 +615,8 @@ class ZookeeperJobBoard(jobboard.NotifyingJobBoard): LOG.debug("Shutting down the notifier") self._worker.shutdown() self._worker = None - self._clear() + with self._job_lock: + self._known_jobs.clear() LOG.debug("Stopped & cleared local state") @lock_utils.locked(lock='_open_close_lock') From fb5d54d50664a2f814e54d6c7c9db61ee8bf1cc1 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Fri, 9 May 2014 19:33:41 -0700 Subject: [PATCH 062/188] Use a more stable flush method Instead of using the zake provided flush method (which does have issues, since it does not guarantee that the associated watches will have been called) instead use a new method which creates nodes, waits for there watches to be triggered, does other work, and then deletes the node and waits for that watcher to be triggered; this is more stable (as it depends on the linearity guarantee of zookeeper and the kazoo threading model). 
Change-Id: I12fd9c7bcc5cd9009b4175166edfc924e94161bf --- taskflow/tests/unit/jobs/test_zk_job.py | 109 +++++++++++++++--------- test-requirements.txt | 2 +- 2 files changed, 71 insertions(+), 40 deletions(-) diff --git a/taskflow/tests/unit/jobs/test_zk_job.py b/taskflow/tests/unit/jobs/test_zk_job.py index 9154994d..ef652c57 100644 --- a/taskflow/tests/unit/jobs/test_zk_job.py +++ b/taskflow/tests/unit/jobs/test_zk_job.py @@ -21,6 +21,8 @@ import time import six +from kazoo.recipe import watchers + from zake import fake_client from zake import utils as zake_utils @@ -30,6 +32,7 @@ from taskflow import states from taskflow import test from taskflow.openstack.common import jsonutils +from taskflow.openstack.common import uuidutils from taskflow.persistence.backends import impl_dir from taskflow.utils import misc from taskflow.utils import persistence_utils as p_utils @@ -55,6 +58,40 @@ def create_board(client=None, persistence=None): return (client, board) +@contextlib.contextmanager +def flush(client, path=None): + # This uses the linearity guarantee of zookeeper (and associated libraries) + # to create a temporary node, wait until a watcher notifies it's created, + # then yield back for more work, and then at the end of that work delete + # the created node. This ensures that the operations done in the yield + # of this context manager will be applied and all watchers will have fired + # before this context manager exits. 
+ if not path: + path = "/tmp-%s" % uuidutils.generate_uuid() + created = threading.Event() + deleted = threading.Event() + + def on_created(data, stat): + if stat is not None: + created.set() + return False # cause this watcher to cease to exist + + def on_deleted(data, stat): + if stat is None: + deleted.set() + return False # cause this watcher to cease to exist + + watchers.DataWatch(client, path, func=on_created) + client.create(path) + created.wait() + try: + yield + finally: + watchers.DataWatch(client, path, func=on_deleted) + client.delete(path, recursive=True) + deleted.wait() + + class TestZookeeperJobs(test.TestCase): def setUp(self): super(TestZookeeperJobs, self).setUp() @@ -66,7 +103,6 @@ class TestZookeeperJobs(test.TestCase): def test_connect(self): self.assertFalse(self.board.connected) with connect_close(self.board): - self.client.flush() self.assertTrue(self.board.connected) @mock.patch("taskflow.jobs.backends.impl_zookeeper.misc." @@ -77,7 +113,6 @@ class TestZookeeperJobs(test.TestCase): with connect_close(self.board): j = self.board.post('test', p_utils.temporary_log_book()) - self.client.flush() self.assertEqual(epoch, j.created_on) self.assertEqual(epoch, j.last_modified) @@ -99,8 +134,6 @@ class TestZookeeperJobs(test.TestCase): with connect_close(self.board): book = p_utils.temporary_log_book() self.board.post('test', book) - self.client.flush() - jobs = list(self.board.iterjobs(ensure_fresh=True)) self.assertEqual(1, len(jobs)) @@ -138,11 +171,9 @@ class TestZookeeperJobs(test.TestCase): book = p_utils.temporary_log_book() with connect_close(self.board): - self.client.flush() self.assertTrue(self.board.connected) self.assertEqual(0, self.board.job_count) posted_job = self.board.post('test', book) - self.client.flush() self.assertEqual(self.board, posted_job.board) self.assertEqual(1, self.board.job_count) @@ -174,8 +205,8 @@ class TestZookeeperJobs(test.TestCase): def test_posting_claim(self): with connect_close(self.board): - 
self.board.post('test', p_utils.temporary_log_book()) - self.client.flush() + with flush(self.client): + self.board.post('test', p_utils.temporary_log_book()) self.assertEqual(1, self.board.job_count) possible_jobs = list(self.board.iterjobs(only_unclaimed=True)) @@ -183,8 +214,9 @@ class TestZookeeperJobs(test.TestCase): j = possible_jobs[0] self.assertEqual(states.UNCLAIMED, j.state) - self.board.claim(j, self.board.name) - self.client.flush() + with flush(self.client): + self.board.claim(j, self.board.name) + self.assertEqual(self.board.name, self.board.find_owner(j)) self.assertEqual(states.CLAIMED, j.state) @@ -198,19 +230,19 @@ class TestZookeeperJobs(test.TestCase): def test_posting_claim_consume(self): with connect_close(self.board): - self.board.post('test', p_utils.temporary_log_book()) - self.client.flush() + with flush(self.client): + self.board.post('test', p_utils.temporary_log_book()) possible_jobs = list(self.board.iterjobs(only_unclaimed=True)) self.assertEqual(1, len(possible_jobs)) j = possible_jobs[0] - self.board.claim(j, self.board.name) - self.client.flush() + with flush(self.client): + self.board.claim(j, self.board.name) possible_jobs = list(self.board.iterjobs(only_unclaimed=True)) self.assertEqual(0, len(possible_jobs)) - self.board.consume(j, self.board.name) - self.client.flush() + with flush(self.client): + self.board.consume(j, self.board.name) self.assertEqual(0, len(list(self.board.iterjobs()))) self.assertRaises(excp.NotFound, @@ -219,20 +251,19 @@ class TestZookeeperJobs(test.TestCase): def test_posting_claim_abandon(self): with connect_close(self.board): - self.board.post('test', p_utils.temporary_log_book()) - self.client.flush() + with flush(self.client): + self.board.post('test', p_utils.temporary_log_book()) possible_jobs = list(self.board.iterjobs(only_unclaimed=True)) self.assertEqual(1, len(possible_jobs)) j = possible_jobs[0] - self.board.claim(j, self.board.name) - self.client.flush() + with flush(self.client): + 
self.board.claim(j, self.board.name) possible_jobs = list(self.board.iterjobs(only_unclaimed=True)) self.assertEqual(0, len(possible_jobs)) - - self.board.abandon(j, self.board.name) - self.client.flush() + with flush(self.client): + self.board.abandon(j, self.board.name) possible_jobs = list(self.board.iterjobs(only_unclaimed=True)) self.assertEqual(1, len(possible_jobs)) @@ -240,13 +271,13 @@ class TestZookeeperJobs(test.TestCase): def test_posting_claim_diff_owner(self): with connect_close(self.board): - self.board.post('test', p_utils.temporary_log_book()) - self.client.flush() + with flush(self.client): + self.board.post('test', p_utils.temporary_log_book()) possible_jobs = list(self.board.iterjobs(only_unclaimed=True)) self.assertEqual(1, len(possible_jobs)) - self.board.claim(possible_jobs[0], self.board.name) - self.client.flush() + with flush(self.client): + self.board.claim(possible_jobs[0], self.board.name) possible_jobs = list(self.board.iterjobs()) self.assertEqual(1, len(possible_jobs)) @@ -258,11 +289,11 @@ class TestZookeeperJobs(test.TestCase): def test_posting_state_lock_lost(self): with connect_close(self.board): - j = self.board.post('test', p_utils.temporary_log_book()) - self.client.flush() + with flush(self.client): + j = self.board.post('test', p_utils.temporary_log_book()) self.assertEqual(states.UNCLAIMED, j.state) - self.board.claim(j, self.board.name) - self.client.flush() + with flush(self.client): + self.board.claim(j, self.board.name) self.assertEqual(states.CLAIMED, j.state) # Forcefully delete the lock from the backend storage to make @@ -287,11 +318,11 @@ class TestZookeeperJobs(test.TestCase): def test_posting_owner_lost(self): with connect_close(self.board): - j = self.board.post('test', p_utils.temporary_log_book()) - self.client.flush() + with flush(self.client): + j = self.board.post('test', p_utils.temporary_log_book()) self.assertEqual(states.UNCLAIMED, j.state) - self.board.claim(j, self.board.name) - self.client.flush() + 
with flush(self.client): + self.board.claim(j, self.board.name) self.assertEqual(states.CLAIMED, j.state) # Forcefully delete the owner from the backend storage to make @@ -317,8 +348,8 @@ class TestZookeeperJobs(test.TestCase): self.addCleanup(board.close) with connect_close(board): - board.post('test', book) - client.flush() + with flush(client): + board.post('test', book) possible_jobs = list(board.iterjobs(only_unclaimed=True)) self.assertEqual(1, len(possible_jobs)) @@ -334,8 +365,8 @@ class TestZookeeperJobs(test.TestCase): def test_posting_abandon_no_owner(self): with connect_close(self.board): - self.board.post('test', p_utils.temporary_log_book()) - self.client.flush() + with flush(self.client): + self.board.post('test', p_utils.temporary_log_book()) self.assertEqual(1, self.board.job_count) possible_jobs = list(self.board.iterjobs(only_unclaimed=True)) diff --git a/test-requirements.txt b/test-requirements.txt index fc0bcba4..d37163d8 100644 --- a/test-requirements.txt +++ b/test-requirements.txt @@ -5,7 +5,7 @@ mock>=1.0 python-subunit>=0.0.18 testrepository>=0.0.18 testtools>=0.9.34 -zake>=0.0.15 +zake>=0.0.18 # docs build jobs sphinx>=1.1.2,<1.2 oslosphinx From 125d015d92e715bb6290d58c5f756defec0b8ccc Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Fri, 9 May 2014 20:03:58 -0700 Subject: [PATCH 063/188] Cleanup zookeeper integration testing In the persistence tests only use zake when zookeeper is not available (and of the right version). When zookeeper is available skip running zake. In the jobboard tests split out the tests which are not specific for zake into a base class (allowing for a future commit to add a zookeeper integration test). 
Change-Id: I50d51639a7f6c03c29d559c485676fddb9a7cf20 --- taskflow/tests/unit/jobs/base.py | 273 ++++++++++++++ taskflow/tests/unit/jobs/test_zk_job.py | 348 +++--------------- .../unit/persistence/test_zake_persistence.py | 45 --- .../unit/persistence/test_zk_persistence.py | 58 +-- taskflow/tests/utils.py | 25 ++ 5 files changed, 379 insertions(+), 370 deletions(-) create mode 100644 taskflow/tests/unit/jobs/base.py delete mode 100644 taskflow/tests/unit/persistence/test_zake_persistence.py diff --git a/taskflow/tests/unit/jobs/base.py b/taskflow/tests/unit/jobs/base.py new file mode 100644 index 00000000..6995870f --- /dev/null +++ b/taskflow/tests/unit/jobs/base.py @@ -0,0 +1,273 @@ +# -*- coding: utf-8 -*- + +# Copyright (C) 2014 Yahoo! Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import contextlib +import mock +import threading +import time + +from kazoo.recipe import watchers + +from taskflow import exceptions as excp +from taskflow.openstack.common import uuidutils +from taskflow.persistence.backends import impl_dir +from taskflow import states +from taskflow.utils import misc +from taskflow.utils import persistence_utils as p_utils + + +@contextlib.contextmanager +def connect_close(*args): + try: + for a in args: + a.connect() + yield + finally: + for a in args: + a.close() + + +@contextlib.contextmanager +def flush(client, path=None): + # This uses the linearity guarantee of zookeeper (and associated libraries) + # to create a temporary node, wait until a watcher notifies it's created, + # then yield back for more work, and then at the end of that work delete + # the created node. This ensures that the operations done in the yield + # of this context manager will be applied and all watchers will have fired + # before this context manager exits. + if not path: + path = "/tmp-%s" % uuidutils.generate_uuid() + created = threading.Event() + deleted = threading.Event() + + def on_created(data, stat): + if stat is not None: + created.set() + return False # cause this watcher to cease to exist + + def on_deleted(data, stat): + if stat is None: + deleted.set() + return False # cause this watcher to cease to exist + + watchers.DataWatch(client, path, func=on_created) + client.create(path) + created.wait() + try: + yield + finally: + watchers.DataWatch(client, path, func=on_deleted) + client.delete(path, recursive=True) + deleted.wait() + + +class BoardTestMixin(object): + def test_connect(self): + self.assertFalse(self.board.connected) + with connect_close(self.board): + self.assertTrue(self.board.connected) + + @mock.patch("taskflow.jobs.backends.impl_zookeeper.misc." 
+ "millis_to_datetime") + def test_posting_dates(self, mock_dt): + epoch = misc.millis_to_datetime(0) + mock_dt.return_value = epoch + + with connect_close(self.board): + j = self.board.post('test', p_utils.temporary_log_book()) + self.assertEqual(epoch, j.created_on) + self.assertEqual(epoch, j.last_modified) + + self.assertTrue(mock_dt.called) + + def test_board_iter(self): + with connect_close(self.board): + it = self.board.iterjobs() + self.assertEqual(it.board, self.board) + self.assertFalse(it.only_unclaimed) + self.assertFalse(it.ensure_fresh) + + def test_board_iter_empty(self): + with connect_close(self.board): + jobs_found = list(self.board.iterjobs()) + self.assertEqual([], jobs_found) + + def test_fresh_iter(self): + with connect_close(self.board): + book = p_utils.temporary_log_book() + self.board.post('test', book) + jobs = list(self.board.iterjobs(ensure_fresh=True)) + self.assertEqual(1, len(jobs)) + + def test_wait_timeout(self): + with connect_close(self.board): + self.assertRaises(excp.NotFound, self.board.wait, timeout=0.1) + + def test_wait_arrival(self): + ev = threading.Event() + jobs = [] + + def poster(wait_post=0.2): + ev.wait() # wait until the waiter is active + time.sleep(wait_post) + self.board.post('test', p_utils.temporary_log_book()) + + def waiter(): + ev.set() + it = self.board.wait() + jobs.extend(it) + + with connect_close(self.board): + t1 = threading.Thread(target=poster) + t1.daemon = True + t1.start() + t2 = threading.Thread(target=waiter) + t2.daemon = True + t2.start() + for t in (t1, t2): + t.join() + + self.assertEqual(1, len(jobs)) + + def test_posting_claim(self): + + with connect_close(self.board): + with flush(self.client): + self.board.post('test', p_utils.temporary_log_book()) + + self.assertEqual(1, self.board.job_count) + possible_jobs = list(self.board.iterjobs(only_unclaimed=True)) + self.assertEqual(1, len(possible_jobs)) + j = possible_jobs[0] + self.assertEqual(states.UNCLAIMED, j.state) + + with 
flush(self.client): + self.board.claim(j, self.board.name) + + self.assertEqual(self.board.name, self.board.find_owner(j)) + self.assertEqual(states.CLAIMED, j.state) + + possible_jobs = list(self.board.iterjobs(only_unclaimed=True)) + self.assertEqual(0, len(possible_jobs)) + + self.assertRaisesAttrAccess(excp.NotFound, j, 'state') + self.assertRaises(excp.NotFound, + self.board.consume, j, self.board.name) + + def test_posting_claim_consume(self): + + with connect_close(self.board): + with flush(self.client): + self.board.post('test', p_utils.temporary_log_book()) + + possible_jobs = list(self.board.iterjobs(only_unclaimed=True)) + self.assertEqual(1, len(possible_jobs)) + j = possible_jobs[0] + with flush(self.client): + self.board.claim(j, self.board.name) + + possible_jobs = list(self.board.iterjobs(only_unclaimed=True)) + self.assertEqual(0, len(possible_jobs)) + with flush(self.client): + self.board.consume(j, self.board.name) + + self.assertEqual(0, len(list(self.board.iterjobs()))) + self.assertRaises(excp.NotFound, + self.board.consume, j, self.board.name) + + def test_posting_claim_abandon(self): + + with connect_close(self.board): + with flush(self.client): + self.board.post('test', p_utils.temporary_log_book()) + + possible_jobs = list(self.board.iterjobs(only_unclaimed=True)) + self.assertEqual(1, len(possible_jobs)) + j = possible_jobs[0] + with flush(self.client): + self.board.claim(j, self.board.name) + + possible_jobs = list(self.board.iterjobs(only_unclaimed=True)) + self.assertEqual(0, len(possible_jobs)) + with flush(self.client): + self.board.abandon(j, self.board.name) + + possible_jobs = list(self.board.iterjobs(only_unclaimed=True)) + self.assertEqual(1, len(possible_jobs)) + + def test_posting_claim_diff_owner(self): + + with connect_close(self.board): + with flush(self.client): + self.board.post('test', p_utils.temporary_log_book()) + + possible_jobs = list(self.board.iterjobs(only_unclaimed=True)) + self.assertEqual(1, 
len(possible_jobs)) + with flush(self.client): + self.board.claim(possible_jobs[0], self.board.name) + + possible_jobs = list(self.board.iterjobs()) + self.assertEqual(1, len(possible_jobs)) + self.assertRaises(excp.UnclaimableJob, self.board.claim, + possible_jobs[0], self.board.name + "-1") + possible_jobs = list(self.board.iterjobs(only_unclaimed=True)) + self.assertEqual(0, len(possible_jobs)) + + def test_posting_no_post(self): + with connect_close(self.board): + with mock.patch.object(self.client, 'create') as create_func: + create_func.side_effect = IOError("Unable to post") + self.assertRaises(IOError, self.board.post, + 'test', p_utils.temporary_log_book()) + self.assertEqual(0, self.board.job_count) + + def test_posting_with_book(self): + backend = impl_dir.DirBackend(conf={ + 'path': self.makeTmpDir(), + }) + backend.get_connection().upgrade() + book, flow_detail = p_utils.temporary_flow_detail(backend) + self.assertEqual(1, len(book)) + + client, board = self._create_board(persistence=backend) + self.addCleanup(board.close) + + with connect_close(board): + with flush(client): + board.post('test', book) + + possible_jobs = list(board.iterjobs(only_unclaimed=True)) + self.assertEqual(1, len(possible_jobs)) + j = possible_jobs[0] + self.assertEqual(1, len(j.book)) + self.assertEqual(book.name, j.book.name) + self.assertEqual(book.uuid, j.book.uuid) + + flow_details = list(j.book) + self.assertEqual(flow_detail.uuid, flow_details[0].uuid) + self.assertEqual(flow_detail.name, flow_details[0].name) + + def test_posting_abandon_no_owner(self): + + with connect_close(self.board): + with flush(self.client): + self.board.post('test', p_utils.temporary_log_book()) + + self.assertEqual(1, self.board.job_count) + possible_jobs = list(self.board.iterjobs(only_unclaimed=True)) + self.assertEqual(1, len(possible_jobs)) + j = possible_jobs[0] + self.assertRaises(excp.JobFailure, self.board.abandon, j, j.name) diff --git a/taskflow/tests/unit/jobs/test_zk_job.py 
b/taskflow/tests/unit/jobs/test_zk_job.py index ef652c57..9003cfdd 100644 --- a/taskflow/tests/unit/jobs/test_zk_job.py +++ b/taskflow/tests/unit/jobs/test_zk_job.py @@ -14,163 +14,83 @@ # License for the specific language governing permissions and limitations # under the License. -import contextlib -import mock -import threading -import time - import six -from kazoo.recipe import watchers - from zake import fake_client from zake import utils as zake_utils -from taskflow import exceptions as excp from taskflow.jobs.backends import impl_zookeeper from taskflow import states from taskflow import test from taskflow.openstack.common import jsonutils -from taskflow.openstack.common import uuidutils -from taskflow.persistence.backends import impl_dir +from taskflow.tests.unit.jobs import base from taskflow.utils import misc from taskflow.utils import persistence_utils as p_utils -@contextlib.contextmanager -def connect_close(*args): - try: - for a in args: - a.connect() - yield - finally: - for a in args: - a.close() +class ZakeJobboardTest(test.TestCase, base.BoardTestMixin): + def _create_board(self, client=None, persistence=None): + if not client: + client = fake_client.FakeClient() + board = impl_zookeeper.ZookeeperJobBoard('test-board', {}, + client=client, + persistence=persistence) + return (client, board) - -def create_board(client=None, persistence=None): - if not client: - client = fake_client.FakeClient() - board = impl_zookeeper.ZookeeperJobBoard('test-board', {}, - client=client, - persistence=persistence) - return (client, board) - - -@contextlib.contextmanager -def flush(client, path=None): - # This uses the linearity guarantee of zookeeper (and associated libraries) - # to create a temporary node, wait until a watcher notifies it's created, - # then yield back for more work, and then at the end of that work delete - # the created node. 
This ensures that the operations done in the yield - # of this context manager will be applied and all watchers will have fired - # before this context manager exits. - if not path: - path = "/tmp-%s" % uuidutils.generate_uuid() - created = threading.Event() - deleted = threading.Event() - - def on_created(data, stat): - if stat is not None: - created.set() - return False # cause this watcher to cease to exist - - def on_deleted(data, stat): - if stat is None: - deleted.set() - return False # cause this watcher to cease to exist - - watchers.DataWatch(client, path, func=on_created) - client.create(path) - created.wait() - try: - yield - finally: - watchers.DataWatch(client, path, func=on_deleted) - client.delete(path, recursive=True) - deleted.wait() - - -class TestZookeeperJobs(test.TestCase): def setUp(self): - super(TestZookeeperJobs, self).setUp() - self.client, self.board = create_board() + super(ZakeJobboardTest, self).setUp() + self.client, self.board = self._create_board() self.addCleanup(self.board.close) self.bad_paths = [self.board.path] self.bad_paths.extend(zake_utils.partition_path(self.board.path)) - def test_connect(self): - self.assertFalse(self.board.connected) - with connect_close(self.board): - self.assertTrue(self.board.connected) + def test_posting_owner_lost(self): - @mock.patch("taskflow.jobs.backends.impl_zookeeper.misc." 
- "millis_to_datetime") - def test_posting_dates(self, mock_dt): - epoch = misc.millis_to_datetime(0) - mock_dt.return_value = epoch + with base.connect_close(self.board): + with base.flush(self.client): + j = self.board.post('test', p_utils.temporary_log_book()) + self.assertEqual(states.UNCLAIMED, j.state) + with base.flush(self.client): + self.board.claim(j, self.board.name) + self.assertEqual(states.CLAIMED, j.state) - with connect_close(self.board): - j = self.board.post('test', p_utils.temporary_log_book()) - self.assertEqual(epoch, j.created_on) - self.assertEqual(epoch, j.last_modified) + # Forcefully delete the owner from the backend storage to make + # sure the job becomes unclaimed (this may happen if some admin + # manually deletes the lock). + paths = list(six.iteritems(self.client.storage.paths)) + for (path, value) in paths: + if path in self.bad_paths: + continue + if path.endswith('lock'): + value['data'] = misc.binary_encode(jsonutils.dumps({})) + self.assertEqual(states.UNCLAIMED, j.state) - self.assertTrue(mock_dt.called) + def test_posting_state_lock_lost(self): - def test_board_iter(self): - with connect_close(self.board): - it = self.board.iterjobs() - self.assertEqual(it.board, self.board) - self.assertFalse(it.only_unclaimed) - self.assertFalse(it.ensure_fresh) + with base.connect_close(self.board): + with base.flush(self.client): + j = self.board.post('test', p_utils.temporary_log_book()) + self.assertEqual(states.UNCLAIMED, j.state) + with base.flush(self.client): + self.board.claim(j, self.board.name) + self.assertEqual(states.CLAIMED, j.state) - def test_board_iter_empty(self): - with connect_close(self.board): - jobs_found = list(self.board.iterjobs()) - self.assertEqual([], jobs_found) - - def test_fresh_iter(self): - with connect_close(self.board): - book = p_utils.temporary_log_book() - self.board.post('test', book) - jobs = list(self.board.iterjobs(ensure_fresh=True)) - self.assertEqual(1, len(jobs)) - - def 
test_wait_timeout(self): - with connect_close(self.board): - self.assertRaises(excp.NotFound, self.board.wait, timeout=0.1) - - def test_wait_arrival(self): - ev = threading.Event() - jobs = [] - - def poster(wait_post=0.2): - ev.wait() # wait until the waiter is active - time.sleep(wait_post) - self.board.post('test', p_utils.temporary_log_book()) - - def waiter(): - ev.set() - it = self.board.wait() - jobs.extend(it) - - with connect_close(self.board): - t1 = threading.Thread(target=poster) - t1.daemon = True - t1.start() - t2 = threading.Thread(target=waiter) - t2.daemon = True - t2.start() - for t in (t1, t2): - t.join() - - self.assertEqual(1, len(jobs)) + # Forcefully delete the lock from the backend storage to make + # sure the job becomes unclaimed (this may happen if some admin + # manually deletes the lock). + paths = list(six.iteritems(self.client.storage.paths)) + for (path, value) in paths: + if path in self.bad_paths: + continue + if path.endswith("lock"): + self.client.storage.pop(path) + self.assertEqual(states.UNCLAIMED, j.state) def test_posting_received_raw(self): book = p_utils.temporary_log_book() - with connect_close(self.board): + with base.connect_close(self.board): self.assertTrue(self.board.connected) self.assertEqual(0, self.board.job_count) posted_job = self.board.post('test', book) @@ -201,175 +121,3 @@ class TestZookeeperJobs(test.TestCase): }, 'details': {}, }, jsonutils.loads(misc.binary_decode(paths[path_key]['data']))) - - def test_posting_claim(self): - - with connect_close(self.board): - with flush(self.client): - self.board.post('test', p_utils.temporary_log_book()) - - self.assertEqual(1, self.board.job_count) - possible_jobs = list(self.board.iterjobs(only_unclaimed=True)) - self.assertEqual(1, len(possible_jobs)) - j = possible_jobs[0] - self.assertEqual(states.UNCLAIMED, j.state) - - with flush(self.client): - self.board.claim(j, self.board.name) - - self.assertEqual(self.board.name, self.board.find_owner(j)) - 
self.assertEqual(states.CLAIMED, j.state) - - possible_jobs = list(self.board.iterjobs(only_unclaimed=True)) - self.assertEqual(0, len(possible_jobs)) - - self.assertRaisesAttrAccess(excp.NotFound, j, 'state') - self.assertRaises(excp.NotFound, - self.board.consume, j, self.board.name) - - def test_posting_claim_consume(self): - - with connect_close(self.board): - with flush(self.client): - self.board.post('test', p_utils.temporary_log_book()) - - possible_jobs = list(self.board.iterjobs(only_unclaimed=True)) - self.assertEqual(1, len(possible_jobs)) - j = possible_jobs[0] - with flush(self.client): - self.board.claim(j, self.board.name) - - possible_jobs = list(self.board.iterjobs(only_unclaimed=True)) - self.assertEqual(0, len(possible_jobs)) - with flush(self.client): - self.board.consume(j, self.board.name) - - self.assertEqual(0, len(list(self.board.iterjobs()))) - self.assertRaises(excp.NotFound, - self.board.consume, j, self.board.name) - - def test_posting_claim_abandon(self): - - with connect_close(self.board): - with flush(self.client): - self.board.post('test', p_utils.temporary_log_book()) - - possible_jobs = list(self.board.iterjobs(only_unclaimed=True)) - self.assertEqual(1, len(possible_jobs)) - j = possible_jobs[0] - with flush(self.client): - self.board.claim(j, self.board.name) - - possible_jobs = list(self.board.iterjobs(only_unclaimed=True)) - self.assertEqual(0, len(possible_jobs)) - with flush(self.client): - self.board.abandon(j, self.board.name) - - possible_jobs = list(self.board.iterjobs(only_unclaimed=True)) - self.assertEqual(1, len(possible_jobs)) - - def test_posting_claim_diff_owner(self): - - with connect_close(self.board): - with flush(self.client): - self.board.post('test', p_utils.temporary_log_book()) - - possible_jobs = list(self.board.iterjobs(only_unclaimed=True)) - self.assertEqual(1, len(possible_jobs)) - with flush(self.client): - self.board.claim(possible_jobs[0], self.board.name) - - possible_jobs = 
list(self.board.iterjobs()) - self.assertEqual(1, len(possible_jobs)) - self.assertRaises(excp.UnclaimableJob, self.board.claim, - possible_jobs[0], self.board.name + "-1") - possible_jobs = list(self.board.iterjobs(only_unclaimed=True)) - self.assertEqual(0, len(possible_jobs)) - - def test_posting_state_lock_lost(self): - - with connect_close(self.board): - with flush(self.client): - j = self.board.post('test', p_utils.temporary_log_book()) - self.assertEqual(states.UNCLAIMED, j.state) - with flush(self.client): - self.board.claim(j, self.board.name) - self.assertEqual(states.CLAIMED, j.state) - - # Forcefully delete the lock from the backend storage to make - # sure the job becomes unclaimed (this may happen if some admin - # manually deletes the lock). - paths = list(six.iteritems(self.client.storage.paths)) - for (path, value) in paths: - if path in self.bad_paths: - continue - if path.endswith("lock"): - self.client.storage.pop(path) - self.assertEqual(states.UNCLAIMED, j.state) - - def test_posting_no_post(self): - with connect_close(self.board): - with mock.patch.object(self.client, 'create') as create_func: - create_func.side_effect = IOError("Unable to post") - self.assertRaises(IOError, self.board.post, - 'test', p_utils.temporary_log_book()) - self.assertEqual(0, self.board.job_count) - - def test_posting_owner_lost(self): - - with connect_close(self.board): - with flush(self.client): - j = self.board.post('test', p_utils.temporary_log_book()) - self.assertEqual(states.UNCLAIMED, j.state) - with flush(self.client): - self.board.claim(j, self.board.name) - self.assertEqual(states.CLAIMED, j.state) - - # Forcefully delete the owner from the backend storage to make - # sure the job becomes unclaimed (this may happen if some admin - # manually deletes the lock). 
- paths = list(six.iteritems(self.client.storage.paths)) - for (path, value) in paths: - if path in self.bad_paths: - continue - if path.endswith('lock'): - value['data'] = misc.binary_encode(jsonutils.dumps({})) - self.assertEqual(states.UNCLAIMED, j.state) - - def test_posting_with_book(self): - backend = impl_dir.DirBackend(conf={ - 'path': self.makeTmpDir(), - }) - backend.get_connection().upgrade() - book, flow_detail = p_utils.temporary_flow_detail(backend) - self.assertEqual(1, len(book)) - - client, board = create_board(persistence=backend) - self.addCleanup(board.close) - - with connect_close(board): - with flush(client): - board.post('test', book) - - possible_jobs = list(board.iterjobs(only_unclaimed=True)) - self.assertEqual(1, len(possible_jobs)) - j = possible_jobs[0] - self.assertEqual(1, len(j.book)) - self.assertEqual(book.name, j.book.name) - self.assertEqual(book.uuid, j.book.uuid) - - flow_details = list(j.book) - self.assertEqual(flow_detail.uuid, flow_details[0].uuid) - self.assertEqual(flow_detail.name, flow_details[0].name) - - def test_posting_abandon_no_owner(self): - - with connect_close(self.board): - with flush(self.client): - self.board.post('test', p_utils.temporary_log_book()) - - self.assertEqual(1, self.board.job_count) - possible_jobs = list(self.board.iterjobs(only_unclaimed=True)) - self.assertEqual(1, len(possible_jobs)) - j = possible_jobs[0] - self.assertRaises(excp.JobFailure, self.board.abandon, j, j.name) diff --git a/taskflow/tests/unit/persistence/test_zake_persistence.py b/taskflow/tests/unit/persistence/test_zake_persistence.py deleted file mode 100644 index 91f04f9d..00000000 --- a/taskflow/tests/unit/persistence/test_zake_persistence.py +++ /dev/null @@ -1,45 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright (C) 2014 AT&T Labs All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import contextlib - -from zake import fake_client - -from taskflow.persistence import backends -from taskflow.persistence.backends import impl_zookeeper -from taskflow import test -from taskflow.tests.unit.persistence import base - - -class ZakePersistenceTest(test.TestCase, base.PersistenceTestMixin): - def _get_connection(self): - return self._backend.get_connection() - - def setUp(self): - super(ZakePersistenceTest, self).setUp() - conf = { - "path": "/taskflow", - } - client = fake_client.FakeClient() - client.start() - self._backend = impl_zookeeper.ZkBackend(conf, client=client) - conn = self._backend.get_connection() - conn.upgrade() - - def test_zk_persistence_entry_point(self): - conf = {'connection': 'zookeeper:'} - with contextlib.closing(backends.fetch(conf)) as be: - self.assertIsInstance(be, impl_zookeeper.ZkBackend) diff --git a/taskflow/tests/unit/persistence/test_zk_persistence.py b/taskflow/tests/unit/persistence/test_zk_persistence.py index 04eac378..414db09b 100644 --- a/taskflow/tests/unit/persistence/test_zk_persistence.py +++ b/taskflow/tests/unit/persistence/test_zk_persistence.py @@ -17,37 +17,18 @@ import contextlib import testtools +from zake import fake_client from taskflow.openstack.common import uuidutils +from taskflow.persistence import backends from taskflow.persistence.backends import impl_zookeeper from taskflow import test from taskflow.tests.unit.persistence import base -from taskflow.utils import kazoo_utils +from taskflow.tests import utils as test_utils -TEST_CONFIG = { - 'timeout': 1.0, - 'hosts': 
["localhost:2181"], -} TEST_PATH_TPL = '/taskflow/persistence-test/%s' - - -def _zookeeper_available(): - client = kazoo_utils.make_client(TEST_CONFIG) - try: - # NOTE(imelnikov): 3 seconds we should be enough for localhost - client.start(timeout=3) - zk_ver = client.server_version() - if zk_ver >= impl_zookeeper.MIN_ZK_VERSION: - return True - else: - return False - except Exception: - return False - finally: - kazoo_utils.finalize_client(client) - - -_ZOOKEEPER_AVAILABLE = _zookeeper_available() +_ZOOKEEPER_AVAILABLE = test_utils.zookeeper_available( + impl_zookeeper.MIN_ZK_VERSION) @testtools.skipIf(not _ZOOKEEPER_AVAILABLE, 'zookeeper is not available') @@ -61,7 +42,7 @@ class ZkPersistenceTest(test.TestCase, base.PersistenceTestMixin): def setUp(self): super(ZkPersistenceTest, self).setUp() - conf = TEST_CONFIG.copy() + conf = test_utils.ZK_TEST_CONFIG.copy() # Create a unique path just for this test (so that we don't overwrite # what other tests are doing). conf['path'] = TEST_PATH_TPL % (uuidutils.generate_uuid()) @@ -74,3 +55,30 @@ class ZkPersistenceTest(test.TestCase, base.PersistenceTestMixin): with contextlib.closing(self._get_connection()) as conn: conn.upgrade() self.addCleanup(self._clear_all) + + def test_zk_persistence_entry_point(self): + conf = {'connection': 'zookeeper:'} + with contextlib.closing(backends.fetch(conf)) as be: + self.assertIsInstance(be, impl_zookeeper.ZkBackend) + + +@testtools.skipIf(_ZOOKEEPER_AVAILABLE, 'zookeeper is available') +class ZakePersistenceTest(test.TestCase, base.PersistenceTestMixin): + def _get_connection(self): + return self._backend.get_connection() + + def setUp(self): + super(ZakePersistenceTest, self).setUp() + conf = { + "path": "/taskflow", + } + self.client = fake_client.FakeClient() + self.client.start() + self._backend = impl_zookeeper.ZkBackend(conf, client=self.client) + conn = self._backend.get_connection() + conn.upgrade() + + def test_zk_persistence_entry_point(self): + conf = {'connection': 
'zookeeper:'} + with contextlib.closing(backends.fetch(conf)) as be: + self.assertIsInstance(be, impl_zookeeper.ZkBackend) diff --git a/taskflow/tests/utils.py b/taskflow/tests/utils.py index d8793215..ce3289ec 100644 --- a/taskflow/tests/utils.py +++ b/taskflow/tests/utils.py @@ -23,12 +23,18 @@ from taskflow import exceptions from taskflow.persistence.backends import impl_memory from taskflow import retry from taskflow import task +from taskflow.utils import kazoo_utils from taskflow.utils import misc ARGS_KEY = '__args__' KWARGS_KEY = '__kwargs__' ORDER_KEY = '__order__' +ZK_TEST_CONFIG = { + 'timeout': 1.0, + 'hosts': ["localhost:2181"], +} + @contextlib.contextmanager def wrap_all_failures(): @@ -44,6 +50,25 @@ def wrap_all_failures(): raise exceptions.WrappedFailure([misc.Failure()]) +def zookeeper_available(min_version, timeout=3): + client = kazoo_utils.make_client(ZK_TEST_CONFIG.copy()) + try: + # NOTE(imelnikov): 3 seconds we should be enough for localhost + client.start(timeout=float(timeout)) + if min_version: + zk_ver = client.server_version() + if zk_ver >= min_version: + return True + else: + return False + else: + return True + except Exception: + return False + finally: + kazoo_utils.finalize_client(client) + + class DummyTask(task.Task): def execute(self, context, *args, **kwargs): From 9c9377027edbd22509cc0c94dae253c927c36852 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Fri, 9 May 2014 20:55:12 -0700 Subject: [PATCH 064/188] Add a zookeeper jobboard integration test Test the base jobboard tests against a real zookeeper server if one is available (if not skip the tests). 
Change-Id: I3a4d582e347ce9de48a19a17f5ce890590a4adcb --- taskflow/tests/unit/jobs/base.py | 2 -- taskflow/tests/unit/jobs/test_zk_job.py | 41 ++++++++++++++++++++++--- 2 files changed, 37 insertions(+), 6 deletions(-) diff --git a/taskflow/tests/unit/jobs/base.py b/taskflow/tests/unit/jobs/base.py index 6995870f..addc7edf 100644 --- a/taskflow/tests/unit/jobs/base.py +++ b/taskflow/tests/unit/jobs/base.py @@ -243,8 +243,6 @@ class BoardTestMixin(object): self.assertEqual(1, len(book)) client, board = self._create_board(persistence=backend) - self.addCleanup(board.close) - with connect_close(board): with flush(client): board.post('test', book) diff --git a/taskflow/tests/unit/jobs/test_zk_job.py b/taskflow/tests/unit/jobs/test_zk_job.py index 9003cfdd..3d5f8228 100644 --- a/taskflow/tests/unit/jobs/test_zk_job.py +++ b/taskflow/tests/unit/jobs/test_zk_job.py @@ -15,6 +15,7 @@ # under the License. import six +import testtools from zake import fake_client from zake import utils as zake_utils @@ -24,24 +25,56 @@ from taskflow import states from taskflow import test from taskflow.openstack.common import jsonutils +from taskflow.openstack.common import uuidutils from taskflow.tests.unit.jobs import base +from taskflow.tests import utils as test_utils +from taskflow.utils import kazoo_utils from taskflow.utils import misc from taskflow.utils import persistence_utils as p_utils +TEST_PATH_TPL = '/taskflow/board-test/%s' +_ZOOKEEPER_AVAILABLE = test_utils.zookeeper_available( + impl_zookeeper.MIN_ZK_VERSION) + + +@testtools.skipIf(not _ZOOKEEPER_AVAILABLE, 'zookeeper is not available') +class ZookeeperJobboardTest(test.TestCase, base.BoardTestMixin): + def _create_board(self, persistence=None): + + def cleanup_path(client, path): + if not client.connected: + return + client.delete(path, recursive=True) + + client = kazoo_utils.make_client(test_utils.ZK_TEST_CONFIG.copy()) + path = TEST_PATH_TPL % (uuidutils.generate_uuid()) + board = 
impl_zookeeper.ZookeeperJobBoard('test-board', {'path': path}, + client=client, + persistence=persistence) + self.addCleanup(kazoo_utils.finalize_client, client) + self.addCleanup(cleanup_path, client, path) + self.addCleanup(board.close) + return (client, board) + + def setUp(self): + super(ZookeeperJobboardTest, self).setUp() + self.client, self.board = self._create_board() + + class ZakeJobboardTest(test.TestCase, base.BoardTestMixin): - def _create_board(self, client=None, persistence=None): - if not client: - client = fake_client.FakeClient() + def _create_board(self, persistence=None): + client = fake_client.FakeClient() board = impl_zookeeper.ZookeeperJobBoard('test-board', {}, client=client, persistence=persistence) + self.addCleanup(board.close) + self.addCleanup(kazoo_utils.finalize_client, client) return (client, board) def setUp(self): super(ZakeJobboardTest, self).setUp() self.client, self.board = self._create_board() - self.addCleanup(self.board.close) self.bad_paths = [self.board.path] self.bad_paths.extend(zake_utils.partition_path(self.board.path)) From cb87c6e2c58593d5ef5710c4d7db0c18831e354c Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Fri, 9 May 2014 22:00:23 -0700 Subject: [PATCH 065/188] Add a helper tool which clears zookeeper test dirs Create a tool that can clear any leftover garbage left by the testing of taskflow unit tests with zookeeper for when this is needed (for example, a test does not clean up correctly on its own). 
Change-Id: Icfaf28273b76a6ca27683d174f111fba2858f055 --- tools/clear_zk.py | 50 +++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 50 insertions(+) create mode 100644 tools/clear_zk.py diff --git a/tools/clear_zk.py b/tools/clear_zk.py new file mode 100644 index 00000000..d0a9e3da --- /dev/null +++ b/tools/clear_zk.py @@ -0,0 +1,50 @@ +#!/usr/bin/env python + +import contextlib +import os +import re +import sys + +top_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), + os.pardir)) +sys.path.insert(0, top_dir) + +from taskflow.utils import kazoo_utils + + +@contextlib.contextmanager +def finalize_client(client): + try: + yield client + finally: + kazoo_utils.finalize_client(client) + + +def iter_children(client, path): + if client.exists(path): + for child_path in client.get_children(path): + if path == "/": + child_path = "/%s" % (child_path) + else: + child_path = "%s/%s" % (path, child_path) + yield child_path + for child_child_path in iter_children(client, child_path): + yield child_child_path + + +def main(): + conf = {} + if len(sys.argv) > 1: + conf['hosts'] = sys.argv[1:] + with finalize_client(kazoo_utils.make_client(conf)) as client: + client.start(timeout=1.0) + children = list(iter_children(client, "/taskflow")) + for child_path in reversed(children): + if not re.match(r"^/taskflow/(.*?)-test/(.*)$", child_path): + continue + print("Deleting %s" % child_path) + client.delete(child_path) + + +if __name__ == "__main__": + main() From 504a88430c385a39dc3c4a87e0c0be923d110e3e Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Sat, 10 May 2014 10:16:52 -0700 Subject: [PATCH 066/188] Fix spelling mistake Fix usage of 'occurred' (misspelled). 
Change-Id: Ie71814bc1cda6536578a3bec2e072863990b8efa --- taskflow/examples/delayed_return.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/taskflow/examples/delayed_return.py b/taskflow/examples/delayed_return.py index e77b961c..cbdc66d5 100644 --- a/taskflow/examples/delayed_return.py +++ b/taskflow/examples/delayed_return.py @@ -31,7 +31,7 @@ sys.path.insert(0, self_dir) # INTRO: in this example linear_flow we will attach a listener to an engine # and delay the return from a function until after the result of a task has -# occured in that engine. The engine will continue running (in the background) +# occurred in that engine. The engine will continue running (in the background) # while the function will have returned. import taskflow.engines From 5d5ed4c1c605d42f460c43e66ee5e3fda990b1fd Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Sat, 10 May 2014 23:14:50 -0700 Subject: [PATCH 067/188] Default the impl_memory conf to none This configuration isn't used for anything in the memory backend so default it to none so that it doesn't need to be provided (since it's not used). Change-Id: I9e333aec52b697ebd3d5b1f4ec14d810e99bab19 --- taskflow/persistence/backends/impl_memory.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/taskflow/persistence/backends/impl_memory.py b/taskflow/persistence/backends/impl_memory.py index d3ad18fa..2d4c5e09 100644 --- a/taskflow/persistence/backends/impl_memory.py +++ b/taskflow/persistence/backends/impl_memory.py @@ -32,7 +32,7 @@ class MemoryBackend(base.Backend): """A backend that writes logbooks, flow details, and task details to in memory dictionaries. 
""" - def __init__(self, conf): + def __init__(self, conf=None): super(MemoryBackend, self).__init__(conf) self._log_books = {} self._flow_details = {} From 961b8f029154778a1466024ca0ff38333a9d746c Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Sat, 10 May 2014 23:24:55 -0700 Subject: [PATCH 068/188] Add a reset nodes function Instead of using the retry subflow function to reset all nodes, create a reset nodes function that will reset the state of a given iterator of nodes to PENDING and then change there desired intention to a provided intention. Change-Id: Ia3f4fd4537e30a88d0d7b2271ca5be54832f724d --- .../engines/action_engine/graph_action.py | 24 +++++++++---------- 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/taskflow/engines/action_engine/graph_action.py b/taskflow/engines/action_engine/graph_action.py index 691d3b55..b649875f 100644 --- a/taskflow/engines/action_engine/graph_action.py +++ b/taskflow/engines/action_engine/graph_action.py @@ -215,19 +215,19 @@ class FutureGraphAction(object): next_nodes.add(node) return next_nodes - def reset_all(self): - self._retry_subflow(None) - - def _retry_subflow(self, retry): - if retry is not None: - self._storage.set_atom_intention(retry.name, st.EXECUTE) - nodes_iter = self._analyzer.iterate_subgraph(retry) - else: - nodes_iter = self._analyzer.iterate_all_nodes() - + def _reset_nodes(self, nodes_iter, intention=st.EXECUTE): for node in nodes_iter: if isinstance(node, task.BaseTask): self._task_action.change_state(node, st.PENDING, progress=0.0) - else: + elif isinstance(node, r.Retry): self._retry_action.change_state(node, st.PENDING) - self._storage.set_atom_intention(node.name, st.EXECUTE) + else: + raise TypeError("Unknown how to reset node %s" % node) + self._storage.set_atom_intention(node.name, intention) + + def reset_all(self): + self._reset_nodes(self._analyzer.iterate_all_nodes()) + + def _retry_subflow(self, retry): + self._storage.set_atom_intention(retry.name, st.EXECUTE) + 
self._reset_nodes(self._analyzer.iterate_subgraph(retry)) From e565212ed5646fc87c3a7c22e75b71afbd27ecf2 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Sat, 10 May 2014 10:42:25 -0700 Subject: [PATCH 069/188] Use /taskflow/flush-test in the flush function Instead of using /tmp use a taskflow specific directory for the flush testing utility function to make it possible to cleanup any leftover taskflow test directories if/when a test crashes or fails without cleaning itself up. Change-Id: I4d06adf9b9fbf9ec70b511e763edb52a41293e39 --- taskflow/tests/unit/jobs/base.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/taskflow/tests/unit/jobs/base.py b/taskflow/tests/unit/jobs/base.py index addc7edf..6c101e4f 100644 --- a/taskflow/tests/unit/jobs/base.py +++ b/taskflow/tests/unit/jobs/base.py @@ -28,6 +28,8 @@ from taskflow import states from taskflow.utils import misc from taskflow.utils import persistence_utils as p_utils +FLUSH_PATH_TPL = '/taskflow/flush-test/%s' + @contextlib.contextmanager def connect_close(*args): @@ -49,7 +51,7 @@ def flush(client, path=None): # of this context manager will be applied and all watchers will have fired # before this context manager exits. if not path: - path = "/tmp-%s" % uuidutils.generate_uuid() + path = FLUSH_PATH_TPL % uuidutils.generate_uuid() created = threading.Event() deleted = threading.Event() @@ -64,7 +66,7 @@ def flush(client, path=None): return False # cause this watcher to cease to exist watchers.DataWatch(client, path, func=on_created) - client.create(path) + client.create(path, makepath=True) created.wait() try: yield From 7655ae02ce07f53b1611983d8988552bb4f71bd6 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Sun, 11 May 2014 09:12:55 -0700 Subject: [PATCH 070/188] Use futures wait() when possible Instead of always using our custom future wait functionality, only use that functionality if there are green futures and in other cases just use the future wait() function instead. 
Change-Id: I1eadcf53eb4b5f47b9543965610bfe04fec52e70 --- taskflow/engines/action_engine/executor.py | 2 +- .../engines/action_engine/graph_action.py | 8 +-- taskflow/tests/unit/test_utils_async_utils.py | 24 +++----- taskflow/utils/async_utils.py | 61 +++---------------- taskflow/utils/eventlet_utils.py | 51 +++++++++++++++- 5 files changed, 71 insertions(+), 75 deletions(-) diff --git a/taskflow/engines/action_engine/executor.py b/taskflow/engines/action_engine/executor.py index 846cc568..816060f5 100644 --- a/taskflow/engines/action_engine/executor.py +++ b/taskflow/engines/action_engine/executor.py @@ -99,7 +99,7 @@ class SerialTaskExecutor(TaskExecutorBase): def wait_for_any(self, fs, timeout=None): # NOTE(imelnikov): this executor returns only done futures. - return fs, [] + return (fs, set()) class ParallelTaskExecutor(TaskExecutorBase): diff --git a/taskflow/engines/action_engine/graph_action.py b/taskflow/engines/action_engine/graph_action.py index 691d3b55..77930e93 100644 --- a/taskflow/engines/action_engine/graph_action.py +++ b/taskflow/engines/action_engine/graph_action.py @@ -58,10 +58,10 @@ class FutureGraphAction(object): def _schedule(self, nodes): """Schedule a group of nodes for execution.""" - futures = [] + futures = set() for node in nodes: try: - futures.append(self._schedule_node(node)) + futures.add(self._schedule_node(node)) except Exception: # Immediately stop scheduling future work so that we can # exit execution early (rather than later) if a single task @@ -83,7 +83,7 @@ class FutureGraphAction(object): if self.is_running(): not_done, failures = self._schedule(next_nodes) else: - not_done, failures = ([], []) + not_done, failures = (set(), []) # Run! # @@ -129,7 +129,7 @@ class FutureGraphAction(object): # Recheck incase someone suspended it. 
if self.is_running(): more_not_done, failures = self._schedule(next_nodes) - not_done.extend(more_not_done) + not_done.update(more_not_done) if failures: misc.Failure.reraise_if_any(failures) diff --git a/taskflow/tests/unit/test_utils_async_utils.py b/taskflow/tests/unit/test_utils_async_utils.py index 32944c22..8e9ab944 100644 --- a/taskflow/tests/unit/test_utils_async_utils.py +++ b/taskflow/tests/unit/test_utils_async_utils.py @@ -54,40 +54,34 @@ class WaitForAnyTestsMixin(object): self.assertIs(done.pop(), f2) -class WaiterTestsMixin(object): +@testtools.skipIf(not eu.EVENTLET_AVAILABLE, 'eventlet is not available') +class AsyncUtilsEventletTest(test.TestCase, + WaitForAnyTestsMixin): + executor_cls = eu.GreenExecutor + is_green = True def test_add_result(self): - waiter = au._Waiter(self.is_green) + waiter = eu._GreenWaiter() self.assertFalse(waiter.event.is_set()) waiter.add_result(futures.Future()) self.assertTrue(waiter.event.is_set()) def test_add_exception(self): - waiter = au._Waiter(self.is_green) + waiter = eu._GreenWaiter() self.assertFalse(waiter.event.is_set()) waiter.add_exception(futures.Future()) self.assertTrue(waiter.event.is_set()) def test_add_cancelled(self): - waiter = au._Waiter(self.is_green) + waiter = eu._GreenWaiter() self.assertFalse(waiter.event.is_set()) waiter.add_cancelled(futures.Future()) self.assertTrue(waiter.event.is_set()) -@testtools.skipIf(not eu.EVENTLET_AVAILABLE, 'eventlet is not available') -class AsyncUtilsEventletTest(test.TestCase, - WaitForAnyTestsMixin, - WaiterTestsMixin): - executor_cls = eu.GreenExecutor - is_green = True - - class AsyncUtilsThreadedTest(test.TestCase, - WaitForAnyTestsMixin, - WaiterTestsMixin): + WaitForAnyTestsMixin): executor_cls = futures.ThreadPoolExecutor - is_green = False class MakeCompletedFutureTest(test.TestCase): diff --git a/taskflow/utils/async_utils.py b/taskflow/utils/async_utils.py index 4805230e..0599870d 100644 --- a/taskflow/utils/async_utils.py +++ 
b/taskflow/utils/async_utils.py @@ -14,71 +14,24 @@ # License for the specific language governing permissions and limitations # under the License. -import threading - from concurrent import futures from taskflow.utils import eventlet_utils as eu -DONE_STATES = frozenset([ - futures._base.CANCELLED_AND_NOTIFIED, - futures._base.FINISHED, -]) - - -class _Waiter(object): - """Provides the event that wait_for_any() blocks on.""" - def __init__(self, is_green): - if is_green: - assert eu.EVENTLET_AVAILABLE, ('eventlet is needed to use this' - ' feature') - self.event = eu.green_threading.Event() - else: - self.event = threading.Event() - - def add_result(self, future): - self.event.set() - - def add_exception(self, future): - self.event.set() - - def add_cancelled(self, future): - self.event.set() - - -def _partition_futures(fs): - """Partitions the input futures into done and not done lists.""" - done = [] - not_done = [] - for f in fs: - if f._state in DONE_STATES: - done.append(f) - else: - not_done.append(f) - return (done, not_done) - def wait_for_any(fs, timeout=None): """Wait for one of the futures to complete. Works correctly with both green and non-green futures. + Returns pair (done, not_done). 
""" - with futures._base._AcquireFutures(fs): - (done, not_done) = _partition_futures(fs) - if done: - return (done, not_done) - is_green = any(isinstance(f, eu.GreenFuture) for f in fs) - waiter = _Waiter(is_green) - for f in fs: - f._waiters.append(waiter) - - waiter.event.wait(timeout) - for f in fs: - f._waiters.remove(waiter) - - with futures._base._AcquireFutures(fs): - return _partition_futures(fs) + any_green = any(isinstance(f, eu.GreenFuture) for f in fs) + if any_green: + return eu.wait_for_any(fs, timeout=timeout) + else: + return tuple(futures.wait(fs, timeout=timeout, + return_when=futures.FIRST_COMPLETED)) def make_completed_future(result): diff --git a/taskflow/utils/eventlet_utils.py b/taskflow/utils/eventlet_utils.py index 6cafa5b5..347fba31 100644 --- a/taskflow/utils/eventlet_utils.py +++ b/taskflow/utils/eventlet_utils.py @@ -37,6 +37,11 @@ LOG = logging.getLogger(__name__) # working and rest in peace. _TOMBSTONE = object() +_DONE_STATES = frozenset([ + futures._base.CANCELLED_AND_NOTIFIED, + futures._base.FINISHED, +]) + class _WorkItem(object): def __init__(self, future, fn, args, kwargs): @@ -82,6 +87,7 @@ class _Worker(object): class GreenFuture(futures.Future): def __init__(self): super(GreenFuture, self).__init__() + assert EVENTLET_AVAILABLE, 'eventlet is needed to use a green future' # NOTE(harlowja): replace the built-in condition with a greenthread # compatible one so that when getting the result of this future the # functions will correctly yield to eventlet. 
If this is not done then @@ -95,7 +101,7 @@ class GreenExecutor(futures.Executor): """A greenthread backed executor.""" def __init__(self, max_workers=1000): - assert EVENTLET_AVAILABLE, 'eventlet is needed to use GreenExecutor' + assert EVENTLET_AVAILABLE, 'eventlet is needed to use a green executor' assert int(max_workers) > 0, 'Max workers must be greater than zero' self._max_workers = int(max_workers) self._pool = greenpool.GreenPool(self._max_workers) @@ -128,3 +134,46 @@ class GreenExecutor(futures.Executor): self._work_queue.put(_TOMBSTONE) if wait: self._pool.waitall() + + +class _GreenWaiter(object): + """Provides the event that wait_for_any() blocks on.""" + def __init__(self): + self.event = green_threading.Event() + + def add_result(self, future): + self.event.set() + + def add_exception(self, future): + self.event.set() + + def add_cancelled(self, future): + self.event.set() + + +def _partition_futures(fs): + """Partitions the input futures into done and not done lists.""" + done = set() + not_done = set() + for f in fs: + if f._state in _DONE_STATES: + done.add(f) + else: + not_done.add(f) + return (done, not_done) + + +def wait_for_any(fs, timeout=None): + assert EVENTLET_AVAILABLE, ('eventlet is needed to wait on green futures') + with futures._base._AcquireFutures(fs): + (done, not_done) = _partition_futures(fs) + if done: + return (done, not_done) + waiter = _GreenWaiter() + for f in fs: + f._waiters.append(waiter) + waiter.event.wait(timeout) + for f in fs: + f._waiters.remove(waiter) + with futures._base._AcquireFutures(fs): + return _partition_futures(fs) From 268d935a0a5adff7f0d59bdf3882210c49e45f5c Mon Sep 17 00:00:00 2001 From: "Ivan A. Melnikov" Date: Thu, 15 May 2014 22:18:45 +0400 Subject: [PATCH 071/188] Don't create fake LogBook when we can not fetch one When there is no LogBook nor backend in ZookeeperJob or something is wrong with book data, we now return None from `book` property instead of new (fake) LogBook. 
Having two (or more) logbooks with same uuid but different data is too confusing and makes root cause of the problem (backend or jobboard misconfiguration) less obvious. This change also adds `book_name` and `book_uuid` properties to Job abstract class to provide a way to access book data even if book itself cannot be fetched. Change-Id: Iae1f918e35d41794fc348860e6aa3b52ae2211f4 --- taskflow/jobs/backends/impl_zookeeper.py | 51 ++++++++++++++---------- taskflow/jobs/job.py | 19 ++++++++- taskflow/tests/unit/jobs/base.py | 2 + 3 files changed, 51 insertions(+), 21 deletions(-) diff --git a/taskflow/jobs/backends/impl_zookeeper.py b/taskflow/jobs/backends/impl_zookeeper.py index 4de2091d..fd73a097 100644 --- a/taskflow/jobs/backends/impl_zookeeper.py +++ b/taskflow/jobs/backends/impl_zookeeper.py @@ -32,7 +32,6 @@ from taskflow.jobs import jobboard from taskflow.openstack.common import excutils from taskflow.openstack.common import jsonutils from taskflow.openstack.common import uuidutils -from taskflow.persistence import logbook from taskflow import states from taskflow.utils import kazoo_utils from taskflow.utils import lock_utils @@ -145,17 +144,18 @@ class ZookeeperJob(base_job.Job): def board(self): return self._board - def _load_book(self, book_uuid, book_name): - # No backend to attempt to fetch from :-( - if self._backend is None: - return logbook.LogBook(name=book_name, uuid=book_uuid) - # TODO(harlowja): we are currently limited by assuming that the job - # posted has the same backend as this loader (to start this seems to - # be a ok assumption, and can be adjusted in the future if we determine - # there is a use-case for multi-backend loaders, aka a registry of - # loaders). 
- with contextlib.closing(self._backend.get_connection()) as conn: - return conn.get_logbook(book_uuid) + def _load_book(self): + book_uuid = self.book_uuid + if self._backend is not None and book_uuid is not None: + # TODO(harlowja): we are currently limited by assuming that the + # job posted has the same backend as this loader (to start this + # seems to be a ok assumption, and can be adjusted in the future + # if we determine there is a use-case for multi-backend loaders, + # aka a registry of loaders). + with contextlib.closing(self._backend.get_connection()) as conn: + return conn.get_logbook(book_uuid) + # No backend to fetch from or no uuid specified + return None @property def state(self): @@ -194,16 +194,27 @@ class ZookeeperJob(base_job.Job): @property def book(self): if self._book is None: - loaded_book = None - try: - book_uuid = self._book_data['uuid'] - book_name = self._book_data['name'] - loaded_book = self._load_book(book_uuid, book_name) - except (KeyError, TypeError): - pass - self._book = loaded_book + self._book = self._load_book() return self._book + @property + def book_uuid(self): + if self._book: + return self._book.uuid + if self._book_data: + return self._book_data.get('uuid') + else: + return None + + @property + def book_name(self): + if self._book: + return self._book.name + if self._book_data: + return self._book_data.get('name') + else: + return None + class ZookeeperJobBoardIterator(six.Iterator): """Iterator over a zookeeper jobboard. diff --git a/taskflow/jobs/job.py b/taskflow/jobs/job.py index a0264901..a4f0b416 100644 --- a/taskflow/jobs/job.py +++ b/taskflow/jobs/job.py @@ -66,7 +66,24 @@ class Job(object): @abc.abstractproperty def book(self): - """Any logbook associated with this job.""" + """Logbook associated with this job. + + If no logbook is associated with this job, this property is None. + """ + + @abc.abstractproperty + def book_uuid(self): + """UUID of logbook associated with this job. 
+ + If no logbook is associated with this job, this property is None. + """ + + @abc.abstractproperty + def book_name(self): + """Name of logbook associated with this job. + + If no logbook is associated with this job, this property is None. + """ @property def uuid(self): diff --git a/taskflow/tests/unit/jobs/base.py b/taskflow/tests/unit/jobs/base.py index addc7edf..e5faad6a 100644 --- a/taskflow/tests/unit/jobs/base.py +++ b/taskflow/tests/unit/jobs/base.py @@ -253,6 +253,8 @@ class BoardTestMixin(object): self.assertEqual(1, len(j.book)) self.assertEqual(book.name, j.book.name) self.assertEqual(book.uuid, j.book.uuid) + self.assertEqual(book.name, j.book_name) + self.assertEqual(book.uuid, j.book_uuid) flow_details = list(j.book) self.assertEqual(flow_detail.uuid, flow_details[0].uuid) From b9afecb86e8432def11ed750999e5a9fd5b8a49c Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Tue, 20 May 2014 00:05:10 -0700 Subject: [PATCH 072/188] Complete the cachedproperty descriptor protocol Refactor some parts of the cachedproperty property descriptor (renaming attributes to the standard names used in descriptor objects) and add on the __set__ and __delete__ methods to comply with the full descriptor protocol. Change-Id: I1f1e8e301271c060d14acc3a77c094dabd120f16 --- taskflow/utils/misc.py | 29 ++++++++++++++++++----------- 1 file changed, 18 insertions(+), 11 deletions(-) diff --git a/taskflow/utils/misc.py b/taskflow/utils/misc.py index 29e3cc0d..623fce32 100644 --- a/taskflow/utils/misc.py +++ b/taskflow/utils/misc.py @@ -180,30 +180,37 @@ class cachedproperty(object): those methods into properties that will be cached in the instance (avoiding repeated creation checking logic to do the equivalent). """ - def __init__(self, wrapped): + def __init__(self, fget): # If a name is provided (as an argument) then this will be the string # to place the cached attribute under if not then it will be the # function itself to be wrapped into a property. 
- if inspect.isfunction(wrapped): - self._wrapped = wrapped - self._wrapped_attr = "_%s" % (wrapped.__name__) + if inspect.isfunction(fget): + self._fget = fget + self._attr_name = "_%s" % (fget.__name__) else: - self._wrapped_attr = wrapped - self._wrapped = None + self._attr_name = fget + self._fget = None def __call__(self, fget): # If __init__ received a string then this will be the function to be # wrapped as a property (if __init__ got a function then this will not # be called). - self._wrapped = fget + self._fget = fget return self - def __get__(self, source, owner): + def __set__(self, instance, value): + raise AttributeError("can't set attribute") + + def __delete__(self, instance): + raise AttributeError("can't delete attribute") + + def __get__(self, instance, owner): try: - return getattr(source, self._wrapped_attr) + return getattr(instance, self._attr_name) except AttributeError: - setattr(source, self._wrapped_attr, self._wrapped(source)) - return getattr(source, self._wrapped_attr) + value = self._fget(instance) + setattr(instance, self._attr_name, value) + return value def wallclock(): From 41da9f2e3c4c68fc6bbbebbc3ce551a3cb8f167a Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Tue, 20 May 2014 15:48:50 -0700 Subject: [PATCH 073/188] Add tests for the misc.cachedproperty descriptor Add a few tests that verify it is working as expected. 
Change-Id: I6c062f84f7c2a6baf8fc9a697ebe2d715f82d332 --- taskflow/tests/unit/test_utils.py | 59 +++++++++++++++++++++++++++++++ 1 file changed, 59 insertions(+) diff --git a/taskflow/tests/unit/test_utils.py b/taskflow/tests/unit/test_utils.py index fbdf3ec6..ffabe7df 100644 --- a/taskflow/tests/unit/test_utils.py +++ b/taskflow/tests/unit/test_utils.py @@ -312,6 +312,65 @@ class GetAllClassNamesTest(test.TestCase): self.assertEqual(names, test_utils.RUNTIME_ERROR_CLASSES[:-2]) +class CachedPropertyTest(test.TestCase): + def test_attribute_caching(self): + + class A(object): + def __init__(self): + self.call_counter = 0 + + @misc.cachedproperty + def b(self): + self.call_counter += 1 + return 'b' + + a = A() + self.assertEqual('b', a.b) + self.assertEqual('b', a.b) + self.assertEqual(1, a.call_counter) + + def test_custom_property(self): + + class A(object): + @misc.cachedproperty('_c') + def b(self): + return 'b' + + a = A() + self.assertEqual('b', a.b) + self.assertEqual('b', a._c) + + def test_no_delete(self): + + def try_del(a): + del a.b + + class A(object): + @misc.cachedproperty + def b(self): + return 'b' + + a = A() + self.assertEqual('b', a.b) + self.assertRaises(AttributeError, try_del, a) + self.assertEqual('b', a.b) + + def test_set(self): + + def try_set(a): + a.b = 'c' + + class A(object): + @misc.cachedproperty + def b(self): + return 'b' + + a = A() + self.assertEqual('b', a.b) + self.assertRaises(AttributeError, try_set, a) + self.assertEqual('b', a.b) + + class AttrDictTest(test.TestCase): def test_ok_create(self): attrs = { From 1a226baa893a56d1cb3f8c58511748f87469b556 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Wed, 21 May 2014 18:49:06 -0700 Subject: [PATCH 074/188] Add engine state diagram Add a nice visual representation of the states the action engine yields back while executing (which are available from its run_iter method) and give a brief overview of what these states mean and how they are used (in diagram and in text). 
Change-Id: Idc6abc83e0ed7510c0e4c58b7fcfc26cce3e98a2 --- doc/source/engines.rst | 4 ++- doc/source/img/engine_states.png | Bin 0 -> 24560 bytes doc/source/states.rst | 49 +++++++++++++++++++++++++------ 3 files changed, 43 insertions(+), 10 deletions(-) create mode 100644 doc/source/img/engine_states.png diff --git a/doc/source/engines.rst b/doc/source/engines.rst index 2deb8fa1..6aac42e0 100644 --- a/doc/source/engines.rst +++ b/doc/source/engines.rst @@ -232,7 +232,9 @@ The graph (and helper objects) previously created are now used for guiding further execution. The flow is put into the ``RUNNING`` :doc:`state ` and a :py:class:`~taskflow.engines.action_engine.graph_action.FutureGraphAction` -object starts to take over and begins going through the stages listed below. +object starts to take over and begins going through the stages listed +below (for a more visual diagram/representation see +the :ref:`engine state diagram `). Resumption ^^^^^^^^^^ diff --git a/doc/source/img/engine_states.png b/doc/source/img/engine_states.png new file mode 100644 index 0000000000000000000000000000000000000000..b91041d2eab40cdac78869f6bda3aeb544155f90 GIT binary patch literal 24560 zcmdSBcT|+w7AIPmKoPJ+0m&IeNs=Thl%RlQ$r%MC=bThRAwdNJ0RhP(S#nlD5Xq9W zAUR8tTyK~5?R#%`zghF9>xSyRBk{G4RxZ}# zgtWAupD?E<=RD4(i(z7JiH#y+%{CLWBzjuWxbQe;#;77|xj&SB*k)dRxVQ*WJaXb% zSDhz^F*`oaO{C%lH*az>gO%MwJD1f}eucAmBQM-)k18ByM;yd{zOHW#bXaZuOlEun zu7?&`-h%_LzIP=#u$QSU@xS=;eo^sZj9lMcyBj~b_xLbmGQ&MowY$FMjHfAfcL`f_ z^*)4`4;AP%)X98wf3+*ys5lh6;-T53vtY=-MHo7`b)(QWcaod{F5&j+a=iboFPYK~ z$QW)84vr3yjn?Uw=wbHD=bjc96;*At3lp=*t7i4umye8&ieA0C^n^&PbKcRt_~eNb zug=_YWuv9N!5p!**q>KkK4><NOJs*>e=l+hU2KsAsFJI-|$J?tJC8M$!AKPzjoCsRd5S((gu(ugDssVO3` zxqaorQ##3jY|X;Pd}DUYA=|lOf?g?R$&ABg8=r{g`?dR{tDD=;C8BSI*iAMBp?Gb-eK?6vlC4#2-u_z9yf_wZ()y}vXoxM> zssW8mmpGq5&TVvJoCUMjUwc+ouhO~AEMxv&yE=VDGlx!@L5*M|S(9CyfX&PzrKr2( z{rgkh3gXL4OL8b`YU*+CeXB~{Ar2KuXK~4gZXFiIL*)*$=at55yz1-fyf@;;o2(yST+24hdB4&L>VSEv;7GCbHpT3sNM( 
zrE60MyBl@w0RaK8UXjtSudlz$&E+3Iy}S^n)3BbPUuZ&rJcQ-P-s=3qH*da?*6y4{ zdaXCJz>0O;8j7co3N9`wIr{O;XKMVwQAk%hZpD$q=rH2uJ9BMq?ftDqQ&ZFafdSSf z5zieiq(6$ow&l`*}{KMDTAb zb@AdwYisLkF-w)JHN>m4EW0l+t2AITg82COPM$o8>=ItBGG?p8b{Ax7JXOg_m&DfgoYe59FU8NXV0tcZ+0sXcg=Ta_={dq z%cf24%~B`s8Z0#3+NQZ~dm}F(h?wQe{r6BLV2wI@d+%lSk_YtPE5DsbBXgyP$<*9C zTtlpkM9r+(;!}dC2iojw>K%!pu7QEM@844tS-lT_NUKQyxG8qH*=<4+Nz++we(`5+ zMP()Ni;0|5GkpB~|(r zTwPUFVmYX+sA%x6RUEV10lNcgm-uFI@rAv!MwY_-|>n&rJ`c5g|l|4#*F(V z5j{Rj5Yv<4q%&QF3vo{}e!;t+TGOXL;Qg-e$Pt?v}1$wr^=`ueFr z=|f9P)lAi3Qce=m{o(N=DU^DiUa_sNA9rI%ex^!Btc-!Aq-4BU1khB zp3#UWps(so7TSiiMi-^lrKdBO>sTB5_gq}0Z(U${;@6uX?vsF?X^p8Dzk2yHUGl)d zKwqC)v6kq=uZ-TcQYi(6g&uQBft!SwMSYA6ruyou@Nfx~Y%~Yo*}Sa2TN*a77aTFB zLo190p0@1SopnF^va_$2>#WSq!ZCB5ZKsl!mL?)1QsI2%=H_-lbqwy3yTVpjIh!5F z*S@}O_?YX9x|2>?^vZ0J407LEE|8M$@9%G88%Z*MPmV|#+AEXo~2$JI#fwW(CD;|-szQA1Z(H&M(>M@Q!@X=^1H z>HMhVL;n?w+;BW&6j84KMXe(rG0em0etn!(p%s%2GNZ8-``T0JW{c-nVCsxu4mC?;j zM{i8Lu*>~xgBE$!jY8p2T%JCCN=ZdEIy&0Y(n5gWGQAFKp}|SbW0M=-O;wIpm_7U8 z+F-t6{ncmPXR8_O!^;g#Is*wPm?N66UcRIIGMH;I;p|tn7X7Fiw+&`v%O8`U?8~dt z(*wA-+l5yfE;!wUYKlRAiZh zrk_|C`bSl%t-X8qPDDgxZ+rPlbPKc|r5~bOMhCOjQel*rYg)8NbU0sOTgcp@w6N!n z?NOWM=3G~)_2~KNB}LMrqN3NL9^21+4!+kDiOZn~2?@u2Fx5~d+ioo^E<&}wGD3AY z=CO+G0^FmC>ws+3FfyV?vTMF?$hU=CayFWvwW{1XEGBwrKRd=WefA#Py}-U(&{Q{= z*t7Fz`)|3b313Rh+1~2cXGu;?Weof7>E#87_rq#OTN@J`1C2(F`}Y}<)6>&IL{s8J zB^Lc0h8^NZKZvfC7>pH{<1JQW@AZZpd(7{9?QDkL?Px#OB{1%3T=b!d#zzjdy}fXR#$sTNE|~R?vGHtgMWf zeH(%A;r-?1Wz^=NY2u}vsl%V+7-(gCKUc@&Uy$1!9k|8IlTuP%U7By$oO`V&cxh$a z?=F(jdz1pRvrIu(;Qc_a!-KmJl+K+%;yfBA9z4o_zwH2Hl z9kX+CdiN=HqU#mK&It+JX+3N0(kkSwM&af2(A@lKNQkAG86H0V{^6?6c+i)x9} z-j&<7t(4ZAG#A9UFZiS+XNDU+6IsVYLf2Dp-?telyNOcCkiWQnmV#d{N%3W5#Uxbn3&XR z>1c_SHp{8c&ojwaGEDG25-_ekok^0r!$hBU@(yNga7iZTW1NsjvNgMv3~nUyM6vFTH*3F0n!&_Y80G&s0B>*?wh!&|XV z9Tj+aKMB$0g$?;!L^#Mlltr9Vlc5of7ET~I~;W^lc>%4j6~04E>cRxoFc z>+W>)ZldBmFAGbC%eYr`?WopOZtjAT5@*cOA%Mt7j~+2dg>;%1lXB{1zkTbvzg(&i zFVq{M-~2$Z#p5WabgaT_-`U*!U2QD}TC5l_8$cq7!Y*Oo%K+v&?d{J2Ru1?S8yj15 
zxIL_0YQ4L&!^pxCj9VnxU2j%7DK*^N!Ymp6O2q9N=H!cmJDf_W1{DHx8f*_p&oN@e0+bgUo}+o9#d}%GU&940NsSj?nXzwqJ;WKoyRLr1|Q(x58=)62wr$Ki_qmo=KdYCH6zF=&Y-G6Cxm=(Y?_ilfPI-XO#sP9hNQ&x1AmH)cD= zfn2x83j+;%Cbnm$s(PWxP8k<|-buX1@Cc=h(MXnKZt3Xo*_jC34FrI9xc`%rjg6ew z?71qPQv?VehV=n=gs0c)Uwx>TLo^$nKafjj`J0m;x%u|;%gX{lDuScTISqssP- zQczi4Jvt#FSCREyR@TPGhJWEX8{#g94$+J*voI=QDHJ{_M~@lr!-o%1jsR}qwBzb% zYGx|3PJaJxVQzly+BNjsqN0~JSJ>FzB_%z|tIf`~TJ0LqJq4*e`DFCkqA@hzx)yoG5p4bb??llKdgPek5cxVXwHt*1LtuZ_y+(#zS z@EhXdwSeFzK1@yBZ^mph5+Slrm>j!J7@_EE!K?Pjhto8KPe7olv5`Zo=(g)SU`X}# zPcSdrfUU2LRWHuWI0N#8H=v0 z21EnU=VS&Tr=Z(q5~^BSWz{Ev-!~T`uG0(CXJeu_qrvG1s<}302YnsqfrJu2}vUcvvILRhdbj02j z9S)MLVBm*4>h(N$fS!~zT3ydAZPXMH-Ned@bu*n*5D_i{!F946&2I1F;`2RJV8|=# zd)b#SGO~o+GM?8#o4)+{meA_jL_H1;jA`dA9Vhs_o-ej9C#j=-FQK6NoN-b!gnw!fZDyo~LsH(0G7bM8a$|@l6 z6J!|JP{AbZ=NA%rOv9v7nm>Q0O|EoVz1DdC{Q2aR6j@nWIHQlBJb@iwVm-PrJ-xL% zo2ag)mLTHxwWH%1dL3%cnxEAV`Fi(|MrJ}i&+Eurw{G$A@}{PxA(b>WW4hG*mSOq) zC6F{U`YI~*iSiF0W|pOPsfAEK;FUjO4g2ngYEZRtas2GJu{`pqv&G} zV`IB;=F?cU2NbL8VOe>3{qv3>!^q3Ze#gIX;R0T*tt`s4BjG`a9i=qpkXzfw2eZCz zUvtC6Bpr%wVxqpXa(E}o-u^vPIoeQE%9t28FFN?Opo0`;+^!C=x0$IaUVi?P;^GGn z9?Vl2y}OtqeJM0T+>oMOH5NO|xpLm;!xA-s~nb zOOy6b-Q6mj{5(K&3g5qHmVbR6)M_|#+V<$ol-35f`LAi6ot;+0CB@obq12_n7IOMh zw}wF6kWprlR7)a0>>L>Q%;{uSJOtYh#SuMT>C!Pgyub6~yXPa2^`ZCZzD?A}N7_`D zm-qJdtqa4t+S}WoBBFl%`t?;_ySYxfz10f5CcBUV9@F;cAD{aFv1+9p$)wtq{bs=C zJ_VCrG8T#tyQ)pf?f$as`fdt2Ik~-q!~W*{2Jo_&1*?tYC=^@d7h9(j4%Du%PSXm? 
zN#OL|61tPo?H?FOBkY`=miDQ=T|QG4_Mgkg$0sTHEkAd3)IxEDZV5_+b$S3AjRxg1 zIx-T#-YqF9W@cuNe<)xID0z8#)XNtylDf>CS4JM*fm^(|8M7`>f987F_v`IrV|u_* zuTTPgUZ%F*WaY6F5)uj{<2E|n+v4WtHj!Bwtt>4m=~L|{4>bYMsX>K<$ans=ln#ga zHsdunpPXu*w)gSzIqG|!o}M0gMO{c(Sor#N6bhxPrgmnFA~(3Fudh8(eEj3bn>lJA zZx-d|=H}$E)6p4z`|!xV{NRN&wV?A78O=cY-8fid5Kp#O#}!UpAqm(b3%%P~=k#K4 zZ_n2k5h;`A@)>Gok9l!rlrV6^G4=hJji&M8V1(gCg9jO+9M541VZED z6n*DD9THa7a0S>((^ZC|aUKk6_d*7WV zX2v7JtVh>={-t_v5Xs8O>?j<+BwF(3&4;;FOiUA5@YAREwzhLR9}tL9oN_$h`x26p zocc8*Jv|NLBA^-TaERMYp1`ct`QwB7Lq|s^^~}l6&TTqU9X;0=O0lxCLQr}aNU%x)kLmmKkn!={) zCW^A@o|>B0fRU^$zKv=HZc4K$91BmIQ96Geg0DX6NciFW|mO!Mow6I`E z252XjAQHnF^ZGUAxpN@U%uY-cxNVr6*G-uqm9MI*+6fONqM4E1+udCrER@c>Rb3Yu z8L6bC6c!c+n-tsuGi&RaEVwc!Cnpf*vUfD<$=N3`YjFps_Y3@D$V zIcKI*kduc|ivm3VK07Oo^4aT7G%_-ZV%L^J+0^XLfTZ)uzpWwPeal>0x(U1)VkM9h zfWqghvMD5r9rmawcpdy`fwi-auU>tY@qk&?74jilDGJczBRr~t;R)rX5P=*BYK zodT5hXWANVsxTq{udk9KY^$SSIf$I`*sJ+jxisMSEezJFYfW&F2oIW zA)liINg~k;u;kR#98OmeYX%>VA(13sEdM9!=pV&3#2^Hqo#6HBx2e!+XU?2)|ESQp z6;_Dw4Ff7~fVy+1IY}~5Mn*>NVwv4{C~Y1tyrW)x+L8`GzvG;_h!8pfr4E6bWymsKt$T&1U7_;XvA7qH#ath3r(Mu zoo@skQwJ)@ty?Gclcr5&P=6_fo%=Xd|Bd05^z^J}SpP>{pLEX@q=JNm1SqH-;uudA zm54I-dg^Zc-f)hSu=2+nJBdo_>k2?TAeA8>Ey@1?!3p_9vC+=X=YM1LEY>T-HnqCY zAOZuWu@JfBKTvbTix)RhAa_Awlu26$j|?jI{QUeG|Ky5_Sn;RduhSt7LqMW4`rwNL zMHV!a{3*GADxXzlYcp+e;6npm19(G4O;9aQISE1ak&M}oeSShG_01dK@zvE;eLcN)8&eY#cMlJqM$jY0 z4%X_wefw5m+yv~rvZh8ak3UB(E+V4)mo*uM?U2yjpxB)8H(DL5hAu(b)Z5>`=qPmb z)@iX12{$9bmL4(&^0#lVZEayA5#WO6cF(uIe(2>@t;0b|s30r5v9)!Plv>2q76^Q; zuA<`e@NlCUR{A|SY-E5~=Il7C3?(m7H!)(aBbv|LUv&*gMk)#(0EvM<;9weDLXhBzi?k(cgD z{~LE9z$`?+1u;YNK&9(GPOy$3Pr#8+kqX6@04OI=G(-A8R4sP>F$rk} zNKRy=rmhL|k%tx*Zh+d8_3j;56lJy(5)(C{Jv}|TmCoRC!>xR}c=^hemiG1hL|!Bzad&rritp;~PC`n$F%0Gap!I?&R-Rn_+IE|%*;#yn$7^Rj ziq@$rDt*e#+N+in-HjT;tkb*f%wbbPiytR%HiBZp&CFJY5DR6d{BA@24Z63 zAUnl6*}9=7`d z7WzP`)u{FM0y)LOfv3q1(1^oK>*>>{;Q$G{a9qCJJQw)Hk2prq!Nk&X38;6jZe

4av+~hTpR<$0d~eTPmuGoYYf03-@cs#@i4Dy{mbK%_^m%*T~}2(xxF$9 zSSYLisZ`2Mp<@QUj2giOVsp5|h7%f#x`YEo4A+3q21Mo<)hHL2oMlbc&u z%yuNMe*5-DEi0NsS4&-;#X7g5LWr9i7+R8m%~(&C8#EmA;-S$|FVKSw4GjSxg%p4s zL_p4anHg**PpI zsLPCZvJSA|74=+4STI{#U|_JNcpWZi+J-W7b9X`Q>FWC7Ppa1mzgft gfz>YQ0d zmn4`VlXhM|`=h?-FL{pUDsvr?!0>qsY76g+4ejl@S!|Y8R`=Kd2P z;=dED1NJTK8Z$Fi&7X1zibr=?Kk$(wEa;+JD!Gt!^jUXNsgHrH^kEZtw{HNs(yytS z9lixC>7RcC<>!A5nqdi_l#iuCn(}FZV#Mu}oYHV`kx+>yn%n_5mzS3-Hyt})4;O}S zBl^dWK57h;X9xTC*0sDq{0v6;T1|-GWpKKHSi@kYcM5teXVEx4MUODJ@*IuOV8MPz zjQx%szWc>vurvnoi^mvDuRWrt_sstiWsq$Z3gUBXE3X{t4;?Ez7;(eKgco>E*5lL= zyyi^yj~d5`ek-USD*icC=(`DZ*|aTodTA+Jl?`$$UQk;0Gjp(1g{W?eE5&fc)H0+1 zv;|z;zYQ4UIigIi#SekFL} z9CA*Iv{CI(%948;iO4=~J;_wp)>_hVrfIKyHleVz6w@SrM7M-Z7=UizwK5{ua^>=` z`}e9io;(lVc(LwXwk@-7&c|Q;?1qe$l@*|P&0_ON*ID?R5#ixGApLxStA)6Tx_bY` zD{8y~0>m%mm?0PTAELOwpWivKvPlmQgwTd}eiTkF|E1G|g=2RcAo;-f(Y63RNU7m1 z)4&N(sonEBy1L!n-9fB?J%0=J_4URZGi{S!*q?=RZ_mWp5K@b1fYy$Si$!Ygq>c^u zE-h2m9QE{o5Y`X;Vyu4vapRfc2eQ47&KkoSq!%wDUr42J-!lNf(Vc$x3Pcp!+6c~^ z0d3zj1gWH|stS=D=r2eB5YtfCGML9os_p}7addP93Ie6f*49=>=k@gv;A*X4PjN%9 zO3dtCfWQ)WV-5KFHD3Gt{QPQaYHSyt?(FP9cLep8o{gRTB3r3Q2^gdnps#|z59%xM z9BO{cS7WY0L;^#Co{u2A5-;Xek(BgNu0wkm4vpV0H9p|9p=`EQHt2eHutZZTF()rC zx@8(`(bj{|1d-21GGx4HyuqjBqIoR8q zKYXZJ>AW03K=D%Mfs@k~ND*Lpwv%<67sDb${t|98#L(OcG^vMy*Z|DB%Im(fX2=6w zw0#1r$`uW7c#1+X8{N>?!)3j21~hmmoNLuMB~P-Krf*AtaYV}mMgc6u5%8=;Qlg4+>N<54OMtD z9OPvuN5?A1`L6&}fMkQ*G5{DHT2?zOKl*P{n)jZ20W>UN6*8{QP$o>m___swKooHK z0+Y_v_lr@>s&@ThWM(!8MoKAY2oRoX5kB|EP$@S;TD75a&moG;pRNb^Zj!W3u+>E52nVyaj?30`QJWcfVN}NOz z77NX%MMEsF3H&WSi@w`b$QueC&yS37+|$G%RR=W$82c{Z<}>*CE-NFu;+PHbIDRX7 zdN845cuX4`8^M@b1lttkSmyYK(wCh z63;8nLMuMpgDmPA_~ScpTvNkNzA?R1!T*pQi=Bm<_Mof6F?0YAg7pIMTCCUapYf%o z1>Ph}ITP3vaH^e3ogh|#cAOt<`uqDKEBRKH&BRtAK<`fDf5~o~FChvNFYZ$V2|}$L zy$Z*N;PmP}2b2UI4@=s~jA4I+#sd}@aROvB;93r`_Dn3kHlxjd{+`DCm*Dg=o-Ob zy|8N)u?h=U=Hx76QAYLl5YX#1!gm?K@keUBv=>+r?l0r9+4+Meh>!L4^zb?@^Z>Pl zpMz&LaKO$`9WIdsX>ny~oX(U#+vlkSzK#P_i+{$B8AdUyD 
zYyQ;HehIIGnf=04`pU&4@EZ*AS&-Zlw=Gv);<7UVIZT#r_JxBzR@ zv(n1phcZ-9PymYgl`FwmHwnznzd1e!N^l?iY?y>&9W zV+6bR_T0;LI4BdMqGlEsuW&#;`38;8yYkH&S|+AXtxBj(h#Yb??r>fwwV9or-BkCJnH6jg)&R%|v5EznZVVaZT&v@$FRXbm1Z3&1$CwZh!ChRVE)^Qf{g}U3v^&| z0qfT>L~Q?3_au&hq9kQxWR&@4WSH97ao5I2N0&lMO*ZV>_r0yf11WFs8e0auj{K(0 zFso9K`LS@Ds-9E*{{8!Ays_AJn>e`|&8u?b91>aJzO?{}f#l?55C|I@8i0Jawzfiu ziIEo&j_%3awW4Iw z(AY?F@uF_IJ@^EAn)wEgcBzR2&EI{%nwb_B?3Ppr#_sRx{@F+Xo{rYpTh@2~uVI8( z2)S|cZ0W8qkaCDiQ&CaryH1>hN-T!|OaC&9eoFDxxXov)kCMWX?raf4Ttm)}#=3-w zs8eGjix#@A?ma^b3uxd=?}jSWZr=O|B*Wd_EZdD)!@;uCpR4mRheynD-(03>NRBR7 zDX7Jv5x$}Ihx76a?a-!6P&t{)3Ve#snf338(+{#}=W3?a9XrF*)HO z_(63 zMR|H{*cD+=d5wqXZcgyGyT{%%eFFqBkPE*D0rN;1CMQ}1KLM=B055od;n*p=62f^r z-y-aXqNT!le*V`-hM#@C2|rt-xh;)gd_*KbJRv#uYn6F;CIIV!nDtTQ(Mmux1K7F< zgf}xUFE0;|3ZM{6yzp!Ej2XXvxAnv?Z4i4m{s*w)|NGB$jiEHcfKI5u*jz&a#2#R# z%;LM@ALzu00i)v)huSFTj1 z%&%mXH*G#varFhW5B^SbGstHOgg^lB@Xm3Dg5L<*nVW}4R__AHNN4>26Q5koot=iZ zG2u_Tvm6j`DpJO2cl`nV!oI#tnsOPs^KW61E4gfpjDBd4z2;`4sr{Cn@z*F|=mKm! zaLD+^Df9l&!TFtPu%4gGAD*AiY2&NFK3G}b!X#NJf@s@(X~lua(^iE;`{f?Ns0|kP5nJRP?gUq z0p0cX7NhwJ>cH~MOdrUXWWtb1Wq2l1XZN}+3C31n-0R5jAGk4FlDx`)!J$@xpUb7= z9FX*{ub1=`3gJ$f)&8f5+iuGBFyix&bB8^D>MYqg8XBMPjRY!(L&a_^_4)>v6CMH5q3aIbz zEZR;TNYQn|`r6uY(b4^Imzv4qfUbd2OJ{8cCJ}T65Eu`z%^2XPw=fS+H@KxbWm)zw z9zQgZXQS#txB4x&gPfFSVOqE9gYl?ln(f!~xN9~CJgyEh zNn*xJFdhTu9O%BW0yYVH9Cc*c(1~CY6#mrm@-mp7l>KIqQK-(NCKzvRZ&qdFz-M~g&78TpYvg{*B;0K zSq^8C-4rTMOPdA~7{PoOt2^4+6->m$26f9hr0=0tFPtT+^>GzEKQOOf2)VN`pd-L< zgMxxMt2fWrvcS4BF+q-*{eT_MU4Z_tKH_R<1R7*ufF0M(d){Klw)Xbcg#k*FaOmtn zY8<2#JSH%nLA*{<%L;m1QnC$V*o@?Z5J=s?7vaU{Ez{|-AY=R%!#xcQzCqUk0f}Id z*S+S?3?B%_u(4mfcmWCj>6VQ2bZl^wi_5k-oC)e4a^X)ETGN(+!{Y`O%5^bCwvzQIjz?<1%uHbo+Tf|4Ns;KWJ}4uxeOh!sS- z`uZTYA9X93Rh9zH`-e01!M&P6fO(aEB&$Ke}(>js2t?Po1A&| zkb{ehi-%_eyp6V&778)X9bna;_5G3kmYbeVLb_ld1HK0ehRq<#Jb*FP-4vKGk`5s& z02U6N1d@Vb-yswVqhrM9&)1QG0;2;7s$GR)^2Ucng0k{4;wO2!B>JB_Yc|Tfp|Qlp z9C^F-}#0)P+ zo1Rj?#BVhOWqS%D4L%reZyzr&uNyZYVtei0jl5GEZTCPCXJ#g(650ei%bkw`_N66! 
z9I)y^8U+0a_LV#KApXwFOh8-@b{BS3RxOKgNE(D&tHMExr@4S7F1@0#tR5iVrLs6ZURCScTrad2?emc$UdA@gr9wl<5%f`AWS*<#;C@oe?NmgB#1+%-BH8 z&<}_>G&MEJQr$8IFRK2aKTW$9QYtXm5c%@uE4FWwlj~ouQ=@vsF5Hz9K=K?%{Rewe$thR7N=*h24WM)bk=PyH-IpvSEq&pYA7bte% zv0S&GLi=%x%wdtmD+iFVA!NHY49$rU4=*3AreDS^Q7w<4V9faZ@8Al3ET|?>!FEky zbT2E*0+=dPR9m_BN$sb0o8AvHO>momLJ9eNuk(k5VNN&22M9y> z)~+U=hS~{LAf#Yn!8}db4OBwV2>?R*OF(i%_wU4?nbEHJOIr=Gi=oO zd+RHsJ5nDsX^V9N{R|p^eVx?tbC|2I^R3DZ*}PRuN6QV!{vq{Jb*}!<5x>@|sI)Dy zc&uIi^$Vu=g@rIRxvNr0`CqLrs^bKJiRmvF8#Qj-fHl(YgDED*ui~FMBX^OX+;qoWNxX;6Y+P7L=jSfPEYOI;Uqw0oT+vPGFflP9AAJUhQRp$Z93EIV zrxK=lU^aesW@ddDylkkppyE<5~TZJ}L0d;o}7olJm&gp^s#hzkSuF&UtGj zHTiZ4q(Ac%gxsY_%&n}Ll3{8nrQ`gIF2@tUEF`S=i{%t8rjIi+W+8#{OBei=Gky3U z$;vzcL>$rjRjx0%)4S*WbQob*Lbe6~zckcy^F)k0Frwc8%pH$J;=Zt9fV`LB&9mQgaUN7h1t}*_|;8zO~gz+yjkGdS- z0SffyW@cP_SFzIy^}nukr1A{HpdAVoxJE&@|B>AO8HZU0bPhdM zUeb3@(P)iocP9|^Aw>nK&d4Y^#+d-0g5X1Qa}XL@0cb+dMnly;jlX)hX-hu@`^m~G zXIDO7STCY1Tyqp&+ zvBKsoaSjcHx%xg7dNNo29PXJJgeANv^O3W%8GL`MY`QOB++RQkLYG*k_2d(3XTs!rpqava}&KiwOTl3I5q5@aqwXH-WTB?lag2 zqufxqL2q;hOB#Kx15piUv0Vc(iXVh_+fk)JijA6r;0?nzzSIm6&A>_th>^dd!0<~I zFS<}C8sMFM+1(*#pGt^hdGh=VXTv`kJ&c>cVFVWy-uvaREBN(tBN}3Y+QsI*P$3{K zl9vbL_ZE;|h*<|32wgEH2OKD}!vgLNg)OmvS*2mrW-8Suh)%%ebv^)JH0|KvAS5IN zf(6(CKd}0H-`!Kj?R5a`2y*~q#(aO0#MG90#GIkCGE4gUNf)jZuo$4pesT)M~;PN_N0-F(Rc}T>s5rF{@fp+xU;$l9q z6~Qw~Q@(}5=FIT}J)vTZjg6TXyMudq06OM>MV-I^jhFwqx0tuTv%?zs1wQ8Yrf1z9 zoV;Y!a|Rkjf16VZ$e@jA26O3CV3E_Jn#w&YQTLvvrVI!Rp`rH&5Ydbc43M2aFM)zV zpMYL+tNwf~9zF<9{--3K3{R0!HLN}aTI<;j@J@nd<6kQJ0ax*`AY#-X#~9KKj|x`Q zpnk(UflR#Ho9A5&3NL7B8~cz`puBM5Wn7#GOxLWftpT+Cu^(gx!WyW{3=B`ukOgWX zlLZ(Z7RJHO-qzfFnvgIoIC!|iDGQhv_HiYenr`b;cf`fT>AygTN=jvbWR*7?KF8mO z8W!V^{dMx-9hosaT0!sq{bmTdfYDOBJ3~?3_erkULL(=W{2%*lX5Hi=7!zmw>BYoq zfTpcZfsR^(XtI-_H)6g$CkE{04Fg%dUp~`JlMoM2VaBofVh9NWyTndlLe`k|!c(~0 z+>O{oeNX=zO(t5d**k0CH?$M9N*s9z|yGL+w$@yjHKMz z8UMf&Gf2g;25$Q3b24K?~gEV}MaEta;oT;$&jNY*KglHZM;>MrQK5`~W;|XcgQ9U+#28 zR+tbtOGK9pI`8S>1BzP6C*_R-SM-;#+gi9c0W+hP)jCsf3Hi&@ 
z+NDj}|8`Q^cmEawjRtX#&$#hyW{PV*kcV5+(tsk)cmUc01|RgfySuQg?A3V8Z-qH2 zWWtO34-58+>Te4css=cse#hW(8y8D18Dn$9CS=bHMgJs$e-3gb?X*V{`lC=F0b?K6 z0FU)KS_>3^wR%ZqnLMYD6$XA{gKG65K2_B6>C*!+S>Q1-AV|V^?>0D0KLf=N=!Aq~(Ii(j zfT0FJv4W8t$RTZmQf{&k^tYBh`Tju*q@p+Ac@)5!<3a0$yeSIU6ACL*!e|4SrBOX8 z-Sc(z^#I`ezJ4`=*q16B97~Jl%u8yY>_#>^#A`uSPfbk)4GSh6&iL=f-T&870V&L+ z{q-dniw5Nlc7fGE{-ae5o*kKg4_RPs88kd1X!qD(B36Z&w4fE!unIf8D1P{n>Z)fKRGRQ7zmD{Hk78X8vA_<1v zg9l9d2}d4CRF$r_wy@8UCoV27Xu;t712!)yDe?33dvqN4%k;W5^{&tIsE^`>Ee!5j z{}|o3)3~l&H)&~Z-6Fg7nb!ny@yR+a`HMFxGv(TSN`;_9`INm z^Z4lwkJs_NwNU+Y45$tM>C=Eu@GsY-C~Y%5odk{w4>{o@zpJYHlGo~1m_sx?0}1FB zh!-fO1DGX+Z3U?oYB5hTVq&;WFGsaTbKh$Vi;e)=f;mPAv(eHTz}GR-+N-Rs|Ouhom7b{^3y7$GgDFCyvK$vupLgm7 zgbt3FJJ|t0wQDlyTiX4%Y|YlrT|bV~JCS<(@v7Me;(Fy5`iH?ocQdE4it%pFkQZ_xkCh{sx^gX$e-&Y>N?~G^5Q_m0y zCQ3P%<0R-f?UZLxuM>UaUpio6c$v`X7U(pkOtJA+!R)?zWn)a%P8f_>B6|F^0K)ul zeCbIqMjPA(wF%N#V6ctfmksL4PD-MKVZO_k@8tDc@Ww(k7K$dE^TtN0;4v5-uv`w` zR0cRoiw9x`tjqhMGRQBI2)|i+?Q{*|C(HsYmpM47fi0m>erj30^Nye#;*(s3#9qTVHzzSOdr437|YWazj+QO`-yiY3J0N`Gvd`jH)S6JVeHJ~&T2_0q@U@uWlJPtXi_$wT$>Y8__7)_Y+O-8=_E z9=JuAf_5tVdyal7%aK`-Sj4ECKtlhIU>5|*mx^8*TZKp&10h60vj~MJEr2kE%^p=J zV5td&fT~LMs@(^NnC8>qwSXI*bM@@~Gz^aA5XRn`%_hEi4jzKBY$Jc+R^i~m(f(?o z7&!dU%>V&WpF4L01)>m?U32@}ardv>XFtt^`3!v%mOB;=4~~Ic87x=at53ub>V>uYFg zHis30-Wbk_0eu_I%FNuD9098Fe3|FB4>(zsETJ2q)?Dr&fe|zMa8AtvBY%7LE@O%L z$6C|zLki~uAGdTHMl?Nr*q#3Cc_-{q0{r}9KN^TpMKJ8EbRma&@)RNEZ4{VOCENM& zGv?a;^k3e@w#^8Pxz^KjIg3#1UdnkGA#B~pxs+=74t1kBoJP)a&jumEN!%oN<#(oj+1fu2i$hJc{j2UBx+(=43F z9C`Et21f~zF!s0!H1TU4bO(5YKr}q?kA0lxNGr8@B!8&&{xx=ud|$5}n4b}&fCT!o`VY1Ni1l)G&$<1?qu>7B+3mIvy-A5mY+y#q- zgO2VI8Y-^kvY~L>y)6r*(gjEvpjqkYlA5FJK~Mc9Q3Ovz($;8VYW}Wfa0mdGy#5yP z2QfIuPcbqwvJ&a3ZTOzF2MySnUtLpkes-4h+&NpY9)e9w3U}d=BFxF~OoGqb2wOWT z_Bz3J$m#>eWoAx?;lku;*p@+NAP=n{+Bvh=-8V5g-vur()u>~k1+rh^)Wu5da|V!y z2OAw5HRO^&iV5nOG1>&djpdl|gggamW8^$%Qay~iRgD>vP!B*O>;}le$1vv~< z>T7d@#4fX<3N;8i1A>5Q(}Ak^W=QPj=88GaJ@+)!)EtD(FxXEZ62+kl%1Z@g#^DAJ 
znis-;1XTk>BWwzqg!A-cE(jlj2M@{uu1hVuZP)=M1lmkCAf^P|*R1Qkeyumw96Np5 zj(3h-z+>Bz-iT$~s=hiR$^R4qISK_Cy0767hj04y3C?)}+Iw%F<>4+EYC&k-H&Kvx z$buLtfH2@s3=H2PF%4wed2EdScL8B*}o5z7~> z%da*BSwm2d03m{|+%q*&yd9m@&T6}qZM}738vHA!WWYGg+F$O%Kpi$q)bU!-c=4%P zMD>fw3Urql4BmsI5iS6QkqHTTx|od$H0#r=^}xI>FXn?0hjQGQxw~&u=x^zt&6G4n zxeHfFjGSc>YbP7DY3??JoS9V0#L{!E?-l5|_2xH5r6A^`;K4zkj;fp%Ns!YsGqG~a z)p}UHBdWiz&)v<EF7 zgWduGuC_F=0<{!WWQ`uoMj{46>{mefhk1gj!Q9^H)N>G)IdS5|XU<9j%IjPZT`h86 zdmo8wQYeTeaoESZWQ9Vaj^LD}igBNE``cUBrS&(o~5_bXgWqE1@)7H_^>P}9@tWU8F;l5n}?bAsZ zq~;}ubf`b;Q=mUk?LeRXfkHiZBxN?8i@AYg=$m>ygRvRT$DTM4vKARtQ&n^@8T62o%9f<^|SRu(Zz zi2_0gnJ4zXx09RiA}8OM_k8d2Uw+Rcl}F@whztVyF)_cxCm}ZjksS^V^TpnQl*reI%5^lC z-A-y;hWC|nu5z~yS&j1bLv<$XLq2qQ9O1z~k;bhn-ZIOOcILlBtt}C!S<>QJZx}ZK`b>;ku{6;WXpxm`-&-Mq`1l*a?vUnFFtMc%lV@}M} z_C~^jJQ{T)w{l&0k+6L3=(o?A;9WUM13K;P#3Q%s$mRp#sKUuo{Lv#o>uHj>`=7}$ zrUTjqJQrc+C9VTQk?=(HVaY`8C;?#an}vnEPDk@kcx;c&&iz_)!1%_dhoHde@1GgD1%=TjA(oh90Au)jk2aIz#M+xgxxy4YlY z?10mrNY!gY=l}*#akUCn8xxpn1d$lcIv?NgMz&?ukF5>2_4?z!tw0PiqnS<&OW_BkwM1E0jYWfZtFia=@=Bd4zAzCi^^X`v z*iO|#$ZwdW_&81Cp_p>pmuBf#OigQv#BALxH=|_#zOkT*l;R}n54F)m0vo_y0NELJ zQw#xZ^ul9<*6MfcgD40=FGH|mz>UJZx5;Md*K61lKnPcsFg@wR>2Jafgx**4y}Z4F^f|#@_giCk zE~3f|Vwijd-J~LrweXKo9dv75ur4{-64Qlv>M1i8K^9o))8`BWYe=SJSElx*7C&|{ ze&I(8SY@G{CzVLBhk8&boYjQ~g+r~Y!r$nvv+3)42WDv$U%y~e4v)vrkHsG};i2m3 zZw_}ic`Ba#DSfypjKZh=fi2v>Ipn9;qRp1KoF^xr!6TyM&Ye@J^ud)(a|pCRZglOd z>Jb~_qLmQ)%oh{ijhsXyjy638=%7OPZ^s))NM6h-{X|C62^D(%hGf4e=GgA`dsKS6 z)k0*T><@LfE_&XzBa?A+OZA1q?CfEv9U=JwVJj|mYr-*2xHmBH;~Kjq$MhP?P9{8+ zb~C=a$Fq$+d)nbKM?k*FdNkJ9)p_ei-^Khx1 zqNf@S?Z!sy2v{407}_c$zE;@diQDqB5+WkZ3ZEMF+c$1{QbjG_qV@}l1c*Z`%*g#U zWK|NWt1B6}HJKr^Jf}jFQOypq=F!( zQm;(Fm4uw3IAY7Y%gWpp#%!h$pC(JbX9`h`F($ZZJo{{2H>WgN?4!$pD16zg)+j$q zI}Tyc@j$B!$B9qh579aDE9>{GHMr(iLaoMr9wg8`Wg}w|b+;caOzSA;=YvZ(4F6E= zy!PKX(Ped-UXu}yKOG&NSF)x`%suGkaOx!h6h|t6Y6@4=_($&>1Oev-UkEXR%3YN4 zd>FAn_h~D@&+Vw4H#E%m+sXCOW*FgzgoR<@&H+Mzx+B!NY&Z%{uE^2a;QRbZTCLwa 
zz5u3)pbjL<5`xRl&UgP#-GSf{Gu8>E-2kWTG{ZGkvu%-KjymhZahx5o**Ni z^?+g-*uOX*8#Li=_x!W2MA=!YKj?!PL3VwgA@3*w)72ZtRHOk}VCp8Wer%{h7% zEf%62Jsqixa%yU7>VRR8rSgZ3({5vDt*a)Yaki^8*~?jHtQ3(-Y$Eq|Hp;ad7vg6D zzNCk2H*UzG6FV$KEuUetY?7OCvSZW=?vhH*UMU*U5^%ncyZcL~QEW$;deVXQoFw03 z{`Eb%24W8vcMHLhLEX5)#L8FKY?5c??(31~muwNJv{qH>|JRE|I;6e9C8p=%(J_)~ znZT&v=fvwlB}X;L)R%1hyAKAh%!T7}^wX;MCKi8rS=)GdmECae>XBWlUwV*zT Date: Sat, 17 May 2014 16:59:56 -0700 Subject: [PATCH 075/188] Create and use a new compilation module To allow compilation into an execution graph to be done via other methods than our pattern flattening concept create a module that defines what a compiler should do and what the expected return type from compilation of a atom/flow is and how it will be used by our existing engine types. Part of blueprint plug-engine Change-Id: I324172571f36a574195bcdeac71c6ec14cb11944 --- taskflow/engines/action_engine/compiler.py | 49 ++++++++++++++++++++++ taskflow/engines/action_engine/engine.py | 16 ++++--- 2 files changed, 59 insertions(+), 6 deletions(-) create mode 100644 taskflow/engines/action_engine/compiler.py diff --git a/taskflow/engines/action_engine/compiler.py b/taskflow/engines/action_engine/compiler.py new file mode 100644 index 00000000..446ded95 --- /dev/null +++ b/taskflow/engines/action_engine/compiler.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- + +# Copyright (C) 2014 Yahoo! Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import collections + +from taskflow import exceptions as exc +from taskflow.utils import flow_utils + +# The result of a compilers compile() is this tuple (for now it is just a +# execution graph but in the future it may grow to include more attributes +# that help the runtime units execute in a more optimal/featureful manner). +Compilation = collections.namedtuple("Compilation", ["execution_graph"]) + + +class PatternCompiler(object): + """Compiles patterns & atoms (potentially nested) into an compilation + unit with a *logically* equivalent directed acyclic graph representation. + + NOTE(harlowja): during this pattern translation process any nested flows + will be converted into there equivalent subgraphs. This currently implies + that contained atoms in those nested flows, post-translation will no longer + be associated with there previously containing flow but instead will lose + this identity and what will remain is the logical constraints that there + contained flow mandated. In the future this may be changed so that this + association is not lost via the compilation process (since it is sometime + useful to retain part of this relationship). + """ + def compile(self, root): + graph = flow_utils.flatten(root) + if graph.number_of_nodes() == 0: + # Try to get a name attribute, otherwise just use the object + # string representation directly if that attribute does not exist. + name = getattr(root, 'name', root) + raise exc.Empty("Root container '%s' (%s) is empty." 
+ % (name, type(root))) + return Compilation(graph) diff --git a/taskflow/engines/action_engine/engine.py b/taskflow/engines/action_engine/engine.py index eecba801..ae69aac2 100644 --- a/taskflow/engines/action_engine/engine.py +++ b/taskflow/engines/action_engine/engine.py @@ -16,6 +16,7 @@ import threading +from taskflow.engines.action_engine import compiler from taskflow.engines.action_engine import executor from taskflow.engines.action_engine import graph_action from taskflow.engines.action_engine import graph_analyzer @@ -29,7 +30,6 @@ from taskflow import retry from taskflow import states from taskflow import storage as t_storage -from taskflow.utils import flow_utils from taskflow.utils import lock_utils from taskflow.utils import misc from taskflow.utils import reflection @@ -53,6 +53,7 @@ class ActionEngine(base.EngineBase): _task_action_factory = task_action.TaskAction _task_executor_factory = executor.SerialTaskExecutor _retry_action_factory = retry_action.RetryAction + _compiler_factory = compiler.PatternCompiler def __init__(self, flow, flow_detail, backend, conf): super(ActionEngine, self).__init__(flow, flow_detail, backend, conf) @@ -208,15 +209,18 @@ class ActionEngine(base.EngineBase): return self._task_action_factory(self.storage, self._task_executor, self.task_notifier) + @misc.cachedproperty + def _compiler(self): + return self._compiler_factory() + @lock_utils.locked def compile(self): if self._compiled: return - execution_graph = flow_utils.flatten(self._flow) - if execution_graph.number_of_nodes() == 0: - raise exc.Empty("Flow %s is empty." 
% self._flow.name) - self._analyzer = self._graph_analyzer_factory(execution_graph, - self.storage) + compilation = self._compiler.compile(self._flow) + if self._analyzer is None: + self._analyzer = self._graph_analyzer_factory( + compilation.execution_graph, self.storage) self._root = self._graph_action_factory(self._analyzer, self.storage, self._task_action, From 9ccca6900c15b563b1000b2626e036d75af5f6f0 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Mon, 19 May 2014 22:25:25 -0700 Subject: [PATCH 076/188] Rename t_storage to atom_storage This name reflects better what the storage module is actually being used for/does. The prior name was not very informative and meaningful. Part of blueprint plug-engine Change-Id: I7015620f5536fc766cff3a877edd0e94d25db1ed --- taskflow/engines/action_engine/engine.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/taskflow/engines/action_engine/engine.py b/taskflow/engines/action_engine/engine.py index ae69aac2..b83dca88 100644 --- a/taskflow/engines/action_engine/engine.py +++ b/taskflow/engines/action_engine/engine.py @@ -28,7 +28,7 @@ from taskflow import exceptions as exc from taskflow.openstack.common import excutils from taskflow import retry from taskflow import states -from taskflow import storage as t_storage +from taskflow import storage as atom_storage from taskflow.utils import lock_utils from taskflow.utils import misc @@ -231,12 +231,12 @@ class ActionEngine(base.EngineBase): class SingleThreadedActionEngine(ActionEngine): """Engine that runs tasks in serial manner.""" - _storage_factory = t_storage.SingleThreadedStorage + _storage_factory = atom_storage.SingleThreadedStorage class MultiThreadedActionEngine(ActionEngine): """Engine that runs tasks in parallel manner.""" - _storage_factory = t_storage.MultiThreadedStorage + _storage_factory = atom_storage.MultiThreadedStorage def _task_executor_factory(self): return executor.ParallelTaskExecutor(self._executor) From 
620e754ee82f24eb03b696b108112024f2051e0d Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Thu, 22 May 2014 12:18:41 -0700 Subject: [PATCH 077/188] Fix error string interpolation The values were not correctly being interpolated into the value error that is thrown when merging graphs together and they have overlapping nodes. Fix this by correctly interpolating and expanding the messaging/reason why we do not support overlapping graphs. Change-Id: Iff29f793c7dfd6b39a4936c7f4f31fd660262ca6 --- taskflow/types/graph.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/taskflow/types/graph.py b/taskflow/types/graph.py index f6759127..358f018a 100644 --- a/taskflow/types/graph.py +++ b/taskflow/types/graph.py @@ -113,7 +113,8 @@ def merge_graphs(graphs, allow_overlaps=False): overlaps = graph.subgraph(g.nodes_iter()) if len(overlaps): raise ValueError("Can not merge graph %s into %s since there " - "are %s overlapping nodes" (g, graph, + "are %s overlapping nodes (and we do not " + "support merging nodes)" % (g, graph, len(overlaps))) # Keep the target graphs name. name = graph.name From aae364daa30361e3d7dcddc9ff4137e79bd0b042 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Thu, 22 May 2014 18:46:14 -0700 Subject: [PATCH 078/188] Add a example which uses the run_iter function in a for loop The run iteration capability can be used to iterate over the engines activity and do various other activities in-between the engine state iteration; this example shows a primitive use of this to output the state counter and the transition itself. 
Change-Id: I63ac343566a4a090a20a84c626ddea3651033926 --- .../examples/run_by_iter_enumerate.out.txt | 42 +++++++++++++ taskflow/examples/run_by_iter_enumerate.py | 59 +++++++++++++++++++ 2 files changed, 101 insertions(+) create mode 100644 taskflow/examples/run_by_iter_enumerate.out.txt create mode 100644 taskflow/examples/run_by_iter_enumerate.py diff --git a/taskflow/examples/run_by_iter_enumerate.out.txt b/taskflow/examples/run_by_iter_enumerate.out.txt new file mode 100644 index 00000000..4f845b84 --- /dev/null +++ b/taskflow/examples/run_by_iter_enumerate.out.txt @@ -0,0 +1,42 @@ +Transition 1: RESUMING +Transition 2: SCHEDULING +echo_1 +Transition 3: WAITING +Transition 4: ANALYZING +Transition 5: SCHEDULING +echo_2 +Transition 6: WAITING +Transition 7: ANALYZING +Transition 8: SCHEDULING +echo_3 +Transition 9: WAITING +Transition 10: ANALYZING +Transition 11: SCHEDULING +echo_4 +Transition 12: WAITING +Transition 13: ANALYZING +Transition 14: SCHEDULING +echo_5 +Transition 15: WAITING +Transition 16: ANALYZING +Transition 17: SCHEDULING +echo_6 +Transition 18: WAITING +Transition 19: ANALYZING +Transition 20: SCHEDULING +echo_7 +Transition 21: WAITING +Transition 22: ANALYZING +Transition 23: SCHEDULING +echo_8 +Transition 24: WAITING +Transition 25: ANALYZING +Transition 26: SCHEDULING +echo_9 +Transition 27: WAITING +Transition 28: ANALYZING +Transition 29: SCHEDULING +echo_10 +Transition 30: WAITING +Transition 31: ANALYZING +Transition 32: SUCCESS diff --git a/taskflow/examples/run_by_iter_enumerate.py b/taskflow/examples/run_by_iter_enumerate.py new file mode 100644 index 00000000..66b1859f --- /dev/null +++ b/taskflow/examples/run_by_iter_enumerate.py @@ -0,0 +1,59 @@ +# -*- coding: utf-8 -*- + +# Copyright (C) 2014 Yahoo! Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import logging +import os +import sys + +logging.basicConfig(level=logging.ERROR) + +self_dir = os.path.abspath(os.path.dirname(__file__)) +top_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), + os.pardir, + os.pardir)) +sys.path.insert(0, top_dir) +sys.path.insert(0, self_dir) + +from taskflow.engines.action_engine import engine +from taskflow.patterns import linear_flow as lf +from taskflow.persistence.backends import impl_memory +from taskflow import task +from taskflow.utils import persistence_utils + +# INTRO: This examples shows how to run a engine using the engine iteration +# capability, in between iterations other activities occur (in this case a +# value is output to stdout); but more complicated actions can occur at the +# boundary when a engine yields its current state back to the caller. 
+ + +class EchoNameTask(task.Task): + def execute(self): + print(self.name) + + +f = lf.Flow("counter") +for i in range(0, 10): + f.add(EchoNameTask("echo_%s" % (i + 1))) + +be = impl_memory.MemoryBackend() +book = persistence_utils.temporary_log_book(be) +fd = persistence_utils.create_flow_detail(f, book, be) +e = engine.SingleThreadedActionEngine(f, fd, be, {}) +e.compile() +e.prepare() + +for i, st in enumerate(e.run_iter(), 1): + print("Transition %s: %s" % (i, st)) From d6809a331658fc68938d28cd3931229b8a4b6e18 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Thu, 22 May 2014 23:04:47 -0700 Subject: [PATCH 079/188] Add kwarg check_pending argument to fake lock To ensure we retain the same API for the fake lock class make sure we have the is_writer function take in the same kwarg as the base class. Change-Id: Id39deeb8cc944dad05a36bd48993f11f2f11118e --- taskflow/utils/lock_utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/taskflow/utils/lock_utils.py b/taskflow/utils/lock_utils.py index 7f6a91bf..942e27bb 100644 --- a/taskflow/utils/lock_utils.py +++ b/taskflow/utils/lock_utils.py @@ -262,7 +262,7 @@ class DummyReaderWriterLock(_ReaderWriterLockBase): def is_reader(self): return False - def is_writer(self): + def is_writer(self, check_pending=True): return False @property From 257d85fbb262ac2192609b7554f91c5b2699e47c Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Fri, 23 May 2014 10:29:56 -0700 Subject: [PATCH 080/188] Add source of engine-state sequence diagram To allow others to recreate this sequence diagram include the source to that diagram so that it can be regenerated (if needed) in the future. 
Change-Id: I68dfea274e679e09a9d5e34af28d05d766856686 --- doc/source/img/engine_states.txt | 13 +++++++++++++ 1 file changed, 13 insertions(+) create mode 100644 doc/source/img/engine_states.txt diff --git a/doc/source/img/engine_states.txt b/doc/source/img/engine_states.txt new file mode 100644 index 00000000..3b33255e --- /dev/null +++ b/doc/source/img/engine_states.txt @@ -0,0 +1,13 @@ +# Created using web sequence diagrams. +# +# https://www.websequencediagrams.com/ + +note over RESUMING +Running starts here +end note + +RESUMING->SCHEDULING: Resumes and \nschedules initial tasks. +SCHEDULING->WAITING: Waits for any \nfuture to complete. +WAITING->WAITING: Continue waiting for \nfuture to complete. +WAITING->ANALYZING: Analyze future results. +ANALYZING->SCHEDULING: Schedules next set of tasks. From 97853bee5706d2137d4c93b5c409b9958a6b979d Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Fri, 23 May 2014 10:32:57 -0700 Subject: [PATCH 081/188] Regenerate engine-state sequence diagram There was a spelling mistake in the image, fix it and regenerate it so that this spelling mistake no longer exists. 
Change-Id: I9090916fe133d979af25c5828f3006c46d59aaeb --- doc/source/img/engine_states.png | Bin 24560 -> 24631 bytes 1 file changed, 0 insertions(+), 0 deletions(-) diff --git a/doc/source/img/engine_states.png b/doc/source/img/engine_states.png index b91041d2eab40cdac78869f6bda3aeb544155f90..3fc83da326a6e3cd280c2d3f2d374047c8e88405 100644 GIT binary patch literal 24631 zcmdqJbyU{twmWYCggD(azoxzi&Kw?p(a5kMG#diH(WTd#t7npJSdef!*XDk@?Qfr;e*BFF0B+z~y}F z`&dX0pNOrq_ZZ+q9R2_FgV&moC8}MJ{_+C9WP>BQ#8)BbX2LZiR?R!R2P_z}kreME z8HOsN8ylt5me@C?Tkr4ilJ8c8SAA)(o}uHWm8vwEif#(Vfophgm(AGV^}Xg9Yjo`5bj^adE%g z=l(h9=;+3rnyI4RB&4Ka*C<3h%JdL{F`F?3nx)wT-_ORe>4yget!;0s(&ID6FtW3+ z;rQ-JXlf?(kyKy6@_ZRYWOO&2EJ4(}x~8V)cAA~DbBdtzk2Hy=L`Ktw*TQi%h55EB znD9eBJUL&aUD;i1#9}pKx1fz(9w!^w{IJBCL|?kJ35vcFirb}hllfu`Rch%VKmiUkofvAo_NV6^u<1A zSHo<>@s4(t&KtFSN@2GLdsbRniL$h$><`4fz3b*|HAM_Ql~kBnx;Kp7lW4JOe{(Ty9ZOD!TzX?)K`W?^I)em7UzGs%C zqF(P%&n*y#vU|6^;bfbwDYDXLoO8t71D@JPXvHf_KA6czjQe z=3jbtfBl*eBrGBV7dfg{Q>L07xU3lXdE@1fMLApj9u@A_(NTpY-WAkU?XAzuISAn1 zk(6{GqYH0~lw!frBjobTRVU%x@6uRl39ro*+JV0tKg`13* zSM8Ku!O*3LZ7*&<*fbi?=6IXgsHVTcq*ib}JxI4b6YoTiX5%9olh;lnRPv#?xkCLZf?7*V`#~EjVoT?w+~O)bgOS# z73AkX3k|(V#l}|j>67$kN^f5HJqG60ednb?+`b~;^4)8q-0NXakslh&9!*hlI&ObZXpxkZ1Pj2($VgdVpUp~ETAGSV zTYD59Mv##8>B%v&%&-NsPI|)k)SC(u8=EG5@?@t{vWbhQ{s=+ro#%S_@6QRI_s^d{ zVY$LPz_Xp7{}Iig+=1FczOw z%tshk#B1-q?O}x2$w! 
zL~eEHpD2+j+|9wS6>$81rEfmfcj8|Uq{SlD^QQonpzF`%;)erys>m{z6)mc2yQ#+$ zJ5SEz{1{+2Xgr%B>vFiW0?+7Obv0aF(^{r9gU8W*7cwbyUa`U0h!+6_S{sp*HLh&9 za*5nhA1Qt;EqQx+;bXSoh#ft!U=#g_+5w%?)_f@ry3-r7I5;>|oWj_)dayP7>Lf#n zN{rgVX=Gx{*y7zczYl!*k}T@2Ra^4v)whvy%OjF{nhM*=pL28N4?ALE=_@}x+3)ck z92l@3E;i_Wb<0Dc-T3YB@G#sQH#c_~$5HnKgPzkF%F}QBKMN}-9PI3RnFL&xhvz!t zJUu)roO>z9EsN?Tkw*=UDH)~;fp<|4C^Cpuzn%xR9>esV^>G&FI|IAtBH zb&m@A-jh0MAyop`ir3d1jMs4ZQ%HG9rf^X5Tt5Ex_BLaT#p36zX+v21`oRl1sXlM* zVjgp}_7vwT=OliTfNB{;FD5p5WFShVEU`l`VQ$b9LCO~%#H?diY8ToWSC-ir7%Cq# ze!3CY(Cgb%PA=#a6co#*Z$tD_B_F>a2j^j%`<9v{a2aAgP0_AB z#KgorIzHZ4q~y2zmS0rwv9~q_3twi0=OCKhe{;Ua)y3r++hdW-yi%DUU;NiwyX)(U zxhk>I(TrIO)2OEa3;X=}^K5KvCo3Bn5u#M-Gi?!Et=~HnP_;r?xrwO8ZVDZDk>((@ zBsgKsK_nzFJH?8oV8!*^aR2GV_{Pv6c6d%Q0UFP}3qHVBA49qO8lD}hz5 zuIC;QBzm+akbs{Wlcz}{ zp1i(uytuHs&HJ;D4<}Ct^9$t|Fj7mhGc(slS0@|TqS_}smRJU3qaSm`rlzhgEG$e- zDQan1XlSs;iR}yxrn-!uWIP=bk#Wa3z3I_6YWRYOZ~MECqo4tvI+z{wyzcYhMBx*hIVAMC3t z_lTX=?!_?q(-}yA@YuTVl6?2laAZk^S15BqZ)BWsQF?k~e}DhPgdSYY_7~;p{Dzi2 zAtdbg8}$^LV>&qT4k;;03&*|!R>SNkT;^Y1EAIXr$BVO4CgkSf(S2t(1#qLuC_+RM zdHM2XKAW-POx_HJJJ!~Pb#--iXls+FC$3c0KF5NVM$#XMo<&5^r9*`vzE)9M%5_9H z(P?UY81LTZ)tu;V;-QzNRBj&A7|Fq@|G5!U$T#Nk&_lW>&iUiD?p(MuH*eaJ%`Ytp z`5d|Vc&e#g!-YM|(F%J}0SSF>ntVU0*kyHs#cIDz?BrH!Y*CgRgGLYer|``nG;Pk` z1+c}hdk!p<4=>5srvEOmePG=W5O=W8dL{RmqK~zj8g&w zFA;y{QHDbAv$fD?e4rJgTjSD|z%>t*^0shFloB^DFC#uC7E?5D*23GU57h(NSy}73 z5-u*hxOMyAla5?Oo7|4p+lY*8Ds1u^&c1+ zZRUs_2M75aEYcZd=&(8n-8?mNjtBcoE+ zRlO>mdWSjbI!l}VZ9U~miZ25L1o-%687=Zq&=nFmVgH;SIcCIo?M*#R^*yNqRCrg? 
zqB%<@&Ph=B{{830S>*j0-X62YZceawE6dC49Oga^>HEqdm2>3hDmQaJb8>RN(QnYZ ze}4_t6OTx00ZD|B@-45dY^%AFlauG?=WqWUjwB0w%T;b$&=sn9SC+!Zf#f*e%*-ra z%-2U(cN|c(&*|Z0WroA%)Bf^oox8IPCN6vjtLUkd!3~5xDpDnFl)lM9K1b_!>sDNU zj@?Ly(u}DMJ1_4(y|lFSmh^@)9W!(2sd6%bn!5Vv$VmOsM#m{Mgy4lwF-(CELjx_g zPAN`mYI$wO=vg~+?u&|wO0qGGS6FKco%{GamxLnA1h99*3)7F!ZyXW07#{5wZK~H| zt~+%zVw{F`9AWqgW92-hY3h@sdoqxp!fhU@l$*?B8Jm)lvZpP3nL_v;a`SsvMyfBv z6(g&TSQaEcIeDQv!K8ZQ0sB(CDyCi!DXeUIG;RJrR>3|*VSz0 zH!_*{N6OPO`^@{p-)zxHxJ+bk-Fj49QeXe_wDXF7dyDVr3;;As`v_B0)6JaIfF=8_ zK)b;LO{mNLISRYm+XSqY=K9i#+S)=7-Xl&vkuU>G54$ zTy(jzifiZhSIynOCzv+Vl)ilnt5Fh3>Am%W|7Ey51MH>!)dF%po8-L%2Don(hC77G zm(+@Qd3j@lfI93s2<%LXz2j{Cc?B;JdT@wfjhtwnKAbbG2?)Pf@Wi7>^EmD4l%raD(qDT=J z=V#;WF!c2F6ol8C0i`ZXPg9*tzY6{JM=?Wky>d1fn}pkJx;Yr0i?H_rx9^f}+lTuu zkMELMHgQ;321M*d>Q_0?;N}ke$haE;40B5{FWrV?PLN80qqHX1mk_$aEhOA_*8R_s z@y>$dcPYK`uU?&AUS^JJKiFF2vY&2lI6YpKWTQBO4k!v}^n`@i)pZvjn9q6(;WVd* zroo*%0ZO?y({1bbTUCggo^WpZD!945Gr+MUi%U)xcKj}Tl7|l8vbJV-qSm=P;kk+{ zld1Z?5a!a>{$Sez_%%Kz25zu#nqOP0r_ag$d<*IRqy@(|d6O@^_j_A3*G#epsoyBA zd3=V(=yKj_UqhQ;GFIKJT}H2SL6+;(XJB(e#JKaZ(X}<#O>I4jg6=QR4d^}7E|WNQ zW_1s(9Mq0tT=HNhyR|gAjs}8)$p39!;XnR9KRIpJ=k3PixaXZjFVWn?aJ;A$AoZE_ zBazWy_=f{y&MgVsM+%N)Vtb%WLek$y7XCn)h#OL{eZ^P#M zIZ=1mEwt(s7ejzREMON)_ob|*=qM_3KuJD1KJ?w{y9>o-@N!<#2PDRym)pTrLkUIh z%NG?C_bYt;y4Zyut{i!fp_)NRsK(#l|J%23u(Q6;&1nje!QW)!kH^Z=9&V)lWqBZ9ohDZRF3?jqmOiT8!O`*8vk`x~5ULsifNdPMI^0`T zRh5Fswu+k@f17=TUg5K69i+Ck>kHdK<5iArO-&ipm}fvRvm!sK!{7itQ;CFLHnu(9kb)h%y+KyBbX4Eb9dly{qXkYfm5biwt9P@kRtJ0#Z z`}Fw8Or_j~c~jQrh=-hCe~#AxE*r4e2L3N+&E0@yv^?%QNzQNgN-6geCg#CbzXEno zpRp+|f^g~vx8Al0m*igcQ34IBCO zCo(b;nmj=5XTFSdba#hkj^cwFo0Ff&2WE z8(Rcx?0Djg@iLFQ?=du!k&%7w@88eq%*3DXNi_{!kJFi*oh2GK^NG755eQH3q5=Hh z;vEe&we0)!J1e8=s;VvaIsl60bMS_82P{DKSz21s*3yC>(-QXm`}ZF$R%N_RZvaj| zp0M9PJdBHt-N5+#`EzYuU5gwM%iphofBl263uh}Rf*I-QvCNu=8iB{c`v4UwiM@Kt z`ogkbc6D@+3OHo0EeGLJZ+!ykj}oK|ATMogrep8y+uPe6_Io^<1_yaXM7$gQFT$&v zsQ0ef%LN&*vYc0Mvfg|9@(p0A{ryo5)Fz*z8Q>xHS~jXcmGfiGQZC9WE-lrrb~0B} 
zi*=(;m5q*$4&o!}BJG^y%-gks6%Kb8AXZvhy8X$Ln*_)gmua_(ii#Ed?!NRpSy@@r z=zuO!`=QiSK~l06^vGrf;%uzKL6aX>Kqk%0&)<*E&&y+^rj{tWsEpcl)zRJc!pgZS z4rFaPxbpJy$mqnxygoCRpJNQx5-(o70994Y^{0-UTvy98Lg0hjm0>0_ilJv~ntPOjbypWzQLEiZ=^dc+ver@u0dm6Y36TU=aRZaD;Wb9NqHtC$#c z^uk}i`7!8c@FuLStvfz_3WANWv{E_wlFxSH)vYHcJug|TB}z(4*cy&DY<)IKVeey; z@>)&QYOZqCE2N6nxU7VPhB~>p=z$6exN|H=ffB?rKR*N?A0O4m+S)Ln#(VdIii(O- zQc%$K_U+rvJV98bg33!6gN4X6a5%+>ls8*MCj98MUh;xOoNw6_lLEP{k5 z4VPIt>MYNcP=XYGk+=6$yrNKLR`Bg^k_MkR4HZ}ku@4dA&sNtWpWU$$C;MXB?w}$a3>o5f;9Tu4W zEaoCRzpA1W?f<%Y{H@I}G(6X)jgrzJMcoi}WETGT!P32T=REk=hzoS0@bQ1{7W_9Z zLxBQq`hEJ=aB|P>AF7t;2W~)#UUw{q-c6ts!a4;v3~C4zX&)P#pUMg%1QZnA_Q~Z> zz+W*tbM%_;MY->o4sF!j?5r?w_oLL9m>3%y8-R^anTpl!k>C7Xk_S^9_HKCo`J6Pe z793DWBv*y;q$fK&8=3@EM_4GYU(*+=$snbqq|ngN01bm21LQp`HC6eFSZurTYQt%L zYHF%ETN^%hAqs)6B88j?Ad*y>wWp`& zhjRB1G;zX5znAPQYNFB||6ZAm-^Dh-^!WH#Mn-184Jhfv!~{sXm;r`Rrl7!+wG9jo z{%{iHVrbc&^z9+)Uv%<_aCC5BF489}R?F||>?|lMvNboCQcxhwqe=&LimP=G>z0Fq z!_m>v)vH(i$?@>;3QtB#JrKQj9{k2B{Of%*Yn1JwC9SQkuV23g;q!vR-26O^h^DXY3VQGq`% zFAugYH%VA&=_X)mMn*>9o$t*0(m#Jzs%QUIeKjTwlYY#rs;DsQRy*~)6d@)ihQ54N zEXy_Gby3l3R}x=Gl}F3uWvcz8$&VkX=E&Zw=BfPlM)2L->KA96_P zLJX<5mHV;bXoaj1&g{^Y7+a7|HI0R_Sd zH~!*vf=(K=pV>5xx-E4VTS?sh_%HHaP;vauJ_lR%fBWHdEE` zTc}X9!(PYTy^5xWh-L!ZQV-!w{FS)qX#6WzK&zRVpRWYzh04Ol2E4VMwYB@XDh37y z)0At%;HOkpR%#Val$ql-WaZ|XZ~uUYy*?ZHcdWuB0baO>8szP>)~N}KJCjf3fDY@a$h zdXfd;&hH|DWZ!mcG+LKz@(69Ey{uO&gd6bm>CnIc4ILexqLfr{pPGN$zg3(Nv1HJC z?WHmbiN-t3l8^Mr+Ai+<=T6uxJ-^sCT2G$#^n)xS_W|q0UffVntz^5rH2vmgkL;xC24jofYPEG*8cs(jBN=`*tv`n<{wwcmIV~2mY;W%A>gm|XK1uVg-LRklZ38eXsM_{9wl9xc#Iw);3pD~ zhZBnZ0W;B)_sr;k4ueKUck|}n-d-pvAFF_XCMeo9uC}0Mcs&B7>HqX8)*QG#GRW!v zYtVyUzI@4A$Suc%NsXtz!tMC&ty=}mKE^aDF~QCgY8j}((iN; z-u}UXhtJe4*nlD)Dxe{I?e&h19)8JH_Id;_9~K|h_9heFbQ=`Qfq{XeR;Xp2ot=-) zoTsIw#groxaPU8=k^W#$X0&{?ziC|T^Lt@_SSnOnXc_zwj8moKGxv$0HC{f4xS|Crf$kMGoQ$a0CQ)9e0u~TxNf5vJzUlY71vV)!n8M1E=ZJ+N&!u84|? 
z%E_6BhyC%v@%v(mjFMnY_yOA5q*ty0;oEZgT49}}RR{%T7~DO8o@3+VVQ-XzT=}Zw z`uJN^!5_Uu%FE2k+WiGL_{k7;r(In+W~cj~u!hrQdCSX|Ix3CQgJeDH2BZ%FfZn|6 z&px=O|F4QF-t&~}Is%ab%y|p*BO?MKND^xqRBSbXM_fmz2CapgB$Vfh-u88JD9M74 z7aH&kn}cvQH8lsrF$BrJ3ttgECoUK-gYI+|mlmnuuZqczQ{;i7w}>GnaoZ1*y9O)t zP%oJ?@q3wqR;e)%e&K%6=pLf8cz^s-yN`76OV2A|F0?;a0GLSvUukqB6&}3oyX}Ar zm56O5P;3^KF80*-8fS30&@vxa-F(l8u_Y_Hc@e&f|6^g$bE6$vBFjJTTO$kl1LETK zzkd`b@>mXrQi|#AZT9${l*-c9cpvhfni?3~@SP0l^9Bz*?1VefJ+uTwuJh-g?qFO( zJU9nIl>CbqFIM>+dq_x}Ih}|8KI^c**q^g6#-?9yWob!DMrP%tPe>TYH{tdq2ng)V z)Rck2vw(mA$aOF{`m1@ z*+P0~Kd5elZ2r}30$6|;tshEX5kb7~`Sk;Qjt|0!IHot|I*?B5(|4e@5iqHFfrZM# z!h&>y)(kO$O&UN;XVB5DtgL>cPh>hCCKTpXc3Cj&z;+v4wTRd;P0GcGRf>C>k| zf`YJN-)NQRm6W_F+5rg_!Xt#NI#*ULiO!&`Gw49T%%EEpYFA$E18sF@ql0a;2khfq zl|0~eGNZPD%s3ugI_~v!DlgG1w-~@;t9?)82_}X58=bFXW6RUi4sdy`t&(7OfHu^s z0nPMwT2OFsZeE_WoE%L$paFocCmR0eLreZs!(y8@p+DGD<5L7`R9i*7}}^I(=N)&eAKq<#WLXO`%lWgnF!L@b%p>o2dozpLJyw#5c z6=7>8LhLdT`}^+h?(y-IAaLo!2DAGY_cpIXlwfzv5t zVkhenP+)!qLcslTg$^;@Xkw^Q<5HNBF#}p#(Ch4M6Oe$Vvo^-69D76$m%*5>Hg1n} z(UHVNjN=0I(ddV|0kz5v6m4K0K0x}B_<#dJ;f2TySk&F^?ZH7-*4E$`(#giNdu%O0 z?F8ZsYB}U#U=ag91x-grUOu)%uD%txGJp8y?(Y2Q*6uDaD$!H7wQnCh0=k-ei}b~) zu5O$QxQ%=58i@EP7L_94XadrJ^^Yrvh5BM?v*4jWL(QQT*7(Y&k})}btI z2dE-~PV~3{7S`L*aEWQDObpp|{J}OlNx$<8275;&NpXN3| zYuoVkaC5U7tzfx%vw3J}sJq){q|AJpQWFpp=7kG(j*isnkpEa7DQDv3{QUK+J9IA; z^o8hHCwHCgq-cyZkJsMXt5>h!)h9MQY@*-fSeWYZ{_^EZ?cVgW*lj2yXZMb|sgA;R zY#*2WBmP43KV;Kaq$|6|4{p~F<}}1Qc4qlzDumR?HG|x8i3g=^y{l~bAy9Az5Lc2ZcO0H zx$1Q3rd8eT3%iZ6kOB{u2$zgZ772@MYjePD%03@s?>wm3kc+E>K*?W~OZ-zNeGh3F z86J|bcP3QQva**hTqrInS^uSf;x%wUwVPb{UG3~?LQBBf2gnOCi2B?&R*~CTLsWj(mTkTXV3plCP9& zXlUqvB4GDT96$;gIk~>aVix2=?6m_$vI+BObTJVrA!tSmPbZohOJHZ>N|Sh789ai3 zO)>=H-n?OjKU7rdVEuv=M^8@=!qC_lm`OEOUpW;NdQbq_`rk0~pDZ1!UX1h{paBaD z3ne8be9Su!AJQ}sw%Okn{Xit-ec%L?8|*S@Lq_c{I9nm)7XZDA5Fg+8;X{mb=USVa zgRpEz${3(@GBM?YzQN7y1Tiy{9jbJYWfa3Lr>{MvaV$VH?ju6|aVQpG_DSfBG>WzQ+?|0zIGYP z3O&G`p&?iBL7{ell425GR#BnG8i!Q_v97~|gAgnnGJbXefdt>=q$JOU-Wyz=xP5=o 
z9rj${k?+?*jpfqU*SBYf7Z#^Pw4x*d+g!48<}Y_d!IAaOc`0^3`gW1u8t$9>^v(HC z9IBG_Z);RoeFdFJAe?K2+h`dI56V*%aG0%i-`vA7YSoaI{sbT!R##g%dD8Re*Oz%7 zeu{=-1s#8pTprf(RMS&B@G`B2iyuEmSH7DV7k3v4>+dL4?075ccG?v}HG2HpSFf{| z$6YDxYxAm^uA6nJ;wNgN=wBjkLhmG^;!ITi_WN4DX<;Isv9|_+5}zrDF}rbE+x&+{ ztF833DZxFUqAp?(ZV86GUF>GSoauCBE`8d&H{_J9D2jT zz|h_Uim$Jam)8-*en5DKmsbkN+n_11ZsQZvQ7*Q97g?J$8VA6llOfp`n11X8Bopc${I!eE9Gov;<6E+M73t>cZ~uKmQLcVcHOjkTo6{3 zzW9u-{xyUfQc|d?sZoX04oq!uhal-dPDTde6O_rWeEWmCz}Wwg0x7M`P&+iWVnmUkXS+}ENCkbCAM6PZjAJPou4Mn=@C0kF0~hR7Sq zj7droa$4w>C&F4Evd>e=18?H_^D93_xX`KzfnxyDaUyW2(rNU198_&3YQVyO2E8{8 z8iOCf>wzBtm0f&lfTsvv{!LSo9u#MZA)LYbkyU$VEPx-yYJOkR#~)^hvAAVGb4j1L22P)nUGy z93rD9CBLoAKC~AVVOIkEO9-xAqW~YOxcGkJ6*O_Bck(e8hL(0{b?pk^hXA`djPN~@ z7`{qMdf3G`sZ;CLw;>JJ0n+8jz~wMH1FNfQ0~n_0PRhUl0XJwU%gA&cy-Lb984b{hY!V>dz$Mr z|Ip!?E-x3T08-PWJQ~^;oDvFvcaj#^KW(IN{R8%0OLvwlZ_;GqhMvH+J zphl1pLQ5d!r&FjFK`s=_u>*!KB-H^R0{ense|&sAXf7zfoRO9F8e^q!1^B48@BPrA z8v&&OF&7~5>j}4DSH2Meum1uN-LgHdAZxr=xIW7^77JB2k@d_(mH=QbvjTjF3Qnsv4H&9SXkyLbF#ABJw2aM@Bge>O-@b* z&0?@fpOTa`Uxg76hwJh%2PAA-S$^CYhAdrVbo5PbP!^48v)ECWI5sw>=J~RS z?)vptMkktYmrwd9{LTj5yZ3Rr<=IeyX8+ijmHT;gCk7}51yZ>x-7iHv;nhrs zK}7jYVWG3V{rcJ(ggRiYAMHT0{Ddo>S%$VVdNwLJGV-dyvuDp_oSk`2Z@?YKym)bg z6=ZMI&iM8H`P9j3XG_?kD9Q&&4J6pM9Q?m~ugoHDPliD(PfbW*I7&!Ku{P1?N}-N( z&2i<0hr6_N2+_uqwI&>hEi4TdmbxGjD_>Id+z=w4Q%|;XK%^|vsoMD7l>}D@BKOfG z3=#m;ObUWm4gM%O0fF=CL>*|mEkMwb$oO38)kNtG_{$)A9v;<&|LHa_Yh2;fp7Mz?*8l%yxEm_wxsU zITz*T_F=uv%lp*Y`W>nrwNXKR{lwtl&i0T#;Y)}N{#}6!U}uhy%jvfUaMRHtlXhP= zk$bT>O#+tkG|?7o5BD-z9lD8ASHgRuQ7Flv)j-0e>{eWgObU)`|JA8C(-UWn#$(9E ze7pcaWJrchpUQ$p4yZa6^TqwRl#3V`7^vwGU#2TgSh_B(&Z5bC2){Se@StU4ncF{p zguHhZJP~c%x^mDN!6Ndut4yu274r>U3H*)H>R9FdZKU+TKU~NhQ!fC+#5*w1! 
z?<&#b3#N3BC?d+v|EqsK8j3N!tsahj$)NTuY)sJ3u8K?l=Z{{&Rt>1-Po6v}KT{ID z$Jbm_^wJ(5Q3vv!-{8M`Nw3PI-(HFnvY%YtQQhpPqEp6wG>?nWQ41mUMMcF$`GBk~ zCN9qPKYRh!<;%1%uEKV(Z&ADUZUFu02@rgkn$UY^m-bf!<Pu7>CF;{X2c(FJ$H&^syBAw$<%}r6N2<7ncgV>kuplXR5xwzSN`(X62&gbGRHc%gf6F zG!Bah2z(tHQrB^V7XvN&Xa<^*#M8?OU!MilpPqQO5}jK2Pq%@);8g>r){$PS_f7&N zJAUyh8HP}oT;hM+5OO4X8YH3b%9wTNjd$MxHDkN>Y0kmtq5*Vws8@6R-DdrvCD4e% zp&-tFe1S>^pPul${eMRR35gFAoToS@-?*_=>9AgIEpFTLXe~x?sBy4*aAAh# zeK|g2xLmchxbKkuAuI~?B75rwvMbvl0Yn>K91Jj4>PUxVfqGAdB zE4&M1V`DNfaVKB*nE?|3V^10W3h>*VXV)$caRaI}gsB2RSb#~UJs^P%b~hjmK8V$G zxweAp_0gD9^v?D4+S*#!4Z&DjAPT>F<>w1OtL|aPHJKgG8R)u4PXKmIQC)m#5V5#2 zYJrAWFoSM7(u$HTm6VlvNV>XHgdklUcQ%pX`UNa3?HU(rh+g>kfKa|UGc(f`N*om% zi_#+>nwsK}@-D}y>rE+G2U$YCeuf{?5`}7@=(|u^SXq5pbV^H0qf>ZkWnuBQqy%yn zP@a$Uz@3FYGg)jP0XGb+q|F{2bC8a)FJCqZ)va~ofCScpHFM*m&X*!WjEp}ZWDKF8 z5TKGlL01U1-`N=eUzC^chIbA4sE6)s;m?PXkeh-_1OPMXHWL-C3O!gA{m`&d=4=9{t|yHgD3>3}Cnm zb_zU0ptgXKKmp}(&h+SlnpqEuBIwj#zY1`2DnWY_{xYzSbev7MzW>@x18qGz5i$&ds1ptN8Hx7pTk-c^`k(A zdy&`p{kut)5`(Js=TBW-Fz{q%dRb2W_$ zU3wRbjwnzlR0Fv38G6)v>JR^=PF8QoE_^C^bR#qlkq=qe13+MwD_0QJl&El%m?MBb z@IN(#F#Ud>hY;xd)79m#rI2bXbHPW9Uj-M(@ATVdu$pgCxx}KAU>zIb`2|)WbBHU> zVL&vYeL@p0AUz{~h_a0q828WOW@TriREEKD^j9KwOIWxzXg}oG2jAoMoXyzxSN$&K z?7Yq9v;Is$?3kOL{`IEz)141BA*gHX&38`O4hE0|HUtdq0Y`LFMls0UADH|8@;RW6 z41@FmKn4aH7>_s+^JqcnxCK%)kH4Bjr-1^`?kMcZqFVymW2 z@Pk0}CSc`N zQ_iX(KXQ2f?*-%kbyfp_$M0}QnGK*=&|OnpC(PUG0n`H6A8{ix0_FuFl+S9T_JZkn z)kc>@KsiGAsj;_S#lr(<)(*rz%eD>5e_3zbs^6fZfJ8;!=hQYZX72I+hLapT5`@-z z6ONd%iHQ$P!N5!uO5rPSx%9iF58Dyy3dCywI}>2Sz$j#L-8?)%_m|S?`O_zhfSjxf;{eR|27f$hh?dSO=A8WU zrOaa+BJ(TXzh5ID$QrO{QN=ia9`6O1bjMyI`t2+XV1QD7`E@S9m<8I&hu=|NHl245 z>IL)wm`G;e4q5$hi;9fg3;Uf)BT#Q7dKMmj`O+n3CZ-uC?#cbwX7l0XJ_+6t&+i!l z6$bB#4$^kPLhac|cyB)jiskO|*CwJ-py^;~vA($}EL;n+Gzh0bP~buJA;7#ND=Q1V z!@`0Q7ZSSjv$Jbm-jH;Ir~rsLzd9a3@fotVU{XvRoQTlS+0#%UIzd4wiMF#7^2#BA zYVXo~G5=8#@MpZYx>|6v%Nukrn3~z#+QO&2Y}C214wIn&h_Xrhu!Ya~dp+iG0f(#J z`w+Bil&7F_T1k?V@LhI0cuzB6j=NgB8U8K}ydZG# 
z`FMFZd=LI2XtxApNi%>9(#1X0+q<^jMtL3sLu7w8%5!Hqx3F*^ziVL~?3aIpR{mdv zY>(qfi)52Tt^vAf(=bRU&}RP1oVXJF0j$m%VGDPdHm>@fde*g!S5A1yyq*PI(l`TM zwRITCEA&S$huM$74HmQTLrXxNE`l=%z{D6FtN?+>Yz}}dil5ba?)tM}2HYNdt;0%q z_aURvH@5BKTEDe*^I@_E|6&j=;5$Rqm7B|nMfCn>CEMBI?@D$PH4onJ*E4a0=%)=n zE^!y*UtiFaEHIcw$Z$f%UGXgh(HJuFM|TU>E;0(UvH>xk1JBqX6smMn^yAaE16vpd zDeVzuDpS(dK5WKQNEY!_8!M}+@k9~ocOYhSaVZ}a7_`2B>U47CuFMdSfG(L-V?QGa zCWL*tydp{G=d?>UpWxSkLUjJ>CHjYfTk1X+qfV>vl9`qGpAnZN%+G(vM(S*d z3=guaK#qacff}%ah7I8rPxvW&%B0hYVH8F3Iat`(BJLZ;%F0oIT$FOlDl6xA9h{uV zAk7CtFz|2)4GM(I$jPD963CPpErZAb{_uC(RqoJ!GsjpmPTvdJ>EjS*b#^YFVErA2 z0o#TL&!EU;B{^G097qi`D@}epwKqD%BqWRzjrL>DZ%9nI&0w2Y!8morKQYSEaVj}6 z@#42{_m!2YFSia34$Av;sQp#tcCdFhS!*~PwQX1duYGaR{o8vqA5RE&!(khs;i-xj zo!jg}NknaJIK-T@fI&TYNGaINFe^teOi9w;W88y$&s}DE!Yv;eO z2@!8aj(@RiHwJAU9X1+Mw?7Ou0{+_t$ErknDdO?pK@GW_7hRromb8=aS`9a?x zY%NaLXHgFP1V-!nAgWwQF5~%e&f((bMXDc?`~DQkXdIXTPQ>Y|0Cqz-{k8<^Pa!#$ zFrzU+V6l#b|HFJx2e4s6#EdCJOaMe}ZW8S8S8>=FscNgXe&Yj!f8}qgMQZcc)^?=* zBNG1gT?I;Im_sNBx9q&{SG8q(pfyo-~V)G7B3rxcM!$+ z8yC5LeWAwLeSi0NVIdbPqkiEL16pxyh=4$tuE`4>#*tLvU>3hY;x0PzOK>M!lBiNy zF5zc$ZXcQdUpIjG{3mqy%ji;XHJ~C0;cTr=>Zje`J2*g1l7iIA0CUw)Pe(_jZEW83 z>6>t#s`NDdp>RLXtk4^9exQqjGR{Ty2Nn?Vf<_HfdoQT(cBhIpm~@eV2;%-bdDCak zga0o{dxISz7#&s)iCUX)kbr=)Ld;`*z6IrU3n#l(`FAwYl@nLvN9en zu?bED#X0e(jf@zGGKiEog9Hg`?@=lsXpjz&U&hA3>jmtcrj!fj2mrAF-Eaz#A1f<` zYWWcLZnibIu+XaWDAy`SSZ>%j0RKQ5Pr@+=43wQG=yUWgB0>R7Uets&q`g3#66NQQ4+5%nDW7NL zjX}AI1$b|u?ZXiPPR`D{GakA&8f*s7m?VrA6&QzshxbI#?Elv7C#s$E zug^^_e!1<003~ z)%tJoQW2+xE2r`Q3Yhj>B{KS-1E!F{2bmJ^3S0w!cs}3(h`X=j^#KCW#t!CC)6`sq zDR~Iz$R+VE2BfvA)C3MB=B54euu+4)zO=t=Y^_KbcOQW$=E1=MpBVWHf;d*Arwhc`Z)r`%c@jpKk3Cr&OdY}Xs< zfKo3;?DyCP+`gel1?>+rU>c$(S9d;r{MakQ0D}e(EyVEO-?#ytT$e-0383kf+;ZAvf^+ZXE5X}?bDbrek zME{(_J`n!x@W#gHL`ER2f*pIjUF2H_hv%FyD5tJxkMI->b}|OY(&9O!fIG@1L*hXv z3IRXx8HtPpQ^LilGT;V)l*8tSR_hIkHWRAf_Yh}_-V0O-!e{0u&#rlJfximto`Z^N z8ki^afJyJ&@zbN8QzR03r4MW-n0bPs2iQ2pppig?Hz^lp*sUPU1*uF32ZJ{g5fSm9 
z^7)TYI{hC~{PI{;f20S@>YJG@K-e!SF%c9A;1n=5Nb}T0gP_fxt#0cxXvM(Tz(RDK zZoZht?o0kUKGy|1pGWmcR8&7)9ZaP`>vV+*Gd!$OZ`AdTY3d&m3U@zFc=)Nl=Pm{Q z!|hYFqs?WiIE-s{2L9qoDy$h75?U|4nkM{&MUwVvQ60N5y76X*NvCW-ZAxqHo!lt* z8!Wb(i3z;fW>3E9%5d0oAzkor^C_a1;8Syz`exGKJ<$9$Fw-($~$jRnUYxWhF8kM;wr7E z82l6&SQZ6peBOWs1liz>8af&p$k>+7yx4)G3}CLK8m4t*sN=e?+BMDxqMZz1#64M% zF*U@u?BBjt}pJVomNAr<>cH*|`g&x{zNB!U-{@A>K2g&6u#ioW8KzIP9Jf!`Kr zE{I}0_(7TXRY5MmqQP*|dkvXjWB*8WK^_$t19rG2g_c5|2OvQ6RVG4Q6)xe7j^rKx zj&?yjrV>#&BBG{Q!vlH9b31=hGz~esA2Xz7}vtGnj6UbB(G~Jc)7Ty#4hn|4$?GsxOTrGkn~UnYuHw0!M2o zTY^(u>i_PCSzqLB_O{VcPsrXwF4pXA)}u5{0B(RptgH&O3L&WjYBb1fAXPvom#l;F z2W^tbsWb=@-?{?n4oGd9RoDZR2s`&+L}2MAEZ6opE_!BWl(Gt^Za_FbR|WhD$d>{d^c`!`1dQwrxOl@iU->)E6GaL^k(p>F5&^`hC4G3eQ+GL&Q&{iM#o42mO zj(}|C>0PzE_XPT`RaEeKS3}Ckmj;f@yFyGXg=CD$d;R*Ov<$$N-FkQ&K-I6StquCv)3XW}Cns+n#T?N2^8QATm$&yZXc|Eu2L^Z; z7|daK7RHP*10DxK6WsQwa}pNjxVrDqRHg|a^1*}axX=}J8+^v4Ph%;j-5bjKjm-*e zU_eU-2_ACh)&qBUfx~2SE|bu$J+Kjfx&M+W(Xp{4qH!>wVCLc$=#C1UW__JrT$Dn7 zre;!q!|feAY<_!@|Lvt=j{@ruT!p7{^>g6%-a^8$9ejclFt-gf_DUZl36YB2hf+Ll zYY(U#?Cndq8(k6|Sag-*v-HLSOiNI`M{<^IAGV*Z{q>@0GcrZ7IRMctk5y9PtlK<+}d%I$o7vtiJ z3W8HDQ0M!3Urn41ihS(*eZDh60ttPx0YXJn-{6}Mx0miBLCts({{KSQJ*0Zn&^;DDJ9raXb z<&$5liHq9@sKaBBTX2k))=LCy zI6VWuY5lu*8fo`oglA}QkfdA~=Gk$vi8+yi6nEqB;mA8oTnJb9493~GNcHVRr4iJS z3pm`wJwDp(38XHn-N3}H+ZnM$MyICQI(b1%OH!n{wH5QR-ApUCf`S4VvM`p@Kh>_P z4ak-f{u%~)_J&fvDRwUOqXFH|EXB6_XD2)RAuMNO;c0IJH~Dvio3 zzmc=*+1lQwq3TbaDg-sdB$WUcw*-bwKmzBsWe1lAWIISE#8NObFhFs*q}?=M0>M&W z-%}X$f;BO&lmvG^^yLejnNkE=h$IeF3m7hhEEWiKq)#WW@|*hfXUloi`M~5KOwdZQ zfWZ3v#fz!=c~i^Y6+X`$D=NdBMwSdCx+f`+0lAF?)in?% zL6lb%b#%s{EV2}`@q8tPCJt#g$W}v=-@Fv+B5ogmJAQQrq@po#Y<#>~?qxvC*HPeD z^VD{vUFqZUMVk3e2;@Bv;wK#AH!`lHQFsa*5Nb%NM5Iy7ZL7`p-jMNY`1n^Ku*i5g z4+|lf9*zvm>4y_qz%FsAg+AZjo{M^}3a1Mh5et)(BQOUqg#;HayD7>t=JxVJniJXK z`7z^yeWT{DH|Et*X%z_bg3DH}Weul;!jL3P7H9R;K@fOnX&-VhVH7`z7=}L4w7^*a zK#iK!QZt{WBEJBXV8_x(0@n{hbl0{~<3)3E`V-}r>QK3QmJ+=`UTK6K3V!+`)i8*@ z)KpYR$JAS~8{$Pn5SYd51I%hrQ34*QWQMs#_wpQ61jp|K7S7`0h*N%+J7Z<20)WiF 
zg>&f=G$~=0IYx#NH_kG{N;sGf76w4D20Us*{B&?;~rq+(}J4;@v; zZGHTfTTjk`8VWmB5g&|4$fg3mmy0WU$jl(htET2N8J%g7iK1bsu6as?j2XPN*_$eX}|5@rcA)KOO61V<2cilfy$FYd+1&!eMJ zI$*|x)&En>)&4bEhS4ZfGzBUY4G?4vHVtf9%8Qf02jtyYiZF7(5D)^a%2#p03K$k>{9{MjJ}%OqAwd_TAn-y^DCnTNnE!UeS&-ME~AZT zG`CtDQeN0R5HjF%CKdPn@1gCV;yV*Jr!2af`%|%*8xN1%W%T%$)9&7d)`rI6g3QbX zz{%LMWq2}>5~!U>rt>p=2;wNuy>fEc^q>Khr;@iP!U6Vqc-34?G%bY}Ym9Wz>q9dFouwIv!6Fqt;jeFCE(q)pAY2Kjt zT7dTzQ*#AVx)%X~qwf4KTiSgt-yQM~j)>?^sD-_Xog7b;_V^|rj�HMD1<(>8^g zLCeIZ7PE^^DFI}4g^SIxQ_W@}Lc-{^xn{RP1$8zT7L9#KlHN68tleD)Fm2s1a59k? zfv7(qp|Vmciy6b04AEw8qC=cOCB>IaZR^OLfut}b6NnEo`0U$}KCFydk_hO=`W zDuTD=-oBXBlI-+q9^rdCc%#;tZ<6nD_Zj04-K~;jZWCuSfjx7qP1C+7EBig+tzO{I zs0lyGt`Y{12iea^dW*(b-ejA=!%KiHjHd{?f z2$nP1!NJ%Ak!vaY+L6-`#aBgLg%yVZ+ss*JZDLceQa%qBY@o)Qsn?dYx;#dkq{#58 z3&wZA-h+GbKwW*FvMIqEo?V!ek&+POtjwO=DK#lrIF%(7k{+Big9h+eNMkTgTF1G| zAvi-;U{P@~cFZf3)A#V!oH&j5$Ho$>>K{x@aKggkdDp}`Gllrq#?Wx#Ny@3?)KXQs z5O@n^7o;AP=P1HW%Ov5m`l_r^TX|+@?X5sTuWl&pLTF=5xLH^_0 zyNeOO0mBnaq8}vOV8sEi>CPyBtf;JnJLWHB5Bx?33rg0fDXT=PqSH)$)U;vWsAO-k zC~W@XrAv46{gLD8|NWb`pNjWYl*V-x#90_7$L?f}SLAZ1BjVgRi~fu+*t&q<9#dm< z;`abk>27kmd$J1~fd-!T1DKX9EG)F!1He^#6X=8Yj8?kW2K-lPt=U2So2*i|7#0M~c}=jBQD>;z(29TEn9QNfrKw01mN4 zTL=rN?D4fv;KI|(PLKKSKSAsj$%FA2?jriJk z^qQ=syK=}%oRV+<@)=|3^w6*XV(7qQQQ+p}Qo2LG?4ksfyKi27)bID9XDwWxR;g-x z&j)1y>;)VF_6K{S*e(gkR5rNnECGL2wD|aI>hG*qJW3u_?bzl8M;0 zkSRtm=;gfgkwSMKJB{(7mRW3V0BT|nie)Mcw@5$e;G6rGfu*ag`~n3HKneM(Kn>_X zkaz$wML-DzF$CL?6Yy*%E*&0w__d)%G@U?U#Ff^_!7M%;!MooPwU#%`S5ihJ<3M*QJyKVT-A3dC&*7uf^XdOI@ ze4PovK7J4zv*SCBFNg$+lhXujlbE<-zZ-Nh@l?I4%dU>kP8`40Rr{>7V2#kHEJRNN zH7Rq7=QVCOY!zG@npQ9G$q)#P?Ob{HPZUwR^RbnI2NlE(xZOD(NO8h86*Wkry7@Jp zI!h#q*}vn3EysXJVB4hVEsJ5|z#<_-kYnS%>j#ncphA(>4Wc_R4$U3Z$23GHiQ5k2 zkRe>C!8`@;g&$ORzhcz}oT6$7e$KJWmdv@*-|q&Xgwex0eEfoDE(WHD8bHgbZAJ(P z3e;1a4DxxSSvBqC-9h`#^P|4$JL_q5HGMxk6O3mNXP?s8F9~6FbfI-n zGL(BxGb5jernCuKHcD$7^Hc$so~PY;w1GM2mP!E0om7w;9?3yu8T|Myo=%?1y%Ih(RyV=wod|AmK>NA8JtC5LS@ lxu1VS5)o?Vf9EA;-2HnpXR1clQ}Ktd_<98q7d<17{0m{`pfLad literal 24560 
zcmdSBcT|+w7AIPmKoPJ+0m&IeNs=Thl%RlQ$r%MC=bThRAwdNJ0RhP(S#nlD5Xq9W zAUR8tTyK~5?R#%`zghF9>xSyRBk{G4RxZ}# zgtWAupD?E<=RD4(i(z7JiH#y+%{CLWBzjuWxbQe;#;77|xj&SB*k)dRxVQ*WJaXb% zSDhz^F*`oaO{C%lH*az>gO%MwJD1f}eucAmBQM-)k18ByM;yd{zOHW#bXaZuOlEun zu7?&`-h%_LzIP=#u$QSU@xS=;eo^sZj9lMcyBj~b_xLbmGQ&MowY$FMjHfAfcL`f_ z^*)4`4;AP%)X98wf3+*ys5lh6;-T53vtY=-MHo7`b)(QWcaod{F5&j+a=iboFPYK~ z$QW)84vr3yjn?Uw=wbHD=bjc96;*At3lp=*t7i4umye8&ieA0C^n^&PbKcRt_~eNb zug=_YWuv9N!5p!**q>KkK4><NOJs*>e=l+hU2KsAsFJI-|$J?tJC8M$!AKPzjoCsRd5S((gu(ugDssVO3` zxqaorQ##3jY|X;Pd}DUYA=|lOf?g?R$&ABg8=r{g`?dR{tDD=;C8BSI*iAMBp?Gb-eK?6vlC4#2-u_z9yf_wZ()y}vXoxM> zssW8mmpGq5&TVvJoCUMjUwc+ouhO~AEMxv&yE=VDGlx!@L5*M|S(9CyfX&PzrKr2( z{rgkh3gXL4OL8b`YU*+CeXB~{Ar2KuXK~4gZXFiIL*)*$=at55yz1-fyf@;;o2(yST+24hdB4&L>VSEv;7GCbHpT3sNM( zrE60MyBl@w0RaK8UXjtSudlz$&E+3Iy}S^n)3BbPUuZ&rJcQ-P-s=3qH*da?*6y4{ zdaXCJz>0O;8j7co3N9`wIr{O;XKMVwQAk%hZpD$q=rH2uJ9BMq?ftDqQ&ZFafdSSf z5zieiq(6$ow&l`*}{KMDTAb zb@AdwYisLkF-w)JHN>m4EW0l+t2AITg82COPM$o8>=ItBGG?p8b{Ax7JXOg_m&DfgoYe59FU8NXV0tcZ+0sXcg=Ta_={dq z%cf24%~B`s8Z0#3+NQZ~dm}F(h?wQe{r6BLV2wI@d+%lSk_YtPE5DsbBXgyP$<*9C zTtlpkM9r+(;!}dC2iojw>K%!pu7QEM@844tS-lT_NUKQyxG8qH*=<4+Nz++we(`5+ zMP()Ni;0|5GkpB~|(r zTwPUFVmYX+sA%x6RUEV10lNcgm-uFI@rAv!MwY_-|>n&rJ`c5g|l|4#*F(V z5j{Rj5Yv<4q%&QF3vo{}e!;t+TGOXL;Qg-e$Pt?v}1$wr^=`ueFr z=|f9P)lAi3Qce=m{o(N=DU^DiUa_sNA9rI%ex^!Btc-!Aq-4BU1khB zp3#UWps(so7TSiiMi-^lrKdBO>sTB5_gq}0Z(U${;@6uX?vsF?X^p8Dzk2yHUGl)d zKwqC)v6kq=uZ-TcQYi(6g&uQBft!SwMSYA6ruyou@Nfx~Y%~Yo*}Sa2TN*a77aTFB zLo190p0@1SopnF^va_$2>#WSq!ZCB5ZKsl!mL?)1QsI2%=H_-lbqwy3yTVpjIh!5F z*S@}O_?YX9x|2>?^vZ0J407LEE|8M$@9%G88%Z*MPmV|#+AEXo~2$JI#fwW(CD;|-szQA1Z(H&M(>M@Q!@X=^1H z>HMhVL;n?w+;BW&6j84KMXe(rG0em0etn!(p%s%2GNZ8-``T0JW{c-nVCsxu4mC?;j zM{i8Lu*>~xgBE$!jY8p2T%JCCN=ZdEIy&0Y(n5gWGQAFKp}|SbW0M=-O;wIpm_7U8 z+F-t6{ncmPXR8_O!^;g#Is*wPm?N66UcRIIGMH;I;p|tn7X7Fiw+&`v%O8`U?8~dt z(*wA-+l5yfE;!wUYKlRAiZh zrk_|C`bSl%t-X8qPDDgxZ+rPlbPKc|r5~bOMhCOjQel*rYg)8NbU0sOTgcp@w6N!n 
z?NOWM=3G~)_2~KNB}LMrqN3NL9^21+4!+kDiOZn~2?@u2Fx5~d+ioo^E<&}wGD3AY z=CO+G0^FmC>ws+3FfyV?vTMF?$hU=CayFWvwW{1XEGBwrKRd=WefA#Py}-U(&{Q{= z*t7Fz`)|3b313Rh+1~2cXGu;?Weof7>E#87_rq#OTN@J`1C2(F`}Y}<)6>&IL{s8J zB^Lc0h8^NZKZvfC7>pH{<1JQW@AZZpd(7{9?QDkL?Px#OB{1%3T=b!d#zzjdy}fXR#$sTNE|~R?vGHtgMWf zeH(%A;r-?1Wz^=NY2u}vsl%V+7-(gCKUc@&Uy$1!9k|8IlTuP%U7By$oO`V&cxh$a z?=F(jdz1pRvrIu(;Qc_a!-KmJl+K+%;yfBA9z4o_zwH2Hl z9kX+CdiN=HqU#mK&It+JX+3N0(kkSwM&af2(A@lKNQkAG86H0V{^6?6c+i)x9} z-j&<7t(4ZAG#A9UFZiS+XNDU+6IsVYLf2Dp-?telyNOcCkiWQnmV#d{N%3W5#Uxbn3&XR z>1c_SHp{8c&ojwaGEDG25-_ekok^0r!$hBU@(yNga7iZTW1NsjvNgMv3~nUyM6vFTH*3F0n!&_Y80G&s0B>*?wh!&|XV z9Tj+aKMB$0g$?;!L^#Mlltr9Vlc5of7ET~I~;W^lc>%4j6~04E>cRxoFc z>+W>)ZldBmFAGbC%eYr`?WopOZtjAT5@*cOA%Mt7j~+2dg>;%1lXB{1zkTbvzg(&i zFVq{M-~2$Z#p5WabgaT_-`U*!U2QD}TC5l_8$cq7!Y*Oo%K+v&?d{J2Ru1?S8yj15 zxIL_0YQ4L&!^pxCj9VnxU2j%7DK*^N!Ymp6O2q9N=H!cmJDf_W1{DHx8f*_p&oN@e0+bgUo}+o9#d}%GU&940NsSj?nXzwqJ;WKoyRLr1|Q(x58=)62wr$Ki_qmo=KdYCH6zF=&Y-G6Cxm=(Y?_ilfPI-XO#sP9hNQ&x1AmH)cD= zfn2x83j+;%Cbnm$s(PWxP8k<|-buX1@Cc=h(MXnKZt3Xo*_jC34FrI9xc`%rjg6ew z?71qPQv?VehV=n=gs0c)Uwx>TLo^$nKafjj`J0m;x%u|;%gX{lDuScTISqssP- zQczi4Jvt#FSCREyR@TPGhJWEX8{#g94$+J*voI=QDHJ{_M~@lr!-o%1jsR}qwBzb% zYGx|3PJaJxVQzly+BNjsqN0~JSJ>FzB_%z|tIf`~TJ0LqJq4*e`DFCkqA@hzx)yoG5p4bb??llKdgPek5cxVXwHt*1LtuZ_y+(#zS z@EhXdwSeFzK1@yBZ^mph5+Slrm>j!J7@_EE!K?Pjhto8KPe7olv5`Zo=(g)SU`X}# zPcSdrfUU2LRWHuWI0N#8H=v0 z21EnU=VS&Tr=Z(q5~^BSWz{Ev-!~T`uG0(CXJeu_qrvG1s<}302YnsqfrJu2}vUcvvILRhdbj02j z9S)MLVBm*4>h(N$fS!~zT3ydAZPXMH-Ned@bu*n*5D_i{!F946&2I1F;`2RJV8|=# zd)b#SGO~o+GM?8#o4)+{meA_jL_H1;jA`dA9Vhs_o-ej9C#j=-FQK6NoN-b!gnw!fZDyo~LsH(0G7bM8a$|@l6 z6J!|JP{AbZ=NA%rOv9v7nm>Q0O|EoVz1DdC{Q2aR6j@nWIHQlBJb@iwVm-PrJ-xL% zo2ag)mLTHxwWH%1dL3%cnxEAV`Fi(|MrJ}i&+Eurw{G$A@}{PxA(b>WW4hG*mSOq) zC6F{U`YI~*iSiF0W|pOPsfAEK;FUjO4g2ngYEZRtas2GJu{`pqv&G} zV`IB;=F?cU2NbL8VOe>3{qv3>!^q3Ze#gIX;R0T*tt`s4BjG`a9i=qpkXzfw2eZCz zUvtC6Bpr%wVxqpXa(E}o-u^vPIoeQE%9t28FFN?Opo0`;+^!C=x0$IaUVi?P;^GGn z9?Vl2y}OtqeJM0T+>oMOH5NO|xpLm;!xA-s~nb 
zOOy6b-Q6mj{5(K&3g5qHmVbR6)M_|#+V<$ol-35f`LAi6ot;+0CB@obq12_n7IOMh zw}wF6kWprlR7)a0>>L>Q%;{uSJOtYh#SuMT>C!Pgyub6~yXPa2^`ZCZzD?A}N7_`D zm-qJdtqa4t+S}WoBBFl%`t?;_ySYxfz10f5CcBUV9@F;cAD{aFv1+9p$)wtq{bs=C zJ_VCrG8T#tyQ)pf?f$as`fdt2Ik~-q!~W*{2Jo_&1*?tYC=^@d7h9(j4%Du%PSXm? zN#OL|61tPo?H?FOBkY`=miDQ=T|QG4_Mgkg$0sTHEkAd3)IxEDZV5_+b$S3AjRxg1 zIx-T#-YqF9W@cuNe<)xID0z8#)XNtylDf>CS4JM*fm^(|8M7`>f987F_v`IrV|u_* zuTTPgUZ%F*WaY6F5)uj{<2E|n+v4WtHj!Bwtt>4m=~L|{4>bYMsX>K<$ans=ln#ga zHsdunpPXu*w)gSzIqG|!o}M0gMO{c(Sor#N6bhxPrgmnFA~(3Fudh8(eEj3bn>lJA zZx-d|=H}$E)6p4z`|!xV{NRN&wV?A78O=cY-8fid5Kp#O#}!UpAqm(b3%%P~=k#K4 zZ_n2k5h;`A@)>Gok9l!rlrV6^G4=hJji&M8V1(gCg9jO+9M541VZED z6n*DD9THa7a0S>((^ZC|aUKk6_d*7WV zX2v7JtVh>={-t_v5Xs8O>?j<+BwF(3&4;;FOiUA5@YAREwzhLR9}tL9oN_$h`x26p zocc8*Jv|NLBA^-TaERMYp1`ct`QwB7Lq|s^^~}l6&TTqU9X;0=O0lxCLQr}aNU%x)kLmmKkn!={) zCW^A@o|>B0fRU^$zKv=HZc4K$91BmIQ96Geg0DX6NciFW|mO!Mow6I`E z252XjAQHnF^ZGUAxpN@U%uY-cxNVr6*G-uqm9MI*+6fONqM4E1+udCrER@c>Rb3Yu z8L6bC6c!c+n-tsuGi&RaEVwc!Cnpf*vUfD<$=N3`YjFps_Y3@D$V zIcKI*kduc|ivm3VK07Oo^4aT7G%_-ZV%L^J+0^XLfTZ)uzpWwPeal>0x(U1)VkM9h zfWqghvMD5r9rmawcpdy`fwi-auU>tY@qk&?74jilDGJczBRr~t;R)rX5P=*BYK zodT5hXWANVsxTq{udk9KY^$SSIf$I`*sJ+jxisMSEezJFYfW&F2oIW zA)liINg~k;u;kR#98OmeYX%>VA(13sEdM9!=pV&3#2^Hqo#6HBx2e!+XU?2)|ESQp z6;_Dw4Ff7~fVy+1IY}~5Mn*>NVwv4{C~Y1tyrW)x+L8`GzvG;_h!8pfr4E6bWymsKt$T&1U7_;XvA7qH#ath3r(Mu zoo@skQwJ)@ty?Gclcr5&P=6_fo%=Xd|Bd05^z^J}SpP>{pLEX@q=JNm1SqH-;uudA zm54I-dg^Zc-f)hSu=2+nJBdo_>k2?TAeA8>Ey@1?!3p_9vC+=X=YM1LEY>T-HnqCY zAOZuWu@JfBKTvbTix)RhAa_Awlu26$j|?jI{QUeG|Ky5_Sn;RduhSt7LqMW4`rwNL zMHV!a{3*GADxXzlYcp+e;6npm19(G4O;9aQISE1ak&M}oeSShG_01dK@zvE;eLcN)8&eY#cMlJqM$jY0 z4%X_wefw5m+yv~rvZh8ak3UB(E+V4)mo*uM?U2yjpxB)8H(DL5hAu(b)Z5>`=qPmb z)@iX12{$9bmL4(&^0#lVZEayA5#WO6cF(uIe(2>@t;0b|s30r5v9)!Plv>2q76^Q; zuA<`e@NlCUR{A|SY-E5~=Il7C3?(m7H!)(aBbv|LUv&*gMk)#(0EvM<;9weDLXhBzi?k(cgD z{~LE9z$`?+1u;YNK&9(GPOy$3Pr#8+kqX6@04OI=G(-A8R4sP>F$rk} 
zNKRy=rmhL|k%tx*Zh+d8_3j;56lJy(5)(C{Jv}|TmCoRC!>xR}c=^hemiG1hL|!Bzad&rritp;~PC`n$F%0Gap!I?&R-Rn_+IE|%*;#yn$7^Rj ziq@$rDt*e#+N+in-HjT;tkb*f%wbbPiytR%HiBZp&CFJY5DR6d{BA@24Z63 zAUnl6*}9=7`d z7WzP`)u{FM0y)LOfv3q1(1^oK>*>>{;Q$G{a9qCJJQw)Hk2prq!Nk&X38;6jZe
4av+~hTpR<$0d~eTPmuGoYYf03-@cs#@i4Dy{mbK%_^m%*T~}2(xxF$9 zSSYLisZ`2Mp<@QUj2giOVsp5|h7%f#x`YEo4A+3q21Mo<)hHL2oMlbc&u z%yuNMe*5-DEi0NsS4&-;#X7g5LWr9i7+R8m%~(&C8#EmA;-S$|FVKSw4GjSxg%p4s zL_p4anHg**PpI zsLPCZvJSA|74=+4STI{#U|_JNcpWZi+J-W7b9X`Q>FWC7Ppa1mzgft gfz>YQ0d zmn4`VlXhM|`=h?-FL{pUDsvr?!0>qsY76g+4ejl@S!|Y8R`=Kd2P z;=dED1NJTK8Z$Fi&7X1zibr=?Kk$(wEa;+JD!Gt!^jUXNsgHrH^kEZtw{HNs(yytS z9lixC>7RcC<>!A5nqdi_l#iuCn(}FZV#Mu}oYHV`kx+>yn%n_5mzS3-Hyt})4;O}S zBl^dWK57h;X9xTC*0sDq{0v6;T1|-GWpKKHSi@kYcM5teXVEx4MUODJ@*IuOV8MPz zjQx%szWc>vurvnoi^mvDuRWrt_sstiWsq$Z3gUBXE3X{t4;?Ez7;(eKgco>E*5lL= zyyi^yj~d5`ek-USD*icC=(`DZ*|aTodTA+Jl?`$$UQk;0Gjp(1g{W?eE5&fc)H0+1 zv;|z;zYQ4UIigIi#SekFL} z9CA*Iv{CI(%948;iO4=~J;_wp)>_hVrfIKyHleVz6w@SrM7M-Z7=UizwK5{ua^>=` z`}e9io;(lVc(LwXwk@-7&c|Q;?1qe$l@*|P&0_ON*ID?R5#ixGApLxStA)6Tx_bY` zD{8y~0>m%mm?0PTAELOwpWivKvPlmQgwTd}eiTkF|E1G|g=2RcAo;-f(Y63RNU7m1 z)4&N(sonEBy1L!n-9fB?J%0=J_4URZGi{S!*q?=RZ_mWp5K@b1fYy$Si$!Ygq>c^u zE-h2m9QE{o5Y`X;Vyu4vapRfc2eQ47&KkoSq!%wDUr42J-!lNf(Vc$x3Pcp!+6c~^ z0d3zj1gWH|stS=D=r2eB5YtfCGML9os_p}7addP93Ie6f*49=>=k@gv;A*X4PjN%9 zO3dtCfWQ)WV-5KFHD3Gt{QPQaYHSyt?(FP9cLep8o{gRTB3r3Q2^gdnps#|z59%xM z9BO{cS7WY0L;^#Co{u2A5-;Xek(BgNu0wkm4vpV0H9p|9p=`EQHt2eHutZZTF()rC zx@8(`(bj{|1d-21GGx4HyuqjBqIoR8q zKYXZJ>AW03K=D%Mfs@k~ND*Lpwv%<67sDb${t|98#L(OcG^vMy*Z|DB%Im(fX2=6w zw0#1r$`uW7c#1+X8{N>?!)3j21~hmmoNLuMB~P-Krf*AtaYV}mMgc6u5%8=;Qlg4+>N<54OMtD z9OPvuN5?A1`L6&}fMkQ*G5{DHT2?zOKl*P{n)jZ20W>UN6*8{QP$o>m___swKooHK z0+Y_v_lr@>s&@ThWM(!8MoKAY2oRoX5kB|EP$@S;TD75a&moG;pRNb^Zj!W3u+>E52nVyaj?30`QJWcfVN}NOz z77NX%MMEsF3H&WSi@w`b$QueC&yS37+|$G%RR=W$82c{Z<}>*CE-NFu;+PHbIDRX7 zdN845cuX4`8^M@b1lttkSmyYK(wCh z63;8nLMuMpgDmPA_~ScpTvNkNzA?R1!T*pQi=Bm<_Mof6F?0YAg7pIMTCCUapYf%o z1>Ph}ITP3vaH^e3ogh|#cAOt<`uqDKEBRKH&BRtAK<`fDf5~o~FChvNFYZ$V2|}$L zy$Z*N;PmP}2b2UI4@=s~jA4I+#sd}@aROvB;93r`_Dn3kHlxjd{+`DCm*Dg=o-Ob zy|8N)u?h=U=Hx76QAYLl5YX#1!gm?K@keUBv=>+r?l0r9+4+Meh>!L4^zb?@^Z>Pl zpMz&LaKO$`9WIdsX>ny~oX(U#+vlkSzK#P_i+{$B8AdUyD 
zYyQ;HehIIGnf=04`pU&4@EZ*AS&-Zlw=Gv);<7UVIZT#r_JxBzR@ zv(n1phcZ-9PymYgl`FwmHwnznzd1e!N^l?iY?y>&9W zV+6bR_T0;LI4BdMqGlEsuW&#;`38;8yYkH&S|+AXtxBj(h#Yb??r>fwwV9or-BkCJnH6jg)&R%|v5EznZVVaZT&v@$FRXbm1Z3&1$CwZh!ChRVE)^Qf{g}U3v^&| z0qfT>L~Q?3_au&hq9kQxWR&@4WSH97ao5I2N0&lMO*ZV>_r0yf11WFs8e0auj{K(0 zFso9K`LS@Ds-9E*{{8!Ays_AJn>e`|&8u?b91>aJzO?{}f#l?55C|I@8i0Jawzfiu ziIEo&j_%3awW4Iw z(AY?F@uF_IJ@^EAn)wEgcBzR2&EI{%nwb_B?3Ppr#_sRx{@F+Xo{rYpTh@2~uVI8( z2)S|cZ0W8qkaCDiQ&CaryH1>hN-T!|OaC&9eoFDxxXov)kCMWX?raf4Ttm)}#=3-w zs8eGjix#@A?ma^b3uxd=?}jSWZr=O|B*Wd_EZdD)!@;uCpR4mRheynD-(03>NRBR7 zDX7Jv5x$}Ihx76a?a-!6P&t{)3Ve#snf338(+{#}=W3?a9XrF*)HO z_(63 zMR|H{*cD+=d5wqXZcgyGyT{%%eFFqBkPE*D0rN;1CMQ}1KLM=B055od;n*p=62f^r z-y-aXqNT!le*V`-hM#@C2|rt-xh;)gd_*KbJRv#uYn6F;CIIV!nDtTQ(Mmux1K7F< zgf}xUFE0;|3ZM{6yzp!Ej2XXvxAnv?Z4i4m{s*w)|NGB$jiEHcfKI5u*jz&a#2#R# z%;LM@ALzu00i)v)huSFTj1 z%&%mXH*G#varFhW5B^SbGstHOgg^lB@Xm3Dg5L<*nVW}4R__AHNN4>26Q5koot=iZ zG2u_Tvm6j`DpJO2cl`nV!oI#tnsOPs^KW61E4gfpjDBd4z2;`4sr{Cn@z*F|=mKm! zaLD+^Df9l&!TFtPu%4gGAD*AiY2&NFK3G}b!X#NJf@s@(X~lua(^iE;`{f?Ns0|kP5nJRP?gUq z0p0cX7NhwJ>cH~MOdrUXWWtb1Wq2l1XZN}+3C31n-0R5jAGk4FlDx`)!J$@xpUb7= z9FX*{ub1=`3gJ$f)&8f5+iuGBFyix&bB8^D>MYqg8XBMPjRY!(L&a_^_4)>v6CMH5q3aIbz zEZR;TNYQn|`r6uY(b4^Imzv4qfUbd2OJ{8cCJ}T65Eu`z%^2XPw=fS+H@KxbWm)zw z9zQgZXQS#txB4x&gPfFSVOqE9gYl?ln(f!~xN9~CJgyEh zNn*xJFdhTu9O%BW0yYVH9Cc*c(1~CY6#mrm@-mp7l>KIqQK-(NCKzvRZ&qdFz-M~g&78TpYvg{*B;0K zSq^8C-4rTMOPdA~7{PoOt2^4+6->m$26f9hr0=0tFPtT+^>GzEKQOOf2)VN`pd-L< zgMxxMt2fWrvcS4BF+q-*{eT_MU4Z_tKH_R<1R7*ufF0M(d){Klw)Xbcg#k*FaOmtn zY8<2#JSH%nLA*{<%L;m1QnC$V*o@?Z5J=s?7vaU{Ez{|-AY=R%!#xcQzCqUk0f}Id z*S+S?3?B%_u(4mfcmWCj>6VQ2bZl^wi_5k-oC)e4a^X)ETGN(+!{Y`O%5^bCwvzQIjz?<1%uHbo+Tf|4Ns;KWJ}4uxeOh!sS- z`uZTYA9X93Rh9zH`-e01!M&P6fO(aEB&$Ke}(>js2t?Po1A&| zkb{ehi-%_eyp6V&778)X9bna;_5G3kmYbeVLb_ld1HK0ehRq<#Jb*FP-4vKGk`5s& z02U6N1d@Vb-yswVqhrM9&)1QG0;2;7s$GR)^2Ucng0k{4;wO2!B>JB_Yc|Tfp|Qlp z9C^F-}#0)P+ zo1Rj?#BVhOWqS%D4L%reZyzr&uNyZYVtei0jl5GEZTCPCXJ#g(650ei%bkw`_N66! 
z9I)y^8U+0a_LV#KApXwFOh8-@b{BS3RxOKgNE(D&tHMExr@4S7F1@0#tR5iVrLs6ZURCScTrad2?emc$UdA@gr9wl<5%f`AWS*<#;C@oe?NmgB#1+%-BH8 z&<}_>G&MEJQr$8IFRK2aKTW$9QYtXm5c%@uE4FWwlj~ouQ=@vsF5Hz9K=K?%{Rewe$thR7N=*h24WM)bk=PyH-IpvSEq&pYA7bte% zv0S&GLi=%x%wdtmD+iFVA!NHY49$rU4=*3AreDS^Q7w<4V9faZ@8Al3ET|?>!FEky zbT2E*0+=dPR9m_BN$sb0o8AvHO>momLJ9eNuk(k5VNN&22M9y> z)~+U=hS~{LAf#Yn!8}db4OBwV2>?R*OF(i%_wU4?nbEHJOIr=Gi=oO zd+RHsJ5nDsX^V9N{R|p^eVx?tbC|2I^R3DZ*}PRuN6QV!{vq{Jb*}!<5x>@|sI)Dy zc&uIi^$Vu=g@rIRxvNr0`CqLrs^bKJiRmvF8#Qj-fHl(YgDED*ui~FMBX^OX+;qoWNxX;6Y+P7L=jSfPEYOI;Uqw0oT+vPGFflP9AAJUhQRp$Z93EIV zrxK=lU^aesW@ddDylkkppyE<5~TZJ}L0d;o}7olJm&gp^s#hzkSuF&UtGj zHTiZ4q(Ac%gxsY_%&n}Ll3{8nrQ`gIF2@tUEF`S=i{%t8rjIi+W+8#{OBei=Gky3U z$;vzcL>$rjRjx0%)4S*WbQob*Lbe6~zckcy^F)k0Frwc8%pH$J;=Zt9fV`LB&9mQgaUN7h1t}*_|;8zO~gz+yjkGdS- z0SffyW@cP_SFzIy^}nukr1A{HpdAVoxJE&@|B>AO8HZU0bPhdM zUeb3@(P)iocP9|^Aw>nK&d4Y^#+d-0g5X1Qa}XL@0cb+dMnly;jlX)hX-hu@`^m~G zXIDO7STCY1Tyqp&+ zvBKsoaSjcHx%xg7dNNo29PXJJgeANv^O3W%8GL`MY`QOB++RQkLYG*k_2d(3XTs!rpqava}&KiwOTl3I5q5@aqwXH-WTB?lag2 zqufxqL2q;hOB#Kx15piUv0Vc(iXVh_+fk)JijA6r;0?nzzSIm6&A>_th>^dd!0<~I zFS<}C8sMFM+1(*#pGt^hdGh=VXTv`kJ&c>cVFVWy-uvaREBN(tBN}3Y+QsI*P$3{K zl9vbL_ZE;|h*<|32wgEH2OKD}!vgLNg)OmvS*2mrW-8Suh)%%ebv^)JH0|KvAS5IN zf(6(CKd}0H-`!Kj?R5a`2y*~q#(aO0#MG90#GIkCGE4gUNf)jZuo$4pesT)M~;PN_N0-F(Rc}T>s5rF{@fp+xU;$l9q z6~Qw~Q@(}5=FIT}J)vTZjg6TXyMudq06OM>MV-I^jhFwqx0tuTv%?zs1wQ8Yrf1z9 zoV;Y!a|Rkjf16VZ$e@jA26O3CV3E_Jn#w&YQTLvvrVI!Rp`rH&5Ydbc43M2aFM)zV zpMYL+tNwf~9zF<9{--3K3{R0!HLN}aTI<;j@J@nd<6kQJ0ax*`AY#-X#~9KKj|x`Q zpnk(UflR#Ho9A5&3NL7B8~cz`puBM5Wn7#GOxLWftpT+Cu^(gx!WyW{3=B`ukOgWX zlLZ(Z7RJHO-qzfFnvgIoIC!|iDGQhv_HiYenr`b;cf`fT>AygTN=jvbWR*7?KF8mO z8W!V^{dMx-9hosaT0!sq{bmTdfYDOBJ3~?3_erkULL(=W{2%*lX5Hi=7!zmw>BYoq zfTpcZfsR^(XtI-_H)6g$CkE{04Fg%dUp~`JlMoM2VaBofVh9NWyTndlLe`k|!c(~0 z+>O{oeNX=zO(t5d**k0CH?$M9N*s9z|yGL+w$@yjHKMz z8UMf&Gf2g;25$Q3b24K?~gEV}MaEta;oT;$&jNY*KglHZM;>MrQK5`~W;|XcgQ9U+#28 zR+tbtOGK9pI`8S>1BzP6C*_R-SM-;#+gi9c0W+hP)jCsf3Hi&@ 
z+NDj}|8`Q^cmEawjRtX#&$#hyW{PV*kcV5+(tsk)cmUc01|RgfySuQg?A3V8Z-qH2 zWWtO34-58+>Te4css=cse#hW(8y8D18Dn$9CS=bHMgJs$e-3gb?X*V{`lC=F0b?K6 z0FU)KS_>3^wR%ZqnLMYD6$XA{gKG65K2_B6>C*!+S>Q1-AV|V^?>0D0KLf=N=!Aq~(Ii(j zfT0FJv4W8t$RTZmQf{&k^tYBh`Tju*q@p+Ac@)5!<3a0$yeSIU6ACL*!e|4SrBOX8 z-Sc(z^#I`ezJ4`=*q16B97~Jl%u8yY>_#>^#A`uSPfbk)4GSh6&iL=f-T&870V&L+ z{q-dniw5Nlc7fGE{-ae5o*kKg4_RPs88kd1X!qD(B36Z&w4fE!unIf8D1P{n>Z)fKRGRQ7zmD{Hk78X8vA_<1v zg9l9d2}d4CRF$r_wy@8UCoV27Xu;t712!)yDe?33dvqN4%k;W5^{&tIsE^`>Ee!5j z{}|o3)3~l&H)&~Z-6Fg7nb!ny@yR+a`HMFxGv(TSN`;_9`INm z^Z4lwkJs_NwNU+Y45$tM>C=Eu@GsY-C~Y%5odk{w4>{o@zpJYHlGo~1m_sx?0}1FB zh!-fO1DGX+Z3U?oYB5hTVq&;WFGsaTbKh$Vi;e)=f;mPAv(eHTz}GR-+N-Rs|Ouhom7b{^3y7$GgDFCyvK$vupLgm7 zgbt3FJJ|t0wQDlyTiX4%Y|YlrT|bV~JCS<(@v7Me;(Fy5`iH?ocQdE4it%pFkQZ_xkCh{sx^gX$e-&Y>N?~G^5Q_m0y zCQ3P%<0R-f?UZLxuM>UaUpio6c$v`X7U(pkOtJA+!R)?zWn)a%P8f_>B6|F^0K)ul zeCbIqMjPA(wF%N#V6ctfmksL4PD-MKVZO_k@8tDc@Ww(k7K$dE^TtN0;4v5-uv`w` zR0cRoiw9x`tjqhMGRQBI2)|i+?Q{*|C(HsYmpM47fi0m>erj30^Nye#;*(s3#9qTVHzzSOdr437|YWazj+QO`-yiY3J0N`Gvd`jH)S6JVeHJ~&T2_0q@U@uWlJPtXi_$wT$>Y8__7)_Y+O-8=_E z9=JuAf_5tVdyal7%aK`-Sj4ECKtlhIU>5|*mx^8*TZKp&10h60vj~MJEr2kE%^p=J zV5td&fT~LMs@(^NnC8>qwSXI*bM@@~Gz^aA5XRn`%_hEi4jzKBY$Jc+R^i~m(f(?o z7&!dU%>V&WpF4L01)>m?U32@}ardv>XFtt^`3!v%mOB;=4~~Ic87x=at53ub>V>uYFg zHis30-Wbk_0eu_I%FNuD9098Fe3|FB4>(zsETJ2q)?Dr&fe|zMa8AtvBY%7LE@O%L z$6C|zLki~uAGdTHMl?Nr*q#3Cc_-{q0{r}9KN^TpMKJ8EbRma&@)RNEZ4{VOCENM& zGv?a;^k3e@w#^8Pxz^KjIg3#1UdnkGA#B~pxs+=74t1kBoJP)a&jumEN!%oN<#(oj+1fu2i$hJc{j2UBx+(=43F z9C`Et21f~zF!s0!H1TU4bO(5YKr}q?kA0lxNGr8@B!8&&{xx=ud|$5}n4b}&fCT!o`VY1Ni1l)G&$<1?qu>7B+3mIvy-A5mY+y#q- zgO2VI8Y-^kvY~L>y)6r*(gjEvpjqkYlA5FJK~Mc9Q3Ovz($;8VYW}Wfa0mdGy#5yP z2QfIuPcbqwvJ&a3ZTOzF2MySnUtLpkes-4h+&NpY9)e9w3U}d=BFxF~OoGqb2wOWT z_Bz3J$m#>eWoAx?;lku;*p@+NAP=n{+Bvh=-8V5g-vur()u>~k1+rh^)Wu5da|V!y z2OAw5HRO^&iV5nOG1>&djpdl|gggamW8^$%Qay~iRgD>vP!B*O>;}le$1vv~< z>T7d@#4fX<3N;8i1A>5Q(}Ak^W=QPj=88GaJ@+)!)EtD(FxXEZ62+kl%1Z@g#^DAJ 
znis-;1XTk>BWwzqg!A-cE(jlj2M@{uu1hVuZP)=M1lmkCAf^P|*R1Qkeyumw96Np5 zj(3h-z+>Bz-iT$~s=hiR$^R4qISK_Cy0767hj04y3C?)}+Iw%F<>4+EYC&k-H&Kvx z$buLtfH2@s3=H2PF%4wed2EdScL8B*}o5z7~> z%da*BSwm2d03m{|+%q*&yd9m@&T6}qZM}738vHA!WWYGg+F$O%Kpi$q)bU!-c=4%P zMD>fw3Urql4BmsI5iS6QkqHTTx|od$H0#r=^}xI>FXn?0hjQGQxw~&u=x^zt&6G4n zxeHfFjGSc>YbP7DY3??JoS9V0#L{!E?-l5|_2xH5r6A^`;K4zkj;fp%Ns!YsGqG~a z)p}UHBdWiz&)v<EF7 zgWduGuC_F=0<{!WWQ`uoMj{46>{mefhk1gj!Q9^H)N>G)IdS5|XU<9j%IjPZT`h86 zdmo8wQYeTeaoESZWQ9Vaj^LD}igBNE``cUBrS&(o~5_bXgWqE1@)7H_^>P}9@tWU8F;l5n}?bAsZ zq~;}ubf`b;Q=mUk?LeRXfkHiZBxN?8i@AYg=$m>ygRvRT$DTM4vKARtQ&n^@8T62o%9f<^|SRu(Zz zi2_0gnJ4zXx09RiA}8OM_k8d2Uw+Rcl}F@whztVyF)_cxCm}ZjksS^V^TpnQl*reI%5^lC z-A-y;hWC|nu5z~yS&j1bLv<$XLq2qQ9O1z~k;bhn-ZIOOcILlBtt}C!S<>QJZx}ZK`b>;ku{6;WXpxm`-&-Mq`1l*a?vUnFFtMc%lV@}M} z_C~^jJQ{T)w{l&0k+6L3=(o?A;9WUM13K;P#3Q%s$mRp#sKUuo{Lv#o>uHj>`=7}$ zrUTjqJQrc+C9VTQk?=(HVaY`8C;?#an}vnEPDk@kcx;c&&iz_)!1%_dhoHde@1GgD1%=TjA(oh90Au)jk2aIz#M+xgxxy4YlY z?10mrNY!gY=l}*#akUCn8xxpn1d$lcIv?NgMz&?ukF5>2_4?z!tw0PiqnS<&OW_BkwM1E0jYWfZtFia=@=Bd4zAzCi^^X`v z*iO|#$ZwdW_&81Cp_p>pmuBf#OigQv#BALxH=|_#zOkT*l;R}n54F)m0vo_y0NELJ zQw#xZ^ul9<*6MfcgD40=FGH|mz>UJZx5;Md*K61lKnPcsFg@wR>2Jafgx**4y}Z4F^f|#@_giCk zE~3f|Vwijd-J~LrweXKo9dv75ur4{-64Qlv>M1i8K^9o))8`BWYe=SJSElx*7C&|{ ze&I(8SY@G{CzVLBhk8&boYjQ~g+r~Y!r$nvv+3)42WDv$U%y~e4v)vrkHsG};i2m3 zZw_}ic`Ba#DSfypjKZh=fi2v>Ipn9;qRp1KoF^xr!6TyM&Ye@J^ud)(a|pCRZglOd z>Jb~_qLmQ)%oh{ijhsXyjy638=%7OPZ^s))NM6h-{X|C62^D(%hGf4e=GgA`dsKS6 z)k0*T><@LfE_&XzBa?A+OZA1q?CfEv9U=JwVJj|mYr-*2xHmBH;~Kjq$MhP?P9{8+ zb~C=a$Fq$+d)nbKM?k*FdNkJ9)p_ei-^Khx1 zqNf@S?Z!sy2v{407}_c$zE;@diQDqB5+WkZ3ZEMF+c$1{QbjG_qV@}l1c*Z`%*g#U zWK|NWt1B6}HJKr^Jf}jFQOypq=F!( zQm;(Fm4uw3IAY7Y%gWpp#%!h$pC(JbX9`h`F($ZZJo{{2H>WgN?4!$pD16zg)+j$q zI}Tyc@j$B!$B9qh579aDE9>{GHMr(iLaoMr9wg8`Wg}w|b+;caOzSA;=YvZ(4F6E= zy!PKX(Ped-UXu}yKOG&NSF)x`%suGkaOx!h6h|t6Y6@4=_($&>1Oev-UkEXR%3YN4 zd>FAn_h~D@&+Vw4H#E%m+sXCOW*FgzgoR<@&H+Mzx+B!NY&Z%{uE^2a;QRbZTCLwa 
zz5u3)pbjL<5`xRl&UgP#-GSf{Gu8>E-2kWTG{ZGkvu%-KjymhZahx5o**Ni z^?+g-*uOX*8#Li=_x!W2MA=!YKj?!PL3VwgA@3*w)72ZtRHOk}VCp8Wer%{h7% zEf%62Jsqixa%yU7>VRR8rSgZ3({5vDt*a)Yaki^8*~?jHtQ3(-Y$Eq|Hp;ad7vg6D zzNCk2H*UzG6FV$KEuUetY?7OCvSZW=?vhH*UMU*U5^%ncyZcL~QEW$;deVXQoFw03 z{`Eb%24W8vcMHLhLEX5)#L8FKY?5c??(31~muwNJv{qH>|JRE|I;6e9C8p=%(J_)~ znZT&v=fvwlB}X;L)R%1hyAKAh%!T7}^wX;MCKi8rS=)GdmECae>XBWlUwV*zT Date: Fri, 23 May 2014 19:47:39 -0700 Subject: [PATCH 082/188] Standardize on the same capitalization pattern Inside of mixing capitalization in title sections just be consistent and capitalize the first word and then have the rest of the title be lower cased. Change-Id: I1170ab2ee3d5d3801ec4022d1c81d8173cbdd8df --- doc/source/arguments_and_results.rst | 25 ++++++++++++------------- doc/source/inputs_and_outputs.rst | 4 ++-- doc/source/notifications.rst | 12 ++++++------ 3 files changed, 20 insertions(+), 21 deletions(-) diff --git a/doc/source/arguments_and_results.rst b/doc/source/arguments_and_results.rst index a74d8225..f16de065 100644 --- a/doc/source/arguments_and_results.rst +++ b/doc/source/arguments_and_results.rst @@ -1,4 +1,3 @@ - ========================== Atom Arguments and Results ========================== @@ -37,12 +36,12 @@ those ways to accomplish your desired usage pattern. from taskflow import task -Arguments Specification +Arguments specification ======================= There are different ways to specify the task argument ``requires`` set. -Arguments Inference +Arguments inference ------------------- Task arguments can be inferred from arguments of the |task.execute| method of @@ -130,7 +129,7 @@ passed to |task.execute| method as ``vm_name``, value from ``vm_image_id`` is passed as ``vm_image_id``, and value from ``admin_key_name`` is passed as ``admin_key_name`` parameter in ``kwargs``. 
-Manually Specifying Requirements +Manually specifying requirements -------------------------------- **Why:** It is often useful to manually specify the requirements of a task, @@ -187,7 +186,7 @@ avoid invalid argument mappings. .. make vim sphinx highlighter happy** -Results Specification +Results specification ===================== In python, function results are not named, so we can not infer what a task @@ -197,7 +196,7 @@ and it is typically (but not always) desirable to make those results accessible to other tasks. To accomplish this the task specifies names of those values via its ``provides`` task constructor parameter or other method (see below). -Returning One Value +Returning one value ------------------- If task returns just one value, ``provides`` should be string -- the @@ -212,8 +211,8 @@ name of the value. >>> TheAnswerReturningTask(provides='the_answer').provides set(['the_answer']) -Returning Tuple ---------------- +Returning a tuple +----------------- For a task that returns several values, one option (as usual in python) is to return those values via a ``tuple``. @@ -252,8 +251,8 @@ and passed to the |task.revert| method). warning is printed to logs and if use of such parameter is attempted a ``NotFound`` exception is raised. -Returning Dictionary --------------------- +Returning a dictionary +---------------------- Another option is to return several values as a dictionary (aka a ``dict``). @@ -293,7 +292,7 @@ will be able to get elements from storage by name: parameters are left undefined: a warning is printed to logs and if use of such parameter is attempted a ``NotFound`` exception is raised. -Default Provides +Default provides ---------------- As mentioned above, the default task base class provides nothing, which means @@ -329,7 +328,7 @@ the task from other tasks in the flow (e.g. 
to avoid naming conflicts): BitsAndPiecesTask(provides=()) -Revert Arguments +Revert arguments ================ To revert a task engine calls its |task.revert| method. This method @@ -372,7 +371,7 @@ task failed, exception:"`` and exception message on revert. If this task finished successfully, it will print ``"do_something returned"`` and representation of result. -Retry Arguments +Retry arguments =============== A Retry controller works with arguments in the same way as a Task. But it has diff --git a/doc/source/inputs_and_outputs.rst b/doc/source/inputs_and_outputs.rst index d16105c9..d89d6ab8 100644 --- a/doc/source/inputs_and_outputs.rst +++ b/doc/source/inputs_and_outputs.rst @@ -10,7 +10,7 @@ state. You may also opt to use the :doc:`persistence ` layer itself directly. ----------------------- -Flow Inputs and Outputs +Flow inputs and outputs ----------------------- Tasks accept inputs via task arguments and provide outputs via task results @@ -62,7 +62,7 @@ task. and outputs. ------------------ -Engine and Storage +Engine and storage ------------------ The storage layer is how an engine persists flow and task details (for more diff --git a/doc/source/notifications.rst b/doc/source/notifications.rst index f477e396..62f02c7d 100644 --- a/doc/source/notifications.rst +++ b/doc/source/notifications.rst @@ -26,7 +26,7 @@ provides means to write your own listeners, which can be more convenient than using raw callbacks. -------------------------------------- -Receiving Notifications with Callbacks +Receiving notifications with callbacks -------------------------------------- To manage notifications instances of @@ -34,7 +34,7 @@ To manage notifications instances of .. 
autoclass:: taskflow.utils.misc.Notifier -Flow Notifications +Flow notifications ------------------ To receive notification on flow state changes use @@ -67,7 +67,7 @@ To receive notification on flow state changes use woof Flow 'cat-dog' transition to state SUCCESS -Task Notifications +Task notifications ------------------ To receive notification on task state changes use @@ -146,12 +146,12 @@ For example, this is how you can use taskflow.engines.action_engine.engine.SingleThreadedActionEngine: ... has moved task 'DogTalk' (...) into state 'SUCCESS' with result 'dog' (failure=False) taskflow.engines.action_engine.engine.SingleThreadedActionEngine: ... has moved flow 'cat-dog' (...) into state 'SUCCESS' -Basic Listener +Basic listener -------------- .. autoclass:: taskflow.listeners.base.ListenerBase -Printing and Logging Listeners +Printing and logging listeners ------------------------------ .. autoclass:: taskflow.listeners.base.LoggingBase @@ -160,7 +160,7 @@ Printing and Logging Listeners .. autoclass:: taskflow.listeners.printing.PrintingListener -Timing Listener +Timing listener --------------- .. autoclass:: taskflow.listeners.timing.TimingListener From 7477c243cab8dd36896155ac0a110987d75e4e3c Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Fri, 23 May 2014 20:01:14 -0700 Subject: [PATCH 083/188] Rework the overview of the notification mechanism Clean up the introduction where the notification mechanism is defined so that it becomes more clear what its usage is and what the attributes of an engine are that provide these capabilities. 
Change-Id: I4eeb9029b17d562111371c6f5155570b7a31859c --- doc/source/notifications.rst | 11 ++++++----- taskflow/engines/base.py | 8 +++++++- 2 files changed, 13 insertions(+), 6 deletions(-) diff --git a/doc/source/notifications.rst b/doc/source/notifications.rst index f477e396..b533e961 100644 --- a/doc/source/notifications.rst +++ b/doc/source/notifications.rst @@ -16,12 +16,13 @@ Engines provide a way to receive notification on task and flow state transitions, which is useful for monitoring, logging, metrics, debugging and plenty of other tasks. -To receive these notifications you should register a callback in -:py:class:`~taskflow.utils.misc.Notifier` provided by engine. -Each engine provides two of them: one notifies about flow state changes, -and another notifies about changes of tasks. +To receive these notifications you should register a callback with +an instance of the the :py:class:`notifier ` +class that is attached +to :py:class:`engine ` +attributes ``task_notifier`` and ``notifier``. -TaskFlow also has a set of predefined :ref:`listeners `, and +Taskflow also comes with a set of predefined :ref:`listeners `, and provides means to write your own listeners, which can be more convenient than using raw callbacks. diff --git a/taskflow/engines/base.py b/taskflow/engines/base.py index eb8d76ee..9255a3da 100644 --- a/taskflow/engines/base.py +++ b/taskflow/engines/base.py @@ -24,7 +24,13 @@ from taskflow.utils import misc @six.add_metaclass(abc.ABCMeta) class EngineBase(object): - """Base for all engines implementations.""" + """Base for all engines implementations. + + :ivar notifier: A notification object that will dispatch events that + occur related to the flow the engine contains. + :ivar task_notifier: A notification object that will dispatch events that + occur related to the tasks the engine contains. 
+ """ def __init__(self, flow, flow_detail, backend, conf): self._flow = flow From bddfdf4eef7d47c7cf443b5bfadfaf4efa934d3a Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Fri, 23 May 2014 21:57:24 -0700 Subject: [PATCH 084/188] Add doc link to examples To make it easy to find where the examples are located at a section of the developer docs that links to the source tree examples directory. Change-Id: Ie2222e7cb221764749353e0d05fa1df44c2db514 --- doc/source/examples.rst | 11 +++++++++++ doc/source/index.rst | 1 + 2 files changed, 12 insertions(+) create mode 100644 doc/source/examples.rst diff --git a/doc/source/examples.rst b/doc/source/examples.rst new file mode 100644 index 00000000..c6d2e3ed --- /dev/null +++ b/doc/source/examples.rst @@ -0,0 +1,11 @@ +======== +Examples +======== + +While developing TaskFlow the team has worked hard to make sure the concepts +that TaskFlow provides are explained by *relevant* examples. To explore these +please check out the `examples`_ directory in the TaskFlow source tree. If the +examples provided are not satisfactory (or up to your standards) contributions +are welcome and very much appreciated to improve them. + +.. _examples: http://git.openstack.org/cgit/openstack/taskflow/tree/taskflow/examples diff --git a/doc/source/index.rst b/doc/source/index.rst index 387980e4..dbd83c4b 100644 --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -27,6 +27,7 @@ Contents exceptions utils states + examples Indices and tables ================== From eece9e15e96e2fdaf6de4b6c24f0e238857ee11b Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Fri, 23 May 2014 15:49:51 -0700 Subject: [PATCH 085/188] Rework atom documentation Expand on what atoms, tasks, and retries are used for an what there mission is and explain in more depth (with usage examples) how retry atoms can be used to resolve failures that happen. 
Change-Id: Icc585c8a9b87e070f12a267e531225f87412cd5f --- doc/source/atoms.rst | 173 ++++++++++++++++++++++++++++++++++++++++--- taskflow/atom.py | 37 ++++++--- taskflow/retry.py | 2 +- 3 files changed, 190 insertions(+), 22 deletions(-) diff --git a/doc/source/atoms.rst b/doc/source/atoms.rst index 98d1ba70..a5aef168 100644 --- a/doc/source/atoms.rst +++ b/doc/source/atoms.rst @@ -2,29 +2,178 @@ Atoms, Tasks and Retries ------------------------ -An atom is the smallest unit in taskflow which acts as the base for other -classes. Atoms have a name and a version (if applicable). An atom is expected -to name desired input values (requirements) and name outputs (provided -values), see the :doc:`arguments and results ` page for -a complete reference about these inputs and outputs. +Atom +==== + +An :py:class:`atom ` is the smallest unit in taskflow which +acts as the base for other classes (its naming was inspired from the +similarities between this type and `atoms`_ in the physical world). Atoms +have a name and may have a version. An atom is expected to name desired input +values (requirements) and name outputs (provided values). + +.. note:: + + For more details about atom inputs and outputs please visit + :doc:`arguments and results `. .. automodule:: taskflow.atom +.. _atoms: http://en.wikipedia.org/wiki/Atom + Task ===== -A task (derived from an atom) is the smallest possible unit of work that can -have an execute & rollback sequence associated with it. +A :py:class:`task ` (derived from an atom) is the +smallest possible unit of work that can have an execute & rollback sequence +associated with it. These task objects all derive +from :py:class:`~taskflow.task.BaseTask` which defines what a task must +provide in terms of properties and methods. -.. automodule:: taskflow.task +Currently the following *provided* types of task subclasses are: + +* :py:class:`~taskflow.task.Task`: useful for inheriting from and creating your + own subclasses. 
+* :py:class:`~taskflow.task.FunctorTask`: useful for wrapping existing + functions into task objects. + +.. note:: + + :py:class:`~taskflow.task.FunctorTask` task types can not currently be used + with the :doc:`worker based engine ` due to the fact that + arbitrary functions can not be guaranteed to be correctly + located (especially if they are lambda or anonymous functions) on the + worker nodes. Retry ===== -A retry (derived from an atom) is a special unit that handles flow errors, -controls flow execution and can retry atoms with another parameters if needed. -It is useful to allow for alternate ways of retrying atoms when they fail so -the whole flow can proceed even when a group of atoms fail. +A :py:class:`retry ` (derived from an atom) is a special +unit that handles errors, controls flow execution and can (for example) retry +other atoms with other parameters if needed. When an associated atom +fails, these retry units are *consulted* to determine what the resolution +method should be. The goal is that with this *consultation* the retry atom +will suggest a method for getting around the failure (perhaps by retrying, +reverting a single item, or reverting everything contained in the retries +associated scope). +Currently derivatives of the :py:class:`retry ` base +class must provide a ``on_failure`` method to determine how a failure should +be handled. + +The current enumeration set that can be returned from this method is: + +* ``RETRY`` - retries the surrounding subflow (a retry object is associated + with a flow, which is typically converted into a graph hierarchy at + compilation time) again. + +* ``REVERT`` - reverts only the surrounding subflow but *consult* the + parent atom before doing this to determine if the parent retry object + provides a different reconciliation strategy (retry atoms can be nested, this + is possible since flows themselves can be nested). + +* ``REVERT_ALL`` - completely reverts a whole flow. 
+ +To aid in the reconciliation process the +:py:class:`retry ` base class also mandates ``execute`` +and ``revert`` methods (although subclasses are allowed to define these methods +as no-ops) that can be used by a retry atom to track interact with the runtime +execution model (for example, to track the number of times it has been +called which is useful for the :py:class:`~taskflow.retry.ForEach` retry +subclass). + +To avoid recreating common retry patterns the following provided retry +subclasses are provided: + +* :py:class:`~taskflow.retry.AlwaysRevert`: Always reverts subflow. +* :py:class:`~taskflow.retry.AlwaysRevertAll`: Always reverts the whole flow. +* :py:class:`~taskflow.retry.Times`: Retries subflow given number of times. +* :py:class:`~taskflow.retry.ForEach`: Allows for providing different values + to subflow atoms each time a failure occurs (making it possibly to resolve + the failure by altering subflow atoms inputs). +* :py:class:`~taskflow.retry.ParameterizedForEach`: Same as + :py:class:`~taskflow.retry.ForEach` but extracts values from storage + instead of the :py:class:`~taskflow.retry.ForEach` constructor. + +Usage +----- + +.. testsetup:: + + import taskflow + from taskflow import task + from taskflow import retry + from taskflow.patterns import linear_flow + from taskflow import engines + +.. doctest:: + + >>> class EchoTask(task.Task): + ... def execute(self, *args, **kwargs): + ... print(self.name) + ... print(args) + ... print(kwargs) + ... + >>> flow = linear_flow.Flow('f1').add( + ... EchoTask('t1'), + ... linear_flow.Flow('f2', retry=retry.ForEach(values=['a', 'b', 'c'], name='r1', provides='value')).add( + ... EchoTask('t2'), + ... EchoTask('t3', requires='value')), + ... 
EchoTask('t4')) + +In this example the flow ``f2`` has a retry controller ``r1``, that is an +instance of the default retry controller :py:class:`~taskflow.retry.ForEach`, +it accepts a collection of values and iterates over this collection when +each failure occurs. On each run :py:class:`~taskflow.retry.ForEach` retry +returns the next value from the collection and stops retrying a subflow if +there are no more values left in the collection. For example if tasks ``t2`` or +``t3`` fail, then the flow ``f2`` will be reverted and retry ``r1`` will retry +it with the next value from the given collection ``['a', 'b', 'c']``. But if +the task ``t1`` or the task ``t4`` fails, ``r1`` won't retry a flow, because +tasks ``t1`` and ``t4`` are in the flow ``f1`` and don't depend on +retry ``r1`` (so they will not *consult* ``r1`` on failure). + +.. doctest:: + + >>> class SendMessage(task.Task): + ... def execute(self, message): + ... print("Sending message: %s" % message) + ... + >>> flow = linear_flow.Flow('send_message', retry=retry.Times(5)).add( + ... SendMessage('sender')) + +In this example the ``send_message`` flow will try to execute the +``SendMessage`` five times when it fails. When it fails for the sixth time (if +it does) the task will be asked to ``REVERT`` (in this example task reverting +does not cause anything to happen but in other use cases it could). + +.. doctest:: + + >>> class ConnectToServer(task.Task): + ... def execute(self, ip): + ... print("Connecting to %s" % ip) + ... + >>> server_ips = ['192.168.1.1', '192.168.1.2', '192.168.1.3' ] + >>> flow = linear_flow.Flow('send_message', + ... retry=retry.ParameterizedForEach(rebind={'values': 'server_ips'}, + ... provides='ip')).add( + ... ConnectToServer(requires=['ip'])) + +In this example the flow tries to connect a server using a list (a tuple +can also be used) of possible IP addresses. Each time the retry will return +one IP from the list. 
In case of a failure it will return the next one until +it reaches the last one, then the flow will be reverted. + +Interfaces +========== + +.. automodule:: taskflow.task .. automodule:: taskflow.retry +Hierarchy +========= + +.. inheritance-diagram:: + taskflow.atom + taskflow.task + taskflow.retry + :parts: 1 diff --git a/taskflow/atom.py b/taskflow/atom.py index 07e57a49..d4840e50 100644 --- a/taskflow/atom.py +++ b/taskflow/atom.py @@ -117,18 +117,27 @@ class Atom(object): An atom is a named object that operates with input flow data to perform some action that furthers the overall flows progress. It usually also produces some of its own named output as a result of this process. + + :ivar version: An *immutable* version that associates version information + with this atom. It can be useful in resuming older versions + of atoms. Standard major, minor versioning concepts + should apply. + :ivar save_as: An *immutable* output ``resource`` name dict this atom + produces that other atoms may depend on this atom providing. + The format is output index (or key when a dictionary + is returned from the execute method) to stored argument + name. + :ivar rebind: An *immutable* input ``resource`` mapping dictionary that + can be used to alter the inputs given to this atom. It is + typically used for mapping a prior tasks output into + the names that this atom expects (in a way this is like + remapping a namespace of another atom into the namespace + of this atom). """ def __init__(self, name=None, provides=None): self._name = name - # An *immutable* output 'resource' name dict this atom - # produces that other atoms may depend on this atom providing. - # - # Format is output index:arg_name self.save_as = _save_as_to_mapping(provides) - # This identifies the version of the atom to be ran which - # can be useful in resuming older versions of atoms. Standard - # major, minor version semantics apply. 
self.version = (1, 0) def _build_arg_mapping(self, executor, requires=None, rebind=None, @@ -155,10 +164,20 @@ class Atom(object): @property def provides(self): - """Any outputs this atom produces.""" + """Any outputs this atom produces. + + NOTE(harlowja): there can be no intersection between what this atom + requires and what it produces (since this would be an impossible + dependency to satisfy). + """ return set(self.save_as) @property def requires(self): - """Any inputs this atom requires to execute.""" + """Any inputs this atom requires to function (if applicable). + + NOTE(harlowja): there can be no intersection between what this atom + requires and what it produces (since this would be an impossible + dependency to satisfy). + """ return set(self.rebind.values()) diff --git a/taskflow/retry.py b/taskflow/retry.py index b1a6ff48..9384d873 100644 --- a/taskflow/retry.py +++ b/taskflow/retry.py @@ -126,7 +126,7 @@ class Times(Retry): return REVERT def execute(self, history, *args, **kwargs): - return len(history)+1 + return len(history) + 1 class ForEachBase(Retry): From cca600f3952b688a0ab6fd0d5f388cc2ea960fbb Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Sat, 24 May 2014 17:13:09 -0700 Subject: [PATCH 086/188] Put the job external wiki link in a note section Change-Id: I10cc55f984349de66caf2007748d74bbd9574af1 --- doc/source/jobs.rst | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/doc/source/jobs.rst b/doc/source/jobs.rst index 85ddd517..f60bce03 100644 --- a/doc/source/jobs.rst +++ b/doc/source/jobs.rst @@ -19,7 +19,10 @@ claiming them, and only remove them from the queue when they're done with the work. If the consumer fails, the lock is *automatically* released and the item is back on the queue for further consumption. -For more information, please see `wiki page`_ for more details. +.. note:: + + For more information, please visit the `paradigm shift`_ page for + more details. 
Definitions =========== @@ -245,7 +248,7 @@ Interfaces .. automodule:: taskflow.jobs.job .. automodule:: taskflow.jobs.jobboard -.. _wiki page: https://wiki.openstack.org/wiki/TaskFlow/Paradigm_shifts#Workflow_ownership_transfer +.. _paradigm shift: https://wiki.openstack.org/wiki/TaskFlow/Paradigm_shifts#Workflow_ownership_transfer .. _zookeeper: http://zookeeper.apache.org/ .. _kazoo: http://kazoo.readthedocs.org/ .. _eventlet handler: https://pypi.python.org/pypi/kazoo-eventlet-handler/ From df5b1621a1e6e4367f47c45e2ede893a7ffa0e9d Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Sat, 24 May 2014 15:54:20 -0700 Subject: [PATCH 087/188] Fix case of taskflow in docs Fix occurrences of the lowercase (or mixed case) version of taskflow and rename those occurrences to the cased version to be consistent with usage elsewhere in the docs. Change-Id: I31f552daa015724b443b099c2fcfe38d8e04605a --- doc/source/arguments_and_results.rst | 2 +- doc/source/atoms.rst | 2 +- doc/source/jobs.rst | 2 +- doc/source/notifications.rst | 2 +- doc/source/persistence.rst | 2 +- doc/source/resumption.rst | 8 ++++---- doc/source/utils.rst | 2 +- 7 files changed, 10 insertions(+), 10 deletions(-) diff --git a/doc/source/arguments_and_results.rst b/doc/source/arguments_and_results.rst index f16de065..e5870545 100644 --- a/doc/source/arguments_and_results.rst +++ b/doc/source/arguments_and_results.rst @@ -7,7 +7,7 @@ Atom Arguments and Results .. |retry.execute| replace:: :py:meth:`~taskflow.retry.Retry.execute` .. |retry.revert| replace:: :py:meth:`~taskflow.retry.Retry.revert` -In taskflow, all flow and task state goes to (potentially persistent) storage. +In TaskFlow, all flow and task state goes to (potentially persistent) storage. That includes all the information that :doc:`atoms ` (e.g. tasks) in the flow need when they are executed, and all the information task produces (via serializable task results). 
A developer who implements tasks or flows can diff --git a/doc/source/atoms.rst b/doc/source/atoms.rst index a5aef168..fb4b086b 100644 --- a/doc/source/atoms.rst +++ b/doc/source/atoms.rst @@ -5,7 +5,7 @@ Atoms, Tasks and Retries Atom ==== -An :py:class:`atom ` is the smallest unit in taskflow which +An :py:class:`atom ` is the smallest unit in TaskFlow which acts as the base for other classes (its naming was inspired from the similarities between this type and `atoms`_ in the physical world). Atoms have a name and may have a version. An atom is expected to name desired input diff --git a/doc/source/jobs.rst b/doc/source/jobs.rst index 85ddd517..1ffbd7c1 100644 --- a/doc/source/jobs.rst +++ b/doc/source/jobs.rst @@ -5,7 +5,7 @@ Jobs Overview ======== -Jobs and jobboards are a **novel** concept that taskflow provides to allow for +Jobs and jobboards are a **novel** concept that TaskFlow provides to allow for automatic ownership transfer of workflows between capable owners (those owners usually then use :doc:`engines ` to complete the workflow). They provide the necessary semantics to be able to atomically transfer a job from a diff --git a/doc/source/notifications.rst b/doc/source/notifications.rst index f59e3ac7..3fe430de 100644 --- a/doc/source/notifications.rst +++ b/doc/source/notifications.rst @@ -22,7 +22,7 @@ class that is attached to :py:class:`engine ` attributes ``task_notifier`` and ``notifier``. -Taskflow also comes with a set of predefined :ref:`listeners `, and +TaskFlow also comes with a set of predefined :ref:`listeners `, and provides means to write your own listeners, which can be more convenient than using raw callbacks. 
diff --git a/doc/source/persistence.rst b/doc/source/persistence.rst index 5ad12ef9..022773e5 100644 --- a/doc/source/persistence.rst +++ b/doc/source/persistence.rst @@ -8,7 +8,7 @@ Overview In order to be able to receive inputs and create outputs from atoms (or other engine processes) in a fault-tolerant way, there is a need to be able to place what atoms output in some kind of location where it can be re-used by other -atoms (or used for other purposes). To accommodate this type of usage taskflow +atoms (or used for other purposes). To accommodate this type of usage TaskFlow provides an abstraction (provided by pluggable `stevedore`_ backends) that is similar in concept to a running programs *memory*. diff --git a/doc/source/resumption.rst b/doc/source/resumption.rst index cc6e9eec..599d85fe 100644 --- a/doc/source/resumption.rst +++ b/doc/source/resumption.rst @@ -13,10 +13,10 @@ atoms we need to create a model and corresponding information that allows us to persist the *right* amount of information to preserve, resume, and rollback a flow on software or hardware failure. -To allow for resumption taskflow must be able to re-create the flow and +To allow for resumption TaskFlow must be able to re-create the flow and re-connect the links between atom (and between atoms->atom details and so on) in order to revert those atoms or resume those atoms in the correct ordering. -Taskflow provides a pattern that can help in automating this process (it does +TaskFlow provides a pattern that can help in automating this process (it does **not** prohibit the user from creating their own strategies for doing this). Factories @@ -129,7 +129,7 @@ some kind of upgrade or to fix a bug in a prior atoms code). factory should replace this name where it was being used previously. **Runtime change:** This will fall under the same runtime adjustments that -exist when a new atom is added. In the future taskflow could make this easier +exist when a new atom is added. 
In the future TaskFlow could make this easier by providing a ``upgrade()`` function that can be used to give users the ability to upgrade atoms before running (manual introspection & modification of a :py:class:`~taskflow.persistence.logbook.LogBook` can be done before engine @@ -144,7 +144,7 @@ decides that N atoms should be merged in Date: Sun, 25 May 2014 07:29:46 -0700 Subject: [PATCH 088/188] Remove wording issue (track does not make sense here) Change-Id: I68b28339d56e4421c68e2fd650e5de7e29638143 --- doc/source/atoms.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/source/atoms.rst b/doc/source/atoms.rst index fb4b086b..85086346 100644 --- a/doc/source/atoms.rst +++ b/doc/source/atoms.rst @@ -76,7 +76,7 @@ The current enumeration set that can be returned from this method is: To aid in the reconciliation process the :py:class:`retry ` base class also mandates ``execute`` and ``revert`` methods (although subclasses are allowed to define these methods -as no-ops) that can be used by a retry atom to track interact with the runtime +as no-ops) that can be used by a retry atom to interact with the runtime execution model (for example, to track the number of times it has been called which is useful for the :py:class:`~taskflow.retry.ForEach` retry subclass). From e60ce2db7ee8225534aa15c6470cde284011366d Mon Sep 17 00:00:00 2001 From: Ihar Hrachyshka Date: Tue, 27 May 2014 10:22:49 +0200 Subject: [PATCH 089/188] Synced jsonutils from oslo-incubator The sync includes change that makes sure we get unicode-only dicts from jsonutils no matter which json module implementation is selected. The latest commit in oslo-incubator: - 0f4586c0076183c6356eec682c8a593648125abd The sync adds a new 'strutils' module that is now used in jsonutils. 
Change-Id: Ic815ca3df94c33edec9104172048b2cd94b92e3f Closes-Bug: 1314129 --- openstack-common.conf | 1 + taskflow/openstack/common/gettextutils.py | 194 +++++++++++------- taskflow/openstack/common/jsonutils.py | 10 +- taskflow/openstack/common/strutils.py | 239 ++++++++++++++++++++++ 4 files changed, 368 insertions(+), 76 deletions(-) create mode 100644 taskflow/openstack/common/strutils.py diff --git a/openstack-common.conf b/openstack-common.conf index 8adc059c..8940a040 100644 --- a/openstack-common.conf +++ b/openstack-common.conf @@ -4,6 +4,7 @@ module=excutils module=importutils module=jsonutils +module=strutils module=timeutils module=uuidutils module=network_utils diff --git a/taskflow/openstack/common/gettextutils.py b/taskflow/openstack/common/gettextutils.py index 8aaa24ea..ad9dd71b 100644 --- a/taskflow/openstack/common/gettextutils.py +++ b/taskflow/openstack/common/gettextutils.py @@ -32,24 +32,113 @@ import os from babel import localedata import six -_localedir = os.environ.get('taskflow'.upper() + '_LOCALEDIR') -_t = gettext.translation('taskflow', localedir=_localedir, fallback=True) - -# We use separate translation catalogs for each log level, so set up a -# mapping between the log level name and the translator. The domain -# for the log level is project_name + "-log-" + log_level so messages -# for each level end up in their own catalog. -_t_log_levels = dict( - (level, gettext.translation('taskflow' + '-log-' + level, - localedir=_localedir, - fallback=True)) - for level in ['info', 'warning', 'error', 'critical'] -) - _AVAILABLE_LANGUAGES = {} + +# FIXME(dhellmann): Remove this when moving to oslo.i18n. USE_LAZY = False +class TranslatorFactory(object): + """Create translator functions + """ + + def __init__(self, domain, lazy=False, localedir=None): + """Establish a set of translation functions for the domain. + + :param domain: Name of translation domain, + specifying a message catalog. 
+ :type domain: str + :param lazy: Delays translation until a message is emitted. + Defaults to False. + :type lazy: Boolean + :param localedir: Directory with translation catalogs. + :type localedir: str + """ + self.domain = domain + self.lazy = lazy + if localedir is None: + localedir = os.environ.get(domain.upper() + '_LOCALEDIR') + self.localedir = localedir + + def _make_translation_func(self, domain=None): + """Return a new translation function ready for use. + + Takes into account whether or not lazy translation is being + done. + + The domain can be specified to override the default from the + factory, but the localedir from the factory is always used + because we assume the log-level translation catalogs are + installed in the same directory as the main application + catalog. + + """ + if domain is None: + domain = self.domain + if self.lazy: + return functools.partial(Message, domain=domain) + t = gettext.translation( + domain, + localedir=self.localedir, + fallback=True, + ) + if six.PY3: + return t.gettext + return t.ugettext + + @property + def primary(self): + "The default translation function." + return self._make_translation_func() + + def _make_log_translation_func(self, level): + return self._make_translation_func(self.domain + '-log-' + level) + + @property + def log_info(self): + "Translate info-level log messages." + return self._make_log_translation_func('info') + + @property + def log_warning(self): + "Translate warning-level log messages." + return self._make_log_translation_func('warning') + + @property + def log_error(self): + "Translate error-level log messages." + return self._make_log_translation_func('error') + + @property + def log_critical(self): + "Translate critical-level log messages." + return self._make_log_translation_func('critical') + + +# NOTE(dhellmann): When this module moves out of the incubator into +# oslo.i18n, these global variables can be moved to an integration +# module within each application. 
+ +# Create the global translation functions. +_translators = TranslatorFactory('taskflow') + +# The primary translation function using the well-known name "_" +_ = _translators.primary + +# Translators for log levels. +# +# The abbreviated names are meant to reflect the usual use of a short +# name like '_'. The "L" is for "log" and the other letter comes from +# the level. +_LI = _translators.log_info +_LW = _translators.log_warning +_LE = _translators.log_error +_LC = _translators.log_critical + +# NOTE(dhellmann): End of globals that will move to the application's +# integration module. + + def enable_lazy(): """Convenience function for configuring _() to use lazy gettext @@ -58,41 +147,18 @@ def enable_lazy(): your project is importing _ directly instead of using the gettextutils.install() way of importing the _ function. """ - global USE_LAZY + # FIXME(dhellmann): This function will be removed in oslo.i18n, + # because the TranslatorFactory makes it superfluous. + global _, _LI, _LW, _LE, _LC, USE_LAZY + tf = TranslatorFactory('taskflow', lazy=True) + _ = tf.primary + _LI = tf.log_info + _LW = tf.log_warning + _LE = tf.log_error + _LC = tf.log_critical USE_LAZY = True -def _(msg): - if USE_LAZY: - return Message(msg, domain='taskflow') - else: - if six.PY3: - return _t.gettext(msg) - return _t.ugettext(msg) - - -def _log_translation(msg, level): - """Build a single translation of a log message - """ - if USE_LAZY: - return Message(msg, domain='taskflow' + '-log-' + level) - else: - translator = _t_log_levels[level] - if six.PY3: - return translator.gettext(msg) - return translator.ugettext(msg) - -# Translators for log levels. -# -# The abbreviated names are meant to reflect the usual use of a short -# name like '_'. The "L" is for "log" and the other letter comes from -# the level. 
-_LI = functools.partial(_log_translation, level='info') -_LW = functools.partial(_log_translation, level='warning') -_LE = functools.partial(_log_translation, level='error') -_LC = functools.partial(_log_translation, level='critical') - - def install(domain, lazy=False): """Install a _() function using the given translation domain. @@ -112,26 +178,9 @@ def install(domain, lazy=False): any available locale. """ if lazy: - # NOTE(mrodden): Lazy gettext functionality. - # - # The following introduces a deferred way to do translations on - # messages in OpenStack. We override the standard _() function - # and % (format string) operation to build Message objects that can - # later be translated when we have more information. - def _lazy_gettext(msg): - """Create and return a Message object. - - Lazy gettext function for a given domain, it is a factory method - for a project/module to get a lazy gettext function for its own - translation domain (i.e. nova, glance, cinder, etc.) - - Message encapsulates a string so that we can translate - it later when needed. - """ - return Message(msg, domain=domain) - from six import moves - moves.builtins.__dict__['_'] = _lazy_gettext + tf = TranslatorFactory(domain, lazy=True) + moves.builtins.__dict__['_'] = tf.primary else: localedir = '%s_LOCALEDIR' % domain.upper() if six.PY3: @@ -274,13 +323,14 @@ class Message(six.text_type): def __radd__(self, other): return self.__add__(other) - def __str__(self): - # NOTE(luisg): Logging in python 2.6 tries to str() log records, - # and it expects specifically a UnicodeError in order to proceed. - msg = _('Message objects do not support str() because they may ' - 'contain non-ascii characters. ' - 'Please use unicode() or translate() instead.') - raise UnicodeError(msg) + if six.PY2: + def __str__(self): + # NOTE(luisg): Logging in python 2.6 tries to str() log records, + # and it expects specifically a UnicodeError in order to proceed. 
+ msg = _('Message objects do not support str() because they may ' + 'contain non-ascii characters. ' + 'Please use unicode() or translate() instead.') + raise UnicodeError(msg) def get_available_languages(domain): diff --git a/taskflow/openstack/common/jsonutils.py b/taskflow/openstack/common/jsonutils.py index 80b85221..e3855ab1 100644 --- a/taskflow/openstack/common/jsonutils.py +++ b/taskflow/openstack/common/jsonutils.py @@ -31,6 +31,7 @@ This module provides a few things: ''' +import codecs import datetime import functools import inspect @@ -52,6 +53,7 @@ import six.moves.xmlrpc_client as xmlrpclib from taskflow.openstack.common import gettextutils from taskflow.openstack.common import importutils +from taskflow.openstack.common import strutils from taskflow.openstack.common import timeutils netaddr = importutils.try_import("netaddr") @@ -166,12 +168,12 @@ def dumps(value, default=to_primitive, **kwargs): return json.dumps(value, default=default, **kwargs) -def loads(s): - return json.loads(s) +def loads(s, encoding='utf-8'): + return json.loads(strutils.safe_decode(s, encoding)) -def load(s): - return json.load(s) +def load(fp, encoding='utf-8'): + return json.load(codecs.getreader(encoding)(fp)) try: diff --git a/taskflow/openstack/common/strutils.py b/taskflow/openstack/common/strutils.py new file mode 100644 index 00000000..0c8c6e1f --- /dev/null +++ b/taskflow/openstack/common/strutils.py @@ -0,0 +1,239 @@ +# Copyright 2011 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +System-level utilities and helper functions. +""" + +import math +import re +import sys +import unicodedata + +import six + +from taskflow.openstack.common.gettextutils import _ + + +UNIT_PREFIX_EXPONENT = { + 'k': 1, + 'K': 1, + 'Ki': 1, + 'M': 2, + 'Mi': 2, + 'G': 3, + 'Gi': 3, + 'T': 4, + 'Ti': 4, +} +UNIT_SYSTEM_INFO = { + 'IEC': (1024, re.compile(r'(^[-+]?\d*\.?\d+)([KMGT]i?)?(b|bit|B)$')), + 'SI': (1000, re.compile(r'(^[-+]?\d*\.?\d+)([kMGT])?(b|bit|B)$')), +} + +TRUE_STRINGS = ('1', 't', 'true', 'on', 'y', 'yes') +FALSE_STRINGS = ('0', 'f', 'false', 'off', 'n', 'no') + +SLUGIFY_STRIP_RE = re.compile(r"[^\w\s-]") +SLUGIFY_HYPHENATE_RE = re.compile(r"[-\s]+") + + +def int_from_bool_as_string(subject): + """Interpret a string as a boolean and return either 1 or 0. + + Any string value in: + + ('True', 'true', 'On', 'on', '1') + + is interpreted as a boolean True. + + Useful for JSON-decoded stuff and config file parsing + """ + return bool_from_string(subject) and 1 or 0 + + +def bool_from_string(subject, strict=False, default=False): + """Interpret a string as a boolean. + + A case-insensitive match is performed such that strings matching 't', + 'true', 'on', 'y', 'yes', or '1' are considered True and, when + `strict=False`, anything else returns the value specified by 'default'. + + Useful for JSON-decoded stuff and config file parsing. + + If `strict=True`, unrecognized values, including None, will raise a + ValueError which is useful when parsing values passed in from an API call. + Strings yielding False are 'f', 'false', 'off', 'n', 'no', or '0'. 
+ """ + if not isinstance(subject, six.string_types): + subject = six.text_type(subject) + + lowered = subject.strip().lower() + + if lowered in TRUE_STRINGS: + return True + elif lowered in FALSE_STRINGS: + return False + elif strict: + acceptable = ', '.join( + "'%s'" % s for s in sorted(TRUE_STRINGS + FALSE_STRINGS)) + msg = _("Unrecognized value '%(val)s', acceptable values are:" + " %(acceptable)s") % {'val': subject, + 'acceptable': acceptable} + raise ValueError(msg) + else: + return default + + +def safe_decode(text, incoming=None, errors='strict'): + """Decodes incoming text/bytes string using `incoming` if they're not + already unicode. + + :param incoming: Text's current encoding + :param errors: Errors handling policy. See here for valid + values http://docs.python.org/2/library/codecs.html + :returns: text or a unicode `incoming` encoded + representation of it. + :raises TypeError: If text is not an instance of str + """ + if not isinstance(text, (six.string_types, six.binary_type)): + raise TypeError("%s can't be decoded" % type(text)) + + if isinstance(text, six.text_type): + return text + + if not incoming: + incoming = (sys.stdin.encoding or + sys.getdefaultencoding()) + + try: + return text.decode(incoming, errors) + except UnicodeDecodeError: + # Note(flaper87) If we get here, it means that + # sys.stdin.encoding / sys.getdefaultencoding + # didn't return a suitable encoding to decode + # text. This happens mostly when global LANG + # var is not set correctly and there's no + # default encoding. In this case, most likely + # python will use ASCII or ANSI encoders as + # default encodings but they won't be capable + # of decoding non-ASCII characters. + # + # Also, UTF-8 is being used since it's an ASCII + # extension. + return text.decode('utf-8', errors) + + +def safe_encode(text, incoming=None, + encoding='utf-8', errors='strict'): + """Encodes incoming text/bytes string using `encoding`. 
+ + If incoming is not specified, text is expected to be encoded with + current python's default encoding. (`sys.getdefaultencoding`) + + :param incoming: Text's current encoding + :param encoding: Expected encoding for text (Default UTF-8) + :param errors: Errors handling policy. See here for valid + values http://docs.python.org/2/library/codecs.html + :returns: text or a bytestring `encoding` encoded + representation of it. + :raises TypeError: If text is not an instance of str + """ + if not isinstance(text, (six.string_types, six.binary_type)): + raise TypeError("%s can't be encoded" % type(text)) + + if not incoming: + incoming = (sys.stdin.encoding or + sys.getdefaultencoding()) + + if isinstance(text, six.text_type): + return text.encode(encoding, errors) + elif text and encoding != incoming: + # Decode text before encoding it with `encoding` + text = safe_decode(text, incoming, errors) + return text.encode(encoding, errors) + else: + return text + + +def string_to_bytes(text, unit_system='IEC', return_int=False): + """Converts a string into an float representation of bytes. + + The units supported for IEC :: + + Kb(it), Kib(it), Mb(it), Mib(it), Gb(it), Gib(it), Tb(it), Tib(it) + KB, KiB, MB, MiB, GB, GiB, TB, TiB + + The units supported for SI :: + + kb(it), Mb(it), Gb(it), Tb(it) + kB, MB, GB, TB + + Note that the SI unit system does not support capital letter 'K' + + :param text: String input for bytes size conversion. + :param unit_system: Unit system for byte size conversion. + :param return_int: If True, returns integer representation of text + in bytes. (default: decimal) + :returns: Numerical representation of text in bytes. + :raises ValueError: If text has an invalid value. 
+ + """ + try: + base, reg_ex = UNIT_SYSTEM_INFO[unit_system] + except KeyError: + msg = _('Invalid unit system: "%s"') % unit_system + raise ValueError(msg) + match = reg_ex.match(text) + if match: + magnitude = float(match.group(1)) + unit_prefix = match.group(2) + if match.group(3) in ['b', 'bit']: + magnitude /= 8 + else: + msg = _('Invalid string format: %s') % text + raise ValueError(msg) + if not unit_prefix: + res = magnitude + else: + res = magnitude * pow(base, UNIT_PREFIX_EXPONENT[unit_prefix]) + if return_int: + return int(math.ceil(res)) + return res + + +def to_slug(value, incoming=None, errors="strict"): + """Normalize string. + + Convert to lowercase, remove non-word characters, and convert spaces + to hyphens. + + Inspired by Django's `slugify` filter. + + :param value: Text to slugify + :param incoming: Text's current encoding + :param errors: Errors handling policy. See here for valid + values http://docs.python.org/2/library/codecs.html + :returns: slugified unicode representation of `value` + :raises TypeError: If text is not an instance of str + """ + value = safe_decode(value, incoming, errors) + # NOTE(aababilov): no need to use safe_(encode|decode) here: + # encodings are always "ascii", error handling is always "ignore" + # and types are always known (first: unicode; second: str) + value = unicodedata.normalize("NFKD", value).encode( + "ascii", "ignore").decode("ascii") + value = SLUGIFY_STRIP_RE.sub("", value).strip().lower() + return SLUGIFY_HYPHENATE_RE.sub("-", value) From 46fc1dd9ee0ad40d3f9bf4c589061566d745294a Mon Sep 17 00:00:00 2001 From: Greg Hill Date: Thu, 22 May 2014 17:23:57 -0500 Subject: [PATCH 090/188] add the ability to inject arguments into tasks at task creation Similar to the rebind functionality that lets you rename parameters from those in the store, inject lets you inject arbitrary key/value pairs that will be sent to your task at task creation time. 
This allows for flow and flow factories to reuse tasks with differing parameters without jumping through a lot of hoops. Change-Id: If167962811d22054b89d7d35a33d4ec5cb2cd648 Implements: blueprint wbe-workers-endpoints-constructors --- taskflow/atom.py | 9 ++++-- taskflow/engines/action_engine/engine.py | 3 ++ .../engines/action_engine/retry_action.py | 3 +- taskflow/engines/action_engine/task_action.py | 6 ++-- taskflow/storage.py | 19 ++++++++++-- taskflow/task.py | 13 ++++---- taskflow/tests/unit/test_arguments_passing.py | 30 +++++++++++++++++++ 7 files changed, 69 insertions(+), 14 deletions(-) diff --git a/taskflow/atom.py b/taskflow/atom.py index d4840e50..e3ed8b34 100644 --- a/taskflow/atom.py +++ b/taskflow/atom.py @@ -88,6 +88,7 @@ def _build_arg_mapping(task_name, reqs, rebind_args, function, do_infer, for arg in ignore_list: if arg in task_args: task_args.remove(arg) + result = {} if reqs: result.update((a, a) for a in reqs) @@ -135,10 +136,11 @@ class Atom(object): of this atom). """ - def __init__(self, name=None, provides=None): + def __init__(self, name=None, provides=None, inject=None): self._name = name self.save_as = _save_as_to_mapping(provides) self.version = (1, 0) + self.inject = inject def _build_arg_mapping(self, executor, requires=None, rebind=None, auto_extract=True, ignore_list=None): @@ -180,4 +182,7 @@ class Atom(object): requires and what it produces (since this would be an impossible dependency to satisfy). 
""" - return set(self.rebind.values()) + requires = set(self.rebind.values()) + if self.inject: + requires = requires - set(six.iterkeys(self.inject)) + return requires diff --git a/taskflow/engines/action_engine/engine.py b/taskflow/engines/action_engine/engine.py index eecba801..3291024a 100644 --- a/taskflow/engines/action_engine/engine.py +++ b/taskflow/engines/action_engine/engine.py @@ -172,6 +172,9 @@ class ActionEngine(base.EngineBase): self.storage.ensure_retry(node.name, version, node.save_as) else: self.storage.ensure_task(node.name, version, node.save_as) + if node.inject: + self.storage.inject_task_args(node.name, node.inject) + self._change_state(states.SUSPENDED) # does nothing in PENDING state @lock_utils.locked diff --git a/taskflow/engines/action_engine/retry_action.py b/taskflow/engines/action_engine/retry_action.py index a860f698..eaedf04b 100644 --- a/taskflow/engines/action_engine/retry_action.py +++ b/taskflow/engines/action_engine/retry_action.py @@ -33,7 +33,8 @@ class RetryAction(object): self._notifier = notifier def _get_retry_args(self, retry): - kwargs = self._storage.fetch_mapped_args(retry.rebind) + kwargs = self._storage.fetch_mapped_args(retry.rebind, + task_name=retry.name) kwargs['history'] = self._storage.get_retry_history(retry.name) return kwargs diff --git a/taskflow/engines/action_engine/task_action.py b/taskflow/engines/action_engine/task_action.py index 32c0a179..9ab8c460 100644 --- a/taskflow/engines/action_engine/task_action.py +++ b/taskflow/engines/action_engine/task_action.py @@ -65,7 +65,8 @@ class TaskAction(object): if not self.change_state(task, states.RUNNING, progress=0.0): raise exceptions.InvalidState("Task %s is in invalid state and" " can't be executed" % task.name) - kwargs = self._storage.fetch_mapped_args(task.rebind) + kwargs = self._storage.fetch_mapped_args(task.rebind, + task_name=task.name) task_uuid = self._storage.get_atom_uuid(task.name) return self._task_executor.execute_task(task, task_uuid, 
kwargs, self._on_update_progress) @@ -81,7 +82,8 @@ class TaskAction(object): if not self.change_state(task, states.REVERTING, progress=0.0): raise exceptions.InvalidState("Task %s is in invalid state and" " can't be reverted" % task.name) - kwargs = self._storage.fetch_mapped_args(task.rebind) + kwargs = self._storage.fetch_mapped_args(task.rebind, + task_name=task.name) task_uuid = self._storage.get_atom_uuid(task.name) task_result = self._storage.get(task.name) failures = self._storage.get_failures() diff --git a/taskflow/storage.py b/taskflow/storage.py index 35ba7d0e..e3a208a4 100644 --- a/taskflow/storage.py +++ b/taskflow/storage.py @@ -52,6 +52,7 @@ class Storage(object): self._flowdetail = flow_detail self._lock = self._lock_cls() self._transients = {} + self._injected_args = {} # NOTE(imelnikov): failure serialization looses information, # so we cache failures here, in atom name -> failure mapping. @@ -410,6 +411,10 @@ class Storage(object): if self._reset_atom(ad, state): self._with_connection(self._save_atom_detail, ad) + def inject_task_args(self, task_name, injected_args): + self._injected_args.setdefault(task_name, {}) + self._injected_args[task_name].update(injected_args) + def inject(self, pairs, transient=False): """Add values into storage. 
@@ -516,11 +521,19 @@ class Storage(object): pass return results - def fetch_mapped_args(self, args_mapping): + def fetch_mapped_args(self, args_mapping, task_name=None): """Fetch arguments for an atom using an atoms arguments mapping.""" with self._lock.read_lock(): - return dict((key, self.fetch(name)) - for key, name in six.iteritems(args_mapping)) + injected_args = {} + if task_name: + injected_args = self._injected_args.get(task_name, {}) + mapped_args = {} + for key, name in six.iteritems(args_mapping): + if name in injected_args: + mapped_args[key] = injected_args[name] + else: + mapped_args[key] = self.fetch(name) + return mapped_args def set_flow_state(self, state): """Set flow details state and save it.""" diff --git a/taskflow/task.py b/taskflow/task.py index 9f68710d..e66b435c 100644 --- a/taskflow/task.py +++ b/taskflow/task.py @@ -36,10 +36,10 @@ class BaseTask(atom.Atom): TASK_EVENTS = ('update_progress', ) - def __init__(self, name, provides=None): + def __init__(self, name, provides=None, inject=None): if name is None: name = reflection.get_class_name(self) - super(BaseTask, self).__init__(name, provides) + super(BaseTask, self).__init__(name, provides, inject=inject) # Map of events => lists of callbacks to invoke on task events. 
self._events_listeners = collections.defaultdict(list) @@ -172,11 +172,11 @@ class Task(BaseTask): default_provides = None def __init__(self, name=None, provides=None, requires=None, - auto_extract=True, rebind=None): + auto_extract=True, rebind=None, inject=None): """Initialize task instance.""" if provides is None: provides = self.default_provides - super(Task, self).__init__(name, provides=provides) + super(Task, self).__init__(name, provides=provides, inject=inject) self._build_arg_mapping(self.execute, requires, rebind, auto_extract) @@ -188,7 +188,7 @@ class FunctorTask(BaseTask): def __init__(self, execute, name=None, provides=None, requires=None, auto_extract=True, rebind=None, revert=None, - version=None): + version=None, inject=None): assert six.callable(execute), ("Function to use for executing must be" " callable") if revert: @@ -196,7 +196,8 @@ class FunctorTask(BaseTask): " be callable") if name is None: name = reflection.get_callable_name(execute) - super(FunctorTask, self).__init__(name, provides=provides) + super(FunctorTask, self).__init__(name, provides=provides, + inject=inject) self._execute = execute self._revert = revert if version is not None: diff --git a/taskflow/tests/unit/test_arguments_passing.py b/taskflow/tests/unit/test_arguments_passing.py index 4e8d5bb6..0281c1ff 100644 --- a/taskflow/tests/unit/test_arguments_passing.py +++ b/taskflow/tests/unit/test_arguments_passing.py @@ -90,6 +90,36 @@ class ArgumentsPassingTest(utils.EngineTestBase): 'result': 30, }) + def test_argument_injection(self): + flow = utils.TaskMultiArgOneReturn(provides='result', + inject={'x': 1, 'y': 4, 'z': 9}) + engine = self._make_engine(flow) + engine.run() + self.assertEqual(engine.storage.fetch_all(), { + 'result': 14, + }) + + def test_argument_injection_rebind(self): + flow = utils.TaskMultiArgOneReturn(provides='result', + rebind=['a', 'b', 'c'], + inject={'a': 1, 'b': 4, 'c': 9}) + engine = self._make_engine(flow) + engine.run() + 
self.assertEqual(engine.storage.fetch_all(), { + 'result': 14, + }) + + def test_argument_injection_required(self): + flow = utils.TaskMultiArgOneReturn(provides='result', + requires=['a', 'b', 'c'], + inject={'x': 1, 'y': 4, 'z': 9, + 'a': 0, 'b': 0, 'c': 0}) + engine = self._make_engine(flow) + engine.run() + self.assertEqual(engine.storage.fetch_all(), { + 'result': 14, + }) + def test_all_arguments_mapping(self): flow = utils.TaskMultiArgOneReturn(provides='result', rebind=['a', 'b', 'c']) From c386a5f9d448d7318a3d89e709e20f6959ce9110 Mon Sep 17 00:00:00 2001 From: Dan Krause Date: Fri, 16 May 2014 09:38:55 -0500 Subject: [PATCH 091/188] Adds a single threaded flow conductor Creates a new conductor module that can be used to connect into the jobboard, engine, and persistence mechanism. This commit adds in support for a simple conductor that will run jobs in its own thread and will dispatch them, and consume/abandon them. Implements: blueprint generic-flow-conductor Change-Id: Ic610bc825506db57b0c4364b0fc588b51d453a76 --- taskflow/conductors/__init__.py | 0 taskflow/conductors/base.py | 90 +++++++++++ taskflow/conductors/single_threaded.py | 150 +++++++++++++++++ taskflow/exceptions.py | 6 + taskflow/jobs/job.py | 5 + taskflow/jobs/jobboard.py | 4 + taskflow/tests/unit/conductor/__init__.py | 0 .../tests/unit/conductor/test_conductor.py | 153 ++++++++++++++++++ 8 files changed, 408 insertions(+) create mode 100644 taskflow/conductors/__init__.py create mode 100644 taskflow/conductors/base.py create mode 100644 taskflow/conductors/single_threaded.py create mode 100644 taskflow/tests/unit/conductor/__init__.py create mode 100644 taskflow/tests/unit/conductor/test_conductor.py diff --git a/taskflow/conductors/__init__.py b/taskflow/conductors/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/taskflow/conductors/base.py b/taskflow/conductors/base.py new file mode 100644 index 00000000..634c5dec --- /dev/null +++ b/taskflow/conductors/base.py @@ 
-0,0 +1,90 @@ +# -*- coding: utf-8 -*- + +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import abc +import threading + +import six + +import taskflow.engines +from taskflow import exceptions as excp +from taskflow.utils import lock_utils + + +@six.add_metaclass(abc.ABCMeta) +class Conductor(object): + """Conductors act as entities which extract jobs from a jobboard, assign + there work to some engine (using some desired configuration) and then wait + for that work to complete. If the work fails then they abandon the claimed + work (or if the process they are running in crashes or dies this + abandonment happens automatically) and then another conductor at a later + period of time will finish up the prior failed conductors work. 
+ """ + + def __init__(self, name, jobboard, engine_conf, persistence): + self._name = name + self._jobboard = jobboard + self._engine_conf = engine_conf + self._persistence = persistence + self._lock = threading.RLock() + + def _engine_from_job(self, job): + try: + flow_uuid = job.details["flow_uuid"] + except (KeyError, TypeError): + raise excp.NotFound("No flow detail uuid found in job") + else: + try: + flow_detail = job.book.find(flow_uuid) + except (TypeError, AttributeError): + flow_detail = None + if flow_detail is None: + raise excp.NotFound("No matching flow detail found in" + " job for flow detail uuid %s" % flow_uuid) + try: + store = dict(job.details["store"]) + except (KeyError, TypeError): + store = {} + return taskflow.engines.load_from_detail( + flow_detail, + store=store, + engine_conf=dict(self._engine_conf), + backend=self._persistence) + + @lock_utils.locked + def connect(self): + """Ensures the jobboard is connected (noop if it is already).""" + if not self._jobboard.connected: + self._jobboard.connect() + + @lock_utils.locked + def close(self): + """Closes the jobboard, disallowing further use.""" + self._jobboard.close() + + @abc.abstractmethod + def run(self): + """Continuously claims, runs, and consumes jobs, and waits for more + jobs when there are none left on the jobboard. + """ + + @abc.abstractmethod + def _dispatch_job(self, job): + """Accepts a single (already claimed) job and causes it to be run in + an engine. The job is consumed upon completion (unless False is + returned which will signify the job should be abandoned instead) + + :param job: A Job instance that has already been claimed by the + jobboard. 
+ """ diff --git a/taskflow/conductors/single_threaded.py b/taskflow/conductors/single_threaded.py new file mode 100644 index 00000000..87201107 --- /dev/null +++ b/taskflow/conductors/single_threaded.py @@ -0,0 +1,150 @@ +# -*- coding: utf-8 -*- + +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import logging +import threading + +import six + +from taskflow.conductors import base +from taskflow import exceptions as excp +from taskflow.listeners import logging as logging_listener +from taskflow.utils import lock_utils +from taskflow.utils import misc + +LOG = logging.getLogger(__name__) +WAIT_TIMEOUT = 0.5 +NO_CONSUME_EXCEPTIONS = tuple([ + excp.ExecutionFailure, + excp.StorageFailure, +]) + + +class SingleThreadedConductor(base.Conductor): + """A conductor that runs jobs in its own dispatching loop. + + This conductor iterates over jobs in the provided jobboard (waiting for + the given timeout if no jobs exist) and attempts to claim them, work on + those jobs in its local thread (blocking further work from being claimed + and consumed) and then consume those work units after completetion. This + process will repeat until the conductor has been stopped or other critical + error occurs. + + NOTE(harlowja): consumption occurs even if a engine fails to run due to + a task failure. 
This is only skipped when an execution failure or + a storage failure occurs which are *usually* correctable by re-running on + a different conductor (storage failures and execution failures may be + transient issues that can be worked around by later execution). If a job + after completing can not be consumed or abandoned the conductor relies + upon the jobboard capabilities to automatically abandon these jobs. + """ + + def __init__(self, name, jobboard, engine_conf, persistence, + wait_timeout=None): + super(SingleThreadedConductor, self).__init__(name, jobboard, + engine_conf, + persistence) + if wait_timeout is None: + wait_timeout = WAIT_TIMEOUT + if isinstance(wait_timeout, (int, float) + six.string_types): + self._wait_timeout = misc.Timeout(float(wait_timeout)) + elif isinstance(wait_timeout, misc.Timeout): + self._wait_timeout = wait_timeout + else: + raise ValueError("Invalid timeout literal: %s" % (wait_timeout)) + self._dead = threading.Event() + + @lock_utils.locked + def stop(self, timeout=None): + """Stops dispatching and returns whether the dispatcher loop is active + or whether it has ceased. If a timeout is provided the dispatcher + loop may not have ceased by the timeout reached (the request to cease + will be honored in the future). 
+ """ + self._wait_timeout.interrupt() + self._dead.wait(timeout) + return self.dispatching + + @property + def dispatching(self): + if self._dead.is_set(): + return False + return True + + def _dispatch_job(self, job): + LOG.info("Dispatching job: %s", job) + try: + engine = self._engine_from_job(job) + except Exception as e: + raise excp.ConductorFailure("Failed creating an engine", cause=e) + with logging_listener.LoggingListener(engine, log=LOG): + consume = True + try: + engine.run() + except excp.WrappedFailure as e: + if all((f.check(*NO_CONSUME_EXCEPTIONS) for f in e)): + LOG.warn("Job execution failed (consumption being" + " skipped): %s", job, exc_info=True) + consume = False + else: + LOG.warn("Job execution failed: %s", job, exc_info=True) + except NO_CONSUME_EXCEPTIONS: + LOG.warn("Job execution failed (consumption being" + " skipped): %s", job, exc_info=True) + consume = False + except Exception: + LOG.warn("Job execution failed: %s", job, exc_info=True) + else: + LOG.info("Job completed successfully: %s", job) + return consume + + def run(self): + self._dead.clear() + try: + while True: + if self._wait_timeout.is_stopped(): + break + dispatched = 0 + for job in self._jobboard.iterjobs(): + if self._wait_timeout.is_stopped(): + break + LOG.debug("Trying to claim job: %s", job) + try: + self._jobboard.claim(job, self._name) + except (excp.UnclaimableJob, excp.NotFound): + LOG.debug("Job already claimed or consumed: %s", job) + continue + dispatched += 1 + try: + consume = self._dispatch_job(job) + except excp.ConductorFailure: + LOG.warn("Job dispatching failed: %s", job, + exc_info=True) + else: + try: + if consume: + self._jobboard.consume(job, self._name) + else: + self._jobboard.abandon(job, self._name) + except excp.JobFailure: + if consume: + LOG.warn("Failed job consumption: %s", job, + exc_info=True) + else: + LOG.warn("Failed job abandonment: %s", job, + exc_info=True) + if dispatched == 0 and not self._wait_timeout.is_stopped(): + 
self._wait_timeout.wait() + finally: + self._dead.set() diff --git a/taskflow/exceptions.py b/taskflow/exceptions.py index 95e378af..f3d21099 100644 --- a/taskflow/exceptions.py +++ b/taskflow/exceptions.py @@ -61,6 +61,12 @@ class StorageFailure(TaskFlowException): """Raised when storage backends can not be read/saved/deleted.""" +# Conductor related errors. + +class ConductorFailure(TaskFlowException): + """Errors related to conducting activities.""" + + # Job related errors. class JobFailure(TaskFlowException): diff --git a/taskflow/jobs/job.py b/taskflow/jobs/job.py index a4f0b416..796e5d11 100644 --- a/taskflow/jobs/job.py +++ b/taskflow/jobs/job.py @@ -99,3 +99,8 @@ class Job(object): def name(self): """The non-uniquely identifying name of this job.""" return self._name + + def __str__(self): + """Pretty formats the job into something *more* meaningful.""" + return "%s %s (%s): %s" % (type(self).__name__, + self.name, self.uuid, self.details) diff --git a/taskflow/jobs/jobboard.py b/taskflow/jobs/jobboard.py index 40c11797..5857d554 100644 --- a/taskflow/jobs/jobboard.py +++ b/taskflow/jobs/jobboard.py @@ -154,6 +154,10 @@ class JobBoard(object): this must be the same name that was used for claiming this job. """ + @abc.abstractproperty + def connected(self): + """Returns if this jobboard is connected.""" + @abc.abstractmethod def connect(self): """Opens the connection to any backend system.""" diff --git a/taskflow/tests/unit/conductor/__init__.py b/taskflow/tests/unit/conductor/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/taskflow/tests/unit/conductor/test_conductor.py b/taskflow/tests/unit/conductor/test_conductor.py new file mode 100644 index 00000000..7ac75d91 --- /dev/null +++ b/taskflow/tests/unit/conductor/test_conductor.py @@ -0,0 +1,153 @@ +# -*- coding: utf-8 -*- + +# Copyright (C) 2014 Yahoo! Inc. All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import contextlib +import threading + +from zake import fake_client + +from taskflow.conductors import single_threaded as stc +from taskflow import engines +from taskflow.jobs.backends import impl_zookeeper +from taskflow.jobs import jobboard +from taskflow.patterns import linear_flow as lf +from taskflow.persistence.backends import impl_memory +from taskflow import states as st +from taskflow import test +from taskflow.tests import utils as test_utils +from taskflow.utils import misc +from taskflow.utils import persistence_utils as pu + + +@contextlib.contextmanager +def close_many(*closeables): + try: + yield + finally: + for c in closeables: + c.close() + + +def test_factory(blowup): + f = lf.Flow("test") + if not blowup: + f.add(test_utils.SaveOrderTask('test1')) + else: + f.add(test_utils.FailingTask("test1")) + return f + + +def make_thread(conductor): + t = threading.Thread(target=conductor.run) + t.daemon = True + return t + + +class SingleThreadedConductorTest(test_utils.EngineTestBase, test.TestCase): + def make_components(self, name='testing', wait_timeout=0.1): + client = fake_client.FakeClient() + persistence = impl_memory.MemoryBackend() + board = impl_zookeeper.ZookeeperJobBoard(name, {}, + client=client, + persistence=persistence) + engine_conf = { + 'engine': 'default', + } + conductor = stc.SingleThreadedConductor(name, board, engine_conf, + persistence, wait_timeout) + return 
misc.AttrDict(board=board, + client=client, + persistence=persistence, + conductor=conductor) + + def test_connection(self): + components = self.make_components() + components.conductor.connect() + with close_many(components.conductor, components.client): + self.assertTrue(components.board.connected) + self.assertTrue(components.client.connected) + self.assertFalse(components.board.connected) + self.assertFalse(components.client.connected) + + def test_run_empty(self): + components = self.make_components() + components.conductor.connect() + with close_many(components.conductor, components.client): + t = make_thread(components.conductor) + t.start() + self.assertFalse(components.conductor.stop(0.5)) + t.join() + + def test_run(self): + components = self.make_components() + components.conductor.connect() + consumed_event = threading.Event() + + def on_consume(state, details): + consumed_event.set() + + components.board.notifier.register(jobboard.REMOVAL, on_consume) + with close_many(components.conductor, components.client): + t = make_thread(components.conductor) + t.start() + lb, fd = pu.temporary_flow_detail(components.persistence) + engines.save_factory_details(fd, test_factory, + [False], {}, + backend=components.persistence) + components.board.post('poke', lb, + details={'flow_uuid': fd.uuid}) + consumed_event.wait(1.0) + self.assertTrue(consumed_event.is_set()) + components.conductor.stop(1.0) + self.assertFalse(components.conductor.dispatching) + + persistence = components.persistence + with contextlib.closing(persistence.get_connection()) as conn: + lb = conn.get_logbook(lb.uuid) + fd = lb.find(fd.uuid) + self.assertIsNotNone(fd) + self.assertEqual(st.SUCCESS, fd.state) + + def test_fail_run(self): + components = self.make_components() + components.conductor.connect() + + consumed_event = threading.Event() + + def on_consume(state, details): + consumed_event.set() + + components.board.notifier.register(jobboard.REMOVAL, on_consume) + with 
close_many(components.conductor, components.client): + t = make_thread(components.conductor) + t.start() + lb, fd = pu.temporary_flow_detail(components.persistence) + engines.save_factory_details(fd, test_factory, + [True], {}, + backend=components.persistence) + components.board.post('poke', lb, + details={'flow_uuid': fd.uuid}) + consumed_event.wait(1.0) + self.assertTrue(consumed_event.is_set()) + components.conductor.stop(1.0) + self.assertFalse(components.conductor.dispatching) + + persistence = components.persistence + with contextlib.closing(persistence.get_connection()) as conn: + lb = conn.get_logbook(lb.uuid) + fd = lb.find(fd.uuid) + self.assertIsNotNone(fd) + self.assertEqual(st.REVERTED, fd.state) From d099f82de7239bc88614799df303a676c3f45f30 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Wed, 28 May 2014 14:05:07 -0700 Subject: [PATCH 092/188] Use a name property setter instead of a set_name method Change-Id: I4a41204e16e636e12d649592ae909d45fdb33f9f --- taskflow/flow.py | 2 +- taskflow/retry.py | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/taskflow/flow.py b/taskflow/flow.py index 26d2dcfa..0fb94338 100644 --- a/taskflow/flow.py +++ b/taskflow/flow.py @@ -46,7 +46,7 @@ class Flow(object): # NOTE(akarpinska): if retry doesn't have a name, # the name of its owner will be assigned if self._retry and self._retry.name is None: - self._retry.set_name(self.name + "_retry") + self._retry.name = self.name + "_retry" @property def name(self): diff --git a/taskflow/retry.py b/taskflow/retry.py index 9384d873..b02279b3 100644 --- a/taskflow/retry.py +++ b/taskflow/retry.py @@ -54,7 +54,8 @@ class Retry(atom.Atom): def name(self): return self._name - def set_name(self, name): + @name.setter + def name(self, name): self._name = name @abc.abstractmethod From cb8564c9714a88509e72acfc09368bf193bbcbad Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Wed, 28 May 2014 14:44:41 -0700 Subject: [PATCH 093/188] Factor out the on_failure to a 
mixin type To allow for failures to be handled in different ways extract out the on_failure definition of a retry object to allow it to be its own mixin or base class for resolvers that can only provide a on_failure method. Change-Id: Ief422bb57316b913897ca65496144edcfdd1a948 --- .../engines/action_engine/graph_action.py | 20 +++---- taskflow/retry.py | 52 ++++++++++++------- 2 files changed, 43 insertions(+), 29 deletions(-) diff --git a/taskflow/engines/action_engine/graph_action.py b/taskflow/engines/action_engine/graph_action.py index d0ed8bbb..c7bad3b7 100644 --- a/taskflow/engines/action_engine/graph_action.py +++ b/taskflow/engines/action_engine/graph_action.py @@ -16,9 +16,9 @@ from taskflow.engines.action_engine import executor as ex from taskflow import exceptions as excp -from taskflow import retry as r +from taskflow import retry as retry_atom from taskflow import states as st -from taskflow import task +from taskflow import task as task_atom from taskflow.utils import misc @@ -49,9 +49,9 @@ class FutureGraphAction(object): def _schedule_node(self, node): """Schedule a single node for execution.""" - if isinstance(node, task.BaseTask): + if isinstance(node, task_atom.BaseTask): return self._schedule_task(node) - elif isinstance(node, r.Retry): + elif isinstance(node, retry_atom.Retry): return self._schedule_retry(node) else: raise TypeError("Unknown how to schedule node %s" % node) @@ -108,7 +108,7 @@ class FutureGraphAction(object): for future in done: try: node, event, result = future.result() - if isinstance(node, task.BaseTask): + if isinstance(node, task_atom.BaseTask): self._complete_task(node, event, result) if isinstance(result, misc.Failure): if event == ex.EXECUTED: @@ -185,15 +185,15 @@ class FutureGraphAction(object): if retry: # Ask retry controller what to do in case of failure action = self._retry_action.on_failure(retry, atom, failure) - if action == r.RETRY: + if action == retry_atom.RETRY: # Prepare subflow for revert 
self._storage.set_atom_intention(retry.name, st.RETRY) for node in self._analyzer.iterate_subgraph(retry): self._storage.set_atom_intention(node.name, st.REVERT) - elif action == r.REVERT: + elif action == retry_atom.REVERT: # Ask parent checkpoint self._process_atom_failure(retry, failure) - elif action == r.REVERT_ALL: + elif action == retry_atom.REVERT_ALL: # Prepare all flow for revert self._revert_all() else: @@ -217,9 +217,9 @@ class FutureGraphAction(object): def _reset_nodes(self, nodes_iter, intention=st.EXECUTE): for node in nodes_iter: - if isinstance(node, task.BaseTask): + if isinstance(node, task_atom.BaseTask): self._task_action.change_state(node, st.PENDING, progress=0.0) - elif isinstance(node, r.Retry): + elif isinstance(node, retry_atom.Retry): self._retry_action.change_state(node, st.PENDING) else: raise TypeError("Unknown how to reset node %s" % node) diff --git a/taskflow/retry.py b/taskflow/retry.py index b02279b3..b3d435b3 100644 --- a/taskflow/retry.py +++ b/taskflow/retry.py @@ -26,18 +26,45 @@ from taskflow.utils import misc LOG = logging.getLogger(__name__) -# Retry actions +# Decision results. REVERT = "REVERT" REVERT_ALL = "REVERT_ALL" RETRY = "RETRY" @six.add_metaclass(abc.ABCMeta) -class Retry(atom.Atom): - """A base class for retry that controls subflow execution. - Retry can be executed multiple times and reverted. On subflow - failure it makes a decision about what should be done with the flow - (retry, revert to the previous retry, revert the whole flow, etc.). +class Decider(object): + """A base class or mixin for an object that can decide how to resolve + execution failures. + + A decider may be executed multiple times on subflow or other atom + failure and it is expected to make a decision about what should be done + to resolve the failure (retry, revert to the previous retry, revert + the whole flow, etc.). 
+ """ + + @abc.abstractmethod + def on_failure(self, history, *args, **kwargs): + """On subflow failure makes a decision about the future flow + execution using information about prior previous failures (if this + historical failure information is not available or was not persisted + this history will be empty). + + Returns retry action constant: + * 'RETRY' when subflow must be reverted and restarted again (maybe + with new parameters). + * 'REVERT' when this subflow must be completely reverted and parent + subflow should make a decision about the flow execution. + * 'REVERT_ALL' in a case when the whole flow must be reverted and + marked as FAILURE. + """ + + +@six.add_metaclass(abc.ABCMeta) +class Retry(atom.Atom, Decider): + """A base class for a retry object that decides how to resolve subflow + execution failures and may also provide execute and revert methods to alter + the inputs of subflow atoms. """ default_provides = None @@ -78,19 +105,6 @@ class Retry(atom.Atom): all subflow's tasks will be reverted before the retry. """ - @abc.abstractmethod - def on_failure(self, history, *args, **kwargs): - """On subflow failure makes a decision about the future flow - execution using information about all previous failures. - Returns retry action constant: - 'RETRY' when subflow must be reverted and restarted again (maybe - with new parameters). - 'REVERT' when this subflow must be completely reverted and parent - subflow should make a decision about the flow execution. - 'REVERT_ALL' in a case when the whole flow must be reverted and - marked as FAILURE. 
- """ - class AlwaysRevert(Retry): """Retry that always reverts subflow.""" From 29b07d2e5711f8c5565a1ec317af6596ee1e431d Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Wed, 28 May 2014 20:31:32 -0700 Subject: [PATCH 094/188] Allow indent text to be passed in Instead of forcing single space indenting allow the indenting string to be passed in so that others can provided their favorite indent (tabs, two spaces or other...) Change-Id: I2dfb3baeaecb8bed197429b591daed69eb8cc834 --- taskflow/exceptions.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/taskflow/exceptions.py b/taskflow/exceptions.py index 95e378af..2aff08aa 100644 --- a/taskflow/exceptions.py +++ b/taskflow/exceptions.py @@ -34,7 +34,7 @@ class TaskFlowException(Exception): def cause(self): return self._cause - def pformat(self, indent=2): + def pformat(self, indent=2, indent_text=" "): """Pretty formats a taskflow exception + any connected causes.""" if indent < 0: raise ValueError("indent must be greater than or equal to zero") @@ -45,7 +45,7 @@ class TaskFlowException(Exception): # We'll add our own newlines on at the end of formatting. if line.endswith("\n"): line = line[0:-1] - lines.append((" " * indent_by) + line) + lines.append((indent_text * indent_by) + line) try: lines.extend(_format(excp.cause, indent_by + indent)) except AttributeError: From 642ef06b2768c3a416f4026313c3c5215b7414b3 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Wed, 28 May 2014 23:36:36 -0700 Subject: [PATCH 095/188] Fix docstring list format Change-Id: Ia79560e965e94e2e4ce18f4667c33ae577b327ed --- taskflow/retry.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/taskflow/retry.py b/taskflow/retry.py index b3d435b3..fb7330e2 100644 --- a/taskflow/retry.py +++ b/taskflow/retry.py @@ -51,12 +51,13 @@ class Decider(object): this history will be empty). 
Returns retry action constant: - * 'RETRY' when subflow must be reverted and restarted again (maybe + + * ``RETRY`` when subflow must be reverted and restarted again (maybe with new parameters). - * 'REVERT' when this subflow must be completely reverted and parent + * ``REVERT`` when this subflow must be completely reverted and parent subflow should make a decision about the flow execution. - * 'REVERT_ALL' in a case when the whole flow must be reverted and - marked as FAILURE. + * ``REVERT_ALL`` in a case when the whole flow must be reverted and + marked as ``FAILURE``. """ From ef9bf8320aa589ef6f9e5fa4c87e9e796db6667c Mon Sep 17 00:00:00 2001 From: "Ivan A. Melnikov" Date: Thu, 29 May 2014 18:45:15 +0400 Subject: [PATCH 096/188] Update sphinx pin from global requirements Since I63c6159eb23430a181eb20016b35366c2fbbf9f8 got merge we have to update sphinx version, too. Change-Id: Ic25b361fd34ee7df7010bc60c1ff44d3b8892a6c --- test-requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test-requirements.txt b/test-requirements.txt index d37163d8..8c0d3106 100644 --- a/test-requirements.txt +++ b/test-requirements.txt @@ -7,5 +7,5 @@ testrepository>=0.0.18 testtools>=0.9.34 zake>=0.0.18 # docs build jobs -sphinx>=1.1.2,<1.2 +sphinx>=1.2.1,<1.3 oslosphinx From 7f525de0f9629be5957157ed2acd6be39e5b1103 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Mon, 19 May 2014 17:06:54 -0700 Subject: [PATCH 097/188] Finish factoring apart the graph_action module Factor out the scheduling, running and completion components of graph_action so that we can allow this to be plugged in with other types of scheduling, running and completion strategies. The newly added components are the following: - A runtime container class (serves as a holder of some small utility functions) and all the other runtime components. - A runner class that acts as the action engines run loop. 
- A scheduler class that schedules nodes using a provided executor and returns futures that can be used to introspect there results as they complete. - A completer class that completes nodes and futures that the scheduler started, persisting there results and doing any further post-execution analysis. Part of blueprint plug-engine Change-Id: I1dbf46654377fc34e9d90eeabf7b0062020bdc5e --- doc/source/engines.rst | 61 +++-- taskflow/engines/action_engine/engine.py | 74 ++---- .../engines/action_engine/graph_action.py | 233 ---------------- taskflow/engines/action_engine/runner.py | 137 ++++++++++ taskflow/engines/action_engine/runtime.py | 250 ++++++++++++++++++ 5 files changed, 455 insertions(+), 300 deletions(-) delete mode 100644 taskflow/engines/action_engine/graph_action.py create mode 100644 taskflow/engines/action_engine/runner.py create mode 100644 taskflow/engines/action_engine/runtime.py diff --git a/doc/source/engines.rst b/doc/source/engines.rst index 6aac42e0..03526491 100644 --- a/doc/source/engines.rst +++ b/doc/source/engines.rst @@ -204,15 +204,20 @@ Compiling --------- During this stage the flow will be converted into an internal graph -representation using a flow :py:func:`~taskflow.utils.flow_utils.flatten` -function. This function converts the flow objects and contained atoms into a +representation using a +:py:class:`~taskflow.engines.action_engine.compiler.Compiler` (the default +implementation for patterns is the +:py:class:`~taskflow.engines.action_engine.compiler.PatternCompiler`). This +class compiles/converts the flow objects and contained atoms into a `networkx`_ directed graph that contains the equivalent atoms defined in the flow and any nested flows & atoms as well as the constraints that are created by the application of the different flow patterns. This graph is then what will be analyzed & traversed during the engines execution. 
At this point a few helper object are also created and saved to internal engine variables (these object help in execution of atoms, analyzing the graph and performing other -internal engine activities). +internal engine activities). At the finishing of this stage a +:py:class:`~taskflow.engines.action_engine.runtime.Runtime` object is created +which contains references to all needed runtime components. Preparation ----------- @@ -231,7 +236,7 @@ Execution The graph (and helper objects) previously created are now used for guiding further execution. The flow is put into the ``RUNNING`` :doc:`state ` and a -:py:class:`~taskflow.engines.action_engine.graph_action.FutureGraphAction` +:py:class:`~taskflow.engines.action_engine.runner.Runner` implementation object starts to take over and begins going through the stages listed below (for a more visual diagram/representation see the :ref:`engine state diagram `). @@ -252,35 +257,45 @@ for things like retry atom which can influence what a tasks intention should be object which was designed to provide helper methods for this analysis). Once these intentions are determined and associated with each task (the intention is also stored in the :py:class:`~taskflow.persistence.logbook.AtomDetail` object) -the scheduling stage starts. +the :ref:`scheduling ` stage starts. + +.. _scheduling: Scheduling ^^^^^^^^^^ -This stage selects which atoms are eligible to run (looking at there intention, -checking if predecessor atoms have ran and so-on, again using the +This stage selects which atoms are eligible to run by using a +:py:class:`~taskflow.engines.action_engine.runtime.Scheduler` implementation +(the default implementation looks at there intention, checking if predecessor +atoms have ran and so-on, using a :py:class:`~taskflow.engines.action_engine.graph_analyzer.GraphAnalyzer` helper -object) and submits those atoms to a previously provided compatible -`executor`_ for asynchronous execution. 
This executor will return a `future`_ -object for each atom submitted; all of which are collected into a list of not -done futures. This will end the initial round of scheduling and at this point -the engine enters the waiting stage. +object as needed) and submits those atoms to a previously provided compatible +`executor`_ for asynchronous execution. This +:py:class:`~taskflow.engines.action_engine.runtime.Scheduler` will return a +`future`_ object for each atom scheduled; all of which are collected into a +list of not done futures. This will end the initial round of scheduling and at +this point the engine enters the :ref:`waiting ` stage. + +.. _waiting: Waiting ^^^^^^^ In this stage the engine waits for any of the future objects previously submitted to complete. Once one of the future objects completes (or fails) that -atoms result will be examined and persisted to the persistence backend (saved +atoms result will be examined and finalized using a +:py:class:`~taskflow.engines.action_engine.runtime.Completer` implementation. +It typically will persist results to a provided persistence backend (saved into the corresponding :py:class:`~taskflow.persistence.logbook.AtomDetail` -object) and the state of the atom is changed. At this point what happens falls -into two categories, one for if that atom failed and one for if it did not. If -the atom failed it may be set to a new intention such as ``RETRY`` or +and :py:class:`~taskflow.persistence.logbook.FlowDetail` objects) and reflect +the new state of the atom. At this point what typically happens falls into two +categories, one for if that atom failed and one for if it did not. If the atom +failed it may be set to a new intention such as ``RETRY`` or ``REVERT`` (other atoms that were predecessors of this failing atom may also have there intention altered). 
Once this intention adjustment has happened a -new round of scheduling occurs and this process repeats until the engine -succeeds or fails (if the process running the engine dies the above stages will -be restarted and resuming will occur). +new round of :ref:`scheduling ` occurs and this process repeats +until the engine succeeds or fails (if the process running the engine dies the +above stages will be restarted and resuming will occur). .. note:: @@ -293,7 +308,7 @@ Finishing --------- At this point the -:py:class:`~taskflow.engines.action_engine.graph_action.FutureGraphAction` has +:py:class:`~taskflow.engines.action_engine.runner.Runner` has now finished successfully, failed, or the execution was suspended. Depending on which one of these occurs will cause the flow to enter a new state (typically one of ``FAILURE``, ``SUSPENDED``, ``SUCCESS`` or ``REVERTED``). @@ -307,10 +322,12 @@ saved for this execution. Interfaces ========== -.. automodule:: taskflow.engines.base +.. automodule:: taskflow.engines.action_engine.compiler .. automodule:: taskflow.engines.action_engine.engine -.. automodule:: taskflow.engines.action_engine.graph_action .. automodule:: taskflow.engines.action_engine.graph_analyzer +.. automodule:: taskflow.engines.action_engine.runner +.. automodule:: taskflow.engines.action_engine.runtime +.. 
automodule:: taskflow.engines.base Hierarchy ========= diff --git a/taskflow/engines/action_engine/engine.py b/taskflow/engines/action_engine/engine.py index ef979168..e63aeb31 100644 --- a/taskflow/engines/action_engine/engine.py +++ b/taskflow/engines/action_engine/engine.py @@ -18,10 +18,8 @@ import threading from taskflow.engines.action_engine import compiler from taskflow.engines.action_engine import executor -from taskflow.engines.action_engine import graph_action -from taskflow.engines.action_engine import graph_analyzer -from taskflow.engines.action_engine import retry_action -from taskflow.engines.action_engine import task_action +from taskflow.engines.action_engine import runner +from taskflow.engines.action_engine import runtime from taskflow.engines import base from taskflow import exceptions as exc @@ -38,28 +36,27 @@ from taskflow.utils import reflection class ActionEngine(base.EngineBase): """Generic action-based engine. - This engine flattens the flow (and any subflows) into a execution graph + This engine compiles the flow (and any subflows) into a compilation unit which contains the full runtime definition to be executed and then uses - this graph in combination with the action classes & storage to attempt to - run your flow (and any subflows & contained tasks) to completion. + this compilation unit in combination with the executor, runtime, runner + and storage classes to attempt to run your flow (and any subflows & + contained atoms) to completion. - During this process it is permissible and valid to have a task or multiple - tasks in the execution graph fail, which will cause the process of - reversion to commence. See the valid states in the states module to learn - more about what other states the tasks & flow being ran can go through. 
+ NOTE(harlowja): during this process it is permissible and valid to have a + task or multiple tasks in the execution graph fail (at the same time even), + which will cause the process of reversion or retrying to commence. See the + valid states in the states module to learn more about what other states + the tasks and flow being ran can go through. """ - _graph_action_factory = graph_action.FutureGraphAction - _graph_analyzer_factory = graph_analyzer.GraphAnalyzer - _task_action_factory = task_action.TaskAction - _task_executor_factory = executor.SerialTaskExecutor - _retry_action_factory = retry_action.RetryAction _compiler_factory = compiler.PatternCompiler + _task_executor_factory = executor.SerialTaskExecutor def __init__(self, flow, flow_detail, backend, conf): super(ActionEngine, self).__init__(flow, flow_detail, backend, conf) - self._analyzer = None - self._root = None + self._runner = None + self._runtime = None self._compiled = False + self._compilation = None self._lock = threading.RLock() self._state_lock = threading.RLock() self._storage_ensured = False @@ -80,8 +77,8 @@ class ActionEngine(base.EngineBase): NOTE(harlowja): Only accessible after compilation has completed. 
""" g = None - if self._compiled and self._analyzer: - g = self._analyzer.execution_graph + if self._compiled: + g = self._compilation.execution_graph return g def run(self): @@ -119,7 +116,7 @@ class ActionEngine(base.EngineBase): state = None try: self._change_state(states.RUNNING) - for state in self._root.execute_iter(timeout=timeout): + for state in self._runner.run_iter(timeout=timeout): try: try_suspend = yield state except GeneratorExit: @@ -131,7 +128,7 @@ class ActionEngine(base.EngineBase): with excutils.save_and_reraise_exception(): self._change_state(states.FAILURE) else: - ignorable_states = getattr(self._root, 'ignorable_states', []) + ignorable_states = getattr(self._runner, 'ignorable_states', []) if state and state not in ignorable_states: self._change_state(state) if state != states.SUSPENDED and state != states.SUCCESS: @@ -162,12 +159,12 @@ class ActionEngine(base.EngineBase): old_state=old_state) self.notifier.notify(state, details) - def _ensure_storage_for(self, execution_graph): + def _ensure_storage(self): # NOTE(harlowja): signal to the tasks that exist that we are about to # resume, if they have a previous state, they will now transition to # a resuming state (and then to suspended). 
self._change_state(states.RESUMING) # does nothing in PENDING state - for node in execution_graph.nodes_iter(): + for node in self._compilation.execution_graph.nodes_iter(): version = misc.get_version_string(node) if isinstance(node, retry.Retry): self.storage.ensure_retry(node.name, version, node.save_as) @@ -175,7 +172,6 @@ class ActionEngine(base.EngineBase): self.storage.ensure_task(node.name, version, node.save_as) if node.inject: self.storage.inject_task_args(node.name, node.inject) - self._change_state(states.SUSPENDED) # does nothing in PENDING state @lock_utils.locked @@ -184,7 +180,7 @@ class ActionEngine(base.EngineBase): raise exc.InvalidState("Can not prepare an engine" " which has not been compiled") if not self._storage_ensured: - self._ensure_storage_for(self.execution_graph) + self._ensure_storage() self._storage_ensured = True # At this point we can check to ensure all dependencies are either # flow/task provided or storage provided, if there are still missing @@ -196,22 +192,13 @@ class ActionEngine(base.EngineBase): raise exc.MissingDependencies(self._flow, sorted(missing)) # Reset everything back to pending (if we were previously reverted). 
if self.storage.get_flow_state() == states.REVERTED: - self._root.reset_all() + self._runtime.reset_all() self._change_state(states.PENDING) - @misc.cachedproperty - def _retry_action(self): - return self._retry_action_factory(self.storage, self.task_notifier) - @misc.cachedproperty def _task_executor(self): return self._task_executor_factory() - @misc.cachedproperty - def _task_action(self): - return self._task_action_factory(self.storage, self._task_executor, - self.task_notifier) - @misc.cachedproperty def _compiler(self): return self._compiler_factory() @@ -220,16 +207,13 @@ class ActionEngine(base.EngineBase): def compile(self): if self._compiled: return - compilation = self._compiler.compile(self._flow) - if self._analyzer is None: - self._analyzer = self._graph_analyzer_factory( - compilation.execution_graph, self.storage) - self._root = self._graph_action_factory(self._analyzer, - self.storage, - self._task_action, - self._retry_action) + self._compilation = self._compiler.compile(self._flow) + self._runtime = runtime.Runtime(self._compilation, + self.storage, + self.task_notifier, + self._task_executor) + self._runner = runner.Runner(self._runtime, self._task_executor) self._compiled = True - return class SingleThreadedActionEngine(ActionEngine): diff --git a/taskflow/engines/action_engine/graph_action.py b/taskflow/engines/action_engine/graph_action.py deleted file mode 100644 index c7bad3b7..00000000 --- a/taskflow/engines/action_engine/graph_action.py +++ /dev/null @@ -1,233 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright (C) 2012 Yahoo! Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from taskflow.engines.action_engine import executor as ex -from taskflow import exceptions as excp -from taskflow import retry as retry_atom -from taskflow import states as st -from taskflow import task as task_atom -from taskflow.utils import misc - - -_WAITING_TIMEOUT = 60 # in seconds - - -class FutureGraphAction(object): - """Graph action build around futures returned by task action. - - This graph action schedules all task it can for execution and than - waits on returned futures. If task executor is able to execute tasks - in parallel, this enables parallel flow run and reversion. - """ - - # Informational states this action yields while running, not useful to - # have the engine record but useful to provide to end-users when doing - # execution iterations. 
- ignorable_states = (st.SCHEDULING, st.WAITING, st.RESUMING, st.ANALYZING) - - def __init__(self, analyzer, storage, task_action, retry_action): - self._analyzer = analyzer - self._storage = storage - self._task_action = task_action - self._retry_action = retry_action - - def is_running(self): - return self._storage.get_flow_state() == st.RUNNING - - def _schedule_node(self, node): - """Schedule a single node for execution.""" - if isinstance(node, task_atom.BaseTask): - return self._schedule_task(node) - elif isinstance(node, retry_atom.Retry): - return self._schedule_retry(node) - else: - raise TypeError("Unknown how to schedule node %s" % node) - - def _schedule(self, nodes): - """Schedule a group of nodes for execution.""" - futures = set() - for node in nodes: - try: - futures.add(self._schedule_node(node)) - except Exception: - # Immediately stop scheduling future work so that we can - # exit execution early (rather than later) if a single task - # fails to schedule correctly. - return (futures, [misc.Failure()]) - return (futures, []) - - def execute_iter(self, timeout=None): - if timeout is None: - timeout = _WAITING_TIMEOUT - - # Prepare flow to be resumed - yield st.RESUMING - next_nodes = self._prepare_flow_for_resume() - next_nodes.update(self._analyzer.get_next_nodes()) - - # Schedule nodes to be worked on - yield st.SCHEDULING - if self.is_running(): - not_done, failures = self._schedule(next_nodes) - else: - not_done, failures = (set(), []) - - # Run! - # - # At this point we need to ensure we wait for all active nodes to - # finish running (even if we are asked to suspend) since we can not - # preempt those tasks (maybe in the future we will be better able to do - # this). - while not_done: - yield st.WAITING - - # TODO(harlowja): maybe we should start doing 'yield from' this - # call sometime in the future, or equivalent that will work in - # py2 and py3. 
- done, not_done = self._task_action.wait_for_any(not_done, timeout) - - # Analyze the results and schedule more nodes (unless we had - # failures). If failures occurred just continue processing what - # is running (so that we don't leave it abandoned) but do not - # schedule anything new. - yield st.ANALYZING - next_nodes = set() - for future in done: - try: - node, event, result = future.result() - if isinstance(node, task_atom.BaseTask): - self._complete_task(node, event, result) - if isinstance(result, misc.Failure): - if event == ex.EXECUTED: - self._process_atom_failure(node, result) - else: - failures.append(result) - except Exception: - failures.append(misc.Failure()) - else: - try: - more_nodes = self._analyzer.get_next_nodes(node) - except Exception: - failures.append(misc.Failure()) - else: - next_nodes.update(more_nodes) - if next_nodes and not failures and self.is_running(): - yield st.SCHEDULING - # Recheck incase someone suspended it. - if self.is_running(): - more_not_done, failures = self._schedule(next_nodes) - not_done.update(more_not_done) - - if failures: - misc.Failure.reraise_if_any(failures) - if self._analyzer.get_next_nodes(): - yield st.SUSPENDED - elif self._analyzer.is_success(): - yield st.SUCCESS - else: - yield st.REVERTED - - def _schedule_task(self, task): - """Schedules the given task for revert or execute depending - on its intention. 
- """ - intention = self._storage.get_atom_intention(task.name) - if intention == st.EXECUTE: - return self._task_action.schedule_execution(task) - elif intention == st.REVERT: - return self._task_action.schedule_reversion(task) - else: - raise excp.ExecutionFailure("Unknown how to schedule task with" - " intention: %s" % intention) - - def _complete_task(self, task, event, result): - """Completes the given task, process task failure.""" - if event == ex.EXECUTED: - self._task_action.complete_execution(task, result) - else: - self._task_action.complete_reversion(task, result) - - def _schedule_retry(self, retry): - """Schedules the given retry for revert or execute depending - on its intention. - """ - intention = self._storage.get_atom_intention(retry.name) - if intention == st.EXECUTE: - return self._retry_action.execute(retry) - elif intention == st.REVERT: - return self._retry_action.revert(retry) - elif intention == st.RETRY: - self._retry_action.change_state(retry, st.RETRYING) - self._retry_subflow(retry) - return self._retry_action.execute(retry) - else: - raise excp.ExecutionFailure("Unknown how to schedule retry with" - " intention: %s" % intention) - - def _process_atom_failure(self, atom, failure): - """On atom failure find its retry controller, ask for the action to - perform with failed subflow and set proper intention for subflow nodes. 
- """ - retry = self._analyzer.find_atom_retry(atom) - if retry: - # Ask retry controller what to do in case of failure - action = self._retry_action.on_failure(retry, atom, failure) - if action == retry_atom.RETRY: - # Prepare subflow for revert - self._storage.set_atom_intention(retry.name, st.RETRY) - for node in self._analyzer.iterate_subgraph(retry): - self._storage.set_atom_intention(node.name, st.REVERT) - elif action == retry_atom.REVERT: - # Ask parent checkpoint - self._process_atom_failure(retry, failure) - elif action == retry_atom.REVERT_ALL: - # Prepare all flow for revert - self._revert_all() - else: - self._revert_all() - - def _revert_all(self): - for node in self._analyzer.iterate_all_nodes(): - self._storage.set_atom_intention(node.name, st.REVERT) - - def _prepare_flow_for_resume(self): - for node in self._analyzer.iterate_all_nodes(): - if self._analyzer.get_state(node) == st.FAILURE: - self._process_atom_failure(node, self._storage.get(node.name)) - for retry in self._analyzer.iterate_retries(st.RETRYING): - self._retry_subflow(retry) - next_nodes = set() - for node in self._analyzer.iterate_all_nodes(): - if self._analyzer.get_state(node) in (st.RUNNING, st.REVERTING): - next_nodes.add(node) - return next_nodes - - def _reset_nodes(self, nodes_iter, intention=st.EXECUTE): - for node in nodes_iter: - if isinstance(node, task_atom.BaseTask): - self._task_action.change_state(node, st.PENDING, progress=0.0) - elif isinstance(node, retry_atom.Retry): - self._retry_action.change_state(node, st.PENDING) - else: - raise TypeError("Unknown how to reset node %s" % node) - self._storage.set_atom_intention(node.name, intention) - - def reset_all(self): - self._reset_nodes(self._analyzer.iterate_all_nodes()) - - def _retry_subflow(self, retry): - self._storage.set_atom_intention(retry.name, st.EXECUTE) - self._reset_nodes(self._analyzer.iterate_subgraph(retry)) diff --git a/taskflow/engines/action_engine/runner.py 
b/taskflow/engines/action_engine/runner.py new file mode 100644 index 00000000..dc8c1003 --- /dev/null +++ b/taskflow/engines/action_engine/runner.py @@ -0,0 +1,137 @@ +# -*- coding: utf-8 -*- + +# Copyright (C) 2012 Yahoo! Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from taskflow import states as st +from taskflow.utils import misc + + +_WAITING_TIMEOUT = 60 # in seconds + + +class Runner(object): + """Runner that iterates while executing nodes using the given runtime. + + This runner acts as the action engine run loop, it resumes the workflow, + schedules all task it can for execution using the runtimes scheduler and + analyzer components, and than waits on returned futures and then activates + the runtimes completion component to finish up those tasks. + + This process repeats until the analzyer runs out of next nodes, when the + scheduler can no longer schedule tasks or when the the engine has been + suspended or a task has failed and that failure could not be resolved. + + NOTE(harlowja): If the runtimes scheduler component is able to schedule + tasks in parallel, this enables parallel running and/or reversion. + """ + + # Informational states this action yields while running, not useful to + # have the engine record but useful to provide to end-users when doing + # execution iterations. 
+ ignorable_states = (st.SCHEDULING, st.WAITING, st.RESUMING, st.ANALYZING) + + def __init__(self, runtime, waiter): + self._runtime = runtime + self._scheduler = runtime.scheduler + self._completer = runtime.completer + self._storage = runtime.storage + self._analyzer = runtime.graph_analyzer + self._waiter = waiter + + def is_running(self): + return self._storage.get_flow_state() == st.RUNNING + + def run_iter(self, timeout=None): + """Runs the nodes using the runtime components. + + NOTE(harlowja): the states that this generator will go through are: + + RESUMING -> SCHEDULING + SCHEDULING -> WAITING + WAITING -> ANALYZING + ANALYZING -> SCHEDULING + + Between any of these yielded states if the engine has been suspended + or the engine has failed (due to a non-resolveable task failure or + scheduling failure) the engine will stop executing new tasks (currently + running tasks will be allowed to complete) and this iteration loop + will be broken. + """ + if timeout is None: + timeout = _WAITING_TIMEOUT + + # Prepare flow to be resumed + yield st.RESUMING + next_nodes = self._completer.resume() + next_nodes.update(self._analyzer.get_next_nodes()) + + # Schedule nodes to be worked on + yield st.SCHEDULING + if self.is_running(): + not_done, failures = self._scheduler.schedule(next_nodes) + else: + not_done, failures = (set(), []) + + # Run! + # + # At this point we need to ensure we wait for all active nodes to + # finish running (even if we are asked to suspend) since we can not + # preempt those tasks (maybe in the future we will be better able to do + # this). + while not_done: + yield st.WAITING + + # TODO(harlowja): maybe we should start doing 'yield from' this + # call sometime in the future, or equivalent that will work in + # py2 and py3. + done, not_done = self._waiter.wait_for_any(not_done, timeout) + + # Analyze the results and schedule more nodes (unless we had + # failures). 
If failures occurred just continue processing what + # is running (so that we don't leave it abandoned) but do not + # schedule anything new. + yield st.ANALYZING + next_nodes = set() + for future in done: + try: + node, event, result = future.result() + retain = self._completer.complete(node, event, result) + if retain and isinstance(result, misc.Failure): + failures.append(result) + except Exception: + failures.append(misc.Failure()) + else: + try: + more_nodes = self._analyzer.get_next_nodes(node) + except Exception: + failures.append(misc.Failure()) + else: + next_nodes.update(more_nodes) + if next_nodes and not failures and self.is_running(): + yield st.SCHEDULING + # Recheck incase someone suspended it. + if self.is_running(): + more_not_done, failures = self._scheduler.schedule( + next_nodes) + not_done.update(more_not_done) + + if failures: + misc.Failure.reraise_if_any(failures) + if self._analyzer.get_next_nodes(): + yield st.SUSPENDED + elif self._analyzer.is_success(): + yield st.SUCCESS + else: + yield st.REVERTED diff --git a/taskflow/engines/action_engine/runtime.py b/taskflow/engines/action_engine/runtime.py new file mode 100644 index 00000000..709ff78a --- /dev/null +++ b/taskflow/engines/action_engine/runtime.py @@ -0,0 +1,250 @@ +# -*- coding: utf-8 -*- + +# Copyright (C) 2014 Yahoo! Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from taskflow import exceptions as excp +from taskflow import retry as retry_atom +from taskflow import states as st +from taskflow import task as task_atom +from taskflow.utils import misc + +from taskflow.engines.action_engine import executor as ex +from taskflow.engines.action_engine import graph_analyzer as ga +from taskflow.engines.action_engine import retry_action as ra +from taskflow.engines.action_engine import task_action as ta + + +class Runtime(object): + """An object that contains various utility methods and properties that + represent the collection of runtime components and functionality needed + for an action engine to run to completion. + """ + + def __init__(self, compilation, storage, task_notifier, task_executor): + self._task_notifier = task_notifier + self._task_executor = task_executor + self._storage = storage + self._compilation = compilation + + @property + def compilation(self): + return self._compilation + + @property + def storage(self): + return self._storage + + @misc.cachedproperty + def graph_analyzer(self): + return ga.GraphAnalyzer(self._compilation.execution_graph, + self._storage) + + @misc.cachedproperty + def completer(self): + return Completer(self) + + @misc.cachedproperty + def scheduler(self): + return Scheduler(self) + + @misc.cachedproperty + def retry_action(self): + return ra.RetryAction(self.storage, self._task_notifier) + + @misc.cachedproperty + def task_action(self): + return ta.TaskAction(self.storage, self._task_executor, + self._task_notifier) + + def reset_nodes(self, nodes, state=st.PENDING, intention=st.EXECUTE): + for node in nodes: + if state: + if isinstance(node, task_atom.BaseTask): + self.task_action.change_state(node, state, progress=0.0) + elif isinstance(node, retry_atom.Retry): + self.retry_action.change_state(node, state) + else: + raise TypeError("Unknown how to reset node %s, %s" + % (node, type(node))) + if intention: + self.storage.set_atom_intention(node.name, intention) + + def 
reset_all(self, state=st.PENDING, intention=st.EXECUTE): + self.reset_nodes(self.graph_analyzer.iterate_all_nodes(), + state=state, intention=intention) + + def reset_subgraph(self, node, state=st.PENDING, intention=st.EXECUTE): + self.reset_nodes(self.graph_analyzer.iterate_subgraph(node), + state=state, intention=intention) + + +# Various helper methods used by completer and scheduler. +def _retry_subflow(retry, runtime): + runtime.storage.set_atom_intention(retry.name, st.EXECUTE) + runtime.reset_subgraph(retry) + + +class Completer(object): + """Completes atoms using actions to complete them.""" + + def __init__(self, runtime): + self._analyzer = runtime.graph_analyzer + self._retry_action = runtime.retry_action + self._runtime = runtime + self._storage = runtime.storage + self._task_action = runtime.task_action + + def _complete_task(self, task, event, result): + """Completes the given task, processes task failure.""" + if event == ex.EXECUTED: + self._task_action.complete_execution(task, result) + else: + self._task_action.complete_reversion(task, result) + + def resume(self): + """Resumes nodes in the contained graph. + + This is done to allow any previously completed or failed nodes to + be analyzed, there results processed and any potential nodes affected + to be adjusted as needed. + + This should return a set of nodes which should be the initial set of + nodes that were previously not finished (due to a RUNNING or REVERTING + attempt not previously finishing). 
+ """ + for node in self._analyzer.iterate_all_nodes(): + if self._analyzer.get_state(node) == st.FAILURE: + self._process_atom_failure(node, self._storage.get(node.name)) + for retry in self._analyzer.iterate_retries(st.RETRYING): + _retry_subflow(retry, self._runtime) + unfinished_nodes = set() + for node in self._analyzer.iterate_all_nodes(): + if self._analyzer.get_state(node) in (st.RUNNING, st.REVERTING): + unfinished_nodes.add(node) + return unfinished_nodes + + def complete(self, node, event, result): + """Performs post-execution completion of a node. + + Returns whether the result should be saved into an accumulator of + failures or whether this should not be done. + """ + if isinstance(node, task_atom.BaseTask): + self._complete_task(node, event, result) + if isinstance(result, misc.Failure): + if event == ex.EXECUTED: + self._process_atom_failure(node, result) + else: + return True + return False + + def _process_atom_failure(self, atom, failure): + """On atom failure find its retry controller, ask for the action to + perform with failed subflow and set proper intention for subflow nodes. 
+ """ + retry = self._analyzer.find_atom_retry(atom) + if retry: + # Ask retry controller what to do in case of failure + action = self._retry_action.on_failure(retry, atom, failure) + if action == retry_atom.RETRY: + # Prepare subflow for revert + self._storage.set_atom_intention(retry.name, st.RETRY) + self._runtime.reset_subgraph(retry, state=None, + intention=st.REVERT) + elif action == retry_atom.REVERT: + # Ask parent checkpoint + self._process_atom_failure(retry, failure) + elif action == retry_atom.REVERT_ALL: + # Prepare all flow for revert + self._revert_all() + else: + # Prepare all flow for revert + self._revert_all() + + def _revert_all(self): + """Attempts to set all nodes to the REVERT intention.""" + self._runtime.reset_nodes(self._analyzer.iterate_all_nodes(), + state=None, intention=st.REVERT) + + +class Scheduler(object): + """Schedules atoms using actions to schedule.""" + + def __init__(self, runtime): + self._analyzer = runtime.graph_analyzer + self._retry_action = runtime.retry_action + self._runtime = runtime + self._storage = runtime.storage + self._task_action = runtime.task_action + + def _schedule_node(self, node): + """Schedule a single node for execution.""" + if isinstance(node, task_atom.BaseTask): + return self._schedule_task(node) + elif isinstance(node, retry_atom.Retry): + return self._schedule_retry(node) + else: + raise TypeError("Unknown how to schedule node %s, %s" + % (node, type(node))) + + def _schedule_retry(self, retry): + """Schedules the given retry for revert or execute depending + on its intention. 
+ """ + intention = self._storage.get_atom_intention(retry.name) + if intention == st.EXECUTE: + return self._retry_action.execute(retry) + elif intention == st.REVERT: + return self._retry_action.revert(retry) + elif intention == st.RETRY: + self._retry_action.change_state(retry, st.RETRYING) + _retry_subflow(retry, self._runtime) + return self._retry_action.execute(retry) + else: + raise excp.ExecutionFailure("Unknown how to schedule retry with" + " intention: %s" % intention) + + def _schedule_task(self, task): + """Schedules the given task for revert or execute depending + on its intention. + """ + intention = self._storage.get_atom_intention(task.name) + if intention == st.EXECUTE: + return self._task_action.schedule_execution(task) + elif intention == st.REVERT: + return self._task_action.schedule_reversion(task) + else: + raise excp.ExecutionFailure("Unknown how to schedule task with" + " intention: %s" % intention) + + def schedule(self, nodes): + """Schedules the provided nodes for *future* completion. + + This method should schedule a future for each node provided and return + a set of those futures to be waited on (or used for other similar + purposes). It should also return any failure objects that represented + scheduling failures that may have occurred during this scheduling + process. + """ + futures = set() + for node in nodes: + try: + futures.add(self._schedule_node(node)) + except Exception: + # Immediately stop scheduling future work so that we can + # exit execution early (rather than later) if a single task + # fails to schedule correctly. 
+ return (futures, [misc.Failure()]) + return (futures, []) From c9ab8b2931795d030a8bf6a2bff910f6ea505385 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Thu, 29 May 2014 14:17:04 -0700 Subject: [PATCH 098/188] Add docstring describing the inject instance variable Change-Id: I00c48fc6a5f579fe72badc034fbd293f2eb41d5c --- taskflow/atom.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/taskflow/atom.py b/taskflow/atom.py index e3ed8b34..58f69f8d 100644 --- a/taskflow/atom.py +++ b/taskflow/atom.py @@ -134,6 +134,11 @@ class Atom(object): the names that this atom expects (in a way this is like remapping a namespace of another atom into the namespace of this atom). + :ivar inject: An *immutable* input_name => value dictionary which specifies + any initial inputs that should be automatically injected into + the atoms scope before the atom execution commences (this + allows for providing atom *local* values that do not need to + be provided by other atoms). """ def __init__(self, name=None, provides=None, inject=None): From e34c0c9f06bd498d08218c300fc53497bf11720d Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Thu, 29 May 2014 15:37:04 -0700 Subject: [PATCH 099/188] Add docs related to the new conductor feature Change-Id: I68722a5b6bea0e404427ec7b121e7c6f90e42145 --- doc/source/conductors.rst | 44 +++++++++++++++++++++++++++++++++++++++ doc/source/index.rst | 21 ++++++++++++++++--- doc/source/resumption.rst | 2 ++ 3 files changed, 64 insertions(+), 3 deletions(-) create mode 100644 doc/source/conductors.rst diff --git a/doc/source/conductors.rst b/doc/source/conductors.rst new file mode 100644 index 00000000..4dfa3e33 --- /dev/null +++ b/doc/source/conductors.rst @@ -0,0 +1,44 @@ +---------- +Conductors +---------- + +Overview +======== + +Conductors in TaskFlow provide a mechanism that unifies the various TaskFlow +concepts under a single easy to use (as plug-and-play as we can make it) +construct. 
+ +They are responsible for the following: + +* Interacting with :doc:`jobboards ` (examining and claiming + :doc:`jobs `). +* Creating :doc:`engines ` from the claimed jobs (using + :ref:`factories ` to reconstruct the contained + tasks and flows to be executed). +* Dispatching the engine using the provided :doc:`persistence ` + layer and engine configuration. +* Completing or abandoning the claimed job (depending on dispatching and + execution outcome). +* *Rinse and repeat*. + +.. note:: + + They are inspired by and have similar responsiblities + as `railroad conductors`_. + +Interfaces +========== + +.. automodule:: taskflow.conductors.base +.. automodule:: taskflow.conductors.single_threaded + +Hierarchy +========= + +.. inheritance-diagram:: + taskflow.conductors.base + taskflow.conductors.single_threaded + :parts: 1 + +.. _railroad conductors: http://en.wikipedia.org/wiki/Conductor_%28transportation%29 diff --git a/doc/source/index.rst b/doc/source/index.rst index 387980e4..bb1b8b6b 100644 --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -11,19 +11,34 @@ easy, consistent, and reliable.* Contents ======== + .. toctree:: :maxdepth: 2 atoms arguments_and_results + inputs_and_outputs + patterns engines - workers - jobs - inputs_and_outputs notifications persistence resumption + + jobs + conductors + +.. toctree:: + :hidden: + + workers + +Miscellaneous +------------- + +.. toctree:: + :maxdepth: 2 + exceptions utils states diff --git a/doc/source/resumption.rst b/doc/source/resumption.rst index cc6e9eec..120c16e3 100644 --- a/doc/source/resumption.rst +++ b/doc/source/resumption.rst @@ -19,6 +19,8 @@ in order to revert those atoms or resume those atoms in the correct ordering. Taskflow provides a pattern that can help in automating this process (it does **not** prohibit the user from creating their own strategies for doing this). +.. 
_resumption factories: + Factories ========= From f4b7dfd25b453520a441729bb72a5880fa8a4662 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Fri, 30 May 2014 13:44:44 -0700 Subject: [PATCH 100/188] Allow for two ways to find a flow detail in a job for a conductor Previously we had the code looking at the first logbook entry and running with that. That doesn't work so well especially since the logbook may be unordered. So we then switched to require a job to provide a 'flow_uuid' key to determine which one to run. This makes sense and avoids the problem of being unable to determine which one to run but makes it harder to use for those that have just logbooks with single entries (likely the common case). So add in a slightly more advanced finding logic that will check for existence of 'flow_uuid' and if found use it, otherwise if not found then check if the logbook is only a single item and if so use that instead (and otherwise abort). Change-Id: Id1e11e8b4e48af3389e5c4e0818777ff9abf9463 --- taskflow/conductors/base.py | 63 ++++++++++++++++++++++++++----------- taskflow/exceptions.py | 4 +++ 2 files changed, 48 insertions(+), 19 deletions(-) diff --git a/taskflow/conductors/base.py b/taskflow/conductors/base.py index 634c5dec..0595e72a 100644 --- a/taskflow/conductors/base.py +++ b/taskflow/conductors/base.py @@ -39,28 +39,53 @@ class Conductor(object): self._persistence = persistence self._lock = threading.RLock() - def _engine_from_job(self, job): - try: + def _flow_detail_from_job(self, job): + """Extracts a flow detail from a job (via some manner). + + The current mechanism to accomplish this is the following choices: + + * If the job details provide a 'flow_uuid' key attempt to load this + key from the jobs book and use that as the flow_detail to run. + * If the job details does not have have a 'flow_uuid' key then attempt + to examine the size of the book and if it's only one element in the + book (aka one flow_detail) then just use that. 
+ * Otherwise if there is no 'flow_uuid' defined or there are > 1 + flow_details in the book raise an error that corresponds to being + unable to locate the correct flow_detail to run. + """ + book = job.book + if book is None: + raise excp.NotFound("No book found in job") + if job.details and 'flow_uuid' in job.details: flow_uuid = job.details["flow_uuid"] - except (KeyError, TypeError): - raise excp.NotFound("No flow detail uuid found in job") - else: - try: - flow_detail = job.book.find(flow_uuid) - except (TypeError, AttributeError): - flow_detail = None + flow_detail = book.find(flow_uuid) if flow_detail is None: raise excp.NotFound("No matching flow detail found in" - " job for flow detail uuid %s" % flow_uuid) - try: - store = dict(job.details["store"]) - except (KeyError, TypeError): - store = {} - return taskflow.engines.load_from_detail( - flow_detail, - store=store, - engine_conf=dict(self._engine_conf), - backend=self._persistence) + " jobs book for flow detail" + " with uuid %s" % flow_uuid) + else: + choices = len(book) + if choices == 1: + flow_detail = list(book)[0] + elif choices == 0: + raise excp.NotFound("No flow detail(s) found in jobs book") + else: + raise excp.MultipleChoices("No matching flow detail found (%s" + " choices) in jobs book" % choices) + return flow_detail + + def _engine_from_job(self, job): + """Extracts an engine from a job (via some manner).""" + flow_detail = self._flow_detail_from_job(job) + if job.details and 'store' in job.details: + store = dict(job.details["store"]) + else: + store = {} + engine_conf = dict(self._engine_conf) + return taskflow.engines.load_from_detail(flow_detail, + store=store, + engine_conf=engine_conf, + backend=self._persistence) @lock_utils.locked def connect(self): diff --git a/taskflow/exceptions.py b/taskflow/exceptions.py index 983eb55a..78186ef5 100644 --- a/taskflow/exceptions.py +++ b/taskflow/exceptions.py @@ -127,6 +127,10 @@ class Empty(TaskFlowException): """Raised when some object is 
empty when it shouldn't be.""" +class MultipleChoices(TaskFlowException): + """Raised when some decision can't be made due to many possible choices.""" + + # Others. class WrappedFailure(Exception): From 1ec96e07443d494945e1498532b3898235bca518 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Fri, 30 May 2014 16:23:42 -0700 Subject: [PATCH 101/188] Avoid forcing engine_conf to a dict It is valid to provide engine_conf that is None and when an engine is loaded it will notice that it is None and load its default configuration. Change-Id: Icde7ac85921f3b434cee39a9dfe7de997f2120bf --- taskflow/conductors/base.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/taskflow/conductors/base.py b/taskflow/conductors/base.py index 0595e72a..fc9f7630 100644 --- a/taskflow/conductors/base.py +++ b/taskflow/conductors/base.py @@ -81,10 +81,9 @@ class Conductor(object): store = dict(job.details["store"]) else: store = {} - engine_conf = dict(self._engine_conf) return taskflow.engines.load_from_detail(flow_detail, store=store, - engine_conf=engine_conf, + engine_conf=self._engine_conf, backend=self._persistence) @lock_utils.locked From 133764f917653c853fed9adece9d9c3e87c58662 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Fri, 30 May 2014 16:49:46 -0700 Subject: [PATCH 102/188] Increase the level of usefulness of the dispatching logging Change-Id: Ief62a3436aa7fe86c98e026ec5522d2f5e2aa29f --- taskflow/conductors/base.py | 9 +++--- taskflow/conductors/single_threaded.py | 40 ++++++++++++-------------- 2 files changed, 24 insertions(+), 25 deletions(-) diff --git a/taskflow/conductors/base.py b/taskflow/conductors/base.py index fc9f7630..7cee0c64 100644 --- a/taskflow/conductors/base.py +++ b/taskflow/conductors/base.py @@ -106,9 +106,10 @@ class Conductor(object): @abc.abstractmethod def _dispatch_job(self, job): """Accepts a single (already claimed) job and causes it to be run in - an engine. 
The job is consumed upon completion (unless False is - returned which will signify the job should be abandoned instead) + an engine. Returns a boolean that signifies whether the job should + be consumed. The job is consumed upon completion (unless False is + returned which will signify the job should be abandoned instead). - :param job: A Job instance that has already been claimed by the - jobboard. + :param job: A job instance that has already been claimed by the + jobboard. """ diff --git a/taskflow/conductors/single_threaded.py b/taskflow/conductors/single_threaded.py index 87201107..cd8022df 100644 --- a/taskflow/conductors/single_threaded.py +++ b/taskflow/conductors/single_threaded.py @@ -83,13 +83,10 @@ class SingleThreadedConductor(base.Conductor): return True def _dispatch_job(self, job): - LOG.info("Dispatching job: %s", job) - try: - engine = self._engine_from_job(job) - except Exception as e: - raise excp.ConductorFailure("Failed creating an engine", cause=e) + engine = self._engine_from_job(job) + consume = True with logging_listener.LoggingListener(engine, log=LOG): - consume = True + LOG.debug("Dispatching engine %s for job: %s", engine, job) try: engine.run() except excp.WrappedFailure as e: @@ -107,7 +104,7 @@ class SingleThreadedConductor(base.Conductor): LOG.warn("Job execution failed: %s", job, exc_info=True) else: LOG.info("Job completed successfully: %s", job) - return consume + return consume def run(self): self._dead.clear() @@ -125,25 +122,26 @@ class SingleThreadedConductor(base.Conductor): except (excp.UnclaimableJob, excp.NotFound): LOG.debug("Job already claimed or consumed: %s", job) continue - dispatched += 1 + consume = False try: consume = self._dispatch_job(job) - except excp.ConductorFailure: + except Exception: LOG.warn("Job dispatching failed: %s", job, exc_info=True) else: - try: - if consume: - self._jobboard.consume(job, self._name) - else: - self._jobboard.abandon(job, self._name) - except excp.JobFailure: - if consume: - 
LOG.warn("Failed job consumption: %s", job, - exc_info=True) - else: - LOG.warn("Failed job abandonment: %s", job, - exc_info=True) + dispatched += 1 + try: + if consume: + self._jobboard.consume(job, self._name) + else: + self._jobboard.abandon(job, self._name) + except excp.JobFailure: + if consume: + LOG.warn("Failed job consumption: %s", job, + exc_info=True) + else: + LOG.warn("Failed job abandonment: %s", job, + exc_info=True) if dispatched == 0 and not self._wait_timeout.is_stopped(): self._wait_timeout.wait() finally: From a32f9245f951bc39c4cb7d746b784a9eca88392b Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Fri, 30 May 2014 17:22:10 -0700 Subject: [PATCH 103/188] Move flattening to the action engine compiler Since flattening is only one way to compile a flow and nested flows and atoms into a compilation unit move this functionality into the engine module where it is used. Change-Id: Ifea6b56cf5f2a9c1d16acabfaae6f28aeb6534a0 --- doc/source/utils.rst | 5 - taskflow/engines/action_engine/compiler.py | 160 +++++++++++++++- ...ening.py => test_action_engine_compile.py} | 148 ++++++++------ taskflow/utils/flow_utils.py | 180 ------------------ 4 files changed, 247 insertions(+), 246 deletions(-) rename taskflow/tests/unit/{test_flattening.py => test_action_engine_compile.py} (74%) delete mode 100644 taskflow/utils/flow_utils.py diff --git a/doc/source/utils.rst b/doc/source/utils.rst index 8b5d53ab..87f3727a 100644 --- a/doc/source/utils.rst +++ b/doc/source/utils.rst @@ -22,8 +22,3 @@ The following classes and modules are *recommended* for external usage: .. autofunction:: taskflow.utils.persistence_utils.temporary_flow_detail .. autofunction:: taskflow.utils.persistence_utils.pformat - -Internal usage -============== - -.. 
automodule:: taskflow.utils.flow_utils diff --git a/taskflow/engines/action_engine/compiler.py b/taskflow/engines/action_engine/compiler.py index 446ded95..883e7f0b 100644 --- a/taskflow/engines/action_engine/compiler.py +++ b/taskflow/engines/action_engine/compiler.py @@ -15,9 +15,17 @@ # under the License. import collections +import logging from taskflow import exceptions as exc -from taskflow.utils import flow_utils +from taskflow import flow +from taskflow import retry +from taskflow import task +from taskflow.types import graph as gr +from taskflow.utils import misc + +LOG = logging.getLogger(__name__) + # The result of a compilers compile() is this tuple (for now it is just a # execution graph but in the future it may grow to include more attributes @@ -39,7 +47,7 @@ class PatternCompiler(object): useful to retain part of this relationship). """ def compile(self, root): - graph = flow_utils.flatten(root) + graph = _Flattener(root).flatten() if graph.number_of_nodes() == 0: # Try to get a name attribute, otherwise just use the object # string representation directly if that attribute does not exist. @@ -47,3 +55,151 @@ class PatternCompiler(object): raise exc.Empty("Root container '%s' (%s) is empty." % (name, type(root))) return Compilation(graph) + + +_RETRY_EDGE_DATA = { + 'retry': True, +} + + +class _Flattener(object): + """Flattens a root item (task/flow) into a execution graph.""" + + def __init__(self, root, freeze=True): + self._root = root + self._graph = None + self._history = set() + self._freeze = bool(freeze) + + def _add_new_edges(self, graph, nodes_from, nodes_to, edge_attrs): + """Adds new edges from nodes to other nodes in the specified graph, + with the following edge attributes (defaulting to the class provided + edge_data if None), if the edge does not already exist. 
+ """ + nodes_to = list(nodes_to) + for u in nodes_from: + for v in nodes_to: + if not graph.has_edge(u, v): + # NOTE(harlowja): give each edge its own attr copy so that + # if it's later modified that the same copy isn't modified. + graph.add_edge(u, v, attr_dict=edge_attrs.copy()) + + def _flatten(self, item): + functor = self._find_flattener(item) + if not functor: + raise TypeError("Unknown type requested to flatten: %s (%s)" + % (item, type(item))) + self._pre_item_flatten(item) + graph = functor(item) + self._post_item_flatten(item, graph) + return graph + + def _find_flattener(self, item): + """Locates the flattening function to use to flatten the given item.""" + if isinstance(item, flow.Flow): + return self._flatten_flow + elif isinstance(item, task.BaseTask): + return self._flatten_task + elif isinstance(item, retry.Retry): + raise TypeError("Retry controller %s (%s) is used not as a flow " + "parameter" % (item, type(item))) + else: + return None + + def _connect_retry(self, retry, graph): + graph.add_node(retry) + + # All graph nodes that have no predecessors should depend on its retry + nodes_to = [n for n in graph.no_predecessors_iter() if n != retry] + self._add_new_edges(graph, [retry], nodes_to, _RETRY_EDGE_DATA) + + # Add link to retry for each node of subgraph that hasn't + # a parent retry + for n in graph.nodes_iter(): + if n != retry and 'retry' not in graph.node[n]: + graph.node[n]['retry'] = retry + + def _flatten_task(self, task): + """Flattens a individual task.""" + graph = gr.DiGraph(name=task.name) + graph.add_node(task) + return graph + + def _flatten_flow(self, flow): + """Flattens a graph flow.""" + graph = gr.DiGraph(name=flow.name) + + # Flatten all nodes into a single subgraph per node. + subgraph_map = {} + for item in flow: + subgraph = self._flatten(item) + subgraph_map[item] = subgraph + graph = gr.merge_graphs([graph, subgraph]) + + # Reconnect all node edges to their corresponding subgraphs. 
+ for (u, v, attrs) in flow.iter_links(): + u_g = subgraph_map[u] + v_g = subgraph_map[v] + if any(attrs.get(k) for k in ('invariant', 'manual', 'retry')): + # Connect nodes with no predecessors in v to nodes with + # no successors in u (thus maintaining the edge dependency). + self._add_new_edges(graph, + u_g.no_successors_iter(), + v_g.no_predecessors_iter(), + edge_attrs=attrs) + else: + # This is dependency-only edge, connect corresponding + # providers and consumers. + for provider in u_g: + for consumer in v_g: + reasons = provider.provides & consumer.requires + if reasons: + graph.add_edge(provider, consumer, reasons=reasons) + + if flow.retry is not None: + self._connect_retry(flow.retry, graph) + return graph + + def _pre_item_flatten(self, item): + """Called before a item is flattened; any pre-flattening actions.""" + if id(item) in self._history: + raise ValueError("Already flattened item: %s (%s), recursive" + " flattening not supported" % (item, id(item))) + self._history.add(id(item)) + + def _post_item_flatten(self, item, graph): + """Called before a item is flattened; any post-flattening actions.""" + + def _pre_flatten(self): + """Called before the flattening of the item starts.""" + self._history.clear() + + def _post_flatten(self, graph): + """Called after the flattening of the item finishes successfully.""" + dup_names = misc.get_duplicate_keys(graph.nodes_iter(), + key=lambda node: node.name) + if dup_names: + dup_names = ', '.join(sorted(dup_names)) + raise exc.Duplicate("Atoms with duplicate names " + "found: %s" % (dup_names)) + self._history.clear() + # NOTE(harlowja): this one can be expensive to calculate (especially + # the cycle detection), so only do it if we know debugging is enabled + # and not under all cases. + if LOG.isEnabledFor(logging.DEBUG): + LOG.debug("Translated '%s' into a graph:", self._root) + for line in graph.pformat().splitlines(): + # Indent it so that it's slightly offset from the above line. 
+ LOG.debug(" %s", line) + + def flatten(self): + """Flattens a item (a task or flow) into a single execution graph.""" + if self._graph is not None: + return self._graph + self._pre_flatten() + graph = self._flatten(self._root) + self._post_flatten(graph) + self._graph = graph + if self._freeze: + self._graph.freeze() + return self._graph diff --git a/taskflow/tests/unit/test_flattening.py b/taskflow/tests/unit/test_action_engine_compile.py similarity index 74% rename from taskflow/tests/unit/test_flattening.py rename to taskflow/tests/unit/test_action_engine_compile.py index 600a000a..57c248e0 100644 --- a/taskflow/tests/unit/test_flattening.py +++ b/taskflow/tests/unit/test_action_engine_compile.py @@ -24,7 +24,8 @@ from taskflow import retry from taskflow import test from taskflow.tests import utils as t_utils -from taskflow.utils import flow_utils as f_utils + +from taskflow.engines.action_engine import compiler def _make_many(amount): @@ -35,24 +36,26 @@ def _make_many(amount): return tasks -class FlattenTest(test.TestCase): - def test_flatten_task(self): +class PatternCompileTest(test.TestCase): + def test_task(self): task = t_utils.DummyTask(name='a') - g = f_utils.flatten(task) - + compilation = compiler.PatternCompiler().compile(task) + g = compilation.execution_graph self.assertEqual(list(g.nodes()), [task]) self.assertEqual(list(g.edges()), []) - def test_flatten_retry(self): + def test_retry(self): r = retry.AlwaysRevert('r1') msg_regex = "^Retry controller .* is used not as a flow parameter" - self.assertRaisesRegexp(TypeError, msg_regex, f_utils.flatten, r) + self.assertRaisesRegexp(TypeError, msg_regex, + compiler.PatternCompiler().compile, r) - def test_flatten_wrong_object(self): + def test_wrong_object(self): msg_regex = '^Unknown type requested to flatten' - self.assertRaisesRegexp(TypeError, msg_regex, f_utils.flatten, 42) + self.assertRaisesRegexp(TypeError, msg_regex, + compiler.PatternCompiler().compile, 42) - def test_linear_flatten(self): 
+ def test_linear(self): a, b, c, d = _make_many(4) flo = lf.Flow("test") flo.add(a, b, c) @@ -60,7 +63,8 @@ class FlattenTest(test.TestCase): sflo.add(d) flo.add(sflo) - g = f_utils.flatten(flo) + compilation = compiler.PatternCompiler().compile(flo) + g = compilation.execution_graph self.assertEqual(4, len(g)) order = g.topological_sort() @@ -71,18 +75,20 @@ class FlattenTest(test.TestCase): self.assertEqual([d], list(g.no_successors_iter())) self.assertEqual([a], list(g.no_predecessors_iter())) - def test_invalid_flatten(self): + def test_invalid(self): a, b, c = _make_many(3) flo = lf.Flow("test") flo.add(a, b, c) flo.add(flo) - self.assertRaises(ValueError, f_utils.flatten, flo) + self.assertRaises(ValueError, + compiler.PatternCompiler().compile, flo) - def test_unordered_flatten(self): + def test_unordered(self): a, b, c, d = _make_many(4) flo = uf.Flow("test") flo.add(a, b, c, d) - g = f_utils.flatten(flo) + compilation = compiler.PatternCompiler().compile(flo) + g = compilation.execution_graph self.assertEqual(4, len(g)) self.assertEqual(0, g.number_of_edges()) self.assertEqual(set([a, b, c, d]), @@ -90,14 +96,16 @@ class FlattenTest(test.TestCase): self.assertEqual(set([a, b, c, d]), set(g.no_predecessors_iter())) - def test_linear_nested_flatten(self): + def test_linear_nested(self): a, b, c, d = _make_many(4) flo = lf.Flow("test") flo.add(a, b) flo2 = uf.Flow("test2") flo2.add(c, d) flo.add(flo2) - g = f_utils.flatten(flo) + + compilation = compiler.PatternCompiler().compile(flo) + g = compilation.execution_graph self.assertEqual(4, len(g)) lb = g.subgraph([a, b]) @@ -112,7 +120,7 @@ class FlattenTest(test.TestCase): self.assertTrue(g.has_edge(b, c)) self.assertTrue(g.has_edge(b, d)) - def test_unordered_nested_flatten(self): + def test_unordered_nested(self): a, b, c, d = _make_many(4) flo = uf.Flow("test") flo.add(a, b) @@ -120,7 +128,8 @@ class FlattenTest(test.TestCase): flo2.add(c, d) flo.add(flo2) - g = f_utils.flatten(flo) + compilation = 
compiler.PatternCompiler().compile(flo) + g = compilation.execution_graph self.assertEqual(4, len(g)) for n in [a, b]: self.assertFalse(g.has_edge(n, c)) @@ -134,14 +143,15 @@ class FlattenTest(test.TestCase): lb = g.subgraph([c, d]) self.assertEqual(1, lb.number_of_edges()) - def test_unordered_nested_in_linear_flatten(self): + def test_unordered_nested_in_linear(self): a, b, c, d = _make_many(4) flo = lf.Flow('lt').add( a, uf.Flow('ut').add(b, c), d) - g = f_utils.flatten(flo) + compilation = compiler.PatternCompiler().compile(flo) + g = compilation.execution_graph self.assertEqual(4, len(g)) self.assertItemsEqual(g.edges(), [ (a, b), @@ -150,16 +160,17 @@ class FlattenTest(test.TestCase): (c, d) ]) - def test_graph_flatten(self): + def test_graph(self): a, b, c, d = _make_many(4) flo = gf.Flow("test") flo.add(a, b, c, d) - g = f_utils.flatten(flo) + compilation = compiler.PatternCompiler().compile(flo) + g = compilation.execution_graph self.assertEqual(4, len(g)) self.assertEqual(0, g.number_of_edges()) - def test_graph_flatten_nested(self): + def test_graph_nested(self): a, b, c, d, e, f, g = _make_many(7) flo = gf.Flow("test") flo.add(a, b, c, d) @@ -168,14 +179,15 @@ class FlattenTest(test.TestCase): flo2.add(e, f, g) flo.add(flo2) - graph = f_utils.flatten(flo) + compilation = compiler.PatternCompiler().compile(flo) + graph = compilation.execution_graph self.assertEqual(7, len(graph)) self.assertItemsEqual(graph.edges(data=True), [ (e, f, {'invariant': True}), (f, g, {'invariant': True}) ]) - def test_graph_flatten_nested_graph(self): + def test_graph_nested_graph(self): a, b, c, d, e, f, g = _make_many(7) flo = gf.Flow("test") flo.add(a, b, c, d) @@ -184,11 +196,12 @@ class FlattenTest(test.TestCase): flo2.add(e, f, g) flo.add(flo2) - g = f_utils.flatten(flo) + compilation = compiler.PatternCompiler().compile(flo) + g = compilation.execution_graph self.assertEqual(7, len(g)) self.assertEqual(0, g.number_of_edges()) - def test_graph_flatten_links(self): + 
def test_graph_links(self): a, b, c, d = _make_many(4) flo = gf.Flow("test") flo.add(a, b, c, d) @@ -196,7 +209,8 @@ class FlattenTest(test.TestCase): flo.link(b, c) flo.link(c, d) - g = f_utils.flatten(flo) + compilation = compiler.PatternCompiler().compile(flo) + g = compilation.execution_graph self.assertEqual(4, len(g)) self.assertItemsEqual(g.edges(data=True), [ (a, b, {'manual': True}), @@ -206,12 +220,13 @@ class FlattenTest(test.TestCase): self.assertItemsEqual([a], g.no_predecessors_iter()) self.assertItemsEqual([d], g.no_successors_iter()) - def test_graph_flatten_dependencies(self): + def test_graph_dependencies(self): a = t_utils.ProvidesRequiresTask('a', provides=['x'], requires=[]) b = t_utils.ProvidesRequiresTask('b', provides=[], requires=['x']) flo = gf.Flow("test").add(a, b) - g = f_utils.flatten(flo) + compilation = compiler.PatternCompiler().compile(flo) + g = compilation.execution_graph self.assertEqual(2, len(g)) self.assertItemsEqual(g.edges(data=True), [ (a, b, {'reasons': set(['x'])}) @@ -219,7 +234,7 @@ class FlattenTest(test.TestCase): self.assertItemsEqual([a], g.no_predecessors_iter()) self.assertItemsEqual([b], g.no_successors_iter()) - def test_graph_flatten_nested_requires(self): + def test_graph_nested_requires(self): a = t_utils.ProvidesRequiresTask('a', provides=['x'], requires=[]) b = t_utils.ProvidesRequiresTask('b', provides=[], requires=[]) c = t_utils.ProvidesRequiresTask('c', provides=[], requires=['x']) @@ -228,7 +243,8 @@ class FlattenTest(test.TestCase): lf.Flow("test2").add(b, c) ) - g = f_utils.flatten(flo) + compilation = compiler.PatternCompiler().compile(flo) + g = compilation.execution_graph self.assertEqual(3, len(g)) self.assertItemsEqual(g.edges(data=True), [ (a, c, {'reasons': set(['x'])}), @@ -237,7 +253,7 @@ class FlattenTest(test.TestCase): self.assertItemsEqual([a, b], g.no_predecessors_iter()) self.assertItemsEqual([c], g.no_successors_iter()) - def test_graph_flatten_nested_provides(self): + def 
test_graph_nested_provides(self): a = t_utils.ProvidesRequiresTask('a', provides=[], requires=['x']) b = t_utils.ProvidesRequiresTask('b', provides=['x'], requires=[]) c = t_utils.ProvidesRequiresTask('c', provides=[], requires=[]) @@ -246,7 +262,8 @@ class FlattenTest(test.TestCase): lf.Flow("test2").add(b, c) ) - g = f_utils.flatten(flo) + compilation = compiler.PatternCompiler().compile(flo) + g = compilation.execution_graph self.assertEqual(3, len(g)) self.assertItemsEqual(g.edges(data=True), [ (b, c, {'invariant': True}), @@ -255,46 +272,50 @@ class FlattenTest(test.TestCase): self.assertItemsEqual([b], g.no_predecessors_iter()) self.assertItemsEqual([a, c], g.no_successors_iter()) - def test_flatten_checks_for_dups(self): + def test_checks_for_dups(self): flo = gf.Flow("test").add( t_utils.DummyTask(name="a"), t_utils.DummyTask(name="a") ) self.assertRaisesRegexp(exc.Duplicate, - '^Tasks with duplicate names', - f_utils.flatten, flo) + '^Atoms with duplicate names', + compiler.PatternCompiler().compile, flo) - def test_flatten_checks_for_dups_globally(self): + def test_checks_for_dups_globally(self): flo = gf.Flow("test").add( gf.Flow("int1").add(t_utils.DummyTask(name="a")), gf.Flow("int2").add(t_utils.DummyTask(name="a"))) self.assertRaisesRegexp(exc.Duplicate, - '^Tasks with duplicate names', - f_utils.flatten, flo) + '^Atoms with duplicate names', + compiler.PatternCompiler().compile, flo) - def test_flatten_retry_in_linear_flow(self): + def test_retry_in_linear_flow(self): flo = lf.Flow("test", retry.AlwaysRevert("c")) - g = f_utils.flatten(flo) + compilation = compiler.PatternCompiler().compile(flo) + g = compilation.execution_graph self.assertEqual(1, len(g)) self.assertEqual(0, g.number_of_edges()) - def test_flatten_retry_in_unordered_flow(self): + def test_retry_in_unordered_flow(self): flo = uf.Flow("test", retry.AlwaysRevert("c")) - g = f_utils.flatten(flo) + compilation = compiler.PatternCompiler().compile(flo) + g = compilation.execution_graph 
self.assertEqual(1, len(g)) self.assertEqual(0, g.number_of_edges()) - def test_flatten_retry_in_graph_flow(self): + def test_retry_in_graph_flow(self): flo = gf.Flow("test", retry.AlwaysRevert("c")) - g = f_utils.flatten(flo) + compilation = compiler.PatternCompiler().compile(flo) + g = compilation.execution_graph self.assertEqual(1, len(g)) self.assertEqual(0, g.number_of_edges()) - def test_flatten_retry_in_nested_flows(self): + def test_retry_in_nested_flows(self): c1 = retry.AlwaysRevert("c1") c2 = retry.AlwaysRevert("c2") flo = lf.Flow("test", c1).add(lf.Flow("test2", c2)) - g = f_utils.flatten(flo) + compilation = compiler.PatternCompiler().compile(flo) + g = compilation.execution_graph self.assertEqual(2, len(g)) self.assertItemsEqual(g.edges(data=True), [ @@ -304,11 +325,13 @@ class FlattenTest(test.TestCase): self.assertItemsEqual([c1], g.no_predecessors_iter()) self.assertItemsEqual([c2], g.no_successors_iter()) - def test_flatten_retry_in_linear_flow_with_tasks(self): + def test_retry_in_linear_flow_with_tasks(self): c = retry.AlwaysRevert("c") a, b = _make_many(2) flo = lf.Flow("test", c).add(a, b) - g = f_utils.flatten(flo) + compilation = compiler.PatternCompiler().compile(flo) + g = compilation.execution_graph + self.assertEqual(3, len(g)) self.assertItemsEqual(g.edges(data=True), [ (a, b, {'invariant': True}), @@ -320,11 +343,13 @@ class FlattenTest(test.TestCase): self.assertIs(c, g.node[a]['retry']) self.assertIs(c, g.node[b]['retry']) - def test_flatten_retry_in_unordered_flow_with_tasks(self): + def test_retry_in_unordered_flow_with_tasks(self): c = retry.AlwaysRevert("c") a, b = _make_many(2) flo = uf.Flow("test", c).add(a, b) - g = f_utils.flatten(flo) + compilation = compiler.PatternCompiler().compile(flo) + g = compilation.execution_graph + self.assertEqual(3, len(g)) self.assertItemsEqual(g.edges(data=True), [ (c, a, {'retry': True}), @@ -336,11 +361,12 @@ class FlattenTest(test.TestCase): self.assertIs(c, g.node[a]['retry']) 
self.assertIs(c, g.node[b]['retry']) - def test_flatten_retry_in_graph_flow_with_tasks(self): + def test_retry_in_graph_flow_with_tasks(self): r = retry.AlwaysRevert("cp") a, b, c = _make_many(3) flo = gf.Flow("test", r).add(a, b, c).link(b, c) - g = f_utils.flatten(flo) + compilation = compiler.PatternCompiler().compile(flo) + g = compilation.execution_graph self.assertEqual(4, len(g)) self.assertItemsEqual(g.edges(data=True), [ @@ -355,7 +381,7 @@ class FlattenTest(test.TestCase): self.assertIs(r, g.node[b]['retry']) self.assertIs(r, g.node[c]['retry']) - def test_flatten_retries_hierarchy(self): + def test_retries_hierarchy(self): c1 = retry.AlwaysRevert("cp1") c2 = retry.AlwaysRevert("cp2") a, b, c, d = _make_many(4) @@ -363,7 +389,9 @@ class FlattenTest(test.TestCase): a, lf.Flow("test", c2).add(b, c), d) - g = f_utils.flatten(flo) + compilation = compiler.PatternCompiler().compile(flo) + g = compilation.execution_graph + self.assertEqual(6, len(g)) self.assertItemsEqual(g.edges(data=True), [ (c1, a, {'retry': True}), @@ -379,14 +407,16 @@ class FlattenTest(test.TestCase): self.assertIs(c1, g.node[c2]['retry']) self.assertIs(None, g.node[c1].get('retry')) - def test_flatten_retry_subflows_hierarchy(self): + def test_retry_subflows_hierarchy(self): c1 = retry.AlwaysRevert("cp1") a, b, c, d = _make_many(4) flo = lf.Flow("test", c1).add( a, lf.Flow("test").add(b, c), d) - g = f_utils.flatten(flo) + compilation = compiler.PatternCompiler().compile(flo) + g = compilation.execution_graph + self.assertEqual(5, len(g)) self.assertItemsEqual(g.edges(data=True), [ (c1, a, {'retry': True}), diff --git a/taskflow/utils/flow_utils.py b/taskflow/utils/flow_utils.py deleted file mode 100644 index 6b54d563..00000000 --- a/taskflow/utils/flow_utils.py +++ /dev/null @@ -1,180 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright (C) 2013 Yahoo! Inc. All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import logging - -from taskflow import exceptions -from taskflow import flow -from taskflow import retry -from taskflow import task -from taskflow.types import graph as gr -from taskflow.utils import misc - - -LOG = logging.getLogger(__name__) - - -RETRY_EDGE_DATA = { - 'retry': True, -} - - -class Flattener(object): - def __init__(self, root, freeze=True): - self._root = root - self._graph = None - self._history = set() - self._freeze = bool(freeze) - - def _add_new_edges(self, graph, nodes_from, nodes_to, edge_attrs): - """Adds new edges from nodes to other nodes in the specified graph, - with the following edge attributes (defaulting to the class provided - edge_data if None), if the edge does not already exist. - """ - nodes_to = list(nodes_to) - for u in nodes_from: - for v in nodes_to: - if not graph.has_edge(u, v): - # NOTE(harlowja): give each edge its own attr copy so that - # if it's later modified that the same copy isn't modified. 
- graph.add_edge(u, v, attr_dict=edge_attrs.copy()) - - def _flatten(self, item): - functor = self._find_flattener(item) - if not functor: - raise TypeError("Unknown type requested to flatten: %s (%s)" - % (item, type(item))) - self._pre_item_flatten(item) - graph = functor(item) - self._post_item_flatten(item, graph) - return graph - - def _find_flattener(self, item): - """Locates the flattening function to use to flatten the given item.""" - if isinstance(item, flow.Flow): - return self._flatten_flow - elif isinstance(item, task.BaseTask): - return self._flatten_task - elif isinstance(item, retry.Retry): - raise TypeError("Retry controller %s (%s) is used not as a flow " - "parameter" % (item, type(item))) - else: - return None - - def _connect_retry(self, retry, graph): - graph.add_node(retry) - - # All graph nodes that have no predecessors should depend on its retry - nodes_to = [n for n in graph.no_predecessors_iter() if n != retry] - self._add_new_edges(graph, [retry], nodes_to, RETRY_EDGE_DATA) - - # Add link to retry for each node of subgraph that hasn't - # a parent retry - for n in graph.nodes_iter(): - if n != retry and 'retry' not in graph.node[n]: - graph.node[n]['retry'] = retry - - def _flatten_task(self, task): - """Flattens a individual task.""" - graph = gr.DiGraph(name=task.name) - graph.add_node(task) - return graph - - def _flatten_flow(self, flow): - """Flattens a graph flow.""" - graph = gr.DiGraph(name=flow.name) - - # Flatten all nodes into a single subgraph per node. - subgraph_map = {} - for item in flow: - subgraph = self._flatten(item) - subgraph_map[item] = subgraph - graph = gr.merge_graphs([graph, subgraph]) - - # Reconnect all node edges to their corresponding subgraphs. 
- for (u, v, attrs) in flow.iter_links(): - u_g = subgraph_map[u] - v_g = subgraph_map[v] - if any(attrs.get(k) for k in ('invariant', 'manual', 'retry')): - # Connect nodes with no predecessors in v to nodes with - # no successors in u (thus maintaining the edge dependency). - self._add_new_edges(graph, - u_g.no_successors_iter(), - v_g.no_predecessors_iter(), - edge_attrs=attrs) - else: - # This is dependency-only edge, connect corresponding - # providers and consumers. - for provider in u_g: - for consumer in v_g: - reasons = provider.provides & consumer.requires - if reasons: - graph.add_edge(provider, consumer, reasons=reasons) - - if flow.retry is not None: - self._connect_retry(flow.retry, graph) - return graph - - def _pre_item_flatten(self, item): - """Called before a item is flattened; any pre-flattening actions.""" - if id(item) in self._history: - raise ValueError("Already flattened item: %s (%s), recursive" - " flattening not supported" % (item, id(item))) - LOG.debug("Starting to flatten '%s'", item) - self._history.add(id(item)) - - def _post_item_flatten(self, item, graph): - """Called before a item is flattened; any post-flattening actions.""" - LOG.debug("Finished flattening '%s'", item) - # NOTE(harlowja): this one can be expensive to calculate (especially - # the cycle detection), so only do it if we know debugging is enabled - # and not under all cases. - if LOG.isEnabledFor(logging.DEBUG): - LOG.debug("Translated '%s' into a graph:", item) - for line in graph.pformat().splitlines(): - # Indent it so that it's slightly offset from the above line. 
- LOG.debug(" %s", line) - - def _pre_flatten(self): - """Called before the flattening of the item starts.""" - self._history.clear() - - def _post_flatten(self, graph): - """Called after the flattening of the item finishes successfully.""" - dup_names = misc.get_duplicate_keys(graph.nodes_iter(), - key=lambda node: node.name) - if dup_names: - dup_names = ', '.join(sorted(dup_names)) - raise exceptions.Duplicate("Tasks with duplicate names " - "found: %s" % (dup_names)) - self._history.clear() - - def flatten(self): - """Flattens a item (a task or flow) into a single execution graph.""" - if self._graph is not None: - return self._graph - self._pre_flatten() - graph = self._flatten(self._root) - self._post_flatten(graph) - self._graph = graph - if self._freeze: - self._graph.freeze() - return self._graph - - -def flatten(item, freeze=True): - """Flattens a item (a task or flow) into a single execution graph.""" - return Flattener(item, freeze=freeze).flatten() From 059023417e974d97cd540db401e91f3c059bec5d Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Sat, 31 May 2014 11:02:54 -0700 Subject: [PATCH 104/188] Invert the conductor stop() returned result Instead of returning whether the conductor is still dispatching return whether the stop was successful or whether it was not. This matches better with the expected semantics of the stop routine. 
Change-Id: Iccdca017e174adbd8eb3c093c6d0ea0c570792f5 --- taskflow/conductors/single_threaded.py | 18 ++++++++++-------- .../tests/unit/conductor/test_conductor.py | 7 ++++--- 2 files changed, 14 insertions(+), 11 deletions(-) diff --git a/taskflow/conductors/single_threaded.py b/taskflow/conductors/single_threaded.py index 87201107..fb2332ae 100644 --- a/taskflow/conductors/single_threaded.py +++ b/taskflow/conductors/single_threaded.py @@ -67,20 +67,22 @@ class SingleThreadedConductor(base.Conductor): @lock_utils.locked def stop(self, timeout=None): - """Stops dispatching and returns whether the dispatcher loop is active - or whether it has ceased. If a timeout is provided the dispatcher - loop may not have ceased by the timeout reached (the request to cease - will be honored in the future). + """Requests the conductor to stop dispatching and returns whether the + stop request was successfully completed. If the dispatching is still + occurring then False is returned otherwise True will be returned to + signal that the conductor is no longer dispatching job requests. + + NOTE(harlowja): If a timeout is provided the dispatcher loop may + not have ceased by the timeout reached (the request to cease will + be honored in the future) and False will be returned indicating this. 
""" self._wait_timeout.interrupt() self._dead.wait(timeout) - return self.dispatching + return self._dead.is_set() @property def dispatching(self): - if self._dead.is_set(): - return False - return True + return not self._dead.is_set() def _dispatch_job(self, job): LOG.info("Dispatching job: %s", job) diff --git a/taskflow/tests/unit/conductor/test_conductor.py b/taskflow/tests/unit/conductor/test_conductor.py index 7ac75d91..b43ba035 100644 --- a/taskflow/tests/unit/conductor/test_conductor.py +++ b/taskflow/tests/unit/conductor/test_conductor.py @@ -88,7 +88,8 @@ class SingleThreadedConductorTest(test_utils.EngineTestBase, test.TestCase): with close_many(components.conductor, components.client): t = make_thread(components.conductor) t.start() - self.assertFalse(components.conductor.stop(0.5)) + self.assertTrue(components.conductor.stop(0.5)) + self.assertFalse(components.conductor.dispatching) t.join() def test_run(self): @@ -111,7 +112,7 @@ class SingleThreadedConductorTest(test_utils.EngineTestBase, test.TestCase): details={'flow_uuid': fd.uuid}) consumed_event.wait(1.0) self.assertTrue(consumed_event.is_set()) - components.conductor.stop(1.0) + self.assertTrue(components.conductor.stop(1.0)) self.assertFalse(components.conductor.dispatching) persistence = components.persistence @@ -142,7 +143,7 @@ class SingleThreadedConductorTest(test_utils.EngineTestBase, test.TestCase): details={'flow_uuid': fd.uuid}) consumed_event.wait(1.0) self.assertTrue(consumed_event.is_set()) - components.conductor.stop(1.0) + self.assertTrue(components.conductor.stop(1.0)) self.assertFalse(components.conductor.dispatching) persistence = components.persistence From 2157791295de7fc96f7ad1ffc81f12b060fd4477 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Sat, 31 May 2014 20:01:45 -0700 Subject: [PATCH 105/188] Rename to atom from task The variables, exception messages and other variable namings in the atom file should reflect atoms, not tasks (which is a leftover change from when 
these were in a different file). Change-Id: Ie7870c8240409f7c53d4f2806eafa912bcc620b9 --- taskflow/atom.py | 26 +++++++++++++------------- taskflow/tests/unit/test_task.py | 2 +- 2 files changed, 14 insertions(+), 14 deletions(-) diff --git a/taskflow/atom.py b/taskflow/atom.py index 58f69f8d..c288cbb7 100644 --- a/taskflow/atom.py +++ b/taskflow/atom.py @@ -52,7 +52,7 @@ def _save_as_to_mapping(save_as): # a unordered set) so the only way for us to easily map the result of # the atom will be via the key itself. return dict((key, key) for key in save_as) - raise TypeError('Task provides parameter ' + raise TypeError('Atom provides parameter ' 'should be str, set or tuple/list, not %r' % save_as) @@ -76,39 +76,39 @@ def _build_rebind_dict(args, rebind_args): raise TypeError('Invalid rebind value: %s' % rebind_args) -def _build_arg_mapping(task_name, reqs, rebind_args, function, do_infer, +def _build_arg_mapping(atom_name, reqs, rebind_args, function, do_infer, ignore_list=None): """Given a function, its requirements and a rebind mapping this helper function will build the correct argument mapping for the given function as well as verify that the final argument mapping does not have missing or extra arguments (where applicable). 
""" - task_args = reflection.get_callable_args(function, required_only=True) + atom_args = reflection.get_callable_args(function, required_only=True) if ignore_list: for arg in ignore_list: - if arg in task_args: - task_args.remove(arg) + if arg in atom_args: + atom_args.remove(arg) result = {} if reqs: result.update((a, a) for a in reqs) if do_infer: - result.update((a, a) for a in task_args) - result.update(_build_rebind_dict(task_args, rebind_args)) + result.update((a, a) for a in atom_args) + result.update(_build_rebind_dict(atom_args, rebind_args)) if not reflection.accepts_kwargs(function): all_args = reflection.get_callable_args(function, required_only=False) extra_args = set(result) - set(all_args) if extra_args: extra_args_str = ', '.join(sorted(extra_args)) - raise ValueError('Extra arguments given to task %s: %s' - % (task_name, extra_args_str)) + raise ValueError('Extra arguments given to atom %s: %s' + % (atom_name, extra_args_str)) # NOTE(imelnikov): don't use set to preserve order in error message - missing_args = [arg for arg in task_args if arg not in result] + missing_args = [arg for arg in atom_args if arg not in result] if missing_args: - raise ValueError('Missing arguments for task %s: %s' - % (task_name, ' ,'.join(missing_args))) + raise ValueError('Missing arguments for atom %s: %s' + % (atom_name, ' ,'.join(missing_args))) return result @@ -130,7 +130,7 @@ class Atom(object): name. :ivar rebind: An *immutable* input ``resource`` mapping dictionary that can be used to alter the inputs given to this atom. It is - typically used for mapping a prior tasks output into + typically used for mapping a prior atoms output into the names that this atom expects (in a way this is like remapping a namespace of another atom into the namespace of this atom). 
diff --git a/taskflow/tests/unit/test_task.py b/taskflow/tests/unit/test_task.py index 9db33f28..bcb75788 100644 --- a/taskflow/tests/unit/test_task.py +++ b/taskflow/tests/unit/test_task.py @@ -85,7 +85,7 @@ class TaskTest(test.TestCase): self.assertEqual(my_task.save_as, {'food': 0}) def test_bad_provides(self): - self.assertRaisesRegexp(TypeError, '^Task provides', + self.assertRaisesRegexp(TypeError, '^Atom provides', MyTask, provides=object()) def test_requires_by_default(self): From 76d3d06f7ece80108252a8c5144b750bf28d0b79 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Sat, 31 May 2014 22:04:03 -0700 Subject: [PATCH 106/188] Warn about internal helper/utility usage Adjust the utils doc and add in a warning that lets users know that the internal functions and modules that appear to be utility related should not be used externally (and if they use these they are using them at their own risk). Change-Id: I5d813028c8c7f35359853cab45fcdc8454bbf6fd --- doc/source/utils.rst | 21 ++++++--------------- 1 file changed, 6 insertions(+), 15 deletions(-) diff --git a/doc/source/utils.rst b/doc/source/utils.rst index 87f3727a..75fe91b6 100644 --- a/doc/source/utils.rst +++ b/doc/source/utils.rst @@ -2,23 +2,14 @@ Utils ----- -There are various helper utils that are part of TaskFlows internal usage (and -external/public usage of these helpers should be kept to a minimum as these -utility functions may be altered more often in the future). +.. warning:: -External usage -============== + External usage of internal helpers and other internal utility functions + and modules should be kept to a *minimum* as these may be altered, + refactored or moved *without* notice. -The following classes and modules are *recommended* for external usage: +The following classes and modules though may be used: .. autoclass:: taskflow.utils.misc.Failure - :members: - .. autoclass:: taskflow.utils.eventlet_utils.GreenExecutor - :members: - -.. 
autofunction:: taskflow.utils.persistence_utils.temporary_log_book - -.. autofunction:: taskflow.utils.persistence_utils.temporary_flow_detail - -.. autofunction:: taskflow.utils.persistence_utils.pformat +.. automodule:: taskflow.utils.persistence_utils From 31b84e1c51c7660d0fed5a3ee98c67c62207a01c Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Mon, 2 Jun 2014 11:14:50 -0700 Subject: [PATCH 107/188] Ensure cachedproperty descriptor picks up docstrings Closes-Bug: #1325677 Change-Id: I6366dea5458adec30e945a9834e15da946813461 --- taskflow/tests/unit/test_utils.py | 20 ++++++++++++++++++++ taskflow/utils/misc.py | 5 +++++ 2 files changed, 25 insertions(+) diff --git a/taskflow/tests/unit/test_utils.py b/taskflow/tests/unit/test_utils.py index ffabe7df..1d1ea336 100644 --- a/taskflow/tests/unit/test_utils.py +++ b/taskflow/tests/unit/test_utils.py @@ -16,6 +16,7 @@ import collections import functools +import inspect import sys import time @@ -370,6 +371,25 @@ class CachedPropertyTest(test.TestCase): self.assertRaises(AttributeError, try_set, a) self.assertEqual('b', a.b) + def test_documented_property(self): + + class A(object): + @misc.cachedproperty + def b(self): + """I like bees.""" + return 'b' + + self.assertEqual("I like bees.", inspect.getdoc(A.b)) + + def test_undocumented_property(self): + + class A(object): + @misc.cachedproperty + def b(self): + return 'b' + + self.assertEqual(None, inspect.getdoc(A.b)) + class AttrDictTest(test.TestCase): def test_ok_create(self): diff --git a/taskflow/utils/misc.py b/taskflow/utils/misc.py index 623fce32..d2360c9b 100644 --- a/taskflow/utils/misc.py +++ b/taskflow/utils/misc.py @@ -187,15 +187,18 @@ class cachedproperty(object): if inspect.isfunction(fget): self._fget = fget self._attr_name = "_%s" % (fget.__name__) + self.__doc__ = getattr(fget, '__doc__', None) else: self._attr_name = fget self._fget = None + self.__doc__ = None def __call__(self, fget): # If __init__ received a string then this will be the 
function to be # wrapped as a property (if __init__ got a function then this will not # be called). self._fget = fget + self.__doc__ = getattr(fget, '__doc__', None) return self def __set__(self, instance, value): @@ -205,6 +208,8 @@ class cachedproperty(object): raise AttributeError("can't delete attribute") def __get__(self, instance, owner): + if instance is None: + return self try: return getattr(instance, self._attr_name) except AttributeError: From 5a2dcfb6247f782b6406d9956bd2ee9d642b236e Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Mon, 2 Jun 2014 16:41:22 -0700 Subject: [PATCH 108/188] Provide the compilation object instead of just a part of it The compilation result will be able to contain more than just an execution graph in the near future so we should provide it back instead of a subcomponent of the compilation result. Breaking change: removes access to the execution graph property and replaces it with a compilation property (that will contain the execution graph and any other compilation related objects). Change-Id: Ieba63f5ab3a59c38f4aa2a22d9caad412f8fa85d --- taskflow/engines/action_engine/engine.py | 14 ++++++++------ taskflow/engines/action_engine/graph_analyzer.py | 4 ---- taskflow/tests/unit/test_action_engine.py | 4 ++-- 3 files changed, 10 insertions(+), 12 deletions(-) diff --git a/taskflow/engines/action_engine/engine.py b/taskflow/engines/action_engine/engine.py index e63aeb31..3bdbfd74 100644 --- a/taskflow/engines/action_engine/engine.py +++ b/taskflow/engines/action_engine/engine.py @@ -71,15 +71,17 @@ class ActionEngine(base.EngineBase): self._change_state(states.SUSPENDING) @property - def execution_graph(self): - """The graph of nodes to be executed. + def compilation(self): + """The compilation result. - NOTE(harlowja): Only accessible after compilation has completed. 
+ NOTE(harlowja): Only accessible after compilation has completed (None + will be returned when this property is accessed before compilation has + completed successfully). """ - g = None if self._compiled: - g = self._compilation.execution_graph - return g + return self._compilation + else: + return None def run(self): with lock_utils.try_lock(self._lock) as was_locked: diff --git a/taskflow/engines/action_engine/graph_analyzer.py b/taskflow/engines/action_engine/graph_analyzer.py index 7ca7182f..2e910f6b 100644 --- a/taskflow/engines/action_engine/graph_analyzer.py +++ b/taskflow/engines/action_engine/graph_analyzer.py @@ -31,10 +31,6 @@ class GraphAnalyzer(object): self._graph = graph self._storage = storage - @property - def execution_graph(self): - return self._graph - def get_next_nodes(self, node=None): if node is None: execute = self.browse_nodes_for_execute() diff --git a/taskflow/tests/unit/test_action_engine.py b/taskflow/tests/unit/test_action_engine.py index fa9f3de5..533b5eef 100644 --- a/taskflow/tests/unit/test_action_engine.py +++ b/taskflow/tests/unit/test_action_engine.py @@ -509,7 +509,7 @@ class EngineGraphFlowTest(utils.EngineTestBase): engine = self._make_engine(flow) engine.compile() - graph = engine.execution_graph + graph = engine.compilation.execution_graph self.assertIsInstance(graph, gr.DiGraph) def test_task_graph_property_for_one_task(self): @@ -517,7 +517,7 @@ class EngineGraphFlowTest(utils.EngineTestBase): engine = self._make_engine(flow) engine.compile() - graph = engine.execution_graph + graph = engine.compilation.execution_graph self.assertIsInstance(graph, gr.DiGraph) From 7b5dad30ed660de6e09ad46152fe26dc13d92abd Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Mon, 2 Jun 2014 17:56:49 -0700 Subject: [PATCH 109/188] Rename the graph analyzer to analyzer Adjust the graph analyzer to be a more generic compilation analyzer which analyzes compilation objects (which are now changed to be an object and not a named tuple) and 
provides utility functions ontop of that object. This helps it become possible to provide other useful analysis functions that are not directly tied to the execution graph component but can be provided ontop of other compilation components. Change-Id: I2ab08db4f566d5f329d7e79b1c50ed65aad9e4b3 --- doc/source/engines.rst | 6 +++--- .../{graph_analyzer.py => analyzer.py} | 18 ++++++++++-------- taskflow/engines/action_engine/compiler.py | 18 +++++++++++++----- taskflow/engines/action_engine/runner.py | 3 +-- taskflow/engines/action_engine/runtime.py | 15 +++++++-------- 5 files changed, 34 insertions(+), 26 deletions(-) rename taskflow/engines/action_engine/{graph_analyzer.py => analyzer.py} (89%) diff --git a/doc/source/engines.rst b/doc/source/engines.rst index 03526491..752f9f0e 100644 --- a/doc/source/engines.rst +++ b/doc/source/engines.rst @@ -253,7 +253,7 @@ analyzing the current state of the task; which is determined by looking at the state in the task detail object for that task and analyzing edges of the graph for things like retry atom which can influence what a tasks intention should be (this is aided by the usage of the -:py:class:`~taskflow.engines.action_engine.graph_analyzer.GraphAnalyzer` helper +:py:class:`~taskflow.engines.action_engine.analyzer.Analyzer` helper object which was designed to provide helper methods for this analysis). 
Once these intentions are determined and associated with each task (the intention is also stored in the :py:class:`~taskflow.persistence.logbook.AtomDetail` object) @@ -268,7 +268,7 @@ This stage selects which atoms are eligible to run by using a :py:class:`~taskflow.engines.action_engine.runtime.Scheduler` implementation (the default implementation looks at there intention, checking if predecessor atoms have ran and so-on, using a -:py:class:`~taskflow.engines.action_engine.graph_analyzer.GraphAnalyzer` helper +:py:class:`~taskflow.engines.action_engine.analyzer.Analyzer` helper object as needed) and submits those atoms to a previously provided compatible `executor`_ for asynchronous execution. This :py:class:`~taskflow.engines.action_engine.runtime.Scheduler` will return a @@ -322,9 +322,9 @@ saved for this execution. Interfaces ========== +.. automodule:: taskflow.engines.action_engine.analyzer .. automodule:: taskflow.engines.action_engine.compiler .. automodule:: taskflow.engines.action_engine.engine -.. automodule:: taskflow.engines.action_engine.graph_analyzer .. automodule:: taskflow.engines.action_engine.runner .. automodule:: taskflow.engines.action_engine.runtime .. 
automodule:: taskflow.engines.base diff --git a/taskflow/engines/action_engine/graph_analyzer.py b/taskflow/engines/action_engine/analyzer.py similarity index 89% rename from taskflow/engines/action_engine/graph_analyzer.py rename to taskflow/engines/action_engine/analyzer.py index 2e910f6b..ef960afc 100644 --- a/taskflow/engines/action_engine/graph_analyzer.py +++ b/taskflow/engines/action_engine/analyzer.py @@ -17,19 +17,21 @@ from networkx.algorithms import traversal import six -from taskflow import retry as r +from taskflow import retry as retry_atom from taskflow import states as st -class GraphAnalyzer(object): - """Analyzes a execution graph to get the next nodes for execution or - reversion by utilizing the graphs nodes and edge relations and comparing - the node state against the states stored in storage. +class Analyzer(object): + """Analyzes a compilation output to get the next atoms for execution or + reversion by utilizing the compilations underlying structures (graphs, + nodes and edge relations...) and using this information along with the + atom state/states stored in storage to provide useful analysis functions + to the rest of the runtime system. """ - def __init__(self, graph, storage): - self._graph = graph + def __init__(self, compilation, storage): self._storage = storage + self._graph = compilation.execution_graph def get_next_nodes(self, node=None): if node is None: @@ -129,7 +131,7 @@ class GraphAnalyzer(object): retries if state is None. 
""" for node in self._graph.nodes_iter(): - if isinstance(node, r.Retry): + if isinstance(node, retry_atom.Retry): if not state or self.get_state(node) == state: yield node diff --git a/taskflow/engines/action_engine/compiler.py b/taskflow/engines/action_engine/compiler.py index 883e7f0b..f5c519cc 100644 --- a/taskflow/engines/action_engine/compiler.py +++ b/taskflow/engines/action_engine/compiler.py @@ -14,7 +14,6 @@ # License for the specific language governing permissions and limitations # under the License. -import collections import logging from taskflow import exceptions as exc @@ -27,10 +26,19 @@ from taskflow.utils import misc LOG = logging.getLogger(__name__) -# The result of a compilers compile() is this tuple (for now it is just a -# execution graph but in the future it may grow to include more attributes -# that help the runtime units execute in a more optimal/featureful manner). -Compilation = collections.namedtuple("Compilation", ["execution_graph"]) +class Compilation(object): + """The result of a compilers compile() is this *immutable* object. + + For now it is just a execution graph but in the future it will grow to + include more methods & properties that help the various runtime units + execute in a more optimal & featureful manner. 
+ """ + def __init__(self, execution_graph): + self._execution_graph = execution_graph + + @property + def execution_graph(self): + return self._execution_graph class PatternCompiler(object): diff --git a/taskflow/engines/action_engine/runner.py b/taskflow/engines/action_engine/runner.py index dc8c1003..0120bd69 100644 --- a/taskflow/engines/action_engine/runner.py +++ b/taskflow/engines/action_engine/runner.py @@ -43,11 +43,10 @@ class Runner(object): ignorable_states = (st.SCHEDULING, st.WAITING, st.RESUMING, st.ANALYZING) def __init__(self, runtime, waiter): - self._runtime = runtime self._scheduler = runtime.scheduler self._completer = runtime.completer self._storage = runtime.storage - self._analyzer = runtime.graph_analyzer + self._analyzer = runtime.analyzer self._waiter = waiter def is_running(self): diff --git a/taskflow/engines/action_engine/runtime.py b/taskflow/engines/action_engine/runtime.py index 709ff78a..146c93a7 100644 --- a/taskflow/engines/action_engine/runtime.py +++ b/taskflow/engines/action_engine/runtime.py @@ -20,8 +20,8 @@ from taskflow import states as st from taskflow import task as task_atom from taskflow.utils import misc +from taskflow.engines.action_engine import analyzer as ca from taskflow.engines.action_engine import executor as ex -from taskflow.engines.action_engine import graph_analyzer as ga from taskflow.engines.action_engine import retry_action as ra from taskflow.engines.action_engine import task_action as ta @@ -47,9 +47,8 @@ class Runtime(object): return self._storage @misc.cachedproperty - def graph_analyzer(self): - return ga.GraphAnalyzer(self._compilation.execution_graph, - self._storage) + def analyzer(self): + return ca.Analyzer(self._compilation, self._storage) @misc.cachedproperty def completer(self): @@ -82,11 +81,11 @@ class Runtime(object): self.storage.set_atom_intention(node.name, intention) def reset_all(self, state=st.PENDING, intention=st.EXECUTE): - 
self.reset_nodes(self.graph_analyzer.iterate_all_nodes(), + self.reset_nodes(self.analyzer.iterate_all_nodes(), state=state, intention=intention) def reset_subgraph(self, node, state=st.PENDING, intention=st.EXECUTE): - self.reset_nodes(self.graph_analyzer.iterate_subgraph(node), + self.reset_nodes(self.analyzer.iterate_subgraph(node), state=state, intention=intention) @@ -100,7 +99,7 @@ class Completer(object): """Completes atoms using actions to complete them.""" def __init__(self, runtime): - self._analyzer = runtime.graph_analyzer + self._analyzer = runtime.analyzer self._retry_action = runtime.retry_action self._runtime = runtime self._storage = runtime.storage @@ -183,7 +182,7 @@ class Scheduler(object): """Schedules atoms using actions to schedule.""" def __init__(self, runtime): - self._analyzer = runtime.graph_analyzer + self._analyzer = runtime.analyzer self._retry_action = runtime.retry_action self._runtime = runtime self._storage = runtime.storage From 2ac96676e473faa6452d070d6bc73a74f131b1d4 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Mon, 2 Jun 2014 23:17:44 -0700 Subject: [PATCH 110/188] Rename inject_task_args to inject_atom_args Since storage injection can target atoms and not just tasks (just one type of atom) the method naming is more appropriate when named as atom injection (and associated variable name changes reflect this as well). Also add more stringent checking around the names of atoms that are targeted for injection (ensuring that the name provided actually exists in the storage unit). 
Change-Id: I0ad0178240613fda166ea8fcdc441b37290445f8 --- taskflow/engines/action_engine/engine.py | 2 +- .../engines/action_engine/retry_action.py | 2 +- taskflow/engines/action_engine/task_action.py | 4 +-- taskflow/storage.py | 25 ++++++++++++++----- 4 files changed, 23 insertions(+), 10 deletions(-) diff --git a/taskflow/engines/action_engine/engine.py b/taskflow/engines/action_engine/engine.py index e63aeb31..bfc90834 100644 --- a/taskflow/engines/action_engine/engine.py +++ b/taskflow/engines/action_engine/engine.py @@ -171,7 +171,7 @@ class ActionEngine(base.EngineBase): else: self.storage.ensure_task(node.name, version, node.save_as) if node.inject: - self.storage.inject_task_args(node.name, node.inject) + self.storage.inject_atom_args(node.name, node.inject) self._change_state(states.SUSPENDED) # does nothing in PENDING state @lock_utils.locked diff --git a/taskflow/engines/action_engine/retry_action.py b/taskflow/engines/action_engine/retry_action.py index eaedf04b..a1ca3abb 100644 --- a/taskflow/engines/action_engine/retry_action.py +++ b/taskflow/engines/action_engine/retry_action.py @@ -34,7 +34,7 @@ class RetryAction(object): def _get_retry_args(self, retry): kwargs = self._storage.fetch_mapped_args(retry.rebind, - task_name=retry.name) + atom_name=retry.name) kwargs['history'] = self._storage.get_retry_history(retry.name) return kwargs diff --git a/taskflow/engines/action_engine/task_action.py b/taskflow/engines/action_engine/task_action.py index 9ab8c460..c0d1daa5 100644 --- a/taskflow/engines/action_engine/task_action.py +++ b/taskflow/engines/action_engine/task_action.py @@ -66,7 +66,7 @@ class TaskAction(object): raise exceptions.InvalidState("Task %s is in invalid state and" " can't be executed" % task.name) kwargs = self._storage.fetch_mapped_args(task.rebind, - task_name=task.name) + atom_name=task.name) task_uuid = self._storage.get_atom_uuid(task.name) return self._task_executor.execute_task(task, task_uuid, kwargs, self._on_update_progress) 
@@ -83,7 +83,7 @@ class TaskAction(object): raise exceptions.InvalidState("Task %s is in invalid state and" " can't be reverted" % task.name) kwargs = self._storage.fetch_mapped_args(task.rebind, - task_name=task.name) + atom_name=task.name) task_uuid = self._storage.get_atom_uuid(task.name) task_result = self._storage.get(task.name) failures = self._storage.get_failures() diff --git a/taskflow/storage.py b/taskflow/storage.py index e3a208a4..353d44f3 100644 --- a/taskflow/storage.py +++ b/taskflow/storage.py @@ -411,9 +411,22 @@ class Storage(object): if self._reset_atom(ad, state): self._with_connection(self._save_atom_detail, ad) - def inject_task_args(self, task_name, injected_args): - self._injected_args.setdefault(task_name, {}) - self._injected_args[task_name].update(injected_args) + def inject_atom_args(self, atom_name, pairs): + """Add *transient* values into storage for a specific atom only. + + This method injects a dictionary/pairs of arguments for an atom so that + when that atom is scheduled for execution it will have immediate access + to these arguments. + + NOTE(harlowja): injected atom arguments take precedence over arguments + provided by predecessor atoms or arguments provided by injecting into + the flow scope (using the inject() method). + """ + if atom_name not in self._atom_name_to_uuid: + raise exceptions.NotFound("Unknown atom name: %s" % atom_name) + with self._lock.write_lock(): + self._injected_args.setdefault(atom_name, {}) + self._injected_args[atom_name].update(pairs) def inject(self, pairs, transient=False): """Add values into storage. 
@@ -521,12 +534,12 @@ class Storage(object): pass return results - def fetch_mapped_args(self, args_mapping, task_name=None): + def fetch_mapped_args(self, args_mapping, atom_name=None): """Fetch arguments for an atom using an atoms arguments mapping.""" with self._lock.read_lock(): injected_args = {} - if task_name: - injected_args = self._injected_args.get(task_name, {}) + if atom_name: + injected_args = self._injected_args.get(atom_name, {}) mapped_args = {} for key, name in six.iteritems(args_mapping): if name in injected_args: From 5f37aa78abad843f801839e5de13eb9407bd7e8f Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Tue, 3 Jun 2014 16:46:22 -0700 Subject: [PATCH 111/188] Make the runner a runtime provided property The runner should be a component of a runtime system and as such should be part of the runtime object as a provided property instead of something that is constructed outside of the runtime object. Change-Id: I431a377e2dc4274102a60b6502a2d0d6e08c9556 --- taskflow/engines/action_engine/engine.py | 8 +++----- taskflow/engines/action_engine/runtime.py | 5 +++++ 2 files changed, 8 insertions(+), 5 deletions(-) diff --git a/taskflow/engines/action_engine/engine.py b/taskflow/engines/action_engine/engine.py index 3bdbfd74..ef14e2f1 100644 --- a/taskflow/engines/action_engine/engine.py +++ b/taskflow/engines/action_engine/engine.py @@ -18,7 +18,6 @@ import threading from taskflow.engines.action_engine import compiler from taskflow.engines.action_engine import executor -from taskflow.engines.action_engine import runner from taskflow.engines.action_engine import runtime from taskflow.engines import base @@ -53,7 +52,6 @@ class ActionEngine(base.EngineBase): def __init__(self, flow, flow_detail, backend, conf): super(ActionEngine, self).__init__(flow, flow_detail, backend, conf) - self._runner = None self._runtime = None self._compiled = False self._compilation = None @@ -116,9 +114,10 @@ class ActionEngine(base.EngineBase): self.prepare() 
self._task_executor.start() state = None + runner = self._runtime.runner try: self._change_state(states.RUNNING) - for state in self._runner.run_iter(timeout=timeout): + for state in runner.run_iter(timeout=timeout): try: try_suspend = yield state except GeneratorExit: @@ -130,7 +129,7 @@ class ActionEngine(base.EngineBase): with excutils.save_and_reraise_exception(): self._change_state(states.FAILURE) else: - ignorable_states = getattr(self._runner, 'ignorable_states', []) + ignorable_states = getattr(runner, 'ignorable_states', []) if state and state not in ignorable_states: self._change_state(state) if state != states.SUSPENDED and state != states.SUCCESS: @@ -214,7 +213,6 @@ class ActionEngine(base.EngineBase): self.storage, self.task_notifier, self._task_executor) - self._runner = runner.Runner(self._runtime, self._task_executor) self._compiled = True diff --git a/taskflow/engines/action_engine/runtime.py b/taskflow/engines/action_engine/runtime.py index 146c93a7..40e66453 100644 --- a/taskflow/engines/action_engine/runtime.py +++ b/taskflow/engines/action_engine/runtime.py @@ -23,6 +23,7 @@ from taskflow.utils import misc from taskflow.engines.action_engine import analyzer as ca from taskflow.engines.action_engine import executor as ex from taskflow.engines.action_engine import retry_action as ra +from taskflow.engines.action_engine import runner as ru from taskflow.engines.action_engine import task_action as ta @@ -50,6 +51,10 @@ class Runtime(object): def analyzer(self): return ca.Analyzer(self._compilation, self._storage) + @misc.cachedproperty + def runner(self): + return ru.Runner(self, self._task_executor) + @misc.cachedproperty def completer(self): return Completer(self) From 30d1f7066e752022fbe6565ddfbeb73fd4a73562 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Wed, 4 Jun 2014 15:24:34 -0700 Subject: [PATCH 112/188] Allow the mysql mode to be more than just TRADITIONAL To make it easier to later move to oslo.db match there ability to set a 
different mysql mode (this will default, unless configured explicitly off/differently to TRADITIONAL). To ensure that we retain backwards compatibility the prior existence of 'mysql_traditional_mode' as a boolean option will by default set the mode to TRADITIONAL, and a new configuration option 'mysql_sql_mode' can be used to provide a secondary overriding mode that will be used instead (incase someone wants to use something other than TRADITIONAL). Closes-Bug: 1326568 Change-Id: Ide34c27b12c26030c8842f3f4b0fcca43ce783a7 --- .../persistence/backends/impl_sqlalchemy.py | 27 +++++++++++++------ 1 file changed, 19 insertions(+), 8 deletions(-) diff --git a/taskflow/persistence/backends/impl_sqlalchemy.py b/taskflow/persistence/backends/impl_sqlalchemy.py index 2f2cffcb..bcbadab1 100644 --- a/taskflow/persistence/backends/impl_sqlalchemy.py +++ b/taskflow/persistence/backends/impl_sqlalchemy.py @@ -21,6 +21,7 @@ from __future__ import absolute_import import contextlib import copy +import functools import logging import time @@ -121,15 +122,19 @@ def _thread_yield(dbapi_con, con_record): time.sleep(0) -def _set_mode_traditional(dbapi_con, con_record, connection_proxy): - """Set engine mode to 'traditional'. +def _set_sql_mode(sql_mode, dbapi_con, connection_rec): + """Set the sql_mode session variable. - Required to prevent silent truncates at insert or update operations - under MySQL. By default MySQL truncates inserted string if it longer - than a declared field just with warning. That is fraught with data - corruption. + MySQL supports several server modes. The default is None, but sessions + may choose to enable server modes like TRADITIONAL, ANSI, + several STRICT_* modes and others. + + Note: passing in '' (empty string) for sql_mode clears + the SQL mode for the session, overriding a potentially set + server default. 
""" - dbapi_con.cursor().execute("SET SESSION sql_mode = TRADITIONAL;") + cursor = dbapi_con.cursor() + cursor.execute("SET SESSION sql_mode = %s", [sql_mode]) def _ping_listener(dbapi_conn, connection_rec, connection_proxy): @@ -200,8 +205,14 @@ class SQLAlchemyBackend(base.Backend): if 'mysql' in e_url.drivername: if misc.as_bool(conf.pop('checkout_ping', True)): sa.event.listen(engine, 'checkout', _ping_listener) + mode = None if misc.as_bool(conf.pop('mysql_traditional_mode', True)): - sa.event.listen(engine, 'checkout', _set_mode_traditional) + mode = 'TRADITIONAL' + if 'mysql_sql_mode' in conf: + mode = conf.pop('mysql_sql_mode') + if mode is not None: + sa.event.listen(engine, 'connect', + functools.partial(_set_sql_mode, mode)) return engine @property From 848aaddb1ac0b930e2782a9b44d528947723712c Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Wed, 4 Jun 2014 12:49:54 -0700 Subject: [PATCH 113/188] Add in default transaction isolation levels Apply a default setting for transaction isolation levels for mysql and postgresql to help avoid consistency issues that happen when two transactions occur at the same time on the same set of records. Closes-Bug: 1326507 Change-Id: I1819722889d0d66d938641af6aa6f79fcfd2deb4 --- .../persistence/backends/impl_sqlalchemy.py | 34 ++++++++++++++++--- 1 file changed, 29 insertions(+), 5 deletions(-) diff --git a/taskflow/persistence/backends/impl_sqlalchemy.py b/taskflow/persistence/backends/impl_sqlalchemy.py index 2f2cffcb..0cdea01f 100644 --- a/taskflow/persistence/backends/impl_sqlalchemy.py +++ b/taskflow/persistence/backends/impl_sqlalchemy.py @@ -95,6 +95,17 @@ POSTGRES_GONE_WAY_AWAY_ERRORS = ( # These connection urls mean sqlite is being used as an in-memory DB. SQLITE_IN_MEMORY = ('sqlite://', 'sqlite:///', 'sqlite:///:memory:') +# Transacation isolation levels that will be automatically applied, we prefer +# strong read committed isolation levels to avoid merging and using dirty +# data... 
+# +# See: http://en.wikipedia.org/wiki/Isolation_(database_systems) +DEFAULT_TXN_ISOLATION_LEVELS = { + 'mysql': 'READ COMMITTED', + 'postgresql': 'READ COMMITTED', + 'postgres': 'READ COMMITTED', +} + def _in_any(reason, err_haystack): """Checks if any elements of the haystack are in the given reason.""" @@ -189,6 +200,24 @@ class SQLAlchemyBackend(base.Backend): ('pool_timeout', 'pool_timeout')]: if lookup_key in conf: engine_args[k] = misc.as_int(conf.pop(lookup_key)) + if 'isolation_level' not in conf: + # Check driver name exact matches first, then try driver name + # partial matches... + txn_isolation_levels = conf.pop('isolation_levels', + DEFAULT_TXN_ISOLATION_LEVELS) + level_applied = False + for (driver, level) in six.iteritems(txn_isolation_levels): + if driver == e_url.drivername: + engine_args['isolation_level'] = level + level_applied = True + break + if not level_applied: + for (driver, level) in six.iteritems(txn_isolation_levels): + if e_url.drivername.find(driver) != -1: + engine_args['isolation_level'] = level + break + else: + engine_args['isolation_level'] = conf.pop('isolation_level') # If the configuration dict specifies any additional engine args # or engine arg overrides make sure we merge them in. engine_args.update(conf.pop('engine_args', {})) @@ -384,11 +413,6 @@ class Connection(base.Connection): def _save_logbook(self, session, lb): try: lb_m = _logbook_get_model(lb.uuid, session=session) - # NOTE(harlowja): Merge them (note that this doesn't provide - # 100% correct update semantics due to how databases have - # MVCC). This is where a stored procedure or a better backing - # store would handle this better by allowing this merge logic - # to exist in the database itself. 
lb_m = _logbook_merge(lb_m, lb) except exc.NotFound: lb_m = _convert_lb_to_internal(lb) From eb5f06de29b4044f1482e1bc2a9e9baa3e529871 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Wed, 4 Jun 2014 22:21:35 -0700 Subject: [PATCH 114/188] Include the function name on internal errors To make it easier to debug and reason about the exception that occurred include the function name that was called during the sessions activation in the exception message (and associated LOG output). Change-Id: I21f9310f78968d1e60d88b1f77be0dc629e75525 --- taskflow/persistence/backends/impl_sqlalchemy.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/taskflow/persistence/backends/impl_sqlalchemy.py b/taskflow/persistence/backends/impl_sqlalchemy.py index 2f2cffcb..c296abd0 100644 --- a/taskflow/persistence/backends/impl_sqlalchemy.py +++ b/taskflow/persistence/backends/impl_sqlalchemy.py @@ -306,8 +306,11 @@ class Connection(base.Connection): with session.begin(): return functor(session, *args, **kwargs) except sa_exc.SQLAlchemyError as e: - LOG.exception('Failed running database session') - raise exc.StorageFailure("Storage backend internal error", e) + LOG.exception("Failed running '%s' within a database session", + functor.__name__) + raise exc.StorageFailure("Storage backend internal error, failed" + " running '%s' within a database" + " session" % functor.__name__, e) def _make_session(self): try: From 1833914455ebf30cf4518d884ecfb0c02b3374c9 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Fri, 6 Jun 2014 19:38:20 -0700 Subject: [PATCH 115/188] Create a considerations section Instead of having the considerations and best practices that should be used when developing with this library scattered across a few different links and pages combine these together into a single section in the developer documentation. 
As a side-effect of this change remove the documenation about the utils to use and not use and just refer those to use the modules, classes and APIs described in these documents. Change-Id: I20e1405cb6ecb654baa29812b98ada7bdc393f6c --- doc/source/index.rst | 25 ++++++++++++++++++++++++- doc/source/utils.rst | 15 --------------- 2 files changed, 24 insertions(+), 16 deletions(-) delete mode 100644 doc/source/utils.rst diff --git a/doc/source/index.rst b/doc/source/index.rst index 7bfdc96d..b6667a94 100644 --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -33,6 +33,30 @@ Contents workers +Considerations +-------------- + +Things to consider before (and during) development and integration with +TaskFlow into your project: + +* Read over the `paradigm shifts`_ and engage the team in `IRC`_ (or via the + `openstack-dev`_ mailing list) if these need more explanation (prefix + ``[TaskFlow]`` to your emails subject to get an even faster response). +* Follow (or at least attempt to follow) some of the established + `best practices`_ (feel free to add your own suggested best practices). + +.. warning:: + + External usage of internal helpers and other internal utility functions + and modules should be kept to a *minimum* as these may be altered, + refactored or moved *without* notice. If you are unsure whether to use + a function, class, or module, please ask (see above). + +.. _IRC: irc://chat.freenode.net/openstack-state-management +.. _best practices: http://wiki.openstack.org/wiki/TaskFlow/Best_practices +.. _paradigm shifts: http://wiki.openstack.org/wiki/TaskFlow/Paradigm_shifts +.. _openstack-dev: mailto:openstack-dev@lists.openstack.org + Miscellaneous ------------- @@ -40,7 +64,6 @@ Miscellaneous :maxdepth: 2 exceptions - utils states examples diff --git a/doc/source/utils.rst b/doc/source/utils.rst deleted file mode 100644 index 75fe91b6..00000000 --- a/doc/source/utils.rst +++ /dev/null @@ -1,15 +0,0 @@ ------ -Utils ------ - -.. 
warning:: - - External usage of internal helpers and other internal utility functions - and modules should be kept to a *minimum* as these may be altered, - refactored or moved *without* notice. - -The following classes and modules though may be used: - -.. autoclass:: taskflow.utils.misc.Failure -.. autoclass:: taskflow.utils.eventlet_utils.GreenExecutor -.. automodule:: taskflow.utils.persistence_utils From c2731a84ec0c4981f7f0b6adacc0672765725a3f Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Sat, 7 Jun 2014 22:23:13 -0700 Subject: [PATCH 116/188] Switch to a restructuredtext README file To make the README file showup in a more readable manner on pypi and on other external sites convert the README from the markdown format and move it to the restructuredtext format which displays better on those external sites (and keeps our documentation consistently one format). Change-Id: I38f7152c264bbc0ebcf4a539b36f8b21e86705b8 --- README.md | 54 ------------------------------------------------- README.rst | 59 ++++++++++++++++++++++++++++++++++++++++++++++++++++++ setup.cfg | 2 +- 3 files changed, 60 insertions(+), 55 deletions(-) delete mode 100644 README.md create mode 100644 README.rst diff --git a/README.md b/README.md deleted file mode 100644 index 928ceee4..00000000 --- a/README.md +++ /dev/null @@ -1,54 +0,0 @@ -TaskFlow -======== - -A library to do [jobs, tasks, flows] in a HA manner using different backends to -be used with OpenStack projects. 
- -* More information at http://wiki.openstack.org/wiki/TaskFlow - -Join us -------- - -- http://launchpad.net/taskflow - -Testing and requirements ------------------------- - -### Requirements - -Because TaskFlow has many optional (pluggable) parts like persistence -backends and engines, we decided to split our requirements into two -parts: -- things that are absolutely required by TaskFlow (you can't use - TaskFlow without them) are put to `requirements.txt`; -- things that are required by some optional part of TaskFlow (you - can use TaskFlow without them) are put to `optional-requirements.txt`; - if you want to use the feature in question, you should add that - requirements to your project or environment; -- as usual, things that required only for running tests are put - to `test-requirements.txt`. - -### Tox.ini - -Our tox.ini describes several test environments that allow to test -TaskFlow with different python versions and sets of requirements -installed. - -To generate tox.ini, use the `toxgen.py` script by first installing -[toxgen](https://pypi.python.org/pypi/toxgen/) and then provide that script -as input the `tox-tmpl.ini` file to generate the final `tox.ini` file. - -*For example:* - - $ toxgen.py -i tox-tmpl.ini -o tox.ini - - -Documentation -------------- - -http://wiki.openstack.org/wiki/TaskFlow - -We also have sphinx documentation in `docs/source`. To build it, -run: - - $ python ./setup.py build_sphinx diff --git a/README.rst b/README.rst new file mode 100644 index 00000000..f16b9688 --- /dev/null +++ b/README.rst @@ -0,0 +1,59 @@ +TaskFlow +======== + +A library to do [jobs, tasks, flows] in a highly available, easy to understand +and declarative manner (and more!) to be used with OpenStack and other +projects. + +- More information can be found by referring to the `developer documentation`_. 
+ +Join us +------- + +- http://launchpad.net/taskflow + +Testing and requirements +------------------------ + +Requirements +~~~~~~~~~~~~ + +Because TaskFlow has many optional (pluggable) parts like persistence +backends and engines, we decided to split our requirements into two +parts: - things that are absolutely required by TaskFlow (you can’t use +TaskFlow without them) are put to ``requirements.txt``; - things that +are required by some optional part of TaskFlow (you can use TaskFlow +without them) are put to ``optional-requirements.txt``; if you want to +use the feature in question, you should add that requirements to your +project or environment; - as usual, things that required only for +running tests are put to ``test-requirements.txt``. + +Tox.ini +~~~~~~~ + +Our ``tox.ini`` file describes several test environments that allow to test +TaskFlow with different python versions and sets of requirements installed. + +To generate the ``tox.ini`` file, use the ``toxgen.py`` script by first +installing `toxgen`_ and then provide that script as input the ``tox-tmpl.ini`` +file to generate the final ``tox.ini`` file. + +*For example:* + +:: + + $ toxgen.py -i tox-tmpl.ini -o tox.ini + +Developer documentation +----------------------- + +We also have sphinx documentation in ``docs/source``. + +*To build it, run:* + +:: + + $ python setup.py build_sphinx + +.. _toxgen: https://pypi.python.org/pypi/toxgen/ +.. _developer documentation: http://docs.openstack.org/developer/taskflow/ diff --git a/setup.cfg b/setup.cfg index bc8f9a32..0c396204 100644 --- a/setup.cfg +++ b/setup.cfg @@ -2,7 +2,7 @@ name = taskflow summary = Taskflow structured state management library. 
description-file = - README.md + README.rst author = Taskflow Developers author-email = taskflow-dev@lists.launchpad.net home-page = https://launchpad.net/taskflow From 7c7e52351b61deb6b94b61ce3188ca27b44c3cad Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Sun, 8 Jun 2014 20:55:25 -0700 Subject: [PATCH 117/188] Increase usefulness of the retry component compile errors Adjust the descriptiveness of the errors raised when a retry atom is used in a flow or as the root item of a flow to help users understand why the error is raised. Change-Id: Ia3e13382d49e2225d48b2ae875061f92c211093c --- taskflow/engines/action_engine/compiler.py | 14 ++++++++++++-- taskflow/tests/unit/test_action_engine_compile.py | 2 +- 2 files changed, 13 insertions(+), 3 deletions(-) diff --git a/taskflow/engines/action_engine/compiler.py b/taskflow/engines/action_engine/compiler.py index f5c519cc..5a591526 100644 --- a/taskflow/engines/action_engine/compiler.py +++ b/taskflow/engines/action_engine/compiler.py @@ -109,8 +109,18 @@ class _Flattener(object): elif isinstance(item, task.BaseTask): return self._flatten_task elif isinstance(item, retry.Retry): - raise TypeError("Retry controller %s (%s) is used not as a flow " - "parameter" % (item, type(item))) + if len(self._history) == 1: + raise TypeError("Retry controller: %s (%s) must only be used" + " as a flow constructor parameter and not as a" + " root component" % (item, type(item))) + else: + # TODO(harlowja): we should raise this type error earlier + # instead of later since we should do this same check on add() + # calls, this makes the error more visible (instead of waiting + # until compile time). 
+ raise TypeError("Retry controller: %s (%s) must only be used" + " as a flow constructor parameter and not as a" + " flow added component" % (item, type(item))) else: return None diff --git a/taskflow/tests/unit/test_action_engine_compile.py b/taskflow/tests/unit/test_action_engine_compile.py index 57c248e0..82075b04 100644 --- a/taskflow/tests/unit/test_action_engine_compile.py +++ b/taskflow/tests/unit/test_action_engine_compile.py @@ -46,7 +46,7 @@ class PatternCompileTest(test.TestCase): def test_retry(self): r = retry.AlwaysRevert('r1') - msg_regex = "^Retry controller .* is used not as a flow parameter" + msg_regex = "^Retry controller: .* must only be used .*" self.assertRaisesRegexp(TypeError, msg_regex, compiler.PatternCompiler().compile, r) From 6542b99672310f7d339b13208dbf7befae5fd75a Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Sun, 8 Jun 2014 23:37:50 -0700 Subject: [PATCH 118/188] Sync our version of the interprocess lock The oslo version gained an exists function and merged in the commented code we had so we can go ahead and merge that into our source tree and remove the comment (since it's not useful anymore). The update is from lockutils.py, oslo incubator commit hash e3b52f20367e18. 
Change-Id: I858a4412d03c48c4ef1c2506edf6334c66ded3f0 --- taskflow/utils/lock_utils.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/taskflow/utils/lock_utils.py b/taskflow/utils/lock_utils.py index 942e27bb..1b4c9008 100644 --- a/taskflow/utils/lock_utils.py +++ b/taskflow/utils/lock_utils.py @@ -377,7 +377,6 @@ class _InterProcessLock(object): try: self.unlock() self.lockfile.close() - # This is fixed in: https://review.openstack.org/70506 LOG.debug('Released file lock "%s"', self.fname) except IOError: LOG.exception("Could not release the acquired lock `%s`", @@ -386,6 +385,9 @@ class _InterProcessLock(object): def __exit__(self, exc_type, exc_val, exc_tb): self.release() + def exists(self): + return os.path.exists(self.fname) + def trylock(self): raise NotImplementedError() From b72a5df10243271a5e1ef0ce19cd21e729d1f28a Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Sat, 7 Jun 2014 17:10:20 -0700 Subject: [PATCH 119/188] Rename additional to general/higher-level The wiki really contains the general/overview and higher-level information and the developer documentation should reflect this (instead of just saying additional documentation is located there). Change-Id: I4ac58aac6ec61b11f4507a7c51973e0b492c5ade --- doc/source/index.rst | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/doc/source/index.rst b/doc/source/index.rst index 7bfdc96d..b8c930e5 100644 --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -6,8 +6,9 @@ easy, consistent, and reliable.* .. note:: - Additional documentation is also hosted on wiki: - https://wiki.openstack.org/wiki/TaskFlow + If you are just getting started or looking for an overview please + visit: http://wiki.openstack.org/wiki/TaskFlow which provides better + introductory material, description of high level goals and related content. 
Contents ======== From 89c4a30fd12190885991c0d6e23f0d50ef448092 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Tue, 10 Jun 2014 16:55:19 -0700 Subject: [PATCH 120/188] Update zake to requirements version Zake has been updated in the global requirements repository to be zake>=0.0.20, so in order for the requirements bot to get its change through (without updating hacking) we need to separate this change out. Change-Id: I5cf4407104409785fe4ee746af9fed7b766474e4 --- test-requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test-requirements.txt b/test-requirements.txt index 8c0d3106..7e664134 100644 --- a/test-requirements.txt +++ b/test-requirements.txt @@ -5,7 +5,7 @@ mock>=1.0 python-subunit>=0.0.18 testrepository>=0.0.18 testtools>=0.9.34 -zake>=0.0.18 +zake>=0.0.20 # Apache-2.0 # docs build jobs sphinx>=1.2.1,<1.3 oslosphinx From 231e4c6cf8f57b96ca10d52d07b38d221eba5c21 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Wed, 4 Jun 2014 19:28:18 -0700 Subject: [PATCH 121/188] Remove misc.as_bool as oslo provides an equivalent There isn't a need to have a misc.as_bool function anymore now that we have imported the oslo incubator strutils module since that module provides a function that does *nearly* the same thing. 
Change-Id: I7afe141d5a37c50b0c926144743f9af71db95bbf --- .../persistence/backends/impl_sqlalchemy.py | 23 +++++++++++++++---- taskflow/utils/misc.py | 12 ---------- 2 files changed, 18 insertions(+), 17 deletions(-) diff --git a/taskflow/persistence/backends/impl_sqlalchemy.py b/taskflow/persistence/backends/impl_sqlalchemy.py index 81f053ea..26cc4d27 100644 --- a/taskflow/persistence/backends/impl_sqlalchemy.py +++ b/taskflow/persistence/backends/impl_sqlalchemy.py @@ -32,6 +32,7 @@ from sqlalchemy import orm as sa_orm from sqlalchemy import pool as sa_pool from taskflow import exceptions as exc +from taskflow.openstack.common import strutils from taskflow.persistence.backends import base from taskflow.persistence.backends.sqlalchemy import migration from taskflow.persistence.backends.sqlalchemy import models @@ -120,6 +121,18 @@ def _is_db_connection_error(reason): return _in_any(reason, list(MY_SQL_CONN_ERRORS + POSTGRES_CONN_ERRORS)) +def _as_bool(value): + if isinstance(value, bool): + return value + # This is different than strutils, but imho is an acceptable difference. + if value is None: + return False + # NOTE(harlowja): prefer strictness to avoid users getting accustomed + # to passing bad values in and this *just working* (which imho is a bad + # habit to encourage). + return strutils.bool_from_string(value, strict=True) + + def _thread_yield(dbapi_con, con_record): """Ensure other greenthreads get a chance to be executed. @@ -183,8 +196,8 @@ class SQLAlchemyBackend(base.Backend): # all the popping that will happen below. 
conf = copy.deepcopy(self._conf) engine_args = { - 'echo': misc.as_bool(conf.pop('echo', False)), - 'convert_unicode': misc.as_bool(conf.pop('convert_unicode', True)), + 'echo': _as_bool(conf.pop('echo', False)), + 'convert_unicode': _as_bool(conf.pop('convert_unicode', True)), 'pool_recycle': 3600, } if 'idle_timeout' in conf: @@ -229,13 +242,13 @@ class SQLAlchemyBackend(base.Backend): engine = sa.create_engine(sql_connection, **engine_args) checkin_yield = conf.pop('checkin_yield', eventlet_utils.EVENTLET_AVAILABLE) - if misc.as_bool(checkin_yield): + if _as_bool(checkin_yield): sa.event.listen(engine, 'checkin', _thread_yield) if 'mysql' in e_url.drivername: - if misc.as_bool(conf.pop('checkout_ping', True)): + if _as_bool(conf.pop('checkout_ping', True)): sa.event.listen(engine, 'checkout', _ping_listener) mode = None - if misc.as_bool(conf.pop('mysql_traditional_mode', True)): + if _as_bool(conf.pop('mysql_traditional_mode', True)): mode = 'TRADITIONAL' if 'mysql_sql_mode' in conf: mode = conf.pop('mysql_sql_mode') diff --git a/taskflow/utils/misc.py b/taskflow/utils/misc.py index d2360c9b..b69089f9 100644 --- a/taskflow/utils/misc.py +++ b/taskflow/utils/misc.py @@ -411,18 +411,6 @@ class ExponentialBackoff(object): return "ExponentialBackoff: %s" % ([str(v) for v in self]) -def as_bool(val): - """Converts an arbitrary value into a boolean.""" - if isinstance(val, bool): - return val - if isinstance(val, six.string_types): - if val.lower() in ('f', 'false', '0', 'n', 'no'): - return False - if val.lower() in ('t', 'true', '1', 'y', 'yes'): - return True - return bool(val) - - def as_int(obj, quiet=False): """Converts an arbitrary value into a integer.""" # Try "2" -> 2 From c4098d611f0d0617f6290a2a28fe97307f1ec988 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Sat, 7 Jun 2014 22:28:55 -0700 Subject: [PATCH 122/188] Denote that other projects can use this library This library while made primarily for OpenStack is and should not be limited to just being 
used by OpenStack and our documentation should reflect this. Change-Id: I9721adc062b27ab9e893b97e29cc34c6cb889308 --- doc/source/index.rst | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/doc/source/index.rst b/doc/source/index.rst index 7bfdc96d..e66aa900 100644 --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -1,8 +1,8 @@ TaskFlow ======== -*TaskFlow is a Python library for OpenStack that helps make task execution -easy, consistent, and reliable.* +*TaskFlow is a Python library that helps to make task execution easy, +consistent and reliable.* [#f1]_ .. note:: @@ -51,3 +51,7 @@ Indices and tables * :ref:`modindex` * :ref:`search` +.. [#f1] It should be noted that even though it is designed with OpenStack + integration in mind, and that is where most of its *current* + integration is it aims to be generally usable and useful in any + project. From c558da07b65bebdc6940be72348d126fa1eccda6 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Tue, 10 Jun 2014 17:04:15 -0700 Subject: [PATCH 123/188] Upgrade hacking version and fix some of the issues Update hacking to the new requirements version and fix about half of the new reported issues. The other hacking issues are for now ignored until fixed by adjusting our tox.ini file. 
This commit fixes the following new hacking errors: H405 - multi line docstring summary not separated with an empty line E265 - block comment should start with '# ' F402 - import 'endpoint' from line 21 shadowed by loop variable Change-Id: I6bae61591fb988cc17fa79e21cb5f1508d22781c --- taskflow/atom.py | 4 +- taskflow/conductors/base.py | 12 +-- taskflow/conductors/single_threaded.py | 11 ++- taskflow/engines/action_engine/analyzer.py | 37 +++++---- taskflow/engines/action_engine/compiler.py | 15 ++-- taskflow/engines/action_engine/runtime.py | 32 ++++++-- taskflow/engines/base.py | 9 +- taskflow/engines/helpers.py | 17 ++-- taskflow/engines/worker_based/executor.py | 6 +- taskflow/engines/worker_based/protocol.py | 13 ++- taskflow/engines/worker_based/proxy.py | 8 +- taskflow/engines/worker_based/server.py | 14 ++-- taskflow/engines/worker_based/worker.py | 4 +- taskflow/exceptions.py | 4 +- taskflow/flow.py | 7 +- taskflow/jobs/backends/__init__.py | 29 +++++-- taskflow/jobs/job.py | 22 +++-- taskflow/jobs/jobboard.py | 82 +++++++++++-------- taskflow/patterns/graph_flow.py | 9 +- taskflow/persistence/backends/__init__.py | 29 ++++++- taskflow/persistence/backends/base.py | 8 +- taskflow/persistence/backends/impl_dir.py | 22 ++++- taskflow/persistence/backends/impl_memory.py | 8 +- .../persistence/backends/impl_sqlalchemy.py | 18 +++- .../persistence/backends/impl_zookeeper.py | 18 +++- taskflow/persistence/logbook.py | 42 ++++++---- taskflow/retry.py | 78 ++++++++++++------ taskflow/states.py | 4 +- taskflow/storage.py | 18 ++-- taskflow/task.py | 25 ++++-- taskflow/test.py | 24 ++++-- taskflow/tests/test_examples.py | 8 +- taskflow/tests/unit/jobs/__init__.py | 15 ---- .../unit/persistence/test_sql_persistence.py | 4 +- taskflow/tests/unit/test_utils_lock_utils.py | 2 +- taskflow/types/graph.py | 7 +- taskflow/utils/kazoo_utils.py | 8 +- taskflow/utils/lock_utils.py | 31 ++++--- taskflow/utils/misc.py | 77 ++++++++++------- taskflow/utils/persistence_utils.py | 
20 +++-- taskflow/utils/reflection.py | 7 +- test-requirements.txt | 2 +- tox-tmpl.ini | 5 ++ tox.ini | 1 + 44 files changed, 526 insertions(+), 290 deletions(-) diff --git a/taskflow/atom.py b/taskflow/atom.py index c288cbb7..d93ff57a 100644 --- a/taskflow/atom.py +++ b/taskflow/atom.py @@ -78,7 +78,9 @@ def _build_rebind_dict(args, rebind_args): def _build_arg_mapping(atom_name, reqs, rebind_args, function, do_infer, ignore_list=None): - """Given a function, its requirements and a rebind mapping this helper + """Builds an input argument mapping for a given function. + + Given a function, its requirements and a rebind mapping this helper function will build the correct argument mapping for the given function as well as verify that the final argument mapping does not have missing or extra arguments (where applicable). diff --git a/taskflow/conductors/base.py b/taskflow/conductors/base.py index 7cee0c64..e7c9887a 100644 --- a/taskflow/conductors/base.py +++ b/taskflow/conductors/base.py @@ -24,7 +24,9 @@ from taskflow.utils import lock_utils @six.add_metaclass(abc.ABCMeta) class Conductor(object): - """Conductors act as entities which extract jobs from a jobboard, assign + """Conductors conduct jobs & assist in associated runtime interactions. + + Conductors act as entities which extract jobs from a jobboard, assign there work to some engine (using some desired configuration) and then wait for that work to complete. If the work fails then they abandon the claimed work (or if the process they are running in crashes or dies this @@ -99,13 +101,13 @@ class Conductor(object): @abc.abstractmethod def run(self): - """Continuously claims, runs, and consumes jobs, and waits for more - jobs when there are none left on the jobboard. - """ + """Continuously claims, runs, and consumes jobs (and repeat).""" @abc.abstractmethod def _dispatch_job(self, job): - """Accepts a single (already claimed) job and causes it to be run in + """Dispatches a claimed job for work completion. 
+ + Accepts a single (already claimed) job and causes it to be run in an engine. Returns a boolean that signifies whether the job should be consumed. The job is consumed upon completion (unless False is returned which will signify the job should be abandoned instead). diff --git a/taskflow/conductors/single_threaded.py b/taskflow/conductors/single_threaded.py index 5deeba30..bfd4d3d4 100644 --- a/taskflow/conductors/single_threaded.py +++ b/taskflow/conductors/single_threaded.py @@ -67,10 +67,13 @@ class SingleThreadedConductor(base.Conductor): @lock_utils.locked def stop(self, timeout=None): - """Requests the conductor to stop dispatching and returns whether the - stop request was successfully completed. If the dispatching is still - occurring then False is returned otherwise True will be returned to - signal that the conductor is no longer dispatching job requests. + """Requests the conductor to stop dispatching. + + This method can be used to request that a conductor stop its + consumption & dispatching loop. It returns whether the stop request + was successfully completed. If the dispatching is still occurring + then False is returned otherwise True will be returned to signal that + the conductor is no longer consuming & dispatching job requests. NOTE(harlowja): If a timeout is provided the dispatcher loop may not have ceased by the timeout reached (the request to cease will diff --git a/taskflow/engines/action_engine/analyzer.py b/taskflow/engines/action_engine/analyzer.py index ef960afc..d4b181a6 100644 --- a/taskflow/engines/action_engine/analyzer.py +++ b/taskflow/engines/action_engine/analyzer.py @@ -22,11 +22,13 @@ from taskflow import states as st class Analyzer(object): - """Analyzes a compilation output to get the next atoms for execution or - reversion by utilizing the compilations underlying structures (graphs, - nodes and edge relations...) 
and using this information along with the - atom state/states stored in storage to provide useful analysis functions - to the rest of the runtime system. + """Analyzes a compilation and aids in execution processes. + + Its primary purpose is to get the next atoms for execution or reversion + by utilizing the compilations underlying structures (graphs, nodes and + edge relations...) and using this information along with the atom + state/states stored in storage to provide other useful functionality to + the rest of the runtime system. """ def __init__(self, compilation, storage): @@ -56,8 +58,11 @@ class Analyzer(object): return [] def browse_nodes_for_execute(self, node=None): - """Browse next nodes to execute for given node if specified and - for whole graph otherwise. + """Browse next nodes to execute. + + This returns a collection of nodes that are ready to be executed, if + given a specific node it will only examine the successors of that node, + otherwise it will examine the whole graph. """ if node: nodes = self._graph.successors(node) @@ -71,8 +76,11 @@ class Analyzer(object): return available_nodes def browse_nodes_for_revert(self, node=None): - """Browse next nodes to revert for given node if specified and - for whole graph otherwise. + """Browse next nodes to revert. + + This returns a collection of nodes that are ready to be be reverted, if + given a specific node it will only examine the predecessors of that + node, otherwise it will examine the whole graph. 
""" if node: nodes = self._graph.predecessors(node) @@ -87,7 +95,6 @@ class Analyzer(object): def _is_ready_for_execute(self, task): """Checks if task is ready to be executed.""" - state = self.get_state(task) intention = self._storage.get_atom_intention(task.name) transition = st.check_task_transition(state, st.RUNNING) @@ -104,7 +111,6 @@ class Analyzer(object): def _is_ready_for_revert(self, task): """Checks if task is ready to be reverted.""" - state = self.get_state(task) intention = self._storage.get_atom_intention(task.name) transition = st.check_task_transition(state, st.REVERTING) @@ -120,15 +126,14 @@ class Analyzer(object): for state, intention in six.itervalues(task_states)) def iterate_subgraph(self, retry): - """Iterates a subgraph connected to current retry controller, including - nested retry controllers and its nodes. - """ + """Iterates a subgraph connected to given retry controller.""" for _src, dst in traversal.dfs_edges(self._graph, retry): yield dst def iterate_retries(self, state=None): - """Iterates retry controllers of a graph with given state or all - retries if state is None. + """Iterates retry controllers that match the provided state. + + If no state is provided it will yield back all retry controllers. """ for node in self._graph.nodes_iter(): if isinstance(node, retry_atom.Retry): diff --git a/taskflow/engines/action_engine/compiler.py b/taskflow/engines/action_engine/compiler.py index f5c519cc..de5af5fd 100644 --- a/taskflow/engines/action_engine/compiler.py +++ b/taskflow/engines/action_engine/compiler.py @@ -42,8 +42,7 @@ class Compilation(object): class PatternCompiler(object): - """Compiles patterns & atoms (potentially nested) into an compilation - unit with a *logically* equivalent directed acyclic graph representation. + """Compiles patterns & atoms into a compilation unit. NOTE(harlowja): during this pattern translation process any nested flows will be converted into there equivalent subgraphs. 
This currently implies @@ -51,8 +50,8 @@ class PatternCompiler(object): be associated with there previously containing flow but instead will lose this identity and what will remain is the logical constraints that there contained flow mandated. In the future this may be changed so that this - association is not lost via the compilation process (since it is sometime - useful to retain part of this relationship). + association is not lost via the compilation process (since it can be + useful to retain this relationship). """ def compile(self, root): graph = _Flattener(root).flatten() @@ -80,9 +79,11 @@ class _Flattener(object): self._freeze = bool(freeze) def _add_new_edges(self, graph, nodes_from, nodes_to, edge_attrs): - """Adds new edges from nodes to other nodes in the specified graph, - with the following edge attributes (defaulting to the class provided - edge_data if None), if the edge does not already exist. + """Adds new edges from nodes to other nodes in the specified graph. + + It will connect the nodes_from to the nodes_to if an edge currently + does *not* exist. When an edge is created the provided edge attributes + will be applied to the new edge between these two nodes. """ nodes_to = list(nodes_to) for u in nodes_from: diff --git a/taskflow/engines/action_engine/runtime.py b/taskflow/engines/action_engine/runtime.py index 40e66453..3f5e2670 100644 --- a/taskflow/engines/action_engine/runtime.py +++ b/taskflow/engines/action_engine/runtime.py @@ -28,9 +28,11 @@ from taskflow.engines.action_engine import task_action as ta class Runtime(object): - """An object that contains various utility methods and properties that - represent the collection of runtime components and functionality needed - for an action engine to run to completion. + """A aggregate of runtime objects, properties, ... used during execution. 
+ + This object contains various utility methods and properties that represent + the collection of runtime components and functionality needed for an + action engine to run to completion. """ def __init__(self, compilation, storage, task_notifier, task_executor): @@ -155,8 +157,13 @@ class Completer(object): return False def _process_atom_failure(self, atom, failure): - """On atom failure find its retry controller, ask for the action to - perform with failed subflow and set proper intention for subflow nodes. + """Processes atom failure & applies resolution strategies. + + On atom failure this will find the atoms associated retry controller + and ask that controller for the strategy to perform to resolve that + failure. After getting a resolution strategy decision this method will + then adjust the needed other atoms intentions, and states, ... so that + the failure can be worked around. """ retry = self._analyzer.find_atom_retry(atom) if retry: @@ -195,6 +202,9 @@ class Scheduler(object): def _schedule_node(self, node): """Schedule a single node for execution.""" + # TODO(harlowja): we need to rework this so that we aren't doing type + # checking here, type checking usually means something isn't done right + # and usually will limit extensibility in the future. if isinstance(node, task_atom.BaseTask): return self._schedule_task(node) elif isinstance(node, retry_atom.Retry): @@ -204,8 +214,10 @@ class Scheduler(object): % (node, type(node))) def _schedule_retry(self, retry): - """Schedules the given retry for revert or execute depending - on its intention. + """Schedules the given retry atom for *future* completion. + + Depending on the atoms stored intention this may schedule the retry + atom for reversion or execution. 
""" intention = self._storage.get_atom_intention(retry.name) if intention == st.EXECUTE: @@ -221,8 +233,10 @@ class Scheduler(object): " intention: %s" % intention) def _schedule_task(self, task): - """Schedules the given task for revert or execute depending - on its intention. + """Schedules the given task atom for *future* completion. + + Depending on the atoms stored intention this may schedule the task + atom for reversion or execution. """ intention = self._storage.get_atom_intention(task.name) if intention == st.EXECUTE: diff --git a/taskflow/engines/base.py b/taskflow/engines/base.py index 9255a3da..4bfcbabc 100644 --- a/taskflow/engines/base.py +++ b/taskflow/engines/base.py @@ -54,9 +54,12 @@ class EngineBase(object): @abc.abstractmethod def compile(self): - """Compiles the contained flow into a structure which the engine can - use to run or if this can not be done then an exception is thrown - indicating why this compilation could not be achieved. + """Compiles the contained flow into a internal representation. + + This internal representation is what the engine will *actually* use to + run. If this compilation can not be accomplished then an exception + is expected to be thrown with a message indicating why the compilation + could not be achieved. """ @abc.abstractmethod diff --git a/taskflow/engines/helpers.py b/taskflow/engines/helpers.py index 0fb7a518..0b13044b 100644 --- a/taskflow/engines/helpers.py +++ b/taskflow/engines/helpers.py @@ -50,7 +50,7 @@ def _fetch_validate_factory(flow_factory): def load(flow, store=None, flow_detail=None, book=None, engine_conf=None, backend=None, namespace=ENGINES_NAMESPACE, **kwargs): - """Load flow into engine. + """Load a flow into an engine. This function creates and prepares engine to run the flow. All that is left is to run the engine with 'run()' method. 
@@ -151,8 +151,7 @@ def run(flow, store=None, flow_detail=None, book=None, def save_factory_details(flow_detail, flow_factory, factory_args, factory_kwargs, backend=None): - """Saves the given factories reimportable name, args, kwargs into the - flow detail. + """Saves the given factories reimportable attributes into the flow detail. This function saves the factory name, arguments, and keyword arguments into the given flow details object and if a backend is provided it will @@ -227,9 +226,11 @@ def load_from_factory(flow_factory, factory_args=None, factory_kwargs=None, def flow_from_detail(flow_detail): - """Recreate flow previously loaded with load_form_factory. + """Reloads a flow previously saved. - Gets flow factory name from metadata, calls it to recreate the flow. + Gets the flow factories name and any arguments and keyword arguments from + the flow details metadata, and then calls that factory to recreate the + flow. :param flow_detail: FlowDetail that holds state of the flow to load """ @@ -253,10 +254,10 @@ def flow_from_detail(flow_detail): def load_from_detail(flow_detail, store=None, engine_conf=None, backend=None, namespace=ENGINES_NAMESPACE, **kwargs): - """Reload flow previously loaded with load_form_factory function. + """Reloads an engine previously saved. - Gets flow factory name from metadata, calls it to recreate the flow - and loads flow into engine with load(). + This reloads the flow using the flow_from_detail() function and then calls + into the load() function to create an engine from that flow. 
:param flow_detail: FlowDetail that holds state of the flow to load :param store: dict -- data to put to storage to satisfy flow requirements diff --git a/taskflow/engines/worker_based/executor.py b/taskflow/engines/worker_based/executor.py index 98ff3a8d..37ea8bd7 100644 --- a/taskflow/engines/worker_based/executor.py +++ b/taskflow/engines/worker_based/executor.py @@ -216,7 +216,7 @@ class WorkerTaskExecutor(executor.TaskExecutorBase): return async_utils.wait_for_any(fs, timeout) def start(self): - """Start proxy thread (and associated topic notification thread).""" + """Starts proxy thread and associated topic notification thread.""" if not _is_alive(self._proxy_thread): self._proxy_thread = tu.daemon_thread(self._proxy.start) self._proxy_thread.start() @@ -227,9 +227,7 @@ class WorkerTaskExecutor(executor.TaskExecutorBase): self._periodic_thread.start() def stop(self): - """Stop proxy thread (and associated topic notification thread), so - those threads will be gracefully terminated. - """ + """Stops proxy thread and associated topic notification thread.""" if self._periodic_thread is not None: self._periodic.stop() self._periodic_thread.join() diff --git a/taskflow/engines/worker_based/protocol.py b/taskflow/engines/worker_based/protocol.py index 70859053..40a227a8 100644 --- a/taskflow/engines/worker_based/protocol.py +++ b/taskflow/engines/worker_based/protocol.py @@ -86,8 +86,10 @@ class Notify(Message): class Request(Message): - """Represents request with execution results. Every request is created in - the WAITING state and is expired within the given timeout. + """Represents request with execution results. + + Every request is created in the WAITING state and is expired within the + given timeout. """ TYPE = REQUEST @@ -136,8 +138,11 @@ class Request(Message): return False def to_dict(self): - """Return json-serializable request, converting all `misc.Failure` - objects into dictionaries. + """Return json-serializable request. 
+ + To convert requests that have failed due to some exception this will + convert all `misc.Failure` objects into dictionaries (which will then + be reconstituted by the receiver). """ request = dict(task_cls=self._task_cls, task_name=self._task.name, task_version=self._task.version, action=self._action, diff --git a/taskflow/engines/worker_based/proxy.py b/taskflow/engines/worker_based/proxy.py index 4d5282ee..3700501e 100644 --- a/taskflow/engines/worker_based/proxy.py +++ b/taskflow/engines/worker_based/proxy.py @@ -29,9 +29,7 @@ DRAIN_EVENTS_PERIOD = 1 class Proxy(object): - """Proxy picks up messages from the named exchange, calls on_message - callback when new message received and is used to publish messages. - """ + """A proxy processes messages from/to the named exchange.""" def __init__(self, topic, exchange_name, on_message, on_wait=None, **kwargs): @@ -61,7 +59,7 @@ class Proxy(object): @property def is_running(self): - """Return whether proxy is running.""" + """Return whether the proxy is running.""" return self._running.is_set() def _make_queue(self, name, exchange, **kwargs): @@ -74,7 +72,7 @@ class Proxy(object): **kwargs) def publish(self, msg, routing_key, **kwargs): - """Publish message to the named exchange with routing key.""" + """Publish message to the named exchange with given routing key.""" LOG.debug("Sending %s", msg) if isinstance(routing_key, six.string_types): routing_keys = [routing_key] diff --git a/taskflow/engines/worker_based/server.py b/taskflow/engines/worker_based/server.py index 02f56647..d52c3dce 100644 --- a/taskflow/engines/worker_based/server.py +++ b/taskflow/engines/worker_based/server.py @@ -77,9 +77,10 @@ class Server(object): @staticmethod def _parse_request(task_cls, task_name, action, arguments, result=None, failures=None, **kwargs): - """Parse request before it can be processed. All `misc.Failure` objects - that have been converted to dict on the remote side to be serializable - are now converted back to objects. 
+ """Parse request before it can be further processed. + + All `misc.Failure` objects that have been converted to dict on the + remote side will now converted back to `misc.Failure` objects. """ action_args = dict(arguments=arguments, task_name=task_name) if result is not None: @@ -96,9 +97,10 @@ class Server(object): @staticmethod def _parse_message(message): - """Parse broker message to get the `reply_to` and the `correlation_id` - properties. If required properties are missing - the `ValueError` is - raised. + """Extracts required attributes out of the messages properties. + + This extracts the `reply_to` and the `correlation_id` properties. If + any of these required properties are missing a `ValueError` is raised. """ properties = [] for prop in ('reply_to', 'correlation_id'): diff --git a/taskflow/engines/worker_based/worker.py b/taskflow/engines/worker_based/worker.py index 0b3d50dd..49011788 100644 --- a/taskflow/engines/worker_based/worker.py +++ b/taskflow/engines/worker_based/worker.py @@ -96,8 +96,8 @@ class Worker(object): LOG.info("Starting the '%s' topic worker using a %s.", self._topic, self._executor) LOG.info("Tasks list:") - for endpoint in self._endpoints: - LOG.info("|-- %s", endpoint) + for e in self._endpoints: + LOG.info("|-- %s", e) self._server.start() def wait(self): diff --git a/taskflow/exceptions.py b/taskflow/exceptions.py index 78186ef5..55c889ca 100644 --- a/taskflow/exceptions.py +++ b/taskflow/exceptions.py @@ -84,9 +84,7 @@ class ExecutionFailure(TaskFlowException): class RequestTimeout(ExecutionFailure): - """Raised when a worker request was not finished within an allotted - timeout. 
- """ + """Raised when a worker request was not finished within allotted time.""" class InvalidState(ExecutionFailure): diff --git a/taskflow/flow.py b/taskflow/flow.py index 0fb94338..5533ed4e 100644 --- a/taskflow/flow.py +++ b/taskflow/flow.py @@ -55,8 +55,11 @@ class Flow(object): @property def retry(self): - """A retry object that will affect control how (and if) this flow - retries while execution is underway. + """The associated flow retry controller. + + This retry controller object will affect & control how (and if) this + flow and its contained components retry when execution is underway and + a failure occurs. """ return self._retry diff --git a/taskflow/jobs/backends/__init__.py b/taskflow/jobs/backends/__init__.py index 0299636a..099f0476 100644 --- a/taskflow/jobs/backends/__init__.py +++ b/taskflow/jobs/backends/__init__.py @@ -31,9 +31,25 @@ LOG = logging.getLogger(__name__) def fetch(name, conf, namespace=BACKEND_NAMESPACE, **kwargs): - """Fetch a jobboard backend with the given configuration (and any board - specific kwargs) in the given entrypoint namespace and create it with the - given name. + """Fetch a jobboard backend with the given configuration. + + This fetch method will look for the entrypoint name in the entrypoint + namespace, and then attempt to instantiate that entrypoint using the + provided name, configuration and any board specific kwargs. + + NOTE(harlowja): to aid in making it easy to specify configuration and + options to a board the configuration (which is typical just a dictionary) + can also be a uri string that identifies the entrypoint name and any + configuration specific to that board. + + For example, given the following configuration uri: + + zookeeper:///?a=b&c=d + + This will look for the entrypoint named 'zookeeper' and will provide + a configuration object composed of the uris parameters, in this case that + is {'a': 'b', 'c': 'd'} to the constructor of that board instance (also + including the name specified). 
""" if isinstance(conf, six.string_types): conf = {'board': conf} @@ -58,8 +74,11 @@ def fetch(name, conf, namespace=BACKEND_NAMESPACE, **kwargs): @contextlib.contextmanager def backend(name, conf, namespace=BACKEND_NAMESPACE, **kwargs): - """Fetches a jobboard backend, connects to it and allows it to be used in - a context manager statement with the jobboard being closed upon completion. + """Fetches a jobboard, connects to it and closes it on completion. + + This allows a board instance to fetched, connected to, and then used in a + context manager statement with the board being closed upon context + manager exit. """ jb = fetch(name, conf, namespace=namespace, **kwargs) jb.connect() diff --git a/taskflow/jobs/job.py b/taskflow/jobs/job.py index 796e5d11..41ac4c16 100644 --- a/taskflow/jobs/job.py +++ b/taskflow/jobs/job.py @@ -24,16 +24,22 @@ from taskflow.openstack.common import uuidutils @six.add_metaclass(abc.ABCMeta) class Job(object): - """A job is a higher level abstraction over a set of flows as well as the - *ownership* of those flows, it is the highest piece of work that can be - owned by an entity performing those flows. + """A abstraction that represents a named and trackable unit of work. - Only one entity will be operating on the flows contained in a job at a - given time (for the foreseeable future). + A job connects a logbook, a owner, last modified and created on dates and + any associated state that the job has. Since it is a connector to a + logbook, which are each associated with a set of factories that can create + set of flows, it is the current top-level container for a piece of work + that can be owned by an entity (typically that entity will read those + logbooks and run any contained flows). - It is the object that should be transferred to another entity on failure of - so that the contained flows ownership can be transferred to the secondary - entity for resumption/continuation/reverting. 
+ Only one entity will be allowed to own and operate on the flows contained + in a job at a given time (for the foreseeable future). + + NOTE(harlowja): It is the object that will be transferred to another + entity on failure so that the contained flows ownership can be + transferred to the secondary entity/owner for resumption, continuation, + reverting... """ def __init__(self, name, uuid=None, details=None): diff --git a/taskflow/jobs/jobboard.py b/taskflow/jobs/jobboard.py index 5857d554..c93123a5 100644 --- a/taskflow/jobs/jobboard.py +++ b/taskflow/jobs/jobboard.py @@ -24,10 +24,15 @@ from taskflow.utils import misc @six.add_metaclass(abc.ABCMeta) class JobBoard(object): - """A jobboard is an abstract representation of a place where jobs - can be posted, reposted, claimed and transferred. There can be multiple - implementations of this job board, depending on the desired semantics and - capabilities of the underlying jobboard implementation. + """A place where jobs can be posted, reposted, claimed and transferred. + + There can be multiple implementations of this job board, depending on the + desired semantics and capabilities of the underlying jobboard + implementation. + + NOTE(harlowja): the name is meant to be an analogous to a board/posting + system that is used in newspapers, or elsewhere to solicit jobs that + people can interview and apply for (and then work on & complete). """ def __init__(self, name, conf): @@ -36,8 +41,7 @@ class JobBoard(object): @abc.abstractmethod def iterjobs(self, only_unclaimed=False, ensure_fresh=False): - """Returns an iterator that will provide back jobs that are currently - on this jobboard. + """Returns an iterator of jobs that are currently on this board. 
NOTE(harlowja): the ordering of this iteration should be by posting order (oldest to newest) if possible, but it is left up to the backing @@ -60,9 +64,10 @@ class JobBoard(object): @abc.abstractmethod def wait(self, timeout=None): - """Waits a given amount of time for job/s to be posted, when jobs are - found then an iterator will be returned that contains the jobs at - the given point in time. + """Waits a given amount of time for jobs to be posted. + + When jobs are found then an iterator will be returned that can be used + to iterate over those jobs. NOTE(harlowja): since a jobboard can be mutated on by multiple external entities at the *same* time the iterator that can be returned *may* @@ -75,8 +80,11 @@ class JobBoard(object): @abc.abstractproperty def job_count(self): - """Returns how many jobs are on this jobboard (this count may change as - new jobs appear or are removed). + """Returns how many jobs are on this jobboard. + + NOTE(harlowja): this count may change as jobs appear or are removed so + the accuracy of this count should not be used in a way that requires + it to be exact & absolute. """ @abc.abstractmethod @@ -90,11 +98,13 @@ class JobBoard(object): @abc.abstractmethod def consume(self, job, who): - """Permanently (and atomically) removes a job from the jobboard, - signaling that this job has been completed by the entity assigned - to that job. + """Permanently (and atomically) removes a job from the jobboard. - Only the entity that has claimed that job is able to consume a job. + Consumption signals to the board (and any others examining the board) + that this job has been completed by the entity that previously claimed + that job. + + Only the entity that has claimed that job is able to consume the job. A job that has been consumed can not be reclaimed or reposted by another entity (job postings are immutable). 
Any entity consuming @@ -109,11 +119,13 @@ class JobBoard(object): @abc.abstractmethod def post(self, name, book, details=None): - """Atomically creates and posts a job to the jobboard, allowing others - to attempt to claim that job (and subsequently work on that job). The - contents of the provided logbook must provide enough information for - others to reference to construct & work on the desired entries that - are contained in that logbook. + """Atomically creates and posts a job to the jobboard. + + This posting allowing others to attempt to claim that job (and + subsequently work on that job). The contents of the provided logbook + must provide *enough* information for others to reference to + construct & work on the desired entries that are contained in that + logbook. Once a job has been posted it can only be removed by consuming that job (after that job is claimed). Any entity can post/propose jobs @@ -124,13 +136,14 @@ class JobBoard(object): @abc.abstractmethod def claim(self, job, who): - """Atomically attempts to claim the given job for the entity and either - succeeds or fails at claiming by throwing corresponding exceptions. + """Atomically attempts to claim the provided job. If a job is claimed it is expected that the entity that claims that job - will at sometime in the future work on that jobs flows and either fail - at completing them (resulting in a reposting) or consume that job from - the jobboard (signaling its completion). + will at sometime in the future work on that jobs contents and either + fail at completing them (resulting in a reposting) or consume that job + from the jobboard (signaling its completion). If claiming fails then + a corresponding exception will be raised to signal this to the claim + attempter. :param job: a job on this jobboard that can be claimed (if it does not exist then a NotFound exception will be raised). 
@@ -139,10 +152,12 @@ class JobBoard(object): @abc.abstractmethod def abandon(self, job, who): - """Atomically abandons the given job on the jobboard, allowing that job - to be reclaimed by others. This would typically occur if the entity - that has claimed the job has failed or is unable to complete the job - or jobs it has claimed. + """Atomically attempts to abandon the provided job. + + This abandonment signals to others that the job may now be reclaimed. + This would typically occur if the entity that has claimed the job has + failed or is unable to complete the job or jobs it had previously + claimed. Only the entity that has claimed that job can abandon a job. Any entity abandoning a unclaimed job (or a job they do not own) will cause an @@ -177,13 +192,14 @@ REMOVAL = 'REMOVAL' # existing job is/has been removed class NotifyingJobBoard(JobBoard): - """A jobboard subclass that can notify about jobs being created - and removed, which can remove the repeated usage of iterjobs() to achieve - the same operation. + """A jobboard subclass that can notify others about board events. + + Implementers are expected to notify *at least* about jobs being posted + and removed. NOTE(harlowja): notifications that are emitted *may* be emitted on a separate dedicated thread when they occur, so ensure that all callbacks - registered are thread safe. + registered are thread safe (and block for as little time as possible). """ def __init__(self, name, conf): super(NotifyingJobBoard, self).__init__(name, conf) diff --git a/taskflow/patterns/graph_flow.py b/taskflow/patterns/graph_flow.py index 0ed74c75..7db4fee2 100644 --- a/taskflow/patterns/graph_flow.py +++ b/taskflow/patterns/graph_flow.py @@ -72,9 +72,12 @@ class Flow(flow.Flow): return graph def _swap(self, graph): - """Validates the replacement graph and then swaps the underlying graph - with a frozen version of the replacement graph (this maintains the - invariant that the underlying graph is immutable). 
+ """Validates the replacement graph and then swaps the underlying graph. + + After swapping occurs the underlying graph will be frozen so that the + immutability invariant is maintained (we may be able to relax this + constraint in the future since our exposed public api does not allow + direct access to the underlying graph). """ if not graph.is_directed_acyclic(): raise exc.DependencyFailure("No path through the items in the" diff --git a/taskflow/persistence/backends/__init__.py b/taskflow/persistence/backends/__init__.py index 7560a30b..6faabdef 100644 --- a/taskflow/persistence/backends/__init__.py +++ b/taskflow/persistence/backends/__init__.py @@ -30,8 +30,25 @@ LOG = logging.getLogger(__name__) def fetch(conf, namespace=BACKEND_NAMESPACE, **kwargs): - """Fetches a given backend using the given configuration (and any backend - specific kwargs) in the given entrypoint namespace. + """Fetch a persistence backend with the given configuration. + + This fetch method will look for the entrypoint name in the entrypoint + namespace, and then attempt to instantiate that entrypoint using the + provided configuration and any persistence backend specific kwargs. + + NOTE(harlowja): to aid in making it easy to specify configuration and + options to a backend the configuration (which is typical just a dictionary) + can also be a uri string that identifies the entrypoint name and any + configuration specific to that backend. + + For example, given the following configuration uri: + + mysql:///?a=b&c=d + + This will look for the entrypoint named 'mysql' and will provide + a configuration object composed of the uris parameters, in this case that + is {'a': 'b', 'c': 'd'} to the constructor of that persistence backend + instance. 
""" backend_name = conf['connection'] try: @@ -54,8 +71,12 @@ def fetch(conf, namespace=BACKEND_NAMESPACE, **kwargs): @contextlib.contextmanager def backend(conf, namespace=BACKEND_NAMESPACE, **kwargs): - """Fetches a persistence backend, ensures that it is upgraded and upon - context manager completion closes the backend. + """Fetches a backend, connects, upgrades, then closes it on completion. + + This allows a backend instance to be fetched, connected to, have its schema + upgraded (if the schema is already up to date this is a no-op) and then + used in a context manager statement with the backend being closed upon + context manager exit. """ with contextlib.closing(fetch(conf, namespace=namespace, **kwargs)) as be: with contextlib.closing(be.get_connection()) as conn: diff --git a/taskflow/persistence/backends/base.py b/taskflow/persistence/backends/base.py index 58aa3554..9185d69c 100644 --- a/taskflow/persistence/backends/base.py +++ b/taskflow/persistence/backends/base.py @@ -70,9 +70,11 @@ class Connection(object): @abc.abstractmethod def validate(self): - """Validates that a backend is still ok to be used (the semantics - of this vary depending on the backend). On failure a backend specific - exception is raised that will indicate why the failure occurred. + """Validates that a backend is still ok to be used. + + The semantics of this *may* vary depending on the backend. On failure a + backend specific exception should be raised that will indicate why the + failure occurred. """ pass diff --git a/taskflow/persistence/backends/impl_dir.py b/taskflow/persistence/backends/impl_dir.py index 7c0b3c9b..9ce4a324 100644 --- a/taskflow/persistence/backends/impl_dir.py +++ b/taskflow/persistence/backends/impl_dir.py @@ -33,10 +33,24 @@ LOG = logging.getLogger(__name__) class DirBackend(base.Backend): - """A backend that writes logbooks, flow details, and task details to a - provided directory. 
This backend does *not* provide transactional semantics - although it does guarantee that there will be no race conditions when - writing/reading by using file level locking. + """A directory and file based backend. + + This backend writes logbooks, flow details, and atom details to a provided + base path on the local filesystem. It will create and store those objects + in three key directories (one for logbooks, one for flow details and one + for atom details). It creates those associated directories and then + creates files inside those directories that represent the contents of those + objects for later reading and writing. + + This backend does *not* provide true transactional semantics. It does + guarantee that there will be no interprocess race conditions when + writing and reading by using a consistent hierarchy of file based locks. + + Example conf: + + conf = { + "path": "/tmp/taskflow", + } """ def __init__(self, conf): super(DirBackend, self).__init__(conf) diff --git a/taskflow/persistence/backends/impl_memory.py b/taskflow/persistence/backends/impl_memory.py index 2d4c5e09..f425987c 100644 --- a/taskflow/persistence/backends/impl_memory.py +++ b/taskflow/persistence/backends/impl_memory.py @@ -15,8 +15,6 @@ # License for the specific language governing permissions and limitations # under the License. -"""Implementation of in-memory backend.""" - import logging import six @@ -29,8 +27,10 @@ LOG = logging.getLogger(__name__) class MemoryBackend(base.Backend): - """A backend that writes logbooks, flow details, and task details to in - memory dictionaries. + """A in-memory (non-persistent) backend. + + This backend writes logbooks, flow details, and atom details to in-memory + dictionaries and retrieves from those dictionaries as needed. 
""" def __init__(self, conf=None): super(MemoryBackend, self).__init__(conf) diff --git a/taskflow/persistence/backends/impl_sqlalchemy.py b/taskflow/persistence/backends/impl_sqlalchemy.py index 81f053ea..40c0df25 100644 --- a/taskflow/persistence/backends/impl_sqlalchemy.py +++ b/taskflow/persistence/backends/impl_sqlalchemy.py @@ -167,6 +167,14 @@ def _ping_listener(dbapi_conn, connection_rec, connection_proxy): class SQLAlchemyBackend(base.Backend): + """A sqlalchemy backend. + + Example conf: + + conf = { + "connection": "sqlite:////tmp/test.db", + } + """ def __init__(self, conf, engine=None): super(SQLAlchemyBackend, self).__init__(conf) if engine is not None: @@ -337,9 +345,13 @@ class Connection(base.Connection): failures[-1].reraise() def _run_in_session(self, functor, *args, **kwargs): - """Runs a function in a session and makes sure that sqlalchemy - exceptions aren't emitted from that sessions actions (as that would - expose the underlying backends exception model). + """Runs a callback in a session. + + This function proxy will create a session, and then call the callback + with that session (along with the provided args and kwargs). It ensures + that the session is opened & closed and makes sure that sqlalchemy + exceptions aren't emitted from the callback or sessions actions (as + that would expose the underlying sqlalchemy exception model). """ try: session = self._make_session() diff --git a/taskflow/persistence/backends/impl_zookeeper.py b/taskflow/persistence/backends/impl_zookeeper.py index 8f42374c..024398d2 100644 --- a/taskflow/persistence/backends/impl_zookeeper.py +++ b/taskflow/persistence/backends/impl_zookeeper.py @@ -34,9 +34,16 @@ MIN_ZK_VERSION = (3, 4, 0) class ZkBackend(base.Backend): - """ZooKeeper as backend storage implementation + """A zookeeper backend. - Example conf (use Kazoo): + This backend writes logbooks, flow details, and atom details to a provided + base path in zookeeper. 
It will create and store those objects in three + key directories (one for logbooks, one for flow details and one for atom + details). It creates those associated directories and then creates files + inside those directories that represent the contents of those objects for + later reading and writing. + + Example conf: conf = { "hosts": "192.168.0.1:2181,192.168.0.2:2181,192.168.0.3:2181", @@ -126,8 +133,11 @@ class ZkConnection(base.Connection): @contextlib.contextmanager def _exc_wrapper(self): - """Exception wrapper which wraps kazoo exceptions and groups them - to taskflow exceptions. + """Exception context-manager which wraps kazoo exceptions. + + This is used to capture and wrap any kazoo specific exceptions and + then group them into corresponding taskflow exceptions (not doing + that would expose the underlying kazoo exception model). """ try: yield diff --git a/taskflow/persistence/logbook.py b/taskflow/persistence/logbook.py index 31815a1d..884873ea 100644 --- a/taskflow/persistence/logbook.py +++ b/taskflow/persistence/logbook.py @@ -64,14 +64,20 @@ def _fix_meta(data): class LogBook(object): - """This class that contains a dict of flow detail entries for a - given *job* so that the job can track what 'work' has been - completed for resumption/reverting and miscellaneous tracking + """A container of flow details, a name and associated metadata. + + Typically this class contains a collection of flow detail entries + for a given engine (or job) so that those entities can track what 'work' + has been completed for resumption, reverting and miscellaneous tracking purposes. The data contained within this class need *not* be backed by the backend storage in real time. The data in this class will only be guaranteed to be persisted when a save occurs via some backend connection. + + NOTE(harlowja): the naming of this class is analogous to a ships log or a + similar type of record used in detailing work that been completed (or work + that has not been completed). 
""" def __init__(self, name, uuid=None): if uuid: @@ -159,8 +165,11 @@ class LogBook(object): class FlowDetail(object): - """This class contains a dict of atom detail entries for a given - flow along with any metadata associated with that flow. + """A container of atom details, a name and associated metadata. + + Typically this class contains a collection of atom detail entries that + represent the atoms in a given flow structure (along with any other needed + metadata relevant to that flow). The data contained within this class need *not* be backed by the backend storage in real time. The data in this class will only be guaranteed to be @@ -241,13 +250,15 @@ class FlowDetail(object): @six.add_metaclass(abc.ABCMeta) class AtomDetail(object): - """This is a base class that contains an entry that contains the - persistence of an atom after or before (or during) it is running including - any results it may have produced, any state that it may be in (failed - for example), any exception that occurred when running and any associated - stacktrace that may have occurring during that exception being thrown - and any other metadata that should be stored along-side the details - about this atom. + """A base container of atom specific runtime information and metadata. + + This is a base class that contains attributes that are used to connect + a atom to the persistence layer during, after, or before it is running + including any results it may have produced, any state that it may be + in (failed for example), any exception that occurred when running and any + associated stacktrace that may have occurring during that exception being + thrown and any other metadata that should be stored along-side the details + about the connected atom. The data contained within this class need *not* backed by the backend storage in real time. 
The data in this class will only be guaranteed to be @@ -276,8 +287,11 @@ class AtomDetail(object): @property def last_results(self): - """Gets the atoms last result (if it has many results it should then - return the last one of many). + """Gets the atoms last result. + + If the atom has produced many results (for example if it has been + retried, reverted, executed and ...) this returns the last one of + many results. """ return self.results diff --git a/taskflow/retry.py b/taskflow/retry.py index fb7330e2..425c8ea6 100644 --- a/taskflow/retry.py +++ b/taskflow/retry.py @@ -34,8 +34,7 @@ RETRY = "RETRY" @six.add_metaclass(abc.ABCMeta) class Decider(object): - """A base class or mixin for an object that can decide how to resolve - execution failures. + """A class/mixin object that can decide how to resolve execution failures. A decider may be executed multiple times on subflow or other atom failure and it is expected to make a decision about what should be done @@ -45,10 +44,11 @@ class Decider(object): @abc.abstractmethod def on_failure(self, history, *args, **kwargs): - """On subflow failure makes a decision about the future flow - execution using information about prior previous failures (if this - historical failure information is not available or was not persisted - this history will be empty). + """On failure makes a decision about the future. + + This method will typically use information about prior failures (if + this historical failure information is not available or was not + persisted this history will be empty). Returns retry action constant: @@ -63,9 +63,13 @@ class Decider(object): @six.add_metaclass(abc.ABCMeta) class Retry(atom.Atom, Decider): - """A base class for a retry object that decides how to resolve subflow - execution failures and may also provide execute and revert methods to alter - the inputs of subflow atoms. + """A class that can decide how to resolve execution failures. 
+ + This abstract base class is used to inherit from and provide different + strategies that will be activated upon execution failures. Since a retry + object is an atom it may also provide execute and revert methods to alter + the inputs of connected atoms (depending on the desired strategy to be + used this can be quite useful). """ default_provides = None @@ -88,22 +92,32 @@ class Retry(atom.Atom, Decider): @abc.abstractmethod def execute(self, history, *args, **kwargs): - """Activate a given retry which will produce data required to - start or restart a subflow using previously provided values and a - history of subflow failures from previous runs. - Retry can provide same values multiple times (after each run), - the latest value will be used by tasks. Old values will be saved to - the history of retry that is a list of tuples (result, failures) - where failures is a dictionary of failures by task names. - This allows to make retries of subflow with different parameters. + """Executes the given retry atom. + + This execution activates a given retry which will typically produce + data required to start or restart a connected component using + previously provided values and a history of prior failures from + previous runs. The historical data can be analyzed to alter the + resolution strategy that this retry controller will use. + + For example, a retry can provide the same values multiple times (after + each run), the latest value or some other variation. Old values will be + saved to the history of the retry atom automatically, that is a list of + tuples (result, failures) are persisted where failures is a dictionary + of failures indexed by task names and the result is the execution + result returned by this retry controller during that failure resolution + attempt. """ def revert(self, history, *args, **kwargs): - """Revert this retry using the given context, all results - that had been provided by previous tries and all errors caused - a reversion. 
This method will be called only if a subflow must be - reverted without the retry. It won't be called on subflow retry, but - all subflow's tasks will be reverted before the retry. + """Reverts this retry using the given context. + + On revert call all results that had been provided by previous tries + and all errors caused during reversion are provided. This method + will be called *only* if a subflow must be reverted without the + retry (that is to say that the controller has ran out of resolution + options and has either given up resolution or has failed to handle + a execution failure). """ @@ -146,9 +160,12 @@ class Times(Retry): class ForEachBase(Retry): - """Base class for retries that iterate given collection.""" + """Base class for retries that iterate over a given collection.""" def _get_next_value(self, values, history): + # Fetches the next resolution result to try, removes overlapping + # entries with what has already been tried and then returns the first + # resolution strategy remaining. items = (item for item, _failures in history) remaining = misc.sequence_minus(values, items) if not remaining: @@ -166,8 +183,10 @@ class ForEachBase(Retry): class ForEach(ForEachBase): - """Accepts a collection of values to the constructor. Returns the next - element of the collection on each try. + """Applies a statically provided collection of strategies. + + Accepts a collection of decision strategies on construction and returns the + next element of the collection on each try. """ def __init__(self, values, name=None, provides=None, requires=None, @@ -180,12 +199,17 @@ class ForEach(ForEachBase): return self._on_failure(self._values, history) def execute(self, history, *args, **kwargs): + # NOTE(harlowja): This allows any connected components to know the + # current resolution strategy being attempted. 
return self._get_next_value(self._values, history) class ParameterizedForEach(ForEachBase): - """Accepts a collection of values from storage as a parameter of execute - method. Returns the next element of the collection on each try. + """Applies a dynamically provided collection of strategies. + + Accepts a collection of decision strategies from a predecessor (or from + storage) as a parameter and returns the next element of that collection on + each try. """ def on_failure(self, values, history, *args, **kwargs): diff --git a/taskflow/states.py b/taskflow/states.py index 963e4f64..a06187b4 100644 --- a/taskflow/states.py +++ b/taskflow/states.py @@ -53,7 +53,7 @@ SCHEDULING = 'SCHEDULING' WAITING = 'WAITING' ANALYZING = 'ANALYZING' -## Flow state transitions +# Flow state transitions # See: http://docs.openstack.org/developer/taskflow/states.html _ALLOWED_FLOW_TRANSITIONS = frozenset(( @@ -124,7 +124,7 @@ def check_flow_transition(old_state, new_state): % pair) -## Task state transitions +# Task state transitions # See: http://docs.openstack.org/developer/taskflow/states.html _ALLOWED_TASK_TRANSITIONS = frozenset(( diff --git a/taskflow/storage.py b/taskflow/storage.py index 353d44f3..31a8868f 100644 --- a/taskflow/storage.py +++ b/taskflow/storage.py @@ -77,9 +77,12 @@ class Storage(object): @abc.abstractproperty def _lock_cls(self): - """Lock class used to generate reader/writer locks for protecting - read/write access to the underlying storage backend and internally - mutating operations. + """Lock class used to generate reader/writer locks. + + These locks are used for protecting read/write access to the + underlying storage backend when internally mutating operations occur. + They ensure that we read and write data in a consistent manner when + being used in a multithreaded situation. 
""" def _with_connection(self, functor, *args, **kwargs): @@ -248,9 +251,12 @@ class Storage(object): self._with_connection(self._save_atom_detail, ad) def update_atom_metadata(self, atom_name, update_with): - """Updates a atoms metadata given another dictionary or a list of - (key, value) pairs to include in the updated metadata (newer keys will - overwrite older keys). + """Updates a atoms associated metadata. + + This update will take a provided dictionary or a list of (key, value) + pairs to include in the updated metadata (newer keys will overwrite + older keys) and after merging saves the updated data into the + underlying persistence layer. """ self._update_atom_metadata(atom_name, update_with) diff --git a/taskflow/task.py b/taskflow/task.py index e66b435c..067613a2 100644 --- a/taskflow/task.py +++ b/taskflow/task.py @@ -30,8 +30,12 @@ LOG = logging.getLogger(__name__) @six.add_metaclass(abc.ABCMeta) class BaseTask(atom.Atom): - """An abstraction that defines a potential piece of work that can be - applied and can be reverted to undo the work as a single task. + """An abstraction that defines a potential piece of work. + + This potential piece of work is expected to be able to contain + functionality that defines what can be executed to accomplish that work + as well as a way of defining what can be executed to reverted/undo that + same piece of work. """ TASK_EVENTS = ('update_progress', ) @@ -101,8 +105,12 @@ class BaseTask(atom.Atom): @contextlib.contextmanager def autobind(self, event_name, handler_func, **kwargs): - """Binds a given function to the task for a given event name and then - unbinds that event name and associated function automatically on exit. + """Binds & unbinds a given event handler to the task. + + This function binds and unbinds using the context manager protocol. + When events are triggered on the task of the given event name this + handler will automatically be called with the provided keyword + arguments. 
""" bound = False if handler_func is not None: @@ -135,10 +143,11 @@ class BaseTask(atom.Atom): self._events_listeners[event].append((handler, kwargs)) def unbind(self, event, handler=None): - """Remove a previously-attached event handler from the task. If handler - function not passed, then unbind all event handlers for the provided - event. If multiple of the same handlers are bound, then the first - match is removed (and only the first match). + """Remove a previously-attached event handler from the task. + + If a handler function not passed, then this will unbind all event + handlers for the provided event. If multiple of the same handlers are + bound, then the first match is removed (and only the first match). :param event: event type :param handler: handler previously bound diff --git a/taskflow/test.py b/taskflow/test.py index ce99a373..aa2ffd42 100644 --- a/taskflow/test.py +++ b/taskflow/test.py @@ -41,8 +41,11 @@ class GreaterThanEqual(object): class FailureRegexpMatcher(object): - """Matches if the failure was caused by the given exception and its string - matches to the given pattern. + """Matches if the failure was caused by the given exception and message. + + This will match if a given failure contains and exception of the given + class type and if its string message matches to the given regular + expression pattern. """ def __init__(self, exc_class, pattern): @@ -59,8 +62,10 @@ class FailureRegexpMatcher(object): class ItemsEqual(object): - """Matches the sequence that has same elements as reference - object, regardless of the order. + """Matches the items in two sequences. + + This matcher will validate that the provided sequence has the same elements + as a reference sequence, regardless of the order. 
""" def __init__(self, seq): @@ -167,9 +172,7 @@ class TestCase(testcase.TestCase): def assertFailuresRegexp(self, exc_class, pattern, callable_obj, *args, **kwargs): - """Assert that the callable failed with the given exception and its - string matches to the given pattern. - """ + """Asserts the callable failed with the given exception and message.""" try: with utils.wrap_all_failures(): callable_obj(*args, **kwargs) @@ -200,8 +203,11 @@ class MockTestCase(TestCase): return mocked def _patch_class(self, module, name, autospec=True, attach_as=None): - """Patch class, create class instance mock and attach them to - the master mock. + """Patches a modules class. + + This will create a class instance mock (using the provided name to + find the class in the module) and attach a mock class the master mock + to be cleaned up on test exit. """ if autospec: instance_mock = mock.Mock(spec_set=getattr(module, name)) diff --git a/taskflow/tests/test_examples.py b/taskflow/tests/test_examples.py index 43f8f4d9..025fabcd 100644 --- a/taskflow/tests/test_examples.py +++ b/taskflow/tests/test_examples.py @@ -91,8 +91,12 @@ def list_examples(): class ExamplesTestCase(taskflow.test.TestCase): @classmethod def update(cls): - """For each example, adds on a test method that the testing framework - will then run. + """For each example, adds on a test method. + + This newly created test method will then be activated by the testing + framework when it scans for and runs tests. This makes for a elegant + and simple way to ensure that all of the provided examples + actually work. """ def add_test_method(name, method_name): def test_example(self): diff --git a/taskflow/tests/unit/jobs/__init__.py b/taskflow/tests/unit/jobs/__init__.py index da9e7d90..e69de29b 100644 --- a/taskflow/tests/unit/jobs/__init__.py +++ b/taskflow/tests/unit/jobs/__init__.py @@ -1,15 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright (C) 2013 Yahoo! Inc. All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. diff --git a/taskflow/tests/unit/persistence/test_sql_persistence.py b/taskflow/tests/unit/persistence/test_sql_persistence.py index 35a36db6..b48f84a8 100644 --- a/taskflow/tests/unit/persistence/test_sql_persistence.py +++ b/taskflow/tests/unit/persistence/test_sql_persistence.py @@ -54,9 +54,7 @@ from taskflow.utils import lock_utils def _get_connect_string(backend, user, passwd, database=None, variant=None): - """Try to get a connection with a very specific set of values, if we get - these then we'll run the tests, otherwise they are skipped. - """ + """Forms a sqlalchemy database uri string for the given values.""" if backend == "postgres": if not variant: variant = 'psycopg2' diff --git a/taskflow/tests/unit/test_utils_lock_utils.py b/taskflow/tests/unit/test_utils_lock_utils.py index 30a120a3..2b2f1f83 100644 --- a/taskflow/tests/unit/test_utils_lock_utils.py +++ b/taskflow/tests/unit/test_utils_lock_utils.py @@ -29,7 +29,7 @@ from taskflow.utils import lock_utils NAPPY_TIME = 0.05 # We will spend this amount of time doing some "fake" work. 
-WORK_TIMES = [(0.01 + x/100.0) for x in range(0, 5)]
+WORK_TIMES = [(0.01 + x / 100.0) for x in range(0, 5)]
 
 
 def _find_overlaps(times, start, end):
diff --git a/taskflow/types/graph.py b/taskflow/types/graph.py
index 358f018a..d3e2bae2 100644
--- a/taskflow/types/graph.py
+++ b/taskflow/types/graph.py
@@ -31,8 +31,7 @@ class DiGraph(nx.DiGraph):
         return self
 
     def get_edge_data(self, u, v, default=None):
-        """Returns a *copy* of the attribute dictionary associated with edges
-        between (u, v).
+        """Returns a *copy* of the edge attribute dictionary between (u, v).
 
         NOTE(harlowja): this differs from the networkx get_edge_data() as that
         function does not return a copy (but returns a reference to the actual
@@ -48,7 +47,9 @@ class DiGraph(nx.DiGraph):
         return nx.topological_sort(self)
 
     def pformat(self):
-        """Pretty formats your graph into a string representation that includes
+        """Pretty formats your graph into a string.
+
+        This pretty formatted string representation includes
         many useful details about your graph, including; name, type, frozeness,
         node count, nodes, edge count, edges, graph density and graph cycles (if
         any).
         """
diff --git a/taskflow/utils/kazoo_utils.py b/taskflow/utils/kazoo_utils.py
index 8ca8bf52..84f6b262 100644
--- a/taskflow/utils/kazoo_utils.py
+++ b/taskflow/utils/kazoo_utils.py
@@ -46,8 +46,12 @@ def finalize_client(client):
 
 def check_compatible(client, min_version=None, max_version=None):
-    """Checks if a kazoo client is backed by a zookeeper server version
-    that satisfies a given min (inclusive) and max (inclusive) version range.
+    """Checks if a kazoo client is backed by a zookeeper server version.
+
+    This check will verify that the zookeeper server version that the client
+    is connected to satisfies a given minimum version (inclusive) and
+    maximum (inclusive) version range. If the server is not in the provided
+    version range then an exception is raised indicating this.
""" server_version = None if min_version: diff --git a/taskflow/utils/lock_utils.py b/taskflow/utils/lock_utils.py index 942e27bb..c3af6895 100644 --- a/taskflow/utils/lock_utils.py +++ b/taskflow/utils/lock_utils.py @@ -48,9 +48,16 @@ def try_lock(lock): def locked(*args, **kwargs): - """A decorator that looks for a given attribute (typically a lock or a list - of locks) and before executing the decorated function uses the given lock - or list of locks as a context manager, automatically releasing on exit. + """A locking decorator. + + It will look for a provided attribute (typically a lock or a list + of locks) on the first argument of the function decorated (typically this + is the 'self' object) and before executing the decorated function it + activates the given lock or list of locks as a context manager, + automatically releasing that lock on exit. + + NOTE(harlowja): if no attribute is provided then by default the attribute + named '_lock' is looked for. """ def decorator(f): @@ -244,8 +251,11 @@ class ReaderWriterLock(_ReaderWriterLockBase): class DummyReaderWriterLock(_ReaderWriterLockBase): - """A dummy reader/writer lock that doesn't lock anything but provides same - functions as a normal reader/writer lock class. + """A dummy reader/writer lock. + + This dummy lock doesn't lock anything but provides the same functions as a + normal reader/writer lock class and can be useful in unit tests or other + similar scenarios (do *not* use it if locking is actually required). """ @contextlib.contextmanager def write_lock(self): @@ -271,11 +281,10 @@ class DummyReaderWriterLock(_ReaderWriterLockBase): class MultiLock(object): - """A class which can attempt to obtain many locks at once and release - said locks when exiting. + """A class which attempts to obtain & release many locks at once. - Useful as a context manager around many locks (instead of having to nest - said individual context managers). 
+ It is typically useful as a context manager around many locks (instead of + having to nest individual lock context managers). """ def __init__(self, locks): @@ -318,7 +327,9 @@ class MultiLock(object): class _InterProcessLock(object): - """Lock implementation which allows multiple locks, working around + """An interprocess locking implementation. + + This is a lock implementation which allows multiple locks, working around issues like bugs.debian.org/cgi-bin/bugreport.cgi?bug=632857 and does not require any cleanup. Since the lock is always held on a file descriptor rather than outside of the process, the lock gets dropped diff --git a/taskflow/utils/misc.py b/taskflow/utils/misc.py index d2360c9b..f50e64eb 100644 --- a/taskflow/utils/misc.py +++ b/taskflow/utils/misc.py @@ -50,9 +50,11 @@ _SCHEME_REGEX = re.compile(r"^([A-Za-z][A-Za-z0-9+.-]*):") def merge_uri(uri_pieces, conf): - """Merges the username, password, hostname, and query params of a uri into - the given configuration (does not overwrite the configuration keys if they - already exist) and returns the adjusted configuration. + """Merges a parsed uri into the given configuration dictionary. + + Merges the username, password, hostname, and query params of a uri into + the given configuration (it does not overwrite the configuration keys if + they already exist) and returns the adjusted configuration. NOTE(harlowja): does not merge the path, scheme or fragment. """ @@ -72,9 +74,7 @@ def merge_uri(uri_pieces, conf): def parse_uri(uri, query_duplicates=False): - """Parses a uri into its components and returns a dictionary containing - those components. - """ + """Parses a uri into its components.""" # Do some basic validation before continuing... 
if not isinstance(uri, six.string_types): raise TypeError("Can only parse string types to uri data, " @@ -176,9 +176,17 @@ def decode_json(raw_data, root_types=(dict,)): class cachedproperty(object): - """Descriptor that can be placed on instance methods to translate + """A descriptor property that is only evaluated once.. + + This caching descriptor can be placed on instance methods to translate those methods into properties that will be cached in the instance (avoiding - repeated creation checking logic to do the equivalent). + repeated attribute checking logic to do the equivalent). + + NOTE(harlowja): by default the property that will be saved will be under + the decorated methods name prefixed with an underscore. For example if we + were to attach this descriptor to an instance method 'get_thing(self)' the + cached property would be stored under '_get_thing' in the self object + after the first call to 'get_thing' occurs. """ def __init__(self, fget): # If a name is provided (as an argument) then this will be the string @@ -226,8 +234,10 @@ def wallclock(): def wraps(fn): - """This will not be needed in python 3.2 or greater which already has this - built-in to its functools.wraps method. + """Wraps a method and ensures the __wrapped__ attribute is set. + + NOTE(harlowja): This will not be needed in python 3.2 or greater which + already has this built-in to its functools.wraps method. """ def wrapper(f): @@ -239,9 +249,7 @@ def wraps(fn): def millis_to_datetime(milliseconds): - """Converts a given number of milliseconds from the epoch into a datetime - object. - """ + """Converts number of milliseconds (from epoch) into a datetime object.""" return datetime.datetime.fromtimestamp(float(milliseconds) / 1000) @@ -313,9 +321,7 @@ _ASCII_WORD_SYMBOLS = frozenset(string.ascii_letters + string.digits + '_') def is_valid_attribute_name(name, allow_self=False, allow_hidden=False): - """Validates that a string name is a valid/invalid python attribute - name. 
- """ + """Checks that a string is a valid/invalid python attribute name.""" return all(( isinstance(name, six.string_types), len(name) > 0, @@ -332,8 +338,12 @@ def is_valid_attribute_name(name, allow_self=False, allow_hidden=False): class AttrDict(dict): - """Helper utility dict sub-class to create a class that can be accessed by - attribute name from a dictionary that contains a set of keys and values. + """Dictionary subclass that allows for attribute based access. + + This subclass allows for accessing a dictionaries keys and values by + accessing those keys as regular attributes. Keys that are not valid python + attribute names can not of course be acccessed/set (those keys must be + accessed/set by the traditional dictionary indexing operators instead). """ NO_ATTRS = tuple(reflection.get_member_names(dict)) @@ -392,9 +402,12 @@ class Timeout(object): class ExponentialBackoff(object): - """An iterable object that will yield back an exponential delay sequence - provided an exponent and a number of items to yield. This object may be - iterated over multiple times (yielding the same sequence each time). + """An iterable object that will yield back an exponential delay sequence. + + This objects provides for a configurable exponent, count of numbers + to generate, and a maximum number that will be returned. This object may + also be iterated over multiple times (yielding the same sequence each + time). """ def __init__(self, count, exponent=2, max_backoff=3600): self.count = max(0, int(count)) @@ -541,9 +554,12 @@ class StopWatch(object): class Notifier(object): - """A utility helper class that can be used to subscribe to - notifications of events occurring as well as allow a entity to post said - notifications to subscribers. + """A notification helper class. 
+
+    It is intended to be used to subscribe to notifications of events
+    occurring as well as allow an entity to post said notifications to any
+    associated subscribers without having either entity care about how this
+    notification occurs.
     """
 
     RESERVED_KEYS = ('details',)
@@ -665,12 +681,15 @@ def are_equal_exc_info_tuples(ei1, ei2):
 
 @contextlib.contextmanager
 def capture_failure():
-    """Save current exception, and yield back the failure (or raises a
-    runtime error if no active exception is being handled).
+    """Captures the occurring exception and provides a failure back.
 
-    In some cases the exception context can be cleared, resulting in None
-    being attempted to be saved after an exception handler is run. This
-    can happen when eventlet switches greenthreads or when running an
+    This will save the current exception information and yield back a
+    failure object for the caller to use (it will raise a runtime error if
+    no active exception is being handled).
+
+    This is useful since in some cases the exception context can be cleared,
+    resulting in None being attempted to be saved after an exception handler is
+    run. This can happen when eventlet switches greenthreads or when running an
     exception handler, code raises and catches an exception. In both cases
     the exception context will be cleared.
diff --git a/taskflow/utils/persistence_utils.py b/taskflow/utils/persistence_utils.py
index f58cea09..e3c4ba36 100644
--- a/taskflow/utils/persistence_utils.py
+++ b/taskflow/utils/persistence_utils.py
@@ -39,11 +39,10 @@ def temporary_log_book(backend=None):
 
 
 def temporary_flow_detail(backend=None):
-    """Creates a temporary flow detail and logbook for temporary usage in
-    the given backend.
+    """Creates a temporary flow detail and logbook in the given backend.
 
     Mainly useful for tests and other use cases where a temporary flow detail
-    is needed for a short-period of time.
+    and a temporary logbook is needed for a short-period of time.
""" flow_id = uuidutils.generate_uuid() book = temporary_log_book(backend) @@ -57,9 +56,18 @@ def temporary_flow_detail(backend=None): def create_flow_detail(flow, book=None, backend=None, meta=None): - """Creates a flow detail for the given flow and adds it to the provided - logbook (if provided) and then uses the given backend (if provided) to - save the logbook then returns the created flow detail. + """Creates a flow detail for a flow & adds & saves it in a logbook. + + This will create a flow detail for the given flow using the flow name, + and add it to the provided logbook and then uses the given backend to save + the logbook and then returns the created flow detail. + + If no book is provided a temporary one will be created automatically (no + reference to the logbook will be returned, so this should nearly *always* + be provided or only used in situations where no logbook is needed, for + example in tests). If no backend is provided then no saving will occur and + the created flow detail will not be persisted even if the flow detail was + added to a given (or temporarily generated) logbook. """ flow_id = uuidutils.generate_uuid() flow_name = getattr(flow, 'name', None) diff --git a/taskflow/utils/reflection.py b/taskflow/utils/reflection.py index a5d80b55..c7f1a06a 100644 --- a/taskflow/utils/reflection.py +++ b/taskflow/utils/reflection.py @@ -31,8 +31,11 @@ def _get_members(obj, exclude_hidden): def find_subclasses(locations, base_cls, exclude_hidden=True): - """Examines the given locations for types which are subclasses of the base - class type provided and returns the found subclasses. + """Finds subclass types in the given locations. + + This will examines the given locations for types which are subclasses of + the base class type provided and returns the found subclasses (or fails + with exceptions if this introspection can not be accomplished). 
If a string is provided as one of the locations it will be imported and examined if it is a subclass of the base class. If a module is given, diff --git a/test-requirements.txt b/test-requirements.txt index 7e664134..e054e1b0 100644 --- a/test-requirements.txt +++ b/test-requirements.txt @@ -1,4 +1,4 @@ -hacking>=0.8.0,<0.9 +hacking>=0.9.1,<0.10 discover coverage>=3.6 mock>=1.0 diff --git a/tox-tmpl.ini b/tox-tmpl.ini index 96df6f62..08164ecf 100644 --- a/tox-tmpl.ini +++ b/tox-tmpl.ini @@ -39,6 +39,11 @@ commands = python setup.py testr --coverage --testr-args='{posargs}' commands = {posargs} [flake8] +# E265 block comment should start with '# ' +# H305 imports not grouped correctly +# H307 like imports should be grouped together +# H904 Wrap long lines in parentheses instead of a backslash +ignore = H307,H305,H904,E265 builtins = _ exclude = .venv,.tox,dist,doc,./taskflow/openstack/common,*egg,.git,build,tools diff --git a/tox.ini b/tox.ini index 4768beaa..f63a612d 100644 --- a/tox.ini +++ b/tox.ini @@ -68,6 +68,7 @@ commands = python setup.py testr --coverage --testr-args='{posargs}' commands = {posargs} [flake8] +ignore = H307,H305,H904,E265 builtins = _ exclude = .venv,.tox,dist,doc,./taskflow/openstack/common,*egg,.git,build,tools From 564f145b25794a169a1c505ea1ac362a28e8414e Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Fri, 13 Jun 2014 19:29:42 -0700 Subject: [PATCH 124/188] Adjust sphinx requirement Bump up the sphinx requirement to match the openstack requirements version to ensure consistency among projects. 
Change-Id: Ia348566c1c22ca31fa1cbdeb38d902ca0f622378 --- test-requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test-requirements.txt b/test-requirements.txt index 8c0d3106..4ba1754b 100644 --- a/test-requirements.txt +++ b/test-requirements.txt @@ -7,5 +7,5 @@ testrepository>=0.0.18 testtools>=0.9.34 zake>=0.0.18 # docs build jobs -sphinx>=1.2.1,<1.3 +sphinx>=1.1.2,!=1.2.0,<1.3 oslosphinx From 92ace495e1580f84c247c29ddabc06c2b9aa3431 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Mon, 16 Jun 2014 14:04:17 -0700 Subject: [PATCH 125/188] Fix doc which should state fetch() usage The doc should say single results can be fetched with fetch() and not using the fetch_all() method which is used for a different purpose. Change-Id: I57f23f5f3f72336d91aedcb413d4f789a2844b06 --- doc/source/inputs_and_outputs.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/source/inputs_and_outputs.rst b/doc/source/inputs_and_outputs.rst index d89d6ab8..cc1fd2d0 100644 --- a/doc/source/inputs_and_outputs.rst +++ b/doc/source/inputs_and_outputs.rst @@ -145,7 +145,7 @@ Outputs As you can see from examples above, the run method returns all flow outputs in a ``dict``. This same data can be fetched via :py:meth:`~taskflow.storage.Storage.fetch_all` method of the storage. You can -also get single results using :py:meth:`~taskflow.storage.Storage.fetch_all`. +also get single results using :py:meth:`~taskflow.storage.Storage.fetch`. For example: .. doctest:: From d5b835efb8bf0a192e066dd8ec393d0c60a08537 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Mon, 16 Jun 2014 19:01:28 -0700 Subject: [PATCH 126/188] Fix E265 hacking warnings Adjust the example code to comply with the new E265 hacking check. 
This check warns about the following: * E265 block comment should start with '# ' Change-Id: I77aaf8c0bbc50bad9646f8192b1e1b80cf0afc8c --- taskflow/examples/resume_from_backend.py | 10 +++++----- tox-tmpl.ini | 3 +-- tox.ini | 2 +- 3 files changed, 7 insertions(+), 8 deletions(-) diff --git a/taskflow/examples/resume_from_backend.py b/taskflow/examples/resume_from_backend.py index 6e270408..ef20a160 100644 --- a/taskflow/examples/resume_from_backend.py +++ b/taskflow/examples/resume_from_backend.py @@ -56,7 +56,7 @@ import example_utils # noqa # -### UTILITY FUNCTIONS ######################################### +# UTILITY FUNCTIONS ######################################### def print_wrapped(text): @@ -82,7 +82,7 @@ def find_flow_detail(backend, lb_id, fd_id): return lb.find(fd_id) -### CREATE FLOW ############################################### +# CREATE FLOW ############################################### class InterruptTask(task.Task): @@ -104,12 +104,12 @@ def flow_factory(): TestTask(name='second')) -### INITIALIZE PERSISTENCE #################################### +# INITIALIZE PERSISTENCE #################################### with example_utils.get_backend() as backend: logbook = p_utils.temporary_log_book(backend) - ### CREATE AND RUN THE FLOW: FIRST ATTEMPT #################### + # CREATE AND RUN THE FLOW: FIRST ATTEMPT #################### flow = flow_factory() flowdetail = p_utils.create_flow_detail(flow, logbook, backend) @@ -121,7 +121,7 @@ with example_utils.get_backend() as backend: engine.run() print_task_states(flowdetail, "After running") - ### RE-CREATE, RESUME, RUN #################################### + # RE-CREATE, RESUME, RUN #################################### print_wrapped("Resuming and running again") diff --git a/tox-tmpl.ini b/tox-tmpl.ini index 08164ecf..2d32f20c 100644 --- a/tox-tmpl.ini +++ b/tox-tmpl.ini @@ -39,11 +39,10 @@ commands = python setup.py testr --coverage --testr-args='{posargs}' commands = {posargs} [flake8] -# E265 block 
comment should start with '# ' # H305 imports not grouped correctly # H307 like imports should be grouped together # H904 Wrap long lines in parentheses instead of a backslash -ignore = H307,H305,H904,E265 +ignore = H307,H305,H904 builtins = _ exclude = .venv,.tox,dist,doc,./taskflow/openstack/common,*egg,.git,build,tools diff --git a/tox.ini b/tox.ini index f63a612d..5adee73c 100644 --- a/tox.ini +++ b/tox.ini @@ -68,7 +68,7 @@ commands = python setup.py testr --coverage --testr-args='{posargs}' commands = {posargs} [flake8] -ignore = H307,H305,H904,E265 +ignore = H307,H305,H904 builtins = _ exclude = .venv,.tox,dist,doc,./taskflow/openstack/common,*egg,.git,build,tools From fad9e9aeb47bca14a70648bf153de897bd2525d8 Mon Sep 17 00:00:00 2001 From: "Ivan A. Melnikov" Date: Wed, 7 May 2014 13:42:42 +0400 Subject: [PATCH 127/188] Add example for pseudo-scoping Add an example how to use flow factories and prefixing to achieve effect similar to scoping, but without it. Change-Id: Ia587ad59f76a0dd477dba79c24e5f86f4b4a34ba --- taskflow/examples/pseudo_scoping.out.txt | 11 +++ taskflow/examples/pseudo_scoping.py | 113 +++++++++++++++++++++++ 2 files changed, 124 insertions(+) create mode 100644 taskflow/examples/pseudo_scoping.out.txt create mode 100644 taskflow/examples/pseudo_scoping.py diff --git a/taskflow/examples/pseudo_scoping.out.txt b/taskflow/examples/pseudo_scoping.out.txt new file mode 100644 index 00000000..81a27765 --- /dev/null +++ b/taskflow/examples/pseudo_scoping.out.txt @@ -0,0 +1,11 @@ +Running simple flow: +Fetching number for Josh. +Calling Josh 777. + +Calling many people using prefixed factory: +Fetching number for Jim. +Calling Jim 444. +Fetching number for Joe. +Calling Joe 555. +Fetching number for Josh. +Calling Josh 777. 
diff --git a/taskflow/examples/pseudo_scoping.py b/taskflow/examples/pseudo_scoping.py new file mode 100644 index 00000000..6a964191 --- /dev/null +++ b/taskflow/examples/pseudo_scoping.py @@ -0,0 +1,113 @@ +# -*- coding: utf-8 -*- + +# Copyright (C) 2014 Ivan Melnikov +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import logging +import os +import sys + +logging.basicConfig(level=logging.ERROR) + +top_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), + os.pardir, + os.pardir)) +sys.path.insert(0, top_dir) + +import taskflow.engines +from taskflow.patterns import linear_flow as lf +from taskflow import task + +# INTRO: pseudo-scoping by adding prefixes + +# Sometimes you need scoping -- e.g. for adding several +# similar subflows to one flow to do same stuff for different +# data. But current version of TaskFlow does not allow that +# directly, so you have to resort to some kind of trickery. +# One (and more or less recommended, if not the only) way of +# solving the problem is to transform every task name, it's +# provides and requires values -- e.g. by adding prefix to them. +# This example shows how this could be done. + + +# The example task is simple: for each specified person, fetch +# his or her phone number from phone book and call. 
+ + +PHONE_BOOK = { + 'jim': '444', + 'joe': '555', + 'iv_m': '666', + 'josh': '777' +} + + +class FetchNumberTask(task.Task): + """Task that fetches number from phone book.""" + + default_provides = 'number' + + def execute(self, person): + print('Fetching number for %s.' % person) + return PHONE_BOOK[person.lower()] + + +class CallTask(task.Task): + """Task that calls person by number.""" + + def execute(self, person, number): + print('Calling %s %s.' % (person, number)) + +# This is how it works for one person: + +simple_flow = lf.Flow('simple one').add( + FetchNumberTask(), + CallTask()) +print('Running simple flow:') +taskflow.engines.run(simple_flow, store={'person': 'Josh'}) + + +# To call several people you'll need a factory function that will +# make a flow with given prefix for you. We need to add prefix +# to task names, their provides and requires values. For requires, +# we use `rebind` argument of task constructor. +def subflow_factory(prefix): + def pr(what): + return '%s-%s' % (prefix, what) + + return lf.Flow(pr('flow')).add( + FetchNumberTask(pr('fetch'), + provides=pr('number'), + rebind=[pr('person')]), + CallTask(pr('call'), + rebind=[pr('person'), pr('number')]) + ) + + +def call_them_all(): + # Let's call them all. We need a flow: + flow = lf.Flow('call-them-prefixed') + + # We'll also need to inject person names with prefixed argument + # name to storage to satisfy task requirements. 
+ persons = {} + + for person in ('Jim', 'Joe', 'Josh'): + prefix = person.lower() + persons['%s-person' % prefix] = person + flow.add(subflow_factory(prefix)) + taskflow.engines.run(flow, store=persons) + +print('\nCalling many people using prefixed factory:') +call_them_all() From 1727c734e9d4c5f14c6a44f7a72afb9f65a89a4b Mon Sep 17 00:00:00 2001 From: OpenStack Proposal Bot Date: Wed, 18 Jun 2014 00:48:10 +0000 Subject: [PATCH 128/188] Updated from global requirements Change-Id: I9ff09bd79cfa5422e75e21463ac3d7085f20d721 --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 2b42a00c..87e82ad3 100644 --- a/requirements.txt +++ b/requirements.txt @@ -3,7 +3,7 @@ pbr>=0.6,!=0.7,<1.0 anyjson>=0.3.3 iso8601>=0.1.9 # Python 2->3 compatibility library. -six>=1.6.0 +six>=1.7.0 # Very nice graph library networkx>=1.8 Babel>=1.3 From 270a8a33ba8509aa44e38c0190870fb9fe391374 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Mon, 9 Jun 2014 11:30:30 -0700 Subject: [PATCH 129/188] Make intentions a tuple (to denote immutability) Since the atom intentions are not meant to be mutable we should make sure that we create the object as a type which is not mutable. This helps with those looking over the code to understand its desired usage which is not to be a mutable collection. 
Change-Id: I84948faf2e6bd8f4b4a9b27e390c9a03a14efa4b --- taskflow/states.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/taskflow/states.py b/taskflow/states.py index 963e4f64..500d6fef 100644 --- a/taskflow/states.py +++ b/taskflow/states.py @@ -46,7 +46,7 @@ EXECUTE = 'EXECUTE' IGNORE = 'IGNORE' REVERT = 'REVERT' RETRY = 'RETRY' -INTENTIONS = [EXECUTE, IGNORE, REVERT, RETRY] +INTENTIONS = (EXECUTE, IGNORE, REVERT, RETRY) # Additional engine states SCHEDULING = 'SCHEDULING' From ade8bb35fa934982d33b086eab1787ea15d0b214 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Wed, 28 May 2014 18:21:11 -0700 Subject: [PATCH 130/188] Add a tree type A tree module will be very useful for tracking tree structures in taskflow. So to encourage development and usage of such structures add a type module and helper classes that can be used perform tree operations on tree structures. Change-Id: I63c0653d051aeb4d1ea8a55f0e25fc25ff9e37f1 --- taskflow/tests/unit/test_types.py | 115 +++++++++++++++++++ taskflow/types/tree.py | 182 ++++++++++++++++++++++++++++++ 2 files changed, 297 insertions(+) create mode 100644 taskflow/tests/unit/test_types.py create mode 100644 taskflow/types/tree.py diff --git a/taskflow/tests/unit/test_types.py b/taskflow/tests/unit/test_types.py new file mode 100644 index 00000000..5e5b074d --- /dev/null +++ b/taskflow/tests/unit/test_types.py @@ -0,0 +1,115 @@ +# -*- coding: utf-8 -*- + +# Copyright (C) 2014 Yahoo! Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. + +import networkx as nx + +from taskflow.types import graph +from taskflow.types import tree + +from taskflow import test + + +class GraphTest(test.TestCase): + def test_no_successors_no_predecessors(self): + g = graph.DiGraph() + g.add_node("a") + g.add_node("b") + g.add_node("c") + g.add_edge("b", "c") + self.assertEqual(set(['a', 'b']), + set(g.no_predecessors_iter())) + self.assertEqual(set(['a', 'c']), + set(g.no_successors_iter())) + + def test_directed(self): + g = graph.DiGraph() + g.add_node("a") + g.add_node("b") + g.add_edge("a", "b") + self.assertTrue(g.is_directed_acyclic()) + g.add_edge("b", "a") + self.assertFalse(g.is_directed_acyclic()) + + def test_frozen(self): + g = graph.DiGraph() + self.assertFalse(g.frozen) + g.add_node("b") + g.freeze() + self.assertRaises(nx.NetworkXError, g.add_node, "c") + + +class TreeTest(test.TestCase): + def _make_species(self): + # This is the following tree: + # + # animal + # |__mammal + # | |__horse + # | |__primate + # | |__monkey + # | |__human + # |__reptile + a = tree.Node("animal") + m = tree.Node("mammal") + r = tree.Node("reptile") + a.add(m) + a.add(r) + m.add(tree.Node("horse")) + p = tree.Node("primate") + m.add(p) + p.add(tree.Node("monkey")) + p.add(tree.Node("human")) + return a + + def test_path(self): + root = self._make_species() + human = root.find("human") + self.assertIsNotNone(human) + p = list([n.item for n in human.path_iter()]) + self.assertEqual(['human', 'primate', 'mammal', 'animal'], p) + + def test_empty(self): + root = tree.Node("josh") + self.assertTrue(root.empty()) + + def test_not_empty(self): + root = self._make_species() + self.assertFalse(root.empty()) + + def test_node_count(self): + root = self._make_species() + self.assertEqual(7, 1 + root.child_count(only_direct=False)) + + def test_index(self): + root = self._make_species() + self.assertEqual(0, root.index("mammal")) + 
self.assertEqual(1, root.index("reptile")) + + def test_contains(self): + root = self._make_species() + self.assertIn("monkey", root) + self.assertNotIn("bird", root) + + def test_freeze(self): + root = self._make_species() + root.freeze() + self.assertRaises(tree.FrozenNode, root.add, "bird") + + def test_dfs_itr(self): + root = self._make_species() + things = list([n.item for n in root.dfs_iter(include_self=True)]) + self.assertEqual(set(['animal', 'reptile', 'mammal', 'horse', + 'primate', 'monkey', 'human']), set(things)) diff --git a/taskflow/types/tree.py b/taskflow/types/tree.py new file mode 100644 index 00000000..41369b04 --- /dev/null +++ b/taskflow/types/tree.py @@ -0,0 +1,182 @@ +# -*- coding: utf-8 -*- + +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (C) 2014 Yahoo! Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import six + + +class FrozenNode(Exception): + """Exception raised when a frozen node is modified.""" + + +class _DFSIter(object): + """Depth first iterator (non-recursive) over the child nodes.""" + + def __init__(self, root, include_self=False): + self.root = root + self.include_self = bool(include_self) + + def __iter__(self): + stack = [] + if self.include_self: + stack.append(self.root) + else: + for child_node in self.root: + stack.append(child_node) + while stack: + node = stack.pop() + # Visit the node. + yield node + # Traverse the left & right subtree. 
+ for child_node in reversed(list(node)): + stack.append(child_node) + + +class Node(object): + """A n-ary node class that can be used to create tree structures.""" + + def __init__(self, item, **kwargs): + self.item = item + self.parent = None + self.metadata = dict(kwargs) + self._children = [] + self._frozen = False + + def _frozen_add(self, child): + raise FrozenNode("Frozen node(s) can't be modified") + + def freeze(self): + if not self._frozen: + for n in self: + n.freeze() + self.add = self._frozen_add + self._frozen = True + + def add(self, child): + child.parent = self + self._children.append(child) + + def empty(self): + """Returns if the node is a leaf node.""" + return self.child_count() == 0 + + def path_iter(self, include_self=True): + """Yields back the path from this node to the root node.""" + if include_self: + node = self + else: + node = self.parent + while node is not None: + yield node + node = node.parent + + def find(self, item): + """Returns the node for an item if it exists in this node. + + This will search not only this node but also any children nodes and + finally if nothing is found then None is returned instead of a node + object. + """ + for n in self.dfs_iter(include_self=True): + if n.item == item: + return n + return None + + def __contains__(self, item): + """Returns if this item exists in this node or this nodes children.""" + return self.find(item) is not None + + def __getitem__(self, index): + # NOTE(harlowja): 0 is the right most index, len - 1 is the left most + return self._children[index] + + def pformat(self): + """Recursively formats a node into a nice string representation. 
+ + Example Input: + yahoo = tt.Node("CEO") + yahoo.add(tt.Node("Infra")) + yahoo[0].add(tt.Node("Boss")) + yahoo[0][0].add(tt.Node("Me")) + yahoo.add(tt.Node("Mobile")) + yahoo.add(tt.Node("Mail")) + + Example Output: + CEO + |__Infra + | |__Boss + | |__Me + |__Mobile + |__Mail + """ + def _inner_pformat(node, level): + if level == 0: + yield six.text_type(node.item) + prefix = "" + else: + yield "__%s" % six.text_type(node.item) + prefix = " " * 2 + children = list(node) + for (i, child) in enumerate(children): + for (j, text) in enumerate(_inner_pformat(child, level + 1)): + if j == 0 or i + 1 < len(children): + text = prefix + "|" + text + else: + text = prefix + " " + text + yield text + expected_lines = self.child_count(only_direct=False) + accumulator = six.StringIO() + for i, line in enumerate(_inner_pformat(self, 0)): + accumulator.write(line) + if i < expected_lines: + accumulator.write('\n') + return accumulator.getvalue() + + def child_count(self, only_direct=True): + """Returns how many children this node has. + + This can be either only the direct children of this node or inclusive + of all children nodes of this node (children of children and so-on). + + NOTE(harlowja): it does not account for the current node in this count. 
+ """ + if not only_direct: + count = 0 + for _node in self.dfs_iter(): + count += 1 + return count + return len(self._children) + + def __iter__(self): + """Iterates over the direct children of this node (right->left).""" + for c in self._children: + yield c + + def index(self, item): + """Finds the child index of a given item, searchs in added order.""" + index_at = None + for (i, child) in enumerate(self._children): + if child.item == item: + index_at = i + break + if index_at is None: + raise ValueError("%s is not contained in any child" % (item)) + return index_at + + def dfs_iter(self, include_self=False): + """Depth first iteration (non-recursive) over the child nodes.""" + return _DFSIter(self, include_self=include_self) From cdd0bf9cbd0af543bdd1e47397a2c25383f6ccb8 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Mon, 16 Jun 2014 19:20:53 -0700 Subject: [PATCH 131/188] Remove functions created for pre-six 1.7.0 With the new six version we can remove some of the utility functions that we created now that upstream six has equivalent or better functionality. 
Change-Id: I637fcf1475ca1af02608e736ca920c3e116c5529 --- taskflow/utils/lock_utils.py | 2 +- taskflow/utils/misc.py | 16 ---------------- taskflow/utils/threading_utils.py | 11 +++++------ 3 files changed, 6 insertions(+), 23 deletions(-) diff --git a/taskflow/utils/lock_utils.py b/taskflow/utils/lock_utils.py index c3af6895..561b5d58 100644 --- a/taskflow/utils/lock_utils.py +++ b/taskflow/utils/lock_utils.py @@ -63,7 +63,7 @@ def locked(*args, **kwargs): def decorator(f): attr_name = kwargs.get('lock', '_lock') - @misc.wraps(f) + @six.wraps(f) def wrapper(*args, **kwargs): lock = getattr(args[0], attr_name) if isinstance(lock, (tuple, list)): diff --git a/taskflow/utils/misc.py b/taskflow/utils/misc.py index 607bbe2a..eea56d6d 100644 --- a/taskflow/utils/misc.py +++ b/taskflow/utils/misc.py @@ -20,7 +20,6 @@ import contextlib import copy import datetime import errno -import functools import inspect import keyword import logging @@ -233,21 +232,6 @@ def wallclock(): return time.time() -def wraps(fn): - """Wraps a method and ensures the __wrapped__ attribute is set. - - NOTE(harlowja): This will not be needed in python 3.2 or greater which - already has this built-in to its functools.wraps method. - """ - - def wrapper(f): - f = functools.wraps(fn)(f) - f.__wrapped__ = getattr(fn, '__wrapped__', fn) - return f - - return wrapper - - def millis_to_datetime(milliseconds): """Converts number of milliseconds (from epoch) into a datetime object.""" return datetime.datetime.fromtimestamp(float(milliseconds) / 1000) diff --git a/taskflow/utils/threading_utils.py b/taskflow/utils/threading_utils.py index c669619a..2af17023 100644 --- a/taskflow/utils/threading_utils.py +++ b/taskflow/utils/threading_utils.py @@ -17,13 +17,12 @@ import multiprocessing import threading -import six +from six.moves import _thread -if six.PY2: - from thread import get_ident # noqa -else: - # In python3+ the get_ident call moved (whhhy??) 
- from threading import get_ident # noqa + +def get_ident(): + """Return the 'thread identifier' of the current thread.""" + return _thread.get_ident() def get_optimal_thread_count(): From 510635a9134f7ed2563e60649802708564a5d39b Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Tue, 3 Jun 2014 15:12:11 -0700 Subject: [PATCH 132/188] Remove check_doc.py and use doc8 Instead of using a helper script that is internal to taskflow use the doc8 package (which was created based on the check_doc.py work) instead. This avoids repeating duplicated code when a stackforge/external pypi package can provide the same functionality. Change-Id: Ie5a43f96b20e34f3955657ad2ef8beba05a4300e --- tools/check_doc.py | 114 --------------------------------------------- tox-tmpl.ini | 3 +- tox.ini | 3 +- 3 files changed, 4 insertions(+), 116 deletions(-) delete mode 100644 tools/check_doc.py diff --git a/tools/check_doc.py b/tools/check_doc.py deleted file mode 100644 index 04c70dc2..00000000 --- a/tools/check_doc.py +++ /dev/null @@ -1,114 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- - -# Copyright (C) 2014 Ivan Melnikov -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -"""Check documentation for simple style requirements. - -What is checked: - - lines should not be longer than 79 characters - - exception: line with no whitespace except maybe in the beginning - - exception: line that starts with '..' 
-- longer directives are allowed, - including footnotes - - no tabulation for indentation - - no trailing whitespace -""" - -import fnmatch -import os -import re -import sys - - -FILE_PATTERNS = ['*.rst', '*.txt'] -MAX_LINE_LENGTH = 79 -TRAILING_WHITESPACE_REGEX = re.compile('\s$') -STARTING_WHITESPACE_REGEX = re.compile('^(\s+)') - - -def check_max_length(line): - if len(line) > MAX_LINE_LENGTH: - stripped = line.strip() - if not any(( - line.startswith('..'), # this is directive - stripped.startswith('>>>'), # this is doctest - stripped.startswith('...'), # and this - stripped.startswith('taskflow.'), - ' ' not in stripped # line can't be split - )): - yield ('D001', 'Line too long') - - -def check_trailing_whitespace(line): - if TRAILING_WHITESPACE_REGEX.search(line): - yield ('D002', 'Trailing whitespace') - - -def check_indentation_no_tab(line): - match = STARTING_WHITESPACE_REGEX.search(line) - if match: - spaces = match.group(1) - if '\t' in spaces: - yield ('D003', 'Tabulation used for indentation') - - -LINE_CHECKS = (check_max_length, - check_trailing_whitespace, - check_indentation_no_tab) - - -def check_lines(lines): - for idx, line in enumerate(lines, 1): - line = line.rstrip('\n') - for check in LINE_CHECKS: - for code, message in check(line): - yield idx, code, message - - -def check_files(filenames): - for fn in filenames: - with open(fn) as f: - for line_num, code, message in check_lines(f): - yield fn, line_num, code, message - - -def find_files(pathes, patterns): - for path in pathes: - if os.path.isfile(path): - yield path - elif os.path.isdir(path): - for root, dirnames, filenames in os.walk(path): - for filename in filenames: - if any(fnmatch.fnmatch(filename, pattern) - for pattern in patterns): - yield os.path.join(root, filename) - else: - print('Invalid path: %s' % path) - - -def main(): - ok = True - if len(sys.argv) > 1: - dirs = sys.argv[1:] - else: - dirs = ['.'] - for error in check_files(find_files(dirs, FILE_PATTERNS)): - ok = False 
- print('%s:%s: %s %s' % error) - sys.exit(0 if ok else 1) - -if __name__ == '__main__': - main() diff --git a/tox-tmpl.ini b/tox-tmpl.ini index 08164ecf..8cc6940e 100644 --- a/tox-tmpl.ini +++ b/tox-tmpl.ini @@ -56,10 +56,11 @@ deps = {[testenv:py26-sa7-mysql-ev]deps} deps = -r{toxinidir}/requirements.txt -r{toxinidir}/optional-requirements.txt -r{toxinidir}/test-requirements.txt + doc8>=0.3.4 commands = python setup.py testr --slowest --testr-args='{posargs}' sphinx-build -b doctest doc/source doc/build - python tools/check_doc.py doc/source + doc8 doc/source [testenv:py33] deps = {[testenv]deps} diff --git a/tox.ini b/tox.ini index f63a612d..9d276c2c 100644 --- a/tox.ini +++ b/tox.ini @@ -79,10 +79,11 @@ deps = {[testenv:py26-sa7-mysql-ev]deps} deps = -r{toxinidir}/requirements.txt -r{toxinidir}/optional-requirements.txt -r{toxinidir}/test-requirements.txt + doc8>=0.3.4 commands = python setup.py testr --slowest --testr-args='{posargs}' sphinx-build -b doctest doc/source doc/build - python tools/check_doc.py doc/source + doc8 doc/source [testenv:py33] deps = {[testenv]deps} From 7d095ac60070369131b9ed72e9870d781377cf88 Mon Sep 17 00:00:00 2001 From: "Ivan A. Melnikov" Date: Wed, 18 Jun 2014 11:46:56 +0400 Subject: [PATCH 133/188] Simplify identity transition handling for tasks and retries TaskAction.change_state and RetryAction.change_state should just work for any transition. We still ignore real identity transitions -- those which don't change state, result and progress of the atom -- in order order to avoid unnecessary notifications. 
Change-Id: Ib5cbc4c04abbd23f4204b2b6145c0fedd2fd811b --- .../engines/action_engine/retry_action.py | 20 ++++------- taskflow/engines/action_engine/task_action.py | 36 +++++++++++++------ 2 files changed, 32 insertions(+), 24 deletions(-) diff --git a/taskflow/engines/action_engine/retry_action.py b/taskflow/engines/action_engine/retry_action.py index a1ca3abb..afdfb456 100644 --- a/taskflow/engines/action_engine/retry_action.py +++ b/taskflow/engines/action_engine/retry_action.py @@ -17,7 +17,6 @@ import logging from taskflow.engines.action_engine import executor as ex -from taskflow import exceptions from taskflow import states from taskflow.utils import async_utils from taskflow.utils import misc @@ -39,27 +38,25 @@ class RetryAction(object): return kwargs def change_state(self, retry, state, result=None): - old_state = self._storage.get_atom_state(retry.name) - if old_state == state: - return state != states.PENDING if state in SAVE_RESULT_STATES: self._storage.save(retry.name, result, state) elif state == states.REVERTED: self._storage.cleanup_retry_history(retry.name, state) else: + old_state = self._storage.get_atom_state(retry.name) + if state == old_state: + # NOTE(imelnikov): nothing really changed, so we should not + # write anything to storage and run notifications + return self._storage.set_atom_state(retry.name, state) retry_uuid = self._storage.get_atom_uuid(retry.name) details = dict(retry_name=retry.name, retry_uuid=retry_uuid, result=result) self._notifier.notify(state, details) - return True def execute(self, retry): - if not self.change_state(retry, states.RUNNING): - raise exceptions.InvalidState("Retry controller %s is in invalid " - "state and can't be executed" % - retry.name) + self.change_state(retry, states.RUNNING) kwargs = self._get_retry_args(retry) try: result = retry.execute(**kwargs) @@ -71,10 +68,7 @@ class RetryAction(object): return async_utils.make_completed_future((retry, ex.EXECUTED, result)) def revert(self, retry): - if not 
self.change_state(retry, states.REVERTING): - raise exceptions.InvalidState("Retry controller %s is in invalid " - "state and can't be reverted" % - retry.name) + self.change_state(retry, states.REVERTING) kwargs = self._get_retry_args(retry) kwargs['flow_failures'] = self._storage.get_failures() try: diff --git a/taskflow/engines/action_engine/task_action.py b/taskflow/engines/action_engine/task_action.py index c0d1daa5..a07ded79 100644 --- a/taskflow/engines/action_engine/task_action.py +++ b/taskflow/engines/action_engine/task_action.py @@ -16,7 +16,6 @@ import logging -from taskflow import exceptions from taskflow import states from taskflow.utils import misc @@ -32,10 +31,30 @@ class TaskAction(object): self._task_executor = task_executor self._notifier = notifier - def change_state(self, task, state, result=None, progress=None): + def _is_identity_transition(self, state, task, progress): + if state in SAVE_RESULT_STATES: + # saving result is never identity transition + return False old_state = self._storage.get_atom_state(task.name) - if old_state == state: - return state != states.PENDING + if state != old_state: + # changing state is not identity transition by definition + return False + # NOTE(imelnikov): last thing to check is that the progress has + # changed, which means progress is not None and is different from + # what is stored in the database. 
+ if progress is None: + return False + old_progress = self._storage.get_task_progress(task.name) + if old_progress != progress: + return False + return True + + def change_state(self, task, state, result=None, progress=None): + if self._is_identity_transition(state, task, progress): + # NOTE(imelnikov): ignore identity transitions in order + # to avoid extra write to storage backend and, what's + # more important, extra notifications + return if state in SAVE_RESULT_STATES: self._storage.save(task.name, result, state) else: @@ -49,7 +68,6 @@ class TaskAction(object): self._notifier.notify(state, details) if progress is not None: task.update_progress(progress) - return True def _on_update_progress(self, task, event_data, progress, **kwargs): """Should be called when task updates its progress.""" @@ -62,9 +80,7 @@ class TaskAction(object): task, progress) def schedule_execution(self, task): - if not self.change_state(task, states.RUNNING, progress=0.0): - raise exceptions.InvalidState("Task %s is in invalid state and" - " can't be executed" % task.name) + self.change_state(task, states.RUNNING, progress=0.0) kwargs = self._storage.fetch_mapped_args(task.rebind, atom_name=task.name) task_uuid = self._storage.get_atom_uuid(task.name) @@ -79,9 +95,7 @@ class TaskAction(object): result=result, progress=1.0) def schedule_reversion(self, task): - if not self.change_state(task, states.REVERTING, progress=0.0): - raise exceptions.InvalidState("Task %s is in invalid state and" - " can't be reverted" % task.name) + self.change_state(task, states.REVERTING, progress=0.0) kwargs = self._storage.fetch_mapped_args(task.rebind, atom_name=task.name) task_uuid = self._storage.get_atom_uuid(task.name) From d59fb4d42865a64f37680d26a593e20f74756ce1 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Fri, 20 Jun 2014 19:23:45 -0700 Subject: [PATCH 134/188] Create a top level time type Create a time module under types and place the stop watch class and timeout classes there so that we can 
have a single location where generic types that do things with time can be located. Part of blueprint top-level-types Change-Id: I8eb7e897c0354b20a7fa0d061434006e775d5e94 --- taskflow/conductors/single_threaded.py | 6 +- taskflow/engines/worker_based/executor.py | 3 +- taskflow/engines/worker_based/protocol.py | 3 +- taskflow/jobs/backends/impl_zookeeper.py | 6 +- taskflow/listeners/timing.py | 5 +- taskflow/tests/unit/test_utils.py | 13 +-- taskflow/types/time.py | 125 ++++++++++++++++++++++ taskflow/utils/misc.py | 107 ------------------ 8 files changed, 144 insertions(+), 124 deletions(-) create mode 100644 taskflow/types/time.py diff --git a/taskflow/conductors/single_threaded.py b/taskflow/conductors/single_threaded.py index bfd4d3d4..d13d666c 100644 --- a/taskflow/conductors/single_threaded.py +++ b/taskflow/conductors/single_threaded.py @@ -20,8 +20,8 @@ import six from taskflow.conductors import base from taskflow import exceptions as excp from taskflow.listeners import logging as logging_listener +from taskflow.types import time as tt from taskflow.utils import lock_utils -from taskflow.utils import misc LOG = logging.getLogger(__name__) WAIT_TIMEOUT = 0.5 @@ -58,8 +58,8 @@ class SingleThreadedConductor(base.Conductor): if wait_timeout is None: wait_timeout = WAIT_TIMEOUT if isinstance(wait_timeout, (int, float) + six.string_types): - self._wait_timeout = misc.Timeout(float(wait_timeout)) - elif isinstance(wait_timeout, misc.Timeout): + self._wait_timeout = tt.Timeout(float(wait_timeout)) + elif isinstance(wait_timeout, tt.Timeout): self._wait_timeout = wait_timeout else: raise ValueError("Invalid timeout literal: %s" % (wait_timeout)) diff --git a/taskflow/engines/worker_based/executor.py b/taskflow/engines/worker_based/executor.py index 37ea8bd7..1053c481 100644 --- a/taskflow/engines/worker_based/executor.py +++ b/taskflow/engines/worker_based/executor.py @@ -23,6 +23,7 @@ from taskflow.engines.worker_based import cache from 
taskflow.engines.worker_based import protocol as pr from taskflow.engines.worker_based import proxy from taskflow import exceptions as exc +from taskflow.types import time as tt from taskflow.utils import async_utils from taskflow.utils import misc from taskflow.utils import reflection @@ -77,7 +78,7 @@ class WorkerTaskExecutor(executor.TaskExecutorBase): self._proxy = proxy.Proxy(uuid, exchange, self._on_message, self._on_wait, **kwargs) self._proxy_thread = None - self._periodic = PeriodicWorker(misc.Timeout(pr.NOTIFY_PERIOD), + self._periodic = PeriodicWorker(tt.Timeout(pr.NOTIFY_PERIOD), [self._notify_topics]) self._periodic_thread = None diff --git a/taskflow/engines/worker_based/protocol.py b/taskflow/engines/worker_based/protocol.py index 40a227a8..d8cab533 100644 --- a/taskflow/engines/worker_based/protocol.py +++ b/taskflow/engines/worker_based/protocol.py @@ -21,6 +21,7 @@ import six from concurrent import futures from taskflow.engines.action_engine import executor +from taskflow.types import time from taskflow.utils import misc from taskflow.utils import reflection @@ -103,7 +104,7 @@ class Request(Message): self._arguments = arguments self._progress_callback = progress_callback self._kwargs = kwargs - self._watch = misc.StopWatch(duration=timeout).start() + self._watch = time.StopWatch(duration=timeout).start() self._state = WAITING self.result = futures.Future() diff --git a/taskflow/jobs/backends/impl_zookeeper.py b/taskflow/jobs/backends/impl_zookeeper.py index fd73a097..be305a12 100644 --- a/taskflow/jobs/backends/impl_zookeeper.py +++ b/taskflow/jobs/backends/impl_zookeeper.py @@ -33,6 +33,7 @@ from taskflow.openstack.common import excutils from taskflow.openstack.common import jsonutils from taskflow.openstack.common import uuidutils from taskflow import states +from taskflow.types import time from taskflow.utils import kazoo_utils from taskflow.utils import lock_utils from taskflow.utils import misc @@ -586,13 +587,12 @@ class 
ZookeeperJobBoard(jobboard.NotifyingJobBoard): # Wait until timeout expires (or forever) for jobs to appear. watch = None if timeout is not None: - watch = misc.StopWatch(duration=float(timeout)) - watch.start() + watch = time.StopWatch(duration=float(timeout)).start() self._job_cond.acquire() try: while True: if not self._known_jobs: - if watch and watch.expired(): + if watch is not None and watch.expired(): raise excp.NotFound("Expired waiting for jobs to" " arrive; waited %s seconds" % watch.elapsed()) diff --git a/taskflow/listeners/timing.py b/taskflow/listeners/timing.py index 15ebe82e..87240a36 100644 --- a/taskflow/listeners/timing.py +++ b/taskflow/listeners/timing.py @@ -21,7 +21,7 @@ import logging from taskflow import exceptions as exc from taskflow.listeners import base from taskflow import states -from taskflow.utils import misc +from taskflow.types import time STARTING_STATES = (states.RUNNING, states.REVERTING) FINISHED_STATES = base.FINISH_STATES + (states.REVERTED,) @@ -64,8 +64,7 @@ class TimingListener(base.ListenerBase): if state == states.PENDING: self._timers.pop(task_name, None) elif state in STARTING_STATES: - self._timers[task_name] = misc.StopWatch() - self._timers[task_name].start() + self._timers[task_name] = time.StopWatch().start() elif state in FINISHED_STATES: if task_name in self._timers: self._record_ending(self._timers[task_name], task_name) diff --git a/taskflow/tests/unit/test_utils.py b/taskflow/tests/unit/test_utils.py index 1d1ea336..2c2dac2d 100644 --- a/taskflow/tests/unit/test_utils.py +++ b/taskflow/tests/unit/test_utils.py @@ -23,6 +23,7 @@ import time from taskflow import states from taskflow import test from taskflow.tests import utils as test_utils +from taskflow.types import time as tt from taskflow.utils import lock_utils from taskflow.utils import misc from taskflow.utils import reflection @@ -496,30 +497,30 @@ class IsValidAttributeNameTestCase(test.TestCase): class StopWatchUtilsTest(test.TestCase): def 
test_no_states(self): - watch = misc.StopWatch() + watch = tt.StopWatch() self.assertRaises(RuntimeError, watch.stop) self.assertRaises(RuntimeError, watch.resume) def test_expiry(self): - watch = misc.StopWatch(0.1) + watch = tt.StopWatch(0.1) watch.start() time.sleep(0.2) self.assertTrue(watch.expired()) def test_no_expiry(self): - watch = misc.StopWatch(0.1) + watch = tt.StopWatch(0.1) watch.start() self.assertFalse(watch.expired()) def test_elapsed(self): - watch = misc.StopWatch() + watch = tt.StopWatch() watch.start() time.sleep(0.2) # NOTE(harlowja): Allow for a slight variation by using 0.19. self.assertGreaterEqual(0.19, watch.elapsed()) def test_pause_resume(self): - watch = misc.StopWatch() + watch = tt.StopWatch() watch.start() time.sleep(0.05) watch.stop() @@ -530,7 +531,7 @@ class StopWatchUtilsTest(test.TestCase): self.assertNotEqual(elapsed, watch.elapsed()) def test_context_manager(self): - with misc.StopWatch() as watch: + with tt.StopWatch() as watch: time.sleep(0.05) self.assertGreater(0.01, watch.elapsed()) diff --git a/taskflow/types/time.py b/taskflow/types/time.py new file mode 100644 index 00000000..cd822ae7 --- /dev/null +++ b/taskflow/types/time.py @@ -0,0 +1,125 @@ +# -*- coding: utf-8 -*- + +# Copyright (C) 2014 Yahoo! Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import threading + +from taskflow.utils import misc + + +class Timeout(object): + """An object which represents a timeout. 
+ + This object has the ability to be interrupted before the actual timeout + is reached. + """ + def __init__(self, timeout): + if timeout < 0: + raise ValueError("Timeout must be >= 0 and not %s" % (timeout)) + self._timeout = timeout + self._event = threading.Event() + + def interrupt(self): + self._event.set() + + def is_stopped(self): + return self._event.is_set() + + def wait(self): + self._event.wait(self._timeout) + + def reset(self): + self._event.clear() + + +class StopWatch(object): + """A simple timer/stopwatch helper class. + + Inspired by: apache-commons-lang java stopwatch. + + Not thread-safe. + """ + _STARTED = 'STARTED' + _STOPPED = 'STOPPED' + + def __init__(self, duration=None): + self._duration = duration + self._started_at = None + self._stopped_at = None + self._state = None + + def start(self): + if self._state == self._STARTED: + return self + self._started_at = misc.wallclock() + self._stopped_at = None + self._state = self._STARTED + return self + + def elapsed(self): + if self._state == self._STOPPED: + return float(self._stopped_at - self._started_at) + elif self._state == self._STARTED: + return float(misc.wallclock() - self._started_at) + else: + raise RuntimeError("Can not get the elapsed time of an invalid" + " stopwatch") + + def __enter__(self): + self.start() + return self + + def __exit__(self, type, value, traceback): + try: + self.stop() + except RuntimeError: + pass + # NOTE(harlowja): don't silence the exception. 
+ return False + + def leftover(self): + if self._duration is None: + raise RuntimeError("Can not get the leftover time of a watch that" + " has no duration") + if self._state != self._STARTED: + raise RuntimeError("Can not get the leftover time of a stopwatch" + " that has not been started") + end_time = self._started_at + self._duration + return max(0.0, end_time - misc.wallclock()) + + def expired(self): + if self._duration is None: + return False + if self.elapsed() > self._duration: + return True + return False + + def resume(self): + if self._state == self._STOPPED: + self._state = self._STARTED + return self + else: + raise RuntimeError("Can not resume a stopwatch that has not been" + " stopped") + + def stop(self): + if self._state == self._STOPPED: + return self + if self._state != self._STARTED: + raise RuntimeError("Can not stop a stopwatch that has not been" + " started") + self._stopped_at = misc.wallclock() + self._state = self._STOPPED + return self diff --git a/taskflow/utils/misc.py b/taskflow/utils/misc.py index eea56d6d..65f21e24 100644 --- a/taskflow/utils/misc.py +++ b/taskflow/utils/misc.py @@ -27,7 +27,6 @@ import os import re import string import sys -import threading import time import traceback @@ -360,31 +359,6 @@ class AttrDict(dict): self[name] = value -class Timeout(object): - """An object which represents a timeout. - - This object has the ability to be interrupted before the actual timeout - is reached. - """ - def __init__(self, timeout): - if timeout < 0: - raise ValueError("Timeout must be >= 0 and not %s" % (timeout)) - self._timeout = timeout - self._event = threading.Event() - - def interrupt(self): - self._event.set() - - def is_stopped(self): - return self._event.is_set() - - def wait(self): - self._event.wait(self._timeout) - - def reset(self): - self._event.clear() - - class ExponentialBackoff(object): """An iterable object that will yield back an exponential delay sequence. 
@@ -444,87 +418,6 @@ def ensure_tree(path): raise -class StopWatch(object): - """A simple timer/stopwatch helper class. - - Inspired by: apache-commons-lang java stopwatch. - - Not thread-safe. - """ - _STARTED = 'STARTED' - _STOPPED = 'STOPPED' - - def __init__(self, duration=None): - self._duration = duration - self._started_at = None - self._stopped_at = None - self._state = None - - def start(self): - if self._state == self._STARTED: - return self - self._started_at = wallclock() - self._stopped_at = None - self._state = self._STARTED - return self - - def elapsed(self): - if self._state == self._STOPPED: - return float(self._stopped_at - self._started_at) - elif self._state == self._STARTED: - return float(wallclock() - self._started_at) - else: - raise RuntimeError("Can not get the elapsed time of an invalid" - " stopwatch") - - def __enter__(self): - self.start() - return self - - def __exit__(self, type, value, traceback): - try: - self.stop() - except RuntimeError: - pass - # NOTE(harlowja): don't silence the exception. 
- return False - - def leftover(self): - if self._duration is None: - raise RuntimeError("Can not get the leftover time of a watch that" - " has no duration") - if self._state != self._STARTED: - raise RuntimeError("Can not get the leftover time of a stopwatch" - " that has not been started") - end_time = self._started_at + self._duration - return max(0.0, end_time - wallclock()) - - def expired(self): - if self._duration is None: - return False - if self.elapsed() > self._duration: - return True - return False - - def resume(self): - if self._state == self._STOPPED: - self._state = self._STARTED - return self - else: - raise RuntimeError("Can not resume a stopwatch that has not been" - " stopped") - - def stop(self): - if self._state == self._STOPPED: - return self - if self._state != self._STARTED: - raise RuntimeError("Can not stop a stopwatch that has not been" - " started") - self._stopped_at = wallclock() - self._state = self._STOPPED - return self - - class Notifier(object): """A notification helper class. From 162b9ca4a623ee29a90bc1ba6270f68ed369e799 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Sun, 8 Jun 2014 23:05:40 -0700 Subject: [PATCH 135/188] Clarify locked decorator is for instance methods Make it clear when reading the lock utils code that the locked decorator will only currently work with instance methods by removing the usage of 'args[0]' and renaming this variable to 'self' and adjusting the docstring. Once the wrapt module is approved and we are able to use it in taskflow, we can refactor the function to be correct with regards to classmethods. 
Change-Id: Ic33eb9e47679d2105654634469dd6d305d38b2e0 --- taskflow/utils/lock_utils.py | 15 +++++++++++---- 1 file changed, 11 insertions(+), 4 deletions(-) diff --git a/taskflow/utils/lock_utils.py b/taskflow/utils/lock_utils.py index 954e606b..dbc0b778 100644 --- a/taskflow/utils/lock_utils.py +++ b/taskflow/utils/lock_utils.py @@ -57,19 +57,26 @@ def locked(*args, **kwargs): automatically releasing that lock on exit. NOTE(harlowja): if no attribute is provided then by default the attribute - named '_lock' is looked for. + named '_lock' is looked for in the instance object this decorator is + attached to. + + NOTE(harlowja): when we get the wrapt module approved we can address the + correctness of this decorator with regards to classmethods, to keep sanity + and correctness it is recommended to avoid using this on classmethods, once + https://review.openstack.org/#/c/94754/ is merged this will be refactored + and that use-case can be provided in a correct manner. """ def decorator(f): attr_name = kwargs.get('lock', '_lock') @six.wraps(f) - def wrapper(*args, **kwargs): - lock = getattr(args[0], attr_name) + def wrapper(self, *args, **kwargs): + lock = getattr(self, attr_name) if isinstance(lock, (tuple, list)): lock = MultiLock(locks=list(lock)) with lock: - return f(*args, **kwargs) + return f(self, *args, **kwargs) return wrapper From 783eeb5f0a4bf77b513563e1017742fe8ad9f077 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Mon, 16 Jun 2014 21:25:49 -0700 Subject: [PATCH 136/188] Fix traces left in zookeeper We currently are leaving the root node in zookeeper whenever this test is ran, we should instead be removing the full directory and any children to avoid leaving test data behind in zookeeper (aka, cleanup our dirty laundry). 
Fixes bug 1330807 Change-Id: I56d9dabd9926463506e1710ddf0a6c4831d5dc57 --- .../unit/persistence/test_zk_persistence.py | 33 ++++++++++++++----- 1 file changed, 25 insertions(+), 8 deletions(-) diff --git a/taskflow/tests/unit/persistence/test_zk_persistence.py b/taskflow/tests/unit/persistence/test_zk_persistence.py index 414db09b..354c2a71 100644 --- a/taskflow/tests/unit/persistence/test_zk_persistence.py +++ b/taskflow/tests/unit/persistence/test_zk_persistence.py @@ -16,13 +16,17 @@ import contextlib +from kazoo import exceptions as kazoo_exceptions import testtools from zake import fake_client +from taskflow import exceptions as exc from taskflow.openstack.common import uuidutils from taskflow.persistence import backends from taskflow.persistence.backends import impl_zookeeper from taskflow import test +from taskflow.utils import kazoo_utils + from taskflow.tests.unit.persistence import base from taskflow.tests import utils as test_utils @@ -31,15 +35,27 @@ _ZOOKEEPER_AVAILABLE = test_utils.zookeeper_available( impl_zookeeper.MIN_ZK_VERSION) +def clean_backend(backend, conf): + with contextlib.closing(backend.get_connection()) as conn: + try: + conn.clear_all() + except exc.NotFound: + pass + client = kazoo_utils.make_client(conf) + client.start() + try: + client.delete(conf['path'], recursive=True) + except kazoo_exceptions.NoNodeError: + pass + finally: + kazoo_utils.finalize_client(client) + + @testtools.skipIf(not _ZOOKEEPER_AVAILABLE, 'zookeeper is not available') class ZkPersistenceTest(test.TestCase, base.PersistenceTestMixin): def _get_connection(self): return self.backend.get_connection() - def _clear_all(self): - with contextlib.closing(self._get_connection()) as conn: - conn.clear_all() - def setUp(self): super(ZkPersistenceTest, self).setUp() conf = test_utils.ZK_TEST_CONFIG.copy() @@ -48,13 +64,14 @@ class ZkPersistenceTest(test.TestCase, base.PersistenceTestMixin): conf['path'] = TEST_PATH_TPL % (uuidutils.generate_uuid()) try: self.backend = 
impl_zookeeper.ZkBackend(conf) - self.addCleanup(self.backend.close) except Exception as e: self.skipTest("Failed creating backend created from configuration" " %s due to %s" % (conf, e)) - with contextlib.closing(self._get_connection()) as conn: - conn.upgrade() - self.addCleanup(self._clear_all) + else: + self.addCleanup(self.backend.close) + self.addCleanup(clean_backend, self.backend, conf) + with contextlib.closing(self.backend.get_connection()) as conn: + conn.upgrade() def test_zk_persistence_entry_point(self): conf = {'connection': 'zookeeper:'} From 6dca61b63d52bf4a1a1a33451f165e04a638eec9 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Sat, 28 Jun 2014 22:40:02 -0700 Subject: [PATCH 137/188] Use `flow_uuid` and `flow_name` from storage Instead of fetching these attributes from the flow or flow detail objects before notifying just use the same attributes provided from storage which is the best source of these two attributes. Change-Id: I383e152758f177de2aac87425790ee1a82c3ca6b --- taskflow/engines/action_engine/engine.py | 20 ++++++-------------- 1 file changed, 6 insertions(+), 14 deletions(-) diff --git a/taskflow/engines/action_engine/engine.py b/taskflow/engines/action_engine/engine.py index 8a370516..fefcef0f 100644 --- a/taskflow/engines/action_engine/engine.py +++ b/taskflow/engines/action_engine/engine.py @@ -144,20 +144,12 @@ class ActionEngine(base.EngineBase): if not states.check_flow_transition(old_state, state): return self.storage.set_flow_state(state) - try: - flow_uuid = self._flow.uuid - except AttributeError: - # NOTE(harlowja): if the flow was just a single task, then it - # will not itself have a uuid, but the constructed flow_detail - # will. 
- if self._flow_detail is not None: - flow_uuid = self._flow_detail.uuid - else: - flow_uuid = None - details = dict(engine=self, - flow_name=self._flow.name, - flow_uuid=flow_uuid, - old_state=old_state) + details = { + 'engine': self, + 'flow_name': self.storage.flow_name, + 'flow_uuid': self.storage.flow_uuid, + 'old_state': old_state, + } self.notifier.notify(state, details) def _ensure_storage(self): From 5d74d72257f17fa8ed0f6b3202544dc35819d787 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Fri, 6 Jun 2014 20:55:32 -0700 Subject: [PATCH 138/188] Make the expiring cache a top level cache type Create a cache module type and adjust a few of its methods to be more pythonic and then switch out the work_based engines usage of it and adjust its tests methods with adjusted methods using the new cache types functionality. Part of blueprint top-level-types Change-Id: I75c4b7db6dd989ef328e9e14d4b00266b1c97a9f --- taskflow/engines/worker_based/cache.py | 48 ++---------- taskflow/engines/worker_based/executor.py | 13 ++-- .../tests/unit/worker_based/test_executor.py | 43 ++++++----- taskflow/types/cache.py | 73 +++++++++++++++++++ 4 files changed, 107 insertions(+), 70 deletions(-) create mode 100644 taskflow/types/cache.py diff --git a/taskflow/engines/worker_based/cache.py b/taskflow/engines/worker_based/cache.py index f92bf23e..9da7f12c 100644 --- a/taskflow/engines/worker_based/cache.py +++ b/taskflow/engines/worker_based/cache.py @@ -14,54 +14,16 @@ # License for the specific language governing permissions and limitations # under the License. 
-import logging import random import six from taskflow.engines.worker_based import protocol as pr -from taskflow.utils import lock_utils as lu - -LOG = logging.getLogger(__name__) +from taskflow.types import cache as base -class Cache(object): - """Represents thread-safe cache.""" - - def __init__(self): - self._data = {} - self._lock = lu.ReaderWriterLock() - - def get(self, key): - """Retrieve a value from the cache.""" - with self._lock.read_lock(): - return self._data.get(key) - - def set(self, key, value): - """Set a value in the cache.""" - with self._lock.write_lock(): - self._data[key] = value - LOG.debug("Cache updated. Capacity: %s", len(self._data)) - - def delete(self, key): - """Delete a value from the cache.""" - with self._lock.write_lock(): - self._data.pop(key, None) - - def cleanup(self, on_expired_callback=None): - """Delete out-dated values from the cache.""" - with self._lock.write_lock(): - expired_values = [(k, v) for k, v in six.iteritems(self._data) - if v.expired] - for (k, _v) in expired_values: - self._data.pop(k, None) - if on_expired_callback: - for (_k, v) in expired_values: - on_expired_callback(v) - - -class RequestsCache(Cache): - """Represents thread-safe requests cache.""" +class RequestsCache(base.ExpiringCache): + """Represents a thread-safe requests cache.""" def get_waiting_requests(self, tasks): """Get list of waiting requests by tasks.""" @@ -73,8 +35,8 @@ class RequestsCache(Cache): return waiting_requests -class WorkersCache(Cache): - """Represents thread-safe workers cache.""" +class WorkersCache(base.ExpiringCache): + """Represents a thread-safe workers cache.""" def get_topic_by_task(self, task): """Get topic for a given task.""" diff --git a/taskflow/engines/worker_based/executor.py b/taskflow/engines/worker_based/executor.py index 37ea8bd7..35febd40 100644 --- a/taskflow/engines/worker_based/executor.py +++ b/taskflow/engines/worker_based/executor.py @@ -110,7 +110,7 @@ class 
WorkerTaskExecutor(executor.TaskExecutorBase): tasks = notify['tasks'] # add worker info to the cache - self._workers_cache.set(topic, tasks) + self._workers_cache[topic] = tasks # publish waiting requests for request in self._requests_cache.get_waiting_requests(tasks): @@ -137,7 +137,7 @@ class WorkerTaskExecutor(executor.TaskExecutorBase): # NOTE(imelnikov): request should not be in cache when # another thread can see its result and schedule another # request with same uuid; so we remove it, then set result - self._requests_cache.delete(request.uuid) + del self._requests_cache[request.uuid] request.set_result(**response.data) else: LOG.warning("Unexpected response status: '%s'", @@ -175,10 +175,10 @@ class WorkerTaskExecutor(executor.TaskExecutorBase): # processing thread get list of waiting requests and publish it # before it is published here, so it wouldn't be published twice. request.set_pending() - self._requests_cache.set(request.uuid, request) + self._requests_cache[request.uuid] = request self._publish_request(request, topic) else: - self._requests_cache.set(request.uuid, request) + self._requests_cache[request.uuid] = request return request.result @@ -191,9 +191,8 @@ class WorkerTaskExecutor(executor.TaskExecutorBase): correlation_id=request.uuid) except Exception: with misc.capture_failure() as failure: - LOG.exception("Failed to submit the '%s' request." 
% - request) - self._requests_cache.delete(request.uuid) + LOG.exception("Failed to submit the '%s' request.", request) + del self._requests_cache[request.uuid] request.set_result(failure) def _notify_topics(self): diff --git a/taskflow/tests/unit/worker_based/test_executor.py b/taskflow/tests/unit/worker_based/test_executor.py index 75092003..7faee1b7 100644 --- a/taskflow/tests/unit/worker_based/test_executor.py +++ b/taskflow/tests/unit/worker_based/test_executor.py @@ -93,7 +93,7 @@ class TestWorkerTaskExecutor(test.MockTestCase): def test_on_message_response_state_running(self): response = pr.Response(pr.RUNNING) ex = self.executor() - ex._requests_cache.set(self.task_uuid, self.request_inst_mock) + ex._requests_cache[self.task_uuid] = self.request_inst_mock ex._on_message(response.to_dict(), self.message_mock) self.assertEqual(self.request_inst_mock.mock_calls, @@ -103,7 +103,7 @@ class TestWorkerTaskExecutor(test.MockTestCase): def test_on_message_response_state_progress(self): response = pr.Response(pr.PROGRESS, progress=1.0) ex = self.executor() - ex._requests_cache.set(self.task_uuid, self.request_inst_mock) + ex._requests_cache[self.task_uuid] = self.request_inst_mock ex._on_message(response.to_dict(), self.message_mock) self.assertEqual(self.request_inst_mock.mock_calls, @@ -115,10 +115,10 @@ class TestWorkerTaskExecutor(test.MockTestCase): failure_dict = failure.to_dict() response = pr.Response(pr.FAILURE, result=failure_dict) ex = self.executor() - ex._requests_cache.set(self.task_uuid, self.request_inst_mock) + ex._requests_cache[self.task_uuid] = self.request_inst_mock ex._on_message(response.to_dict(), self.message_mock) - self.assertEqual(len(ex._requests_cache._data), 0) + self.assertEqual(len(ex._requests_cache), 0) self.assertEqual(self.request_inst_mock.mock_calls, [ mock.call.set_result(result=utils.FailureMatcher(failure)) ]) @@ -128,7 +128,7 @@ class TestWorkerTaskExecutor(test.MockTestCase): response = pr.Response(pr.SUCCESS, 
result=self.task_result, event='executed') ex = self.executor() - ex._requests_cache.set(self.task_uuid, self.request_inst_mock) + ex._requests_cache[self.task_uuid] = self.request_inst_mock ex._on_message(response.to_dict(), self.message_mock) self.assertEqual(self.request_inst_mock.mock_calls, @@ -139,7 +139,7 @@ class TestWorkerTaskExecutor(test.MockTestCase): def test_on_message_response_unknown_state(self): response = pr.Response(state='') ex = self.executor() - ex._requests_cache.set(self.task_uuid, self.request_inst_mock) + ex._requests_cache[self.task_uuid] = self.request_inst_mock ex._on_message(response.to_dict(), self.message_mock) self.assertEqual(self.request_inst_mock.mock_calls, []) @@ -149,7 +149,7 @@ class TestWorkerTaskExecutor(test.MockTestCase): self.message_mock.properties['correlation_id'] = '' response = pr.Response(pr.RUNNING) ex = self.executor() - ex._requests_cache.set(self.task_uuid, self.request_inst_mock) + ex._requests_cache[self.task_uuid] = self.request_inst_mock ex._on_message(response.to_dict(), self.message_mock) self.assertEqual(self.request_inst_mock.mock_calls, []) @@ -159,7 +159,7 @@ class TestWorkerTaskExecutor(test.MockTestCase): self.message_mock.properties = {'type': pr.RESPONSE} response = pr.Response(pr.RUNNING) ex = self.executor() - ex._requests_cache.set(self.task_uuid, self.request_inst_mock) + ex._requests_cache[self.task_uuid] = self.request_inst_mock ex._on_message(response.to_dict(), self.message_mock) self.assertEqual(self.request_inst_mock.mock_calls, []) @@ -188,32 +188,35 @@ class TestWorkerTaskExecutor(test.MockTestCase): def test_on_wait_task_not_expired(self): ex = self.executor() - ex._requests_cache.set(self.task_uuid, self.request_inst_mock) + ex._requests_cache[self.task_uuid] = self.request_inst_mock - self.assertEqual(len(ex._requests_cache._data), 1) + self.assertEqual(len(ex._requests_cache), 1) ex._on_wait() - self.assertEqual(len(ex._requests_cache._data), 1) + 
self.assertEqual(len(ex._requests_cache), 1) def test_on_wait_task_expired(self): self.request_inst_mock.expired = True ex = self.executor() - ex._requests_cache.set(self.task_uuid, self.request_inst_mock) + ex._requests_cache[self.task_uuid] = self.request_inst_mock - self.assertEqual(len(ex._requests_cache._data), 1) + self.assertEqual(len(ex._requests_cache), 1) ex._on_wait() - self.assertEqual(len(ex._requests_cache._data), 0) + self.assertEqual(len(ex._requests_cache), 0) def test_remove_task_non_existent(self): ex = self.executor() - ex._requests_cache.set(self.task_uuid, self.request_inst_mock) + ex._requests_cache[self.task_uuid] = self.request_inst_mock - self.assertEqual(len(ex._requests_cache._data), 1) - ex._requests_cache.delete(self.task_uuid) - self.assertEqual(len(ex._requests_cache._data), 0) + self.assertEqual(len(ex._requests_cache), 1) + del ex._requests_cache[self.task_uuid] + self.assertEqual(len(ex._requests_cache), 0) # delete non-existent - ex._requests_cache.delete(self.task_uuid) - self.assertEqual(len(ex._requests_cache._data), 0) + try: + del ex._requests_cache[self.task_uuid] + except KeyError: + pass + self.assertEqual(len(ex._requests_cache), 0) def test_execute_task(self): self.message_mock.properties['type'] = pr.NOTIFY diff --git a/taskflow/types/cache.py b/taskflow/types/cache.py new file mode 100644 index 00000000..72214fed --- /dev/null +++ b/taskflow/types/cache.py @@ -0,0 +1,73 @@ +# -*- coding: utf-8 -*- + +# Copyright (C) 2014 Yahoo! Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. + +import six + +from taskflow.utils import lock_utils as lu +from taskflow.utils import reflection + + +class ExpiringCache(object): + """Represents a thread-safe time-based expiring cache. + + NOTE(harlowja): the values in this cache must have a expired attribute that + can be used to determine if the key and associated value has expired or if + it has not. + """ + + def __init__(self): + self._data = {} + self._lock = lu.ReaderWriterLock() + + def __setitem__(self, key, value): + """Set a value in the cache.""" + with self._lock.write_lock(): + self._data[key] = value + + def __len__(self): + """Returns how many items are in this cache.""" + with self._lock.read_lock(): + return len(self._data) + + def get(self, key, default=None): + """Retrieve a value from the cache (returns default if not found).""" + with self._lock.read_lock(): + return self._data.get(key, default) + + def __getitem__(self, key): + """Retrieve a value from the cache.""" + with self._lock.read_lock(): + return self._data[key] + + def __delitem__(self, key): + """Delete a key & value from the cache.""" + with self._lock.write_lock(): + del self._data[key] + + def cleanup(self, on_expired_callback=None): + """Delete out-dated keys & values from the cache.""" + with self._lock.write_lock(): + expired_values = [(k, v) for k, v in six.iteritems(self._data) + if v.expired] + for (k, _v) in expired_values: + del self._data[k] + if on_expired_callback: + arg_c = len(reflection.get_callable_args(on_expired_callback)) + for (k, v) in expired_values: + if arg_c == 2: + on_expired_callback(k, v) + else: + on_expired_callback(v) From c2ec0b2e4980dee3d28134df34b1bc664c7a7cac Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Fri, 27 Jun 2014 16:13:24 -0700 Subject: [PATCH 139/188] Add a conductor considerations section Add a small section in the conductor docs about the cycling issue and give some 
resolutions that can be applied as well as link to the better solution which is garbage collection for jobs that are not working out. Also includes some tiny tweaks to other docs. Change-Id: I73e9f8f5a8888eaf967d62723f6ffb45b02887c9 --- doc/source/conductors.rst | 34 ++++++++++++++++++++++++++++- doc/source/img/conductor_cycle.png | Bin 0 -> 36940 bytes doc/source/jobs.rst | 6 ++--- doc/source/workers.rst | 4 ++-- 4 files changed, 38 insertions(+), 6 deletions(-) create mode 100644 doc/source/img/conductor_cycle.png diff --git a/doc/source/conductors.rst b/doc/source/conductors.rst index 4dfa3e33..25eb75c8 100644 --- a/doc/source/conductors.rst +++ b/doc/source/conductors.rst @@ -24,9 +24,41 @@ They are responsible for the following: .. note:: - They are inspired by and have similar responsiblities + They are inspired by and have similar responsibilities as `railroad conductors`_. +Considerations +============== + +Some usage considerations should be used when using a conductor to make sure +it's used in a safe and reliable manner. Eventually we hope to make these +non-issues but for now they are worth mentioning. + +Endless cycling +--------------- + +**What:** Jobs that fail (due to some type of internal error) on one conductor +will be abandoned by that conductor and then another conductor may experience +those same errors and abandon it (and repeat). This will create a job +abandonment cycle that will continue for as long as the job exists in an +claimable state. + +**Example:** + +.. image:: img/conductor_cycle.png + :scale: 70% + :alt: Conductor cycling + +**Alleviate by:** + +#. Forcefully delete jobs that have been failing continuously after a given + number of conductor attempts. This can be either done manually or + automatically via scripts (or other associated monitoring). +#. Resolve the internal error's cause (storage backend failure, other...). +#. Help implement `jobboard garbage binning`_. + +.. 
_jobboard garbage binning: https://blueprints.launchpad.net/taskflow/+spec/jobboard-garbage-bin + Interfaces ========== diff --git a/doc/source/img/conductor_cycle.png b/doc/source/img/conductor_cycle.png new file mode 100644 index 0000000000000000000000000000000000000000..b09d71b4ccfe1f92df25eaf477db80ec5773502d GIT binary patch literal 36940 zcmZ^~1ymft(=Uo!aCa7m1lQp1E(z}L?(PoD;u>VpU;%XUBx!a_|KkgqXS2Yb~drS{h1?T|03xo_69Ux#!ymJXU1 zmaHxU7TTaxUqa#f+_j;DxeB#$&rf>wv2n%*9$*MDq0zOWd$ues;ba~qVP;=Cg|f2B zWd|GX&sl=a+gF&nRq5R3--D8=cqM2MpirddcmxCK2=-u7H5z^qalFHg-lj2fM6z?> z7a|u`QSA)W+TWM$e`+7e%%IyXB-R-x>fp+cZ&zKDigi{J!;5 z_tFM=pvJ4vu&*slaKU40v9)hcg!!K%wsmS3m*6k63A=*F|bCmF) zxvd~V9(qKgz&tx#YY!u+fCxU-7CFRWCm|>aYFHXa5nq^q6Fp3Jl;N%p>+0lMwj0?m zU|CGqbXXQy7#mTzdNJWehzYVULVJeUIz!Ed_)fwphw?Zh=)-CCpgDg)4VDo_iR@Ki zgI*7@K?=RZ#48f31mc4+vc%^xNpz8XB*aud z6SpB5g#9f*oCI(oyg~`|-F+~{lr67q8Bf(BdYa@|dNs0rtX`rzv?UJd$FcnhSKy)xkYl3XD%PuTJRG~>G zA^srIPLxjkmg!fMc~g0|xlFUnuZjfwJeHW8Ay1(trgJ|^wdt!wJdt^@auS#az8WVP z>(yoafL$~>6(B_)>LJ=Dbq;JH)aP*Iu7PXDScq>AKIuo>qPz0$__&UH5sK04c4_KQ z*@Y5B8brR4L^0S+4@B05vkS$bOb5`|(`aH6BOM^UhGX?{sfCoQxKl-9`V8osGX6+h z6zh;9rBhD9m5ZbyP^V$cY7Ehov!ybi6Qb*+Oc~)fmvJT35j9k)q4@?dqQwA-sHQT| z;i|}!N{KHi^U1f0`iS~Sl&GVXJI-oXX13+&s&q;Q$Oo{>q;O{3WbBMor+iPgOFPN% zqg6>=$neTAV);gIL>VOiR{GKcqt+|hs}WLM_q9-ZtFccgLQX2XD9tq5G}|=BG;Evu zml|ycCQ{pq-(x}wPzihG0Mkd1xex6A=`gKcj*m;X=L2IG6 z$FN7bN4(cCs*cddX8g#hPE-<~65k%5bi=jZI^kI8H35x4ba6u-hcxq4oVu8WWW=$@!SHe4bbagR#av-^c z5habq*>z3hwu#0GRJ(m^Iu3mA&D-7#wzFJ0UJSzW=r;n{r_AdqcPw}#)Y z+&*!SeoysG_sotc6y+V&iT<$Y?#a*|9x#V>a=6tpeigriNbg7gV@9rQBTr|XaU5;D z_ya1zCBZB+q3e{(Rtu?XGK)D&ypgwa;p~@{v>n}(zrU3h5)M2E90t}Fg_d4ho&630h43jz@cd{zT`L}@*A$|5PPb8Z&RID1(;H5gU4QOXj7`)4je2!l zK9ZAAkZoYT$Kdo;>?zjV8nt@u!7Ze;M?HfM20tAnTp2uCyd=YAgx7~pNoEWf#F)v| z$l=NDz^I7*-1Mk7x(?Z`NCl=AGjdi;Rs69DZ+vW=5vMhYt@~uaDMa7N+Rs(YCeb!( zGk5+4r$}@vS&_j_$Fe;$@lEkgHl8MjHcaP3gKhn~m5jx=QMKP#`?tHW9x>C{Xy|oakZ^Z=5alA`(O5W_GfFB4b`?Q8B~T-ccx?8vD+%NtDut&odB(; 
zwcq#QXPqVYCC^@)o=9G+Ye#K2+F7PLhD(z*n+DZ;zpsm~o7=TcyS|#}xfo;EB$+2Q za+KG1%GcV;%#|I3k3)!)viCcW{hH76mWNgrx;GqxW}p*be<2E!Yq3%I&SN>3Ni|kJd?djo{))VmBRdC5CHlXr|{`K2^)1+>!ZsGjud`uIV$Hq_5Z{sL< zCXZ~Kj9Db--TyZJJ}bLn)JkC@c^oe{hwqzL;I;T*#8q4jK@`81yKr0M?sQ&FUCsJf zNJg#URjX^oO6T*-myGXLHGBpF0eJzPXOXW7^W^RM=|-o4Hb*}%96FDj8(!0QsuR25 z0}lQgJhQIfw*~AazUPs^#v%MYXX)~LH(V7u3Rr)gLae|r6#4D@`PKT>>~yDy9!2G2 zex*qD>)hjlWku)hX~BZ=L_@SyQe=w+>wDT$tx7-qM8(7>=|<^enQEDd_^0?a-ygTX zF2~|CW_BXF8Q+>7(x$$D3-o&Kv@#tTxU`!($r6!YFYn^}JAdzRV1Bqe<7IPES{A$h zy-Vai~<*m=v3uVCGA^+h?hU|%e6P1gXR9lL-@@z2ft1r1XChA%j&p6z~;ig z16rIR#~T7S=WH~!-L;hz`OTahSWL{FOf6Wv9h@Q5P*8&2{E$-z3wINMw}ZW-8^5;@ z)qg1XA?N>^S*Za3A#t}8qS97U1xPr#S^&6NxLDYzgi!zhfS{|nCBM3)^nbHMu7s$p z-QAt}Sy{cjyjZ+AS)5$0SlRjb_*mIESUEVDAr#DRK924t-pr0})c+;qf6I}ya5HnY zadx+Has>R7Yhvo;;Vwi)^{=D<`Tf^8-EA!YPfw0+|E(58LDqkFSlLhgjMig zE5C%3gR`rJn;S&FFt^};DE}XA|EHh-qF1$ca(9Bvf~$?0oTIyiD}>qIM) zf)a(2lN8hRhCack4%dvQe&r%kB-*DmxWwd z)4p)NH~=A(loXs?(c8t!iq%^^nI!~2P71kA-1r-KoP7PV;deZ%Rd!rzQ#wMsAX*cm z58I01indEU%nh^y{C^vIoVX|dHs?Hj{YR-(4*k3!Z~XO=`Vn~-^k3wRrO$~5ZF>?-_BrlmtAY?m_Vf2yLL4*CSmiBAZLoQ;%)be8KT$v$VN z=Ke_xOj%w6&FKA{OrhxYnOOYa6hak$hM zjqrX#RVClwi*v50GIt?PR04}4*e&kr3E6Fgds36QB`0JN0g-% zfw&l&HPIe95z#N~vg2p z!(qH6^tNwGWbr=nOkcMS!Qa}Y7~lWxsX|2u?e^fJ1F=GLp56pT7P*p3C2rAi1cgxP z!^AX|E}zIqMK$zUmB3)JV z-`nEz(mo3xRLmipvj_XyFDEB~iV4rSrJDi}_a<6mQm7t@GtlsAu>$((nyD2bTO9UT zBW6Ov5gH#_3%YjuFK)MTh~%!u>qIaMAmihFFfT7ZYY<7*TJX(c6SU8NSkz&aX9A&E z%muh_j&sVyW>?%Y?>>vV18BN#m2QRTo5egjx@LN7jbf4G+h5 zt?))W?ZZmn#Wjstf<4bN|}7lJx82E1BP?^5ek`ju zk=O^>6)baMHg&)t)*M{mR%`0#KcenRYF1`3Hg-b4o9%cPYj@N)2wWv^;?<$3euYb< z%V|x982HDzo-Wb{XUqqxUpDTv+pz0=SP{~t;@*PqPfnNI-o#+)VW_6O`T&%{kpEbB zr4DS4+hwuX!7dFymKT-)ZPDDKFoog=*DP^j)g@CKRe4#%w^iIzczncv)8~oiopImL z<5Ach+8rEHkQyBb`1AdjgBVMS4(y5AyN@viCJHMpO*3TSswxpLg2D{I`LOfr81M_w z1l9CXl1C+h^?MwAT+SS6ZOXU&m#(k*02-u`vVy$qJ#sZ`I*d;Suq&>?HP5KfcUYi< zdI8xO>n~0H)E{Z7oG^yV>=_Cff3<&isPl+R8`oCYb4&3=aGq)-b(!)D0P-60Esu?* 
z7KZ?eh8X-stQjbO5g#%A;=sU3$DXPO2f(63wQ?0ABo@_Z*;Y6qPdir93MowgHuuaI zLU=?PF#%*ie54*MBUbA;V-_4G^J16p>5}^5p5F5JIE>s`|*|V}oFKRtkj5RXnM} z!VYugq^&US@nVJ*-U&jQMJbHm^hYOqBvPM z<3i%_ImgAkp$X9fp-zJLnXoS!cZQOT>}Si>Y8ref;uydL5~2Fps<2{`{pB;<%AL=e zxxTFvD_X}mD@W*?4ou_ZH89H!l8xwf`BLmep9UF-c>_H2wvVtufrNoMi!R>Z}L z(LG_0wMa5F@1b>eRXG$S=L0whVez3VKEu|Xr-3AIRtA}M(f3bz<+|6Sh-8elJr4P$SU*; z#SIt1)#~XjNU|4)N;9beM(7Wp4sRO`E%i?K%k^0K+w1ei_*+&87_1y_t_~X@7lG_O0gT?fK&2bfx*?I!Wk$(Ud^nxz~U9Cg|3YStI}plrw~NS0af= zv&SBnE~@IQ$bwvMB}MfYS`ca0TT0Oj!-^b+)&XF;M~hjoGHn;h2k z$I}L!H7FLCz2Qn+SChXQY^dI8%0I-^#k1oa1T5Wvm!Y$HlLbOXa-yz>aQjd+W!nsG$rR>1XHm_VYHoAEp7IFSA56v`ap#eQwq@yY3$k=on8CQ&Xd9j$AFoP}b29 zVC5Y63c#M-j4SWZO*V1q27J#>J(z&F3~bPEA*^_){bZ*4Sm_t!7Q)ZlC?wQ0#DlGE zL(clA<$aUm*`TCI(tIFUhp2dlQQ#8?WIg^p2Tu5G1iioZGihUlat9*sXKErP3%c|` zBf6K7%T;g!C;5XOx1t0TD-|WCue!QY$^#*n-y_jTJTFO{I)023+f|vYiK&+CYUUhB z-dCoseYTA`Jl&oWaE_GGl$X3|sQp2%xcKZfJ6HVpjagWm7%d)A07@Xa_*-}~HYj!j z`3289cTbiVWogks0cB0)=iEHE_qv>-ofx43sHVCOE8rwYKAW)G=k$$gyok z0_oUAupY-K;BL+XU*B0ws2<`2m~C&eWowXZkX&weWjElOc9@Kjya`j6m+HZM&J<9i zcpi6zvN6N&LYTJ*0li0}&o*kVxK_`;%vS!~HjI>WAX*KO`OSyLdT?sqgPpUYoUcJn zT-{A+8;4?Hz)vjTF14k0(CeK51|i#K5^w@fAJ4B_Up!NjFWU50fwj}TOdH8P=;dnY zAzyR7RQ^Dun?k_zl=|NoO?cP6i{AxsfcE`%^*3L!+4&;w$Xi9i{z?{oA;k=WtJx{R zmb_(%NuWeIp+M}Tz7C?LL_AsP5!>XxAh0@BaT?k}u`23|>$v&ej zge)j!nM0AvfqweuGhfQ<3HAcDFD@KbwdjjJ@UG+br5oP2anzuv?KKH=BmnC3XKY>Z z&#d%MIG=y)ntQxQL6b;Rk9}-wqYpU=QLHtwuA>f-tS45r4~{XG;w5JA13AgU-th3` zMADtA_Mw-uqae$!{k|R-xOfSH-y)$b*h(p!S>-!f^7?C zDk398B)!al^_6@MRlf>)*T`PXZ6Y`@Cr8BYyZ?nF5IV^?izfPO`Dq+BK#Z@RA>y{_ zZ!k%(^(FZs3Jn?2#(TpLvJiAA9dii|m0|UQ_>SWqWgMDRUolRg@R&&y84rb4U7`uf z{nhTt4q7cy(rI$!LB2m9h+o=DB8dW3>E{RlBEiNAKdPOKFBB3C`;k2YhD|gH8%auYxB5iqKNZT&ISk z7*67-2u`JZH(HoFl2+i!R=hbufGVmc!oi<~$9 zr0{n`!Cu&48eA%(&r^%JFeou1$S#{47AdX3V zpITI25W7ISh0Xe77A~^V1ckMG-n#$I9|!G@0MBc$E&KPLTjxWnIHvjsBCA}Nz$TCe z3-1GOfRO*=CC_o)vI3NTj07|@-)mFbD3uQ@#k zOSQX-xs@g1n)U7DN}Z81J&*0e@kWRFN>k5cKVN_6<%yF>be$@E+hF41JqCd7Q8 
zJ`<)=x2u-VQfGf_4C(1Ld{(oBEqOu_IDY!!pFyL7sYWr4550Wybu+2pm=Tit3j0G+hj6KV$LBwg|4o9i#1pVAMMK8Q2=kQ?^5nd~&RNx@sb zNef>m@%Qpj*H7HG*v9u%uv7271mOD+uH-Gcg%xYMSKVFr-L!p)WT#))0)*o~iI6!58J!ntcZw&Yy0xIv_7_ zznmClb9utr^|+0Y8)bMD-{Uz|N`r)$z@*XK4GgmWfhv|D>xynpFbh`XOppXSENj(A zzuEzisOnFfME$%*{YB56N7`gI7pJhw0bf`F9rse zusbklA{u($dXkGsU1%C`n4k<17hua&A7I{t0xt1^TMjL}y_l1lMIl{qTR5QKh55xy}5Rt@wD`>V#UG`au)KtbZKr-hqZAhNMEG=kxt>6=(+HY&UcbrrB#))P~OvC-0 z3h=`!Sz@?@R~w`J#>5!j_?sTN^@Z}usTH}**QGERerz)ieD_LUxH=Wo32jf%(ik`T zSOC-;?3-a#0A=4hjE<+&B`gq_2)`EMcEVK8ko67zZ33`4Ysa>Ty2!vtxx|p7kY2bQ zxCyh_I!kv%cnJK+GqFEnMnFs1$ZzK%siO#3t*a9Wf$ zKgn7xzCLV-#2>|kNmRVFK~T5HuJoWtsX&XV=LKU~?lppqCrperY4rGUt8d_Ow*)&E zITD9D!G2n*e7`uc?@`^{S3M6Mr_Q@JQFVjMa!ba2+DwiU|RjV;8|&elX*|FwNxv#((H8Mq7p{%Fb%BK4adC6C>-q(0EI*xqrj=A3{-Y%nRAQ1HS)LUsk>ohdu_Lcsdzbop@Z-Gb735{)T zCR&<1?Ht)r&M?Bvu;_8BGhE|Vo!+rcPiLy(j~A+&EXDz&IL2^`y$OsAW!FaJ?;Jg5 zwj5KcbhI}OJ3N?qauMY4?l@^yqFQxulg<)?+9TFuvgOpP*#T`?qiCB`;-^7_Z_HPm zA=C<(Q!@h?@IeS4i-muLy{4K^TPGr(1W)wrZNdY;%<2{3xd>Rfi+2{D5g2-JxoMMt zKGT2y{OWsLQ6}7eH?N1d?zItMbYtZS5KwoglUNzQ)s65XJNxZkGA2V2aLcGsPU7t` zoMb*r?JA0StSU*CUkePL{Sn>b8~qx2Ji>OHdj6fc8rZDY>^SrKG!c~lt-Z3{Rneey zz}@loaOxr7ZJbXg7qVU7aPGiX_YNx*kxGTvi(%Y#V4*qqN=FM* zFNAHei7Ww;lBm=?6_jx0{%ucE%Tk1npAWEt{S2J#hg#Q-u@x|~fV^qn-u zPVX%}qc_*;SU6?M-0-+eeaHj7JGU;VN^G3GGett4kcqPUg z)hzPuRRB^sOP(EpNU)kH%VJlH5>V+>^XJGbjlAr4=y!YddKdx0OGjfipr$gsRcPBT z#p!JUZE9f84$wF>IvO8;cb&$4!(>yr4&@N-4elJ~F6PvsE3PMO(}j(-uHS}~{A6nr zqk!ew+-vJI>X|0FhC7{rv7Mo9(=bc>sjFMN=NVV(2vrqqg4OE*PCmvnke4QLS0xmH z8WE05Hr!GN3m^Svc;2#;=y6#o+|yo<78X57umei=P>@wqIRr$53t;bM2;11XoZ1{# zT_ck>*p6e_?&s_|_Z-|HFoU)3Am5Lr?QuJ9LOE-M7N>>v=n)=YdlTUeFlUq7ZEoJd zb0oLzVXSib5s5Epc%x@|GeWj%ax&1tXlAfWrB5hk#b_Ry0!mKS&j$O4M%pMTjXyjO zj7sV`ljBJ*De@bf`5%vv-8<|{zzH_89g;T1Ez8JfiWPw{wN2^GA1@?TD4OwbuGpUu zximw8>xDf~b!KwCWlQHU=?rozyb>x?Kd6}8i@E}y9iz2e;Ac1%(6aTtqi&P5AUo0( zMM~kV`bL^$>?9gWXd-FfY|ApaUQ4m<+|oTEshM~p&O!cD@ASDI)K^v$LH-i5srqA4 zS$r(w40F?OSBr_>yT~ARro$hE#41~Fo;}y41bu&aH~jBYl=*Fz(zFbH$fxPzE(@-^ 
zwY&MVYfE2-DZ&HAGma@$|Gt-j>i8CMygSe%?ZPB_N(yxMB+&i2CQs~Kk;4=}osUGi zO~dz=9VOCq30&p$9v3)))`5qE=Du+5Fxc^?FZy)^byukmy`#pc+StD_Zc}OMs~|}O za(FE$Vzpsc$#p1$UaREH@IvaO?|5wSiL>df=A^?&zW(fa1xEvS0J+1)8RciBdLXVX zHJO~aETI^2vZZy_p{S%C&G?v``fmbqRZO*6n?vE7a)B}8O2jde(!VZ2hofx9`I76!qpn3lEeP*(( z&=<2paboa`@q%d)>@C*j*0!fXZ~W;h!m7SjX+@or$J>q1CXPe3Eb^uN=F>TXddxVk35ln!Q_flQ zMi-9P<@)bi%eh8TvmL@$RRP+iyc0!>|Bo%EjgV|>+Fj?fE zP+3e$tY(QN{PdW=t4)D3@o@l_JM232ttLqv{x~35+;{f_hpS#^o57?5JJ)%z6$3O8 zK~shemb7l8k{`KN76!IamO$4XTef10C0nfD&wE`+)aO~X;;$-e;mV}KVWfdYoM)vW zDa|Lju3h7GL=xqiuxV}E6eZc&k`DaS5Atap95$?>bG-@$q)YMQabeG5?;sQ|7UXSy zXU~zYzdYQtRz3Eca&7(R3+Z(N#z+8kpP)x zYn$^M-Ruu2rQ9*1vO61r&u6b0dXBA#6>RR+iL-Ktt&O$jPZx6|FM^9C*5A`;g!nRj zf?qA_WeLAHqHA8d(SjWeBzaCl+`U@cPTYD8yCF!t<^%iX?FZ$ zCN9;RhOC*I%TY#lWEkBfn+y^&H6h&H@|0cPAB5dcco7qaEb?ZFqI?FCinB?J>RPH8B|*08C&Y&M6iN1R`!rL<-589GxrVmg8mFnR#Iar=v!;jDMQWvns`K^ z+e#H9(g;5S1D0CQw2h)8UpeY_cFIFagr3^tdbeB4bSl-OfH%-LWR4fnI#*WlD*><8>nsE&d}2l z?JErNw%O*rX>P$3730eX?e1%oL-=)%Vssr~fC|jW$rO!|({_Iki(K{;caNK~@+BTa zT4R``)nw+1fydmJxZC5olkxd#J-w!>z8h5>Wud#-3~!sIC{6s`A_6OaeJn?rRkq=w z7BavYoF{@DHJZaGWT{m7#ě@VNwAkgIm4ZE^h_$C~DS7KB0{zYE;977%D74rBp z^jFhW<4F*Xz+iJ-dlFtF0mt0a-<;NV+suDPJX4Uu-HF3@O&c7F6Nx2c%QW7+jg2PH z_Zdwc#V530-{JKUvrEFaH2X&W^om)>W6MdH^K%HneT4_@6^pt!3}e{|U2-AmWJkw^ z5m%D&ryU|in!LbMx!Mj|xtwox3g48%k0x|Kto!v}M&?_dnZS9-GLS3C3;N2HruB6Y z2QVT_A=YuiJrN>g!NmY?Nz@i7O%Nf*IghnsqnkQHJQxHnblLhLX)S21hqNLtsSird z47G2Bmm8W)v^(rtZ2Gg}N}=sH4@%z~93e+#nxC`Y!pbi8UVOCt_%Q=Wlk+K<7pfac z7>*?-y4Ehn?$aOjKDNrcU9Ki|@oC^UxG^Z1Y`b8Vnh#E4y@r6aLpZF+t>zuMbCwK! 
z*O4F{YTeJws+KPcIgV{d)rOtEougkG0K{n8*Hb+(=O4sfgG8}E(iUAthKAG7NL?dw z#5hR&x(hPQlwlaiQG#=?W_254o6MWvSU=Ro>*L(xrGr4y3^{RK-+v2p4Y(mCTW`HXTqvrmsE-Dxu$SYK(Zt z9KZ#eGaO~o)6(vU$u}8bCY~`9#YEfDi>W&J09$dA=Iw*EI1!H$(!RBvcJp4?`Nqdi zH`NOjImmmQxbRt{BOr!t6-X%%n*97!$RmE8OM|^oBn+<-G7H#VC9~*X)yyS+6>IzfDQ$5+j-lG@Nfh zTOuwpuN?_KO2`%~b(x)%8r?&fwH(hzF6eJY4%_Ve>a(J5l+-Fi%DxzjSTE4}7`Aq& z04f>%2_qP6c;0Ur_D`0BA`NQ{Tim!|^M-zBQQkfQKt-S$W|E}2;4q^g?G0aj>}C9^ zrkMCtMc?8Hr+4hvpWp+61N(O3qj;?}ZVW)ux^WoVzE!eb zJA(oMoH)kX{c^0I+maFCu%dkRt}Zu0ujF)fb(r}_4(r63LnZ9o7>WUJLMIGIOdMMc zaUIkmjNl`5C7sMQxc1GeB>#3NY8HwHxlCMQW;w-S3LN?923 z3a3N3oZj>W^ndapeb})L~h?CHjxoWo%sc?6^G^nhB98T7~;qxW&Da*{7~S{V$AfE226c%NnQ|aIwsCOBkW3FEFGK$yq+S+#yg|~R_D^)_S zzFrgKd52kPu)YJg4zCxi)7!P_36v*vu&pvFKl6z=`4w*|Gy|iv1Buy9=*3q~s45pt zJ(4=WmJ($E;}vAZB#F!6cB5Gs)3CART4xJG4ksf^>N05x2-`%&@2bESjY&p`XvAEJ zh#B4MmN^b>ZVh&=0p`m-`&m|bM3$Lbl%?;!=11>O-L0wCJ<7}2lGHc)Wb6@=V5`}X zDLX52uwah&()I0(O3;Ar40T5=f3$RcvOlU65J*Ey1E7Rn;&XLEqU8!%9FK(qp$j?^ zEVmOLBOl!nwt0e^QwKk+{L4f08q#+VO;Fo%S|Dq51>QiiYMve3)4ei_^XU8~U|IFC zbnA}&st7>QtSD{qeaCJmi4&4w7d!pL*eT)$yaj*A+@}bw#QIsA@9ZchOfrY18Z0t(xua zL`lr++Ex4c&{jz^hen5n_tnt?5=8EzPXXAfKvj1dkgE+Cfd)LSOb%DuwWdXku$8Ie zr&byvEA1jOo#c{nLw%!n@76Iatlc`{-8jp<~Cc zwzG-Xh;6MxA9gb=hI&EY?N2*u1$FdrGB+*V4kK1X4hxn|yqG8!C{+Q+1~5FtCWV7fOX)W;D0 zl2k>T8iyEyPaGzM9#(%nvTYXbT|iQ{nxx_nzd%M^J%Qkh^64v_9(+L}s% zf4}+mPO4y1W@k7Tcci;mVG@_VFE4Ea>z7D{Un6h|$wXK=H&>CX>+~%!bYS|%?CU)Yq+DnH zntnOAZcqyea9S`jQQ9O$ntU^9p!uozZ+5kC^VLiwx5;K!#<^w`g+=w!FZ*5sJ8;j) zP!!mXh}AGv!)q|Ih9=PgSVRcOd+V5^nc~>%YT->{sHjaz=H_}O-U!}o-l^-m*OH#t(nJ@ zxvckJuJ27>$S=0u1h_?H6Fu8j2YjiXHY!{$&Ir8&zW6I8LVVR^kCqtS!o4MNJOJ zJ*Mpl_75$3E3alx%fmeoH+O=zy(fF}FV9ko%XF96mzblmVGj(<9zz~F+ML-g5?A{I@W2AWDx!Y2X^)Z-Oq{S0 zF*FmK@T^u_k40-*w-ti0@*_tM|My<7mo{a9fX z`Cmc{GNOK1l#I1Wu1?B+0M*)SZnUa6UTmJBCV)>34s}9%0p`ld!}F( z{DpM@enUoSLB3kZCeqKn!yJJt^72bO#uh+YUZ-aP^tqu8`2ZNn#%Di3zF!qE4!TC{>(9_laTd&jT5C9saCe1A)d)9&trZ&Pr%FYN3L|p zK>k5}wImo(b0-leBU&*fIvh^ZR*+^-{TDG3_M||4z)ooRdNRaq|BMrViKb5UHd$?P 
zIiFzimVxB2rTk?6gv1L*^&M*3D~^=ELUQ+Y)Fzfh?4|?dILe>5@K9`x+qT)c-2`N1 zVG}%xw9Swf@(o%wTM^X7>kS}PJ!C6_rHAaQbBl=UQ?e{~-O~BL`y?83Zfw4f5 zKTkvIUB~pTe&Ur|a>#d5?Qq*k=?mp#dEq&zE1NlA41ZXiq(hGQAt!DU3{s0vOw4BI z{;)&Km|4)KEVjfgwTku!~%O=qe|=cV+2=eCMcp89!pO8=$1*aSn)m0ss;s3az)y&z1jqH4Yoii2+HDYWmUCcAtTWx|G_Q<&PF@laqH3NOe&Zs20v{ogi>rUTnDi zWuVjw+c0GHZa>G#FhxAVL#40oB(sLwPQ22RqrT7~SDUpSdd1M?7v86<-zvObmRMce zZ2OC5NdYk&5KN}gvKP5kiiovmo+{Nlm&1#YW9wQ;fSrfT&K?G^ZTBwVYz1Q zH@au8nRs1lN)$7j+^;GQ5$e!!H5e$u+~BM-MIsy}-Ai2jaj^dqwcj8qSsnII$&+1= zCwjOQZdi&ZR=yjm`OXC2L9SMyRDETsLI%tC>bB#`231FOUNOt+dfRKQVyjf;DF{$3 zV#etA+$H(a)|w?*Y;vVq%fMY3t)NU?01W4~bHo;&wj(EithkOm*nme;+hDD;m zf}rD|j~zSd-H>VvN#yqJ;7=Jx096njHpi=6$C#TB@qWoZkc3eggncOffIS++@Ew#R z6SyK&Y6k*^rNL$tdQ#s4QP#0fGAJNHPpz(6hLp>W<&hYTz5r9qj%0&~ zu8KvBq3)_W6FGQOb@%9sU!PU^bMFJ!iG~cO+G49G1z@=YOgzF6J#0q6qpf$&>>A7C zHotG;d(VRL89zR%ev2$kd8Y3&G`{{#DZl&4x`5|GwjKF`!M+UB5Heh+47cM|H*m&< z=qL3F2Kr)GT>h-lMM98jFLf-C09^ZJ{Qb&AXk)f)D_q8$kfntNeD#YTYs$Y`mbTHj zVoV^m_jessH{CTPHi!17l2w>=Dy$5tv@BN=ms}mSN9X(Z*q40$pR(~tV$wY7K=m95 z_Air}$8m`rRI>c0UNh(&9W`bkFWah>`la^|Z%~C->arq#9p1M8{IW%`V&4wsnGbQ# z#j{V0k-)-wzeH@8sQOJZBr<7@E3p+l70DVfKJrk$Keo)IA2=t=mFX@yv*syKY6$|V znrgmme0^I{=f?EJw3A+rvRMXs;@oT2ReyriQU2Ku42}G-@u(NET499?>!K`;rnO@} zMwDO-`%Ay=EZ^^3Xa2)%W|0j|g%++$f}*;-90MwF7xG~Z?l66a#Tm^hEkXma`1T1$ ze^w>hNG0camR<}8dJB4o$%U9KMD53yeDj}>!eo$|w{*2n#g~0X;tBKwlh}LYJ2>bF zEWY{(6B~Q><300m4%dn)(Z#Sr9-MHnP{jYD=`7fydcQ7A_s}tPH`2|}HH4H>()FWD zx;v#qq*GG5MY^TCk?wA!{>S&a-fu8-<~+~dd+mFz0nPOp`J6Vw^3jumH{X5#a(6J* z`?O~|#)M>I8~Hlfwag-}*O;JQ{t!6`o%(u9vp=7{1K_t5L*ExNroQz2oGp_tN^B)B zq?`gi>$4pEawJ_5813}CYar=_9+^0UXtGmO=PkhKHPM98c=17QP%R2hK3;*S-T9W7F24Yh(oTXgps<9hNf zYIDN3uL>=F=MtoM6f*cPO}aQ5Ssxh?-+v!v5wpjLZ$Aaj@tH>nH#*1aTci{-&i703 zCT|_^>1MyfqB0}ZP|+(e(HG^)#gPg5cLl=cE@-OTKY2Pei5zl?qH9dwRg|62yW4as zReZ%OkBzkZru7BTg@ZdqD!g$$leZ6^&f3nE>a2OWyn%zCpw=^l0k|(XnL(l1E#lB& z-jWwgCvT;2JVO9neGN?yb)y{y+WoYY;K%}1Fh6=361QS1#U+?}c^nY8(Jkzw08|^R zNEsYGKm)6wNRqgcxYzX0Naja^Qq2S@pkg>xHqE#I*e}fT>ZIwHUyrqT%~hk4Klv%q 
zm+thU6n4x*af^#eFTgm#k!aK&Y|2l&CByaVLWDzImqo~L%PVnQj2DKFC+zH#$Wxm=;ON_9PhW-Ep%FGfOXMn z%P~>$_%E>;*f*eG*>aljaz(dRnzX+A4EV*cL7`&^%!t(1s+g{rzIed+eM4)MOFWf; zHUnXkh02`F?ebJbMp{xK zD(Wb!K&_S^{y{v+lYt$E{Bq?zR;hN#sIuS6&LA#TL6b0)o(HYKXyG^m5M5!wYVao_ ziscu-SX3_?kStSgt<>0bO(L&XhPgm~Vsp;1%?Qd@q+?-!I@A7Q8k+b!;Eoc?te(fD zVL|N=kORN8?gi{>8cY~i(iGJInHTYEI+Z`n|1j{d`qS|eqpLZ#w(v(o z?n)uiTTTeaLenP42vkIuf-6-4z`B>xV(i{z$Eolm>I9B-FYfhY{B7Z3#|Oyj@1Io{ zs}D78J5yEU@=ti#5^z;{Cdl3;(eFIwIYxsBK?k$SeeR*U1%m*J`7-=KzmG-X)1@_AnOgCj2Er*@4)b9@j9txD+3DEA0k`f%xkSb76jNMwOY zMKk?1ZSE%$l&i}(T8Gm2h(Ucxflc>#9LmSdBkXO@5r+#_O^nR(7V_9HiOKlDQ;cge2er~kN3dF#~ep&)wW7aW06;23Lx79W-J-&xxt z!YNASZcqjcl6znrE!FQ=0+>;vz~-yUx38dOV5LcEBEK0GAZ4#zbW4n7indazNV5e& z`_dDh%GQ`PJ<2n2A{~<6_b7k}G`=$1G< zyJ*`A<*4)8@D|;MmEgx}UQ;F}M6@CuO-d|&p(|>s^h4C3QA8bX9C6maqU?W)&e^zn zD`cZ(690!kb%Zoad80=^h6G~J=WCoZ4RO2#)M@5=R!eBw({YV+5;v&F+QBie&6;8VPHH0` z;EpF63is%1Xgw;ga5om-E9C|K$;xmC-bDSkY>)LN3!q09P>{lv{4d;k)qDFb5^#o6 zB_jo6G%#N+yKMbRFvQ5Piu80%6A0K?9OLxN=qtTr?M)*XyI_Q(DEMx+0cA3nDId;bP(Bt&o} zF>&IZNUD_fRfqe^8qo=GqZ)m8}8pT?wmBjF~RT7|I033lcHd?C7H+i8=b ziB+j`+pLDt?N+v2nnUaESCvCAYlFxC&jPShb2AP>+n!pK`DbGN3S8-RaHm>N-0ghT zIoYwC$ro3CL|wgd*?VDY|CN^{@%TukvPp_j6oQBgM;a0oN@xM3leg+C!D<#8V|Ro7 zJBPu}{lCZ6MlqkKO05q9!Xz!qgeNj>spcdSBcKe>EqNbh_-y0^T}-`EP3w7pQT;}mDa;%-2mbESDIs~y=;bDGKmVWlleri zKD;g;Hh<9=nnIDWB}6pg@q3`}){scuhxj33+rJ2xd&fTK18HAcxcH`K9_8g-J65a7)re{PYYF-eh4jz=G*`2cfeUdi zu&Iz7bsJX$cJUn2#}nmY)Cs(bm)11L=;q}m(>&Vn!~`gZjsZ_ACjr1Qt~R&X<*~6C zIhZPxXRqmkBl-|DQQUuR-7)>aOq~)p@$GP~&FUsAMNN(!G%c~HOk$&lD6+>ogw9|3 zl@5=;Lff2t-dZ$#Oa)S_n5^c?`=70Ym7NFR12r0Mm*Oi#^C*hj=F2g%6!qLYG1$xT zaiDCtWyzYU3zR7mE2w2v7a%ia*DMIC`BPICY&qL#yMS0nA#R(IehN7b_$c+NcQph0=(cDeLcd&{nEKs(i55R%fBj zKlk;d8mQ@ChF=9nu)-3!d z4NaAk`;LIK5^kL}_BITF%}MDoI$d953b-nnwi;svYtI7$zP;^{lCy(W!D z%))8;}dqXo8uxvNOBQ}b;SK9qF!>eUfS65jwB2@Cv`LdhN zY0t0aSHA$g5;W^6MMuJ+Z?_q~wmYK?ioLczz8}u?mX=f=8#iRlj*+^Ql=GH=6)l7V zmhFwOAp6y!!H}kF`-UmGY}3q|rh(W7Iz5UpK28OYOu#e`TJ=!FJR~9evD?Ani2U;3 
zxL5cKzSfT_&!E^)y}o8fjbdL*J&8pmk_l2sg1Z%oZlvr9*i*$>_7AYc9yW4zQ$|LB z6araz+{yD|%c4gFN%^y1hWTH)-5C4;+vUq}0_M;F{9U3%_U*@!aYed9wVHex{Mf`Q zfKyXXG@%!7DTdAb{zm3J%$`U#qbT9amd|B&pd#}k5NdnjJmhx+%RQ0T zEr0T_zeg3AbzCSB9w|wQ&ix%4w;E;SK)wM11&9WfI4u-SMnT-n&py3u;gMVE`0j#|&2XxBaw7nFRc3ra1 zu#$@I!~J}LxMTu3BO7BFv0u;!0g~x9= zGT(4D7QI6W3Zk#D3Q2_Z64ZHRc$F2Qcu%*-_hTiw{=$M4EdS;D-F<((DG?u&ZA}3r zydNsk{|040l1NzP6;#!>Q}5%-W?er-+m(RpF@bwp(MGq^KOM$kc(;0Ln(;Ez5^D-Q z78X$wD(LX!KvD`??A`1K)pl7(MT783P46p|8U!fENrb@4E>!<$HjtkN7Lhm%)VOBf zytcQucEmcT`}1q|AHX2EuxfOJ*w)hl;B9gKKmjv($ET^S!RIMwGdrNB!BHk5QA8}P zndv!9pZ2R;`Q260EaxWPV!%e2?a%2(9F4{UpdZcLx3K|1r6i%7>LRkXF2D=u_}=Oh zgV$w05&6fE$e!^J6HwB$&lo+U{QmG$*kb_5y`MIvT3^X5Qj3K4;=hur&iJxh;OQp0=rwlt#3!WIZiS z>c{uyJC{f}{Z=N@bNcE$uYkQSg-0{F~DU=SI3%pj9VFM`l} z2oPQHhi$)GmjX*SxUOSMQYu<~T+PZ(h#Gjz{v|zR?q zv1FP6sy2F>AMf2OUs|#RRn_^ocBi~E(-7I_#1HJ2Dbas?;COKiZ6g&+fH2Zan9=&;$ z2CUv-?S|d2dcvg^glF%(zjV9uBHBXyfNxfOh-&?`NsMDJ_D}1JHSS$x!cB}Nh-IXH zaRu5bUoPXWz==pp!gY~)%CPCgaZeSwbiSn@=mbfskq`ffXn-_J5P4!29YaRb3Q4h$cfxt#W9T+p4-d^uJ z7rI%0KilZ?^%C$BRhe~Xew4Je6*5q17ravHeI)V1kK=(7^o>KJO0a`s-9Q#U^d$hG2BI;2$~mok)Zk<1w; z*y^J&|1J-saU)IFbQ=a*(*E$eDZ5gz=1tr{(4{ zC}lt~G>aGA(*qvb+jHmIlD~7b92qcydS95dg)cjH1!-l*PkL&l)7l1=C3b!iNd?gm ze-BOZmiLx3g*#_GdZLwr)UyHB%XTJlaYS@&MHF(-oNmjpjEgljuq&*4F*9aS-kt!U zVd;6X zg>SkS(H-J@-^>iZ8+*!{^oAEJ$bFYdNlhTk!%Tc+Q2wo~UZ%yZp}-%_L{Ce~Nz%-T zEdKYP&}irPC*5_Nw7(1fXRZ59`6{s-kRsQY$Q}KRS|F&VV2r*n_+3qv;0mCK`PsDY zk7qCY4aAaaXKK?CBjS8FPf3l0G%N(|eswEMl10?62u%blmeXJisNqA1S2wg)4MgoH zazu$3-qizg2t-sDIiSJuPX{eCnW1jW*e)2lXEubc%m)fJuvP`4Qzj2}2Q7h;fstda zUzJ6>-BDn&n(R6OuytFfjF>5~;%wBW5~(qv?pkq`LLU|htlHv3q6r8LDouM4xQf;j zV-JQ3wDFxz4^jcg`V**pw8vK3a%1{fh-X~T3>j@ZnhR zNd{w>{@(Kny95&w=Kb+GZ|4FuM6G=XK&55gFR}zYCIpYm(nb!&WKxgd${w8`b!-K! 
z`#M+L$(lf9k+l*RjGOUK@fUb3h!f&HeyTSmNI36uNK4P&jkNk5i6m@^Rbh0#Oi>Ab zAAU$h{xtSVofzs+g{4tYYtZ`2m9e)I#ve9sa{NGlEXM ze)}N4KR^xhv#B@fTy|BSisT<+<^EH&)4CORk8+RfZ=+XT%BVyBpj%YNo(h%YDNN<- z4(^&N6RX`!p(={%3j^6ouWU>mASP|e9#(e%cW7?1;a6qxmmARs++kvT_nlcQfD^$P zQ07fznUV0Mnu=?xunGXG;q8Xa$?$H@#KaJw{`nYPUKUa1p)@MtxUZzh>`=eKAiOzDw6_G3C(pMoL(Icna zNa_S>e7&$fA=e4{%M)fMD!DZq$#?KCM=dq#l)+AZg`;@=6ifIZ&+GAI^81X6NM(qs zZqK#lgy_wrbgsG}(%>x5v|R6hAF78uF`6u195oK?w?vapqra5jC??Dap&0b@b=4X> z*U)&PKv3@x+!T0qj>u)V!#&Kwv{Mg&^~>c!*D@Q>d{XC|uPU+G32Ai zQ9tE94~%A@#rkYBLPpl5mRCnuzH5w0Ge+9;bD@so{d680k_`AuoC}^2xJ80-sx_gR zXAz4Getd)b7`e7ZMZ9op?#8L7X;6~IVGAKYW;yyfFKd#3(_I3Y|E@Z-m+Aqu1? zo~!5Jko3Fj1|P3G0AOod#z=dGhU5<}Pcaj<)V%Gw@k&H|wD=+Zx^_CcUQB(r?4X5U z{jtA!p#b44c309We#*)+xlZ;GB)eF2M|?*zf?+apQvLz*D%7RYC47{mz0}H8x;XN{ z;%^hZ-lw_2dErWqe5HNoWi$}twxi%-t#4w*b=B_++`S|HxaT&ms^1{cb~ohPcsE)amEHt_F8~xy9cM^FG}& z$1A>(Aq|d>QbB?{niHzPjV2Xsfs{njpykT&c!y|s!}Vii;6&UgFgmqN)TUZzcC_8i z%8LE&9fH^oTGf+ioPIgClqnvxb0!keUj^Xr1I_{`UH>f1>7_+k35?H{>ke(F4%?u_ z@ey>n_e`5*+W;uRA_4zh#@r{$eYK8@R{K-+HvsKy*R8SJ^e?IgZ%`D#iBd%|p0IB% zn=M){R`8SZD>oRnrjp&|3rXC2yJ&lcm%W$Er&!QopqE~9=56uUAXc2V9|fCW6n68W zfoP-v?Iq$-;=0hazY`8YD&ns-O#wLDwPxiaQp?I}8Hd%nn_8tQ`fD&!7W zZFt9&AD_~f&2PvuT#6RkC_p(Quarl_cx{D}X0Je6p`?rn8E(=A*uO83;^|>Kp1!jV z5zBf2__q7=fVpxP>*Ln0Nt70MQdW=jZ&XwZWkhEBf)?LD|CHmCo}Yj0AsFp)W^rH8 zNjk{@;TNj|(7}v8#DCznR|>5pwu!(@N>X36R+OVt)!;Xe?{lGD#6h6;Li{VK78P2Q zbDRo1Cfw2d4M8WENR^*xm)R1nxFB0_tbMO73<+Q^P*5@Rqow1}>LJm1 z`X|^O{nJv2Se$p>qYk@#Ja?{=aY=^*Lg79eW4k9vc>*)K$L^YxhbND%S^PE-UEmY$ z6aDu?JVH^ntWFJA(`|HvEKZX(ID~0R;dj(YE0*VpLD%Vq%EntI@>hgZIp`NB<%Pg< zoT#CJ>9uE!Mb6w{p&;^miAmBM#1eRp?s-O%+yMq+!A}prCQgj~Yrr}H+Th=| z!A?jYD)m}vMDkCLTi=W>(tzAC{m?{PLFc7T+Dobu;HDhF$F^s(lU%vHdSNq;14^(5 zx)`hI$y?@+XK>CO>8uxnASM~cD?$?-Zoi^L-2qwyGEsJ;(&>!ZHTW1e>NA^;*l>eA z*J*A)@A3WbHH!XIMU=RGs&%`epxKhZ@YyMRFZL4Lwb9-{wj+yY`oP}-xTPm18~ioQ zo=A(4Zc(8O8s|eJxpB}qm3i{BYpboEQ!f4K7k%x_g*ZV|KDpxEY=MX?lfZ!6qqzzm z0R zd=7lKh;QNvu-w*P08S!%p5C_pnt*CEZ1w#P?}o%O`7h+JZ~{S!i5x-+LM*$+Braqg 
z>N<|rYel%CUtqfuOTc;-n z;O{pMA(Ng^RZpV(j9C%VNu_HDq?n2^o-eH!D9r7DH7Abr97}l|&$7~Bw=&)wvj_VB zNl2`JO$jU! zg5w7avJ?g_HDBpCS>!y!GW?0sCU}E58Bw~mUnR*Ho9~u}ZWWHm!y<0*!c1MGh1v@j zm^LD{crpxJr(`L!KL2KJSE$dCQ&?%TCo)aEF*!3@%82Hbk++h!5&2?bFQ@RCotO`s zu1IXIGP1k$6iBLDkNtxGwrN<<&NHuPdj}A9RzrZmA3!`v|sooH__S@vA=UiXSQ?yW62^ai{>v2fBG6$DCvmM z+Y*rYC1@2nyXzKuS#0v2?EXn)-35}U_qUCIC;sSnZ;!i(aQ9bFeS=mYE6=FYpCoDP zElQuHBxtfw1|PvuJ|SVb{q^thDq7Rp7{s#4Y5R3A;Ja7+tpJ4k^ZvzVFMTi-b#EYi ziBpXW41QB99i4`hX}w%l+!%;%RHU1H1DO*p3t{yhx5i;X=(Yt5F=>4^MfTykz3Yi8 zP8RnLOh11vq4^t$tPzy(^E=5RK^NZKt!5ldCZVa@TOYJcn_IMIojs3K2kr;PJiH#< zn)%vRVRzX^FaRCZIc{e?{`vZ!=#k5r_0rd$z4jg?)Kkk|kcq4;H7R&m14P(E=kmeB zz(VJJ0F8GssY-agZ+X>H^K7P3}%lM@nN5qcLYcNN5_sc_Ak@!d}ywY z!@uxW&cP|-F3q#Woj(e?l1$Y=gq#KnaxWTVM`++UwPR4D#AyvMa$?uPVwwkVu%yhs zW>F}zEq}>R&ocp-_U_zn(G5N0+;W>*SWzxg>0g-hh|zQ=%PSITnndpZ+@vj)L2|Yw zxj3Upd$1(q+B5VKvvPoDZ7C0RCmR*tI-jSjOWqs34qEB!ktQsJklf!X_xLHjlwI)7X@<1Dp`+BTBr1x z0e6aHRhu8>@Tt{y&WXj=)HkUxNNKjDhKCdTxs1UHm7v`=7~ToYS7 z(X64f%3z`Pcio2%WX$e}9D(tqo>aI~y2b$;S;DC%cKWa?3V60rKI>dk3d;6DgEWWM zf^iyBA=q&(NnbyRZ1Y?l4Ee5Fw-F2Q+L*s*#{2fuzz0Y)$$6Uycy&P_P^$Ua#$U`? zCAb{~=!Sgq- zCzm7lt0`t9ybqs`Dyq*$Eenj^vT-x1V6Pq@0awFe@0&h~-Tg)6tFo_84YLm34a}Fo zM@b^%L6;51m5Qn+@93PFgpfWPVHv&ks5#w%#F$qf5t~zOpL`Z9rZ@4%!Y1_nHER34i-P9e&7s zL2C=25NALNhbNz)!!oVasZAWkaQA)I zGGh%c@&zR)4h9T=a5#PKB1}aP5EXWiWQyp`Pco6Yp{ussa-Bd}Ac;V2jn7PNtwW+? 
zA6gV7C}opJbW!xW>-&1p$j~u;mw6_$_J_x>?cg6>5dDUsszC0&dUznx7^z?oO_iwS zNs!5~Qv1hA19#Uxxk;if*4>#Z!va9t)00U-Sbccjrvhj;TcCA$Uf*66#VHGl0z@hRD;L9qBc&25BWjZ8u_ZPW_YR9g^E|-svszK*L@HxBx!BYnENV3OH z28K5xdLlsud%JXuEz!I}B16@yfb_)WkLK`7o{S;rL2s?FYV(NV!ES+DT+y7boX-~t z8d9IKeM#~V{#xdl2wTjkO(4Im0<8#z1&+5b-u;*9BZjNbLGq8>!*{DOWt%Gs^y={j zi$ND%1WA zi7wp=8_*ic2RtXLWfVcTb0E^)KvG(i#%)iiL_-mJric3wVB4Cg-T;JK`TZy>y7^{5 zF%#(bq-ES$>RBsYVrqs!-LCX}>dNRGbs#;7mGYJT&|{02e#ef+tP3tt4db*l>7%_2 zzOE>{IwiHWUz=&26r~5~*5RIbgrBnZnGz zuLu?q6-psV=X^%Tr^9CEb!(B08++@q6FtMWYW^Ta#)?&Rru-9umQIn&0IqpWn<{UN zf5mPbOZ?eMVEz5;pJ8V&vA7D}9kJ?sz)|!-H0X>Yh?j*D2lEuX);(`5Z9JH4tP};s zRl{=Ezb){`L?R7JxQ+s%fOT1p>n?dvu$b4<=+~1+L%N;v+I?c)Q)8(|y|C^m(7beu zcTsIzecAuyRzSUS;4Rz6AfKBWnN$#V@Zs4|Gh^s3zw7ODLxmEAor_s2Bi;`kUZf4F z%)}Z3k^ zxMPSUKo!bUe$2iHIOuuW)mm=NdcsYrWK%7}rMfL(v*9dAG5LZ$ zIn?mH6DMM<4|p4@FMe-~2v(s7o_fFuBAmdx!|DZt0^t)wR(d3Spl*7KGAHbwBkXu~ zJ4kgT53sXxQ0=6uY(;33v{rgHoKZ;O7eq_;@S{(e4l#C;o0k$&pWp=#R&t^-yS+Au zYKWQiG~ij=lDN^9!AfwXnp&xmIY|5$MuEx@T=T@tp&!8?^&_Kw;Pl~NKPk_Kco9G} zR#ieK}Y&i6|~=-Js%3Bvy%;|k7Qog`$7LVtHvF5(eW zNAdaeoPAlxiQ2m>OiD`dSf?#Ssum9{plqS>B#L!gkaHq9uu^l-#`CA3GsQFZLV1z= z*N4AOxM*(If8(qjYL@$;&B($@! 
zeredBEj($cl-U=!m53u?GfYwBr4HDcdhw<2GURWv0au;32AM> zJOzbxSx`k<5pLlQE=$1w!Tna9U&lmDpB~ZWCxNxc=|q4+BOKPUKH|S&qm~(}5}Z)b zyYxVjMw^F#pAZ^~zzd|lA4DSrMST?)><7 z4Hh2B_Zisz`3}GEiaQy*7dJ@DGG2dsuRWWn#)wGcy^|>K{f`B?uX~5Ofb!N`H!;{e z!8J(s|xp2Ck%9F z%yR14iXeAiMANTyHj+jCP(VptPHoIIla_B6fIPtM#^U-BYL4-t4AxV?PtYk!cdcRD zcQ0^=VlOtzrQTqwXCXj*2?)eC`n!|-W_zx@QCa?5>{%!eip}yZGB58lJ%KZwEQtDI zBwxc>iuMCr1RbB{ESUypR+ms2!jxic78FkQGXtYcgOkKZ1Tx7iG4YhYIs2Z5{wvqn zK0>{sP9wLk{#dq)w#(;+$DIr7`&lDO=Ui$Y^wgZxGQ|Xii>T?2Ibu%tbh&%FQee}U zYTqp5GG<=dZvQO!@mTC4@~HTb`8>+OUf9V5ztoU*PX34|33U|7EALfIpB{6yz*n?o zE2keJd7scFN#vyW%}G{*A24)~^#?8V*nZZrYN?wbAKd}@7}OdGKYKH=n|KnDyN5-^ zxkpgW3r_5qWFz^DRx4zeG5>^85wj?sA>mVTbeO1Ep-B5?IZfkAl}e>33z(%%2IqR) z{pk#Nt3pXReO&>!l?^|O*}}k&KNgq`##LNi-4A?yrw7Dg|F}GJ=vH9NzBvGv9Dhkd zFw{}0HLRjh)wrlXtFy9GOBM6Y=pM^$p@LBY5MJDH$WW=F<1f<_Bq{oQJC2DY^x8~3wz354^W%p;yXd+@#kq^NfSw^K>ouBW{ItfzNr?fyEZfT?K zhf;H&^=g>}s7wyE#A;igS97>G1Zgiq1+C*d%ge?4Y%}^|L4_M#kJW$vH zuG4+d1D|=EIzEjvHH+N3kW`9k#E$GJntEjNn(%#|A ze+rLP9XZ=yn@~^rl=}ALO5}MkBHAmNN9_Ic2GFzndjsC;iR*P1R;d93ieBs-6zNb5 z(JJHCgIJEBAK1)tx2QM~pRbQFdhU$ZIEn7{Ijbol!0&avhXaQ17-wRDo)sWCj&3WvVi za;Ntm#cy6fHAYWGEC=Fswfe6*6a*-xb6|8R=DhGJn0e$sG#Mv)oCC|arT6v;{}Qd` zfqxu$r9GK<1dBTxdBc!foUj z@Xb|*&=oR};f8pZuoLqt$WbD{)z(?rWU0-p&#aCl+(|a)&<--)%KzAOHMn3D@WT2` zW`cR1swX`T^cd_44vr^SLaJFSF+x8N5-7ukFLdrYf21q$JkG z@CL-Ni~Y4dtopqbH)B3zI9{ z^~wJJB$^7$Q)%Dodm$BBNfS~mJmC6Y_{@rb<~qb;6KA4YQ+3{A2zQFDgFcWxl)3-n zp=Mki7T%nBkF9HW&?L$Yv1I!z;kN94PwTgkBvN@eyqqP8FN+s zP_KnyMKKyiBwW8*aumys#s4P@Inh*gUH4#jMfRg7XHoLX^DzO~d=NOrKH08a|vc_wVH zv4yBAEVKLgUhe=adX~-#>qR0z69tI%JMT)5w7~-AB2pC@VEW*k}jf8nS ziw6Rxc+9qA1t=64S>q{0f8KWm!DDTdRWiI>`83LYC|35eAN|I~ah>!h?5s)_R)Z7B zH=|~!<`1X%(l8k7BTB$59n}=GQv5|v9d}zJS4jIT+%dQmtQ}z2;U!jAT`#83kZ3 zhQY2!PNem~?hP{yM-=MMxULbR5>uaPQqAC*Pqs5Wny`%V1ktieKM8z;vo{WSpyf6H z%UzQNw?pTgPm+d>*Ds6DaE+ha^^0v!kKs2-bJZzHbG+tbiyxmqSEmCY+$HNA2dOU=nU3hf5*I*B~@Nnci!t$kGrR> zhk~VC2?yDk1jT}G{%pE>lE{Ua?#4a{I-ME9?Ms=ZQ=ULDDMNybs%;!pN{eksIcu^v 
zK7GS#zc$f-CWnfnk#{M$b1FxCwlhM@MRKSg>=mj(!7muTAkzm$ucz0r&E#&*M6!}d za;a;mNm=1jZ0kxjjH2Vz$2qU?jH2S^zkF-f$mF;Gyw!IfIDZU8;H|{+FI&Cu_+j;= zscRThRgie#;`U0jCHyZe9Okc1=j^~sBjntlkXjJE0rZY1dQxr0?KXSF9F7vDz#M%F2>_X5mied z`9A@BNlsn|=IobB@M)K`up>ci*@hsbdZ59;dNlKrffL=vq1MS}T|JDv1?Eq9uQ6Gl z?4Ohe!wUpYOtkQ1860O;!&r+jD^-%KGqozA;;x@UK~xy(+#l6Ou}rvbd_nbX`Plg= z1_-9he@%r$nU=NK`drx!wY8g({@gj#14g{+#-;x(nszQ;#L#8-6zUWgH(ld*Juc5OkN4|5C=*^}-%b{9yYg@!PR zquJfVq1Y%t3sgKk{@6gy;fkUzvc=e2b;16OmIeO)ZJ$VIz28`?Ta}0}KB>|-{U}pI@%J6R#P4sm{zp%CU zF8+>9NHmQsfa)mHkgC^Um5Bb>|H5+e-`<5s#;XjF82)bXPzOWrqVvpQWC6#UphM># zCq{L{p>PzJ37_m5{|j~=3PIA4wq1;pmR*dV*cBwFa-ZwEz$lCnXyFYU=&P3N$9zGyl1b684P@NTx-fbXeq%)rvWT22rd6fQVd4qjx zu-XsV8bbZ{r(BT=ygdwcB~h@_I{Whj=;1 zq`?C02X5vV#PQ>Hs=;5~^l!Y^xAI3Ckpn7C3VRvq~TG&mkp^7=TD z+c+7P1~XwMP4RY}as6g{*0r`e%BzUyw>%EuOZvXsWHTJup$?Ze+TJRIwx$js>Z`MN z;FW1JCN>EQvp(PSsf)C)tsJT4suf4Y8mICC+9b@-rivRp$WKtNH(e*geJOae1G1rw zHN94@0tecYp5LWDPAWUVu1WldGDWww-<|Hnv5L^v)jLU$#U|!RiIN#e)c&nqZjb&E zUJTa|t9pM1$2VjsgHwR}H-;HkD3k75e_^XrOoTBw`pa~UF2hH%U;c@XkfE<1>}dYf z4Vc1)Y>{lVF9d450jN-ukdFyaj%;dR&x+yDFOu>si$jhyE!^*|EIb}YRN z{}STgAdvzEPV6kGO3Y>+u}X-v3GjcmIvrxvTa!ei!;7!zLb-pK2NNvYRCnTA{Q?jf zc4odK)e7p*nkZf2#HTwhM^S!R53^c!LE%#OgiyfkILD_GSfDz!Z!!>;naWS(ZEFzY)fmf{zQRU{*5)9-j^!NIdZ?mxTN z^RTTuksZ+rm5rsvu(X~#x$;ST=t!qFloyFs|5wR%e>LGGaUcPb&>}60(gY+(QB-;l zAXUHwr8f~o>4JnN5_*v)3K&2lQY;kdU=TwL1{%8LHZ-OF@Pc(hC9Wmp z7VzbIO+553`)F2clu^7~h;Ef;5QT^uF5e%DxH?~7E{u3;n(X?$P&W4Z9QeGq2cG+} zodLbXwCJXrwA?L_jA)DHT;-^26?WDL7I?)hsL569;C#i4JpgwuK&P3jlfdyZ3C69( zd6BQs6xDPoS5~ww;#%w~It6GhxzcZGdOuH>$XurE!xrwhZcfl}0jbRUcP z%X}NZY?oSSa%0Z_bv!iW4-5yB#E@kY_cM{{DF*{5$=$ZhLZK zTFA&4zvU~&d#%)iYqet3w8#KTN@({r_xctNt?Y>{E4VUm3-hnY^LR~l=9b&aKz<_E zoYz}Gk1tagA-h}Me_LHJDer)3@95rMe*0)^)yw9i!lYFE8zaug^7TKrXZTBYpVhe3fKKy(`UU=;JH zm3c&{nNx7z%Ww>=x+1?N-(&qFwBE(e#_M}0V~(k=J1Bms$v9!vxc%(p^`TjAKP6;uo?Sqapa&vENj?iU3$X^0DW5f`=kzJO zy-uJRCU2;Edh%n^NmKm(=jtw%C(-IK3MbXoOfhygs!5|ukh%w(I26=EuOLKN{bNqRjF4&Mw9 
zS(gqZW4y0U)XDa^>67r~#9FDqzRtdNASBz3PpOmpe35e)=RCEY8d8BYK{4dRlIMcE z`qP9ngG9GEZnQRY+I&|c%kW2z>a4S0YGdD-2rUnaXWUU3{MoQRC(h%f_?u6XRvMN0 zBk`(EsWSMHR}(_-Mc#Op-RiWa`EtwR0p~86T8qi_;b&L8S)u~wl&a}(dQR^`BaVLi zX(vN2Q(i;CvW}D0I3Gjr6n=!W%v0SGX;9(yuc;Cf$l>w`i+SkYSCYBI1W zw(E>FsS6+b?l9>cK2+$G=WY&B_Pr1lfD%>Zz(%b#H0><(I>+&R5x#J5@2L-ijZRWq zbfSHpmSW-{b+5RlKX3QJk0rtBUBpd4a#`-fZKum8oH;Gi< zCQaU_kZcvm+V~?Rg!y8Sh1^M*VQga~z0lv{RxVcMnOHAV374`>dWjcAqmokR#XI_v z^V8kRT^%L^^HLk6tj$^c`JYB7JKK2}m*fO8S0*k~>(n%N)zJPGCeN%ygAW) ze@Qp}1Tk^e@CR?vLiHJ`6Me46VlF;3Qd*DAxl#YYyA}J`TDze4KPmKCRg=D5LGbeY|2|@RyE~#p0{T0cWU( zzfw~{BfjQAuYQRKC-7^Z+H==m%~4i~GvF@_1CMS{%;k?aISnwU=waO-abD2vPJ`R@ z-4`JuV5D1wwC(!chtuz9Z@PvY7n{<5$vrH4E`xR))lFzcwTG=p$3Z{q+YZ?>C}Fm* zbk}X8S_SQYE||1w5jUyL)8HDlYtQ~eQ}8x3)*wy z3uytk`SnFS4)FZWG2GG$p|j1enaamg*uQuwYGXR??$>1>0&K7CqL1h;Qy~iN9ig+@ zVD>FjJVoy&-0;4EK9M+b+ zC6>SZ#zkQ}*q)LDm^OnWK_xrz#+4e@l9!!WW;Ul-MhPL8 zPjdp%@+ZLd{1kiqnS_I}nc<4>-@ zudqhmE)dBl%4^AMk`Z*n{^I?vHYj7}RuA?G|4R1Dr_dtsVmZgAvtmwxOo#{_xQkrF zl6oHo{}mX_Swi@wJ$Y&wqos0m+6O~-`_ transports). +connected via `amqp`_ (or other supported `kombu`_ transports). .. note:: @@ -18,6 +17,7 @@ connected via `amqp`_ (or other supported `kombu production ready. .. _blueprint page: https://blueprints.launchpad.net/taskflow?searchtext=wbe +.. _kombu: http://kombu.readthedocs.org/ Terminology ----------- From cdfae5324446a7cb69563e0201460b63f3c0b368 Mon Sep 17 00:00:00 2001 From: liuqing Date: Tue, 1 Jul 2014 15:50:20 +0800 Subject: [PATCH 140/188] Fix the section name in CONTRIBUTING.rst The name of the section "If you're developer, start here" has changed in the wiki, now is "If you're a developer". This commit updates it to correctly refer to the proper section. 
Change-Id: I6e84c6ce670680ae1146f4ba5ff529bc6f8d72ab --- CONTRIBUTING.rst | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst index 84848e7f..988f2856 100644 --- a/CONTRIBUTING.rst +++ b/CONTRIBUTING.rst @@ -1,8 +1,7 @@ If you would like to contribute to the development of OpenStack, -you must follow the steps in the "If you're a developer, start here" -section of this page: +you must follow the steps documented at: - http://wiki.openstack.org/HowToContribute + http://wiki.openstack.org/HowToContribute#If_you.27re_a_developer Once those steps have been completed, changes to OpenStack should be submitted for review via the Gerrit tool, following From cb42388864d03fd1a1ebf334c896ae599caf773b Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Wed, 2 Jul 2014 14:16:50 -0700 Subject: [PATCH 141/188] Update oslo incubator code to commit 0b02fc0f36814968 gettextutils - de4adbc4 (pep8: fixed multiple violations) importutils - 1173e46981a6d (Remove ValueError when accessing sys.modules) jsonutils - 0d7296f6885 (Add kwargs to jsonutils.load(s) functions) network_utils - 9502a21bfcc (Use the standard python logging in network_utils) - e433899db279743 (Enable configuring tcp keepalive) - 29a95c2ce11f9 (Avoid raising index error when no host) strutils - cb5a804bd4 (Move `mask_password` to strutils) timeutils - 250cd88c4040c1f9 (Fixed a new pep8 error and a small typo) Change-Id: I8a4265127d1abf90f0a83e31723490bd0c25e8ae --- taskflow/engines/helpers.py | 14 +++- taskflow/openstack/common/gettextutils.py | 4 +- taskflow/openstack/common/importutils.py | 4 +- taskflow/openstack/common/jsonutils.py | 8 +-- taskflow/openstack/common/network_utils.py | 77 ++++++++++++++++++---- taskflow/openstack/common/strutils.py | 56 ++++++++++++++++ taskflow/openstack/common/timeutils.py | 4 +- 7 files changed, 143 insertions(+), 24 deletions(-) diff --git a/taskflow/engines/helpers.py b/taskflow/engines/helpers.py index 0b13044b..c200df8a 
100644 --- a/taskflow/engines/helpers.py +++ b/taskflow/engines/helpers.py @@ -31,15 +31,23 @@ from taskflow.utils import reflection ENGINES_NAMESPACE = 'taskflow.engines' +def _fetch_factory(factory_name): + try: + return importutils.import_class(factory_name) + except (ImportError, ValueError) as e: + raise ImportError("Could not import factory %r: %s" + % (factory_name, e)) + + def _fetch_validate_factory(flow_factory): if isinstance(flow_factory, six.string_types): - factory_fun = importutils.import_class(flow_factory) + factory_fun = _fetch_factory(flow_factory) factory_name = flow_factory else: factory_fun = flow_factory factory_name = reflection.get_callable_name(flow_factory) try: - reimported = importutils.import_class(factory_name) + reimported = _fetch_factory(factory_name) assert reimported == factory_fun except (ImportError, AssertionError): raise ValueError('Flow factory %r is not reimportable by name %s' @@ -242,7 +250,7 @@ def flow_from_detail(flow_detail): % (flow_detail.name, flow_detail.uuid)) try: - factory_fun = importutils.import_class(factory_data['name']) + factory_fun = _fetch_factory(factory_data['name']) except (KeyError, ImportError): raise ImportError('Could not import factory for flow %s %s' % (flow_detail.name, flow_detail.uuid)) diff --git a/taskflow/openstack/common/gettextutils.py b/taskflow/openstack/common/gettextutils.py index ad9dd71b..3fff6e30 100644 --- a/taskflow/openstack/common/gettextutils.py +++ b/taskflow/openstack/common/gettextutils.py @@ -373,8 +373,8 @@ def get_available_languages(domain): 'zh_Hant_HK': 'zh_HK', 'zh_Hant': 'zh_TW', 'fil': 'tl_PH'} - for (locale, alias) in six.iteritems(aliases): - if locale in language_list and alias not in language_list: + for (locale_, alias) in six.iteritems(aliases): + if locale_ in language_list and alias not in language_list: language_list.append(alias) _AVAILABLE_LANGUAGES[domain] = language_list diff --git a/taskflow/openstack/common/importutils.py 
b/taskflow/openstack/common/importutils.py index 8d412cd4..1e0e703f 100644 --- a/taskflow/openstack/common/importutils.py +++ b/taskflow/openstack/common/importutils.py @@ -24,10 +24,10 @@ import traceback def import_class(import_str): """Returns a class from a string including module and class.""" mod_str, _sep, class_str = import_str.rpartition('.') + __import__(mod_str) try: - __import__(mod_str) return getattr(sys.modules[mod_str], class_str) - except (ValueError, AttributeError): + except AttributeError: raise ImportError('Class %s cannot be found (%s)' % (class_str, traceback.format_exception(*sys.exc_info()))) diff --git a/taskflow/openstack/common/jsonutils.py b/taskflow/openstack/common/jsonutils.py index e3855ab1..acbf65d2 100644 --- a/taskflow/openstack/common/jsonutils.py +++ b/taskflow/openstack/common/jsonutils.py @@ -168,12 +168,12 @@ def dumps(value, default=to_primitive, **kwargs): return json.dumps(value, default=default, **kwargs) -def loads(s, encoding='utf-8'): - return json.loads(strutils.safe_decode(s, encoding)) +def loads(s, encoding='utf-8', **kwargs): + return json.loads(strutils.safe_decode(s, encoding), **kwargs) -def load(fp, encoding='utf-8'): - return json.load(codecs.getreader(encoding)(fp)) +def load(fp, encoding='utf-8', **kwargs): + return json.load(codecs.getreader(encoding)(fp), **kwargs) try: diff --git a/taskflow/openstack/common/network_utils.py b/taskflow/openstack/common/network_utils.py index fa812b29..2729c3fb 100644 --- a/taskflow/openstack/common/network_utils.py +++ b/taskflow/openstack/common/network_utils.py @@ -17,18 +17,15 @@ Network-related utilities and helper functions. 
""" -# TODO(jd) Use six.moves once -# https://bitbucket.org/gutworth/six/pull-request/28 -# is merged -try: - import urllib.parse - SplitResult = urllib.parse.SplitResult -except ImportError: - import urlparse - SplitResult = urlparse.SplitResult +import logging +import socket from six.moves.urllib import parse +from taskflow.openstack.common.gettextutils import _LW + +LOG = logging.getLogger(__name__) + def parse_host_port(address, default_port=None): """Interpret a string as a host:port pair. @@ -52,8 +49,12 @@ def parse_host_port(address, default_port=None): ('::1', 1234) >>> parse_host_port('2001:db8:85a3::8a2e:370:7334', default_port=1234) ('2001:db8:85a3::8a2e:370:7334', 1234) - + >>> parse_host_port(None) + (None, None) """ + if not address: + return (None, None) + if address[0] == '[': # Escaped ipv6 _host, _port = address[1:].split(']') @@ -74,7 +75,7 @@ def parse_host_port(address, default_port=None): return (host, None if port is None else int(port)) -class ModifiedSplitResult(SplitResult): +class ModifiedSplitResult(parse.SplitResult): """Split results class for urlsplit.""" # NOTE(dims): The functions below are needed for Python 2.6.x. @@ -106,3 +107,57 @@ def urlsplit(url, scheme='', allow_fragments=True): path, query = path.split('?', 1) return ModifiedSplitResult(scheme, netloc, path, query, fragment) + + +def set_tcp_keepalive(sock, tcp_keepalive=True, + tcp_keepidle=None, + tcp_keepalive_interval=None, + tcp_keepalive_count=None): + """Set values for tcp keepalive parameters + + This function configures tcp keepalive parameters if users wish to do + so. + + :param tcp_keepalive: Boolean, turn on or off tcp_keepalive. If users are + not sure, this should be True, and default values will be used. 
+ + :param tcp_keepidle: time to wait before starting to send keepalive probes + :param tcp_keepalive_interval: time between successive probes, once the + initial wait time is over + :param tcp_keepalive_count: number of probes to send before the connection + is killed + """ + + # NOTE(praneshp): Despite keepalive being a tcp concept, the level is + # still SOL_SOCKET. This is a quirk. + if isinstance(tcp_keepalive, bool): + sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, tcp_keepalive) + else: + raise TypeError("tcp_keepalive must be a boolean") + + if not tcp_keepalive: + return + + # These options aren't available in the OS X version of eventlet, + # Idle + Count * Interval effectively gives you the total timeout. + if tcp_keepidle is not None: + if hasattr(socket, 'TCP_KEEPIDLE'): + sock.setsockopt(socket.IPPROTO_TCP, + socket.TCP_KEEPIDLE, + tcp_keepidle) + else: + LOG.warning(_LW('tcp_keepidle not available on your system')) + if tcp_keepalive_interval is not None: + if hasattr(socket, 'TCP_KEEPINTVL'): + sock.setsockopt(socket.IPPROTO_TCP, + socket.TCP_KEEPINTVL, + tcp_keepalive_interval) + else: + LOG.warning(_LW('tcp_keepintvl not available on your system')) + if tcp_keepalive_count is not None: + if hasattr(socket, 'TCP_KEEPCNT'): + sock.setsockopt(socket.IPPROTO_TCP, + socket.TCP_KEEPCNT, + tcp_keepalive_count) + else: + LOG.warning(_LW('tcp_keepknt not available on your system')) diff --git a/taskflow/openstack/common/strutils.py b/taskflow/openstack/common/strutils.py index 0c8c6e1f..660704e3 100644 --- a/taskflow/openstack/common/strutils.py +++ b/taskflow/openstack/common/strutils.py @@ -50,6 +50,28 @@ SLUGIFY_STRIP_RE = re.compile(r"[^\w\s-]") SLUGIFY_HYPHENATE_RE = re.compile(r"[-\s]+") +# NOTE(flaper87): The following 3 globals are used by `mask_password` +_SANITIZE_KEYS = ['adminPass', 'admin_pass', 'password', 'admin_password'] + +# NOTE(ldbragst): Let's build a list of regex objects using the list of +# _SANITIZE_KEYS we already have. 
This way, we only have to add the new key +# to the list of _SANITIZE_KEYS and we can generate regular expressions +# for XML and JSON automatically. +_SANITIZE_PATTERNS = [] +_FORMAT_PATTERNS = [r'(%(key)s\s*[=]\s*[\"\']).*?([\"\'])', + r'(<%(key)s>).*?()', + r'([\"\']%(key)s[\"\']\s*:\s*[\"\']).*?([\"\'])', + r'([\'"].*?%(key)s[\'"]\s*:\s*u?[\'"]).*?([\'"])', + r'([\'"].*?%(key)s[\'"]\s*,\s*\'--?[A-z]+\'\s*,\s*u?[\'"])' + '.*?([\'"])', + r'(%(key)s\s*--?[A-z]+\s*)\S+(\s*)'] + +for key in _SANITIZE_KEYS: + for pattern in _FORMAT_PATTERNS: + reg_ex = re.compile(pattern % {'key': key}, re.DOTALL) + _SANITIZE_PATTERNS.append(reg_ex) + + def int_from_bool_as_string(subject): """Interpret a string as a boolean and return either 1 or 0. @@ -237,3 +259,37 @@ def to_slug(value, incoming=None, errors="strict"): "ascii", "ignore").decode("ascii") value = SLUGIFY_STRIP_RE.sub("", value).strip().lower() return SLUGIFY_HYPHENATE_RE.sub("-", value) + + +def mask_password(message, secret="***"): + """Replace password with 'secret' in message. + + :param message: The string which includes security information. + :param secret: value with which to replace passwords. + :returns: The unicode value of message with the password fields masked. + + For example: + + >>> mask_password("'adminPass' : 'aaaaa'") + "'adminPass' : '***'" + >>> mask_password("'admin_pass' : 'aaaaa'") + "'admin_pass' : '***'" + >>> mask_password('"password" : "aaaaa"') + '"password" : "***"' + >>> mask_password("'original_password' : 'aaaaa'") + "'original_password' : '***'" + >>> mask_password("u'original_password' : u'aaaaa'") + "u'original_password' : u'***'" + """ + message = six.text_type(message) + + # NOTE(ldbragst): Check to see if anything in message contains any key + # specified in _SANITIZE_KEYS, if not then just return the message since + # we don't have to mask any passwords. 
+ if not any(key in message for key in _SANITIZE_KEYS): + return message + + secret = r'\g<1>' + secret + r'\g<2>' + for pattern in _SANITIZE_PATTERNS: + message = re.sub(pattern, secret, message) + return message diff --git a/taskflow/openstack/common/timeutils.py b/taskflow/openstack/common/timeutils.py index 52688a02..c48da95f 100644 --- a/taskflow/openstack/common/timeutils.py +++ b/taskflow/openstack/common/timeutils.py @@ -114,7 +114,7 @@ def utcnow(): def iso8601_from_timestamp(timestamp): - """Returns a iso8601 formatted date from timestamp.""" + """Returns an iso8601 formatted date from timestamp.""" return isotime(datetime.datetime.utcfromtimestamp(timestamp)) @@ -134,7 +134,7 @@ def set_time_override(override_time=None): def advance_time_delta(timedelta): """Advance overridden time using a datetime.timedelta.""" - assert(not utcnow.override_time is None) + assert utcnow.override_time is not None try: for dt in utcnow.override_time: dt += timedelta From dd578418070f7cd4435fcd6523122e09561f11ec Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Wed, 2 Jul 2014 14:51:49 -0700 Subject: [PATCH 142/188] Remove need to do special exception catching in parse_uri The upstream oslo incubator code was fixed to not throw a index error when parts of the uri are missing, so we can remove our modification that handles these errors and just rely on the fixed version. 
Change-Id: I01c6a647858ca45aeb4685a68cb3e28b575e1e05 --- taskflow/utils/misc.py | 29 +++++++++-------------------- 1 file changed, 9 insertions(+), 20 deletions(-) diff --git a/taskflow/utils/misc.py b/taskflow/utils/misc.py index eea56d6d..3be02c8a 100644 --- a/taskflow/utils/misc.py +++ b/taskflow/utils/misc.py @@ -106,26 +106,15 @@ def parse_uri(uri, query_duplicates=False): query_params = tmp_query_params else: query_params = {} - uri_pieces = { - 'scheme': parsed.scheme, - 'username': parsed.username, - 'password': parsed.password, - 'fragment': parsed.fragment, - 'path': parsed.path, - 'params': query_params, - } - for k in ('hostname', 'port'): - try: - uri_pieces[k] = getattr(parsed, k) - except (IndexError, ValueError): - # The underlying network_utils throws when the host string is empty - # which it may be in cases where it is not provided. - # - # NOTE(harlowja): when https://review.openstack.org/#/c/86921/ gets - # merged we can just remove this since that error will no longer - # occur. - uri_pieces[k] = None - return AttrDict(**uri_pieces) + return AttrDict( + scheme=parsed.scheme, + username=parsed.username, + password=parsed.password, + fragment=parsed.fragment, + path=parsed.path, + params=query_params, + hostname=parsed.hostname, + port=parsed.port) def binary_encode(text, encoding='utf-8'): From 6ba41082cd54bce13e5795608f5c2634f19abde0 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Thu, 3 Jul 2014 14:51:08 -0700 Subject: [PATCH 143/188] Move the stopwatch tests to test_types Since the stopwatch is a type in the time module we should move the test for it from utils test to the test that is used to test all the various types. 
Change-Id: Id462baaada2784bf2af1da9c8548a2393c1af7fb --- taskflow/tests/unit/test_types.py | 49 +++++++++++++++++++++++++++++-- taskflow/tests/unit/test_utils.py | 43 --------------------------- 2 files changed, 46 insertions(+), 46 deletions(-) diff --git a/taskflow/tests/unit/test_types.py b/taskflow/tests/unit/test_types.py index 5e5b074d..e69cac0d 100644 --- a/taskflow/tests/unit/test_types.py +++ b/taskflow/tests/unit/test_types.py @@ -14,12 +14,14 @@ # License for the specific language governing permissions and limitations # under the License. +import time + import networkx as nx -from taskflow.types import graph -from taskflow.types import tree - from taskflow import test +from taskflow.types import graph +from taskflow.types import time as tt +from taskflow.types import tree class GraphTest(test.TestCase): @@ -113,3 +115,44 @@ class TreeTest(test.TestCase): things = list([n.item for n in root.dfs_iter(include_self=True)]) self.assertEqual(set(['animal', 'reptile', 'mammal', 'horse', 'primate', 'monkey', 'human']), set(things)) + + +class StopWatchUtilsTest(test.TestCase): + def test_no_states(self): + watch = tt.StopWatch() + self.assertRaises(RuntimeError, watch.stop) + self.assertRaises(RuntimeError, watch.resume) + + def test_expiry(self): + watch = tt.StopWatch(0.1) + watch.start() + time.sleep(0.2) + self.assertTrue(watch.expired()) + + def test_no_expiry(self): + watch = tt.StopWatch(0.1) + watch.start() + self.assertFalse(watch.expired()) + + def test_elapsed(self): + watch = tt.StopWatch() + watch.start() + time.sleep(0.2) + # NOTE(harlowja): Allow for a slight variation by using 0.19. 
+ self.assertGreaterEqual(0.19, watch.elapsed()) + + def test_pause_resume(self): + watch = tt.StopWatch() + watch.start() + time.sleep(0.05) + watch.stop() + elapsed = watch.elapsed() + time.sleep(0.05) + self.assertAlmostEqual(elapsed, watch.elapsed()) + watch.resume() + self.assertNotEqual(elapsed, watch.elapsed()) + + def test_context_manager(self): + with tt.StopWatch() as watch: + time.sleep(0.05) + self.assertGreater(0.01, watch.elapsed()) diff --git a/taskflow/tests/unit/test_utils.py b/taskflow/tests/unit/test_utils.py index 2c2dac2d..22da1b8b 100644 --- a/taskflow/tests/unit/test_utils.py +++ b/taskflow/tests/unit/test_utils.py @@ -18,12 +18,10 @@ import collections import functools import inspect import sys -import time from taskflow import states from taskflow import test from taskflow.tests import utils as test_utils -from taskflow.types import time as tt from taskflow.utils import lock_utils from taskflow.utils import misc from taskflow.utils import reflection @@ -495,47 +493,6 @@ class IsValidAttributeNameTestCase(test.TestCase): self.assertFalse(misc.is_valid_attribute_name('mañana')) -class StopWatchUtilsTest(test.TestCase): - def test_no_states(self): - watch = tt.StopWatch() - self.assertRaises(RuntimeError, watch.stop) - self.assertRaises(RuntimeError, watch.resume) - - def test_expiry(self): - watch = tt.StopWatch(0.1) - watch.start() - time.sleep(0.2) - self.assertTrue(watch.expired()) - - def test_no_expiry(self): - watch = tt.StopWatch(0.1) - watch.start() - self.assertFalse(watch.expired()) - - def test_elapsed(self): - watch = tt.StopWatch() - watch.start() - time.sleep(0.2) - # NOTE(harlowja): Allow for a slight variation by using 0.19. 
- self.assertGreaterEqual(0.19, watch.elapsed()) - - def test_pause_resume(self): - watch = tt.StopWatch() - watch.start() - time.sleep(0.05) - watch.stop() - elapsed = watch.elapsed() - time.sleep(0.05) - self.assertAlmostEqual(elapsed, watch.elapsed()) - watch.resume() - self.assertNotEqual(elapsed, watch.elapsed()) - - def test_context_manager(self): - with tt.StopWatch() as watch: - time.sleep(0.05) - self.assertGreater(0.01, watch.elapsed()) - - class UriParseTest(test.TestCase): def test_parse(self): url = "zookeeper://192.168.0.1:2181/a/b/?c=d" From c5242a696c4308316ad17afe12cbfbb1e58224f8 Mon Sep 17 00:00:00 2001 From: Christian Berendt Date: Fri, 4 Jul 2014 13:07:37 +0200 Subject: [PATCH 144/188] Raise NotImplementedError instead of NotImplemented Change-Id: I6ba5e21c801f724f216298d383a523f7dd865d6a --- taskflow/persistence/logbook.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/taskflow/persistence/logbook.py b/taskflow/persistence/logbook.py index 884873ea..12c6c996 100644 --- a/taskflow/persistence/logbook.py +++ b/taskflow/persistence/logbook.py @@ -411,7 +411,7 @@ class TaskDetail(AtomDetail): def merge(self, other, deep_copy=False): if not isinstance(other, TaskDetail): - raise NotImplemented("Can only merge with other task details") + raise NotImplementedError("Can only merge with other task details") if other is self: return self super(TaskDetail, self).merge(other, deep_copy=deep_copy) @@ -496,7 +496,8 @@ class RetryDetail(AtomDetail): def merge(self, other, deep_copy=False): if not isinstance(other, RetryDetail): - raise NotImplemented("Can only merge with other retry details") + raise NotImplementedError("Can only merge with other retry " + "details") if other is self: return self super(RetryDetail, self).merge(other, deep_copy=deep_copy) From e26d7e2babc727ec2ac43bc6725ee0c3ebc83bb1 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Fri, 4 Jul 2014 19:21:37 -0700 Subject: [PATCH 145/188] Make the examples documentation 
more relevant Instead of just linking to the examples source tree provide a list of recommended examples (in order of my perceived complexity) that can help new comers view examples in a more organized and relevant manner. Change-Id: I9f6d15fb042e3fe67fd29a9c331fc779c254241c --- doc/source/examples.rst | 105 ++++++++++++++++++++++++++++++++++++---- doc/source/index.rst | 26 +++++++++- 2 files changed, 121 insertions(+), 10 deletions(-) diff --git a/doc/source/examples.rst b/doc/source/examples.rst index c6d2e3ed..1891b425 100644 --- a/doc/source/examples.rst +++ b/doc/source/examples.rst @@ -1,11 +1,98 @@ -======== -Examples -======== +Linear phone calls +================== -While developing TaskFlow the team has worked hard to make sure the concepts -that TaskFlow provides are explained by *relevant* examples. To explore these -please check out the `examples`_ directory in the TaskFlow source tree. If the -examples provided are not satisfactory (or up to your standards) contributions -are welcome and very much appreciated to improve them. +.. literalinclude:: ../../taskflow/examples/simple_linear.py + :language: python + :linenos: + :lines: 16- + :emphasize-lines: 16-28 -.. _examples: http://git.openstack.org/cgit/openstack/taskflow/tree/taskflow/examples +Linear phone calls (reverting) +============================== + +.. literalinclude:: ../../taskflow/examples/reverting_linear.py + :language: python + :linenos: + :lines: 16- + :emphasize-lines: 17-32 + +Building a car +============== + +.. literalinclude:: ../../taskflow/examples/build_a_car.py + :language: python + :linenos: + :lines: 16- + :emphasize-lines: 20-26 + +Task dependencies +================= + +.. literalinclude:: ../../taskflow/examples/graph_flow.py + :language: python + :linenos: + :lines: 16- + :emphasize-lines: 18-31 + +Parallel calculations +===================== + +.. 
literalinclude:: ../../taskflow/examples/calculate_in_parallel.py + :language: python + :linenos: + :lines: 16- + :emphasize-lines: 18-21 + +Parallel pseudo-volume-create +============================= + +.. literalinclude:: ../../taskflow/examples/create_parallel_volume.py + :language: python + :linenos: + :lines: 16- + :emphasize-lines: 21-23 + +Suspended workflow reloaded +=========================== + +.. literalinclude:: ../../taskflow/examples/resume_from_backend.py + :language: python + :linenos: + :lines: 16- + :emphasize-lines: 22-39 + +Resumable vm-pseudo-boot +======================== + +.. literalinclude:: ../../taskflow/examples/resume_vm_boot.py + :language: python + :linenos: + :lines: 16- + :emphasize-lines: 32-34 + +Resumable volume-pseudo-create +============================== + +.. literalinclude:: ../../taskflow/examples/resume_volume_create.py + :language: python + :linenos: + :lines: 16- + :emphasize-lines: 28-30 + +Running engines by iteration +============================ + +.. literalinclude:: ../../taskflow/examples/run_by_iter.py + :language: python + :linenos: + :lines: 16- + :emphasize-lines: 24-27 + +Retry controlling +================= + +.. literalinclude:: ../../taskflow/examples/retry_flow.py + :language: python + :linenos: + :lines: 16- + :emphasize-lines: 17-25 diff --git a/doc/source/index.rst b/doc/source/index.rst index 85b69b06..3e9326b6 100644 --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -34,6 +34,31 @@ Contents workers +Examples +-------- + +While developing TaskFlow the team has worked *hard* to make sure the various +concepts are explained by *relevant* examples. Here are a few selected examples +to get started (ordered by *perceived* complexity): + +.. toctree:: + :maxdepth: 2 + + examples + +To explore more of these examples please check out the `examples`_ directory +in the TaskFlow `source tree`_. + +.. 
note:: + + If the examples provided are not satisfactory (or up to your + standards) contributions are welcome and very much appreciated to help + improve them. The higher the quality and the clearer the examples are the + better and more useful they are for everyone. + +.. _examples: http://git.openstack.org/cgit/openstack/taskflow/tree/taskflow/examples +.. _source tree: http://git.openstack.org/cgit/openstack/taskflow/ + Considerations -------------- @@ -66,7 +91,6 @@ Miscellaneous exceptions states - examples Indices and tables ================== From e54cb21d554940a93423c79733cabaf039c1590b Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Fri, 4 Jul 2014 22:06:02 -0700 Subject: [PATCH 146/188] Use the `state_graph.py` for all states diagrams Switch to using the `state_graph.py` and dot output as the source of all of the state diagrams (this makes it easy for anyone to recreate them by just running the script in the tools directory). Also update the state diagram creator to have engine states as well as retry states and replaces all existing state diagrams with the updated prettified versions. Also adjusts some nits around wording and grammar that were encountered during this updating process. 
Change-Id: Ia783aed6c4136763e1e34cbd0b3e57ffb1109abe --- doc/source/img/engine_states.png | Bin 24631 -> 0 bytes doc/source/img/engine_states.svg | 8 ++ doc/source/img/engine_states.txt | 13 --- doc/source/img/flow_states.png | Bin 25297 -> 0 bytes doc/source/img/flow_states.svg | 8 ++ doc/source/img/retry_states.png | Bin 12571 -> 0 bytes doc/source/img/retry_states.svg | 8 ++ doc/source/img/task_states.png | Bin 10359 -> 0 bytes doc/source/img/task_states.svg | 8 ++ doc/source/states.rst | 99 +++++++++++----------- tools/generate_states.sh | 32 ++++++++ tools/state_graph.py | 137 +++++++++++++++++++++---------- 12 files changed, 205 insertions(+), 108 deletions(-) delete mode 100644 doc/source/img/engine_states.png create mode 100644 doc/source/img/engine_states.svg delete mode 100644 doc/source/img/engine_states.txt delete mode 100644 doc/source/img/flow_states.png create mode 100644 doc/source/img/flow_states.svg delete mode 100644 doc/source/img/retry_states.png create mode 100644 doc/source/img/retry_states.svg delete mode 100644 doc/source/img/task_states.png create mode 100644 doc/source/img/task_states.svg create mode 100755 tools/generate_states.sh mode change 100644 => 100755 tools/state_graph.py diff --git a/doc/source/img/engine_states.png b/doc/source/img/engine_states.png deleted file mode 100644 index 3fc83da326a6e3cd280c2d3f2d374047c8e88405..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 24631 zcmdqJbyU{twmWYCggD(azoxzi&Kw?p(a5kMG#diH(WTd#t7npJSdef!*XDk@?Qfr;e*BFF0B+z~y}F z`&dX0pNOrq_ZZ+q9R2_FgV&moC8}MJ{_+C9WP>BQ#8)BbX2LZiR?R!R2P_z}kreME z8HOsN8ylt5me@C?Tkr4ilJ8c8SAA)(o}uHWm8vwEif#(Vfophgm(AGV^}Xg9Yjo`5bj^adE%g z=l(h9=;+3rnyI4RB&4Ka*C<3h%JdL{F`F?3nx)wT-_ORe>4yget!;0s(&ID6FtW3+ z;rQ-JXlf?(kyKy6@_ZRYWOO&2EJ4(}x~8V)cAA~DbBdtzk2Hy=L`Ktw*TQi%h55EB znD9eBJUL&aUD;i1#9}pKx1fz(9w!^w{IJBCL|?kJ35vcFirb}hllfu`Rch%VKmiUkofvAo_NV6^u<1A 
zSHo<>@s4(t&KtFSN@2GLdsbRniL$h$><`4fz3b*|HAM_Ql~kBnx;Kp7lW4JOe{(Ty9ZOD!TzX?)K`W?^I)em7UzGs%C zqF(P%&n*y#vU|6^;bfbwDYDXLoO8t71D@JPXvHf_KA6czjQe z=3jbtfBl*eBrGBV7dfg{Q>L07xU3lXdE@1fMLApj9u@A_(NTpY-WAkU?XAzuISAn1 zk(6{GqYH0~lw!frBjobTRVU%x@6uRl39ro*+JV0tKg`13* zSM8Ku!O*3LZ7*&<*fbi?=6IXgsHVTcq*ib}JxI4b6YoTiX5%9olh;lnRPv#?xkCLZf?7*V`#~EjVoT?w+~O)bgOS# z73AkX3k|(V#l}|j>67$kN^f5HJqG60ednb?+`b~;^4)8q-0NXakslh&9!*hlI&ObZXpxkZ1Pj2($VgdVpUp~ETAGSV zTYD59Mv##8>B%v&%&-NsPI|)k)SC(u8=EG5@?@t{vWbhQ{s=+ro#%S_@6QRI_s^d{ zVY$LPz_Xp7{}Iig+=1FczOw z%tshk#B1-q?O}x2$w! zL~eEHpD2+j+|9wS6>$81rEfmfcj8|Uq{SlD^QQonpzF`%;)erys>m{z6)mc2yQ#+$ zJ5SEz{1{+2Xgr%B>vFiW0?+7Obv0aF(^{r9gU8W*7cwbyUa`U0h!+6_S{sp*HLh&9 za*5nhA1Qt;EqQx+;bXSoh#ft!U=#g_+5w%?)_f@ry3-r7I5;>|oWj_)dayP7>Lf#n zN{rgVX=Gx{*y7zczYl!*k}T@2Ra^4v)whvy%OjF{nhM*=pL28N4?ALE=_@}x+3)ck z92l@3E;i_Wb<0Dc-T3YB@G#sQH#c_~$5HnKgPzkF%F}QBKMN}-9PI3RnFL&xhvz!t zJUu)roO>z9EsN?Tkw*=UDH)~;fp<|4C^Cpuzn%xR9>esV^>G&FI|IAtBH zb&m@A-jh0MAyop`ir3d1jMs4ZQ%HG9rf^X5Tt5Ex_BLaT#p36zX+v21`oRl1sXlM* zVjgp}_7vwT=OliTfNB{;FD5p5WFShVEU`l`VQ$b9LCO~%#H?diY8ToWSC-ir7%Cq# ze!3CY(Cgb%PA=#a6co#*Z$tD_B_F>a2j^j%`<9v{a2aAgP0_AB z#KgorIzHZ4q~y2zmS0rwv9~q_3twi0=OCKhe{;Ua)y3r++hdW-yi%DUU;NiwyX)(U zxhk>I(TrIO)2OEa3;X=}^K5KvCo3Bn5u#M-Gi?!Et=~HnP_;r?xrwO8ZVDZDk>((@ zBsgKsK_nzFJH?8oV8!*^aR2GV_{Pv6c6d%Q0UFP}3qHVBA49qO8lD}hz5 zuIC;QBzm+akbs{Wlcz}{ zp1i(uytuHs&HJ;D4<}Ct^9$t|Fj7mhGc(slS0@|TqS_}smRJU3qaSm`rlzhgEG$e- zDQan1XlSs;iR}yxrn-!uWIP=bk#Wa3z3I_6YWRYOZ~MECqo4tvI+z{wyzcYhMBx*hIVAMC3t z_lTX=?!_?q(-}yA@YuTVl6?2laAZk^S15BqZ)BWsQF?k~e}DhPgdSYY_7~;p{Dzi2 zAtdbg8}$^LV>&qT4k;;03&*|!R>SNkT;^Y1EAIXr$BVO4CgkSf(S2t(1#qLuC_+RM zdHM2XKAW-POx_HJJJ!~Pb#--iXls+FC$3c0KF5NVM$#XMo<&5^r9*`vzE)9M%5_9H z(P?UY81LTZ)tu;V;-QzNRBj&A7|Fq@|G5!U$T#Nk&_lW>&iUiD?p(MuH*eaJ%`Ytp z`5d|Vc&e#g!-YM|(F%J}0SSF>ntVU0*kyHs#cIDz?BrH!Y*CgRgGLYer|``nG;Pk` z1+c}hdk!p<4=>5srvEOmePG=W5O=W8dL{RmqK~zj8g&w zFA;y{QHDbAv$fD?e4rJgTjSD|z%>t*^0shFloB^DFC#uC7E?5D*23GU57h(NSy}73 z5-u*hxOMyAla5?Oo7|4p+lY*8Ds1u^&c1+ 
zZRUs_2M75aEYcZd=&(8n-8?mNjtBcoE+ zRlO>mdWSjbI!l}VZ9U~miZ25L1o-%687=Zq&=nFmVgH;SIcCIo?M*#R^*yNqRCrg? zqB%<@&Ph=B{{830S>*j0-X62YZceawE6dC49Oga^>HEqdm2>3hDmQaJb8>RN(QnYZ ze}4_t6OTx00ZD|B@-45dY^%AFlauG?=WqWUjwB0w%T;b$&=sn9SC+!Zf#f*e%*-ra z%-2U(cN|c(&*|Z0WroA%)Bf^oox8IPCN6vjtLUkd!3~5xDpDnFl)lM9K1b_!>sDNU zj@?Ly(u}DMJ1_4(y|lFSmh^@)9W!(2sd6%bn!5Vv$VmOsM#m{Mgy4lwF-(CELjx_g zPAN`mYI$wO=vg~+?u&|wO0qGGS6FKco%{GamxLnA1h99*3)7F!ZyXW07#{5wZK~H| zt~+%zVw{F`9AWqgW92-hY3h@sdoqxp!fhU@l$*?B8Jm)lvZpP3nL_v;a`SsvMyfBv z6(g&TSQaEcIeDQv!K8ZQ0sB(CDyCi!DXeUIG;RJrR>3|*VSz0 zH!_*{N6OPO`^@{p-)zxHxJ+bk-Fj49QeXe_wDXF7dyDVr3;;As`v_B0)6JaIfF=8_ zK)b;LO{mNLISRYm+XSqY=K9i#+S)=7-Xl&vkuU>G54$ zTy(jzifiZhSIynOCzv+Vl)ilnt5Fh3>Am%W|7Ey51MH>!)dF%po8-L%2Don(hC77G zm(+@Qd3j@lfI93s2<%LXz2j{Cc?B;JdT@wfjhtwnKAbbG2?)Pf@Wi7>^EmD4l%raD(qDT=J z=V#;WF!c2F6ol8C0i`ZXPg9*tzY6{JM=?Wky>d1fn}pkJx;Yr0i?H_rx9^f}+lTuu zkMELMHgQ;321M*d>Q_0?;N}ke$haE;40B5{FWrV?PLN80qqHX1mk_$aEhOA_*8R_s z@y>$dcPYK`uU?&AUS^JJKiFF2vY&2lI6YpKWTQBO4k!v}^n`@i)pZvjn9q6(;WVd* zroo*%0ZO?y({1bbTUCggo^WpZD!945Gr+MUi%U)xcKj}Tl7|l8vbJV-qSm=P;kk+{ zld1Z?5a!a>{$Sez_%%Kz25zu#nqOP0r_ag$d<*IRqy@(|d6O@^_j_A3*G#epsoyBA zd3=V(=yKj_UqhQ;GFIKJT}H2SL6+;(XJB(e#JKaZ(X}<#O>I4jg6=QR4d^}7E|WNQ zW_1s(9Mq0tT=HNhyR|gAjs}8)$p39!;XnR9KRIpJ=k3PixaXZjFVWn?aJ;A$AoZE_ zBazWy_=f{y&MgVsM+%N)Vtb%WLek$y7XCn)h#OL{eZ^P#M zIZ=1mEwt(s7ejzREMON)_ob|*=qM_3KuJD1KJ?w{y9>o-@N!<#2PDRym)pTrLkUIh z%NG?C_bYt;y4Zyut{i!fp_)NRsK(#l|J%23u(Q6;&1nje!QW)!kH^Z=9&V)lWqBZ9ohDZRF3?jqmOiT8!O`*8vk`x~5ULsifNdPMI^0`T zRh5Fswu+k@f17=TUg5K69i+Ck>kHdK<5iArO-&ipm}fvRvm!sK!{7itQ;CFLHnu(9kb)h%y+KyBbX4Eb9dly{qXkYfm5biwt9P@kRtJ0#Z z`}Fw8Or_j~c~jQrh=-hCe~#AxE*r4e2L3N+&E0@yv^?%QNzQNgN-6geCg#CbzXEno zpRp+|f^g~vx8Al0m*igcQ34IBCO zCo(b;nmj=5XTFSdba#hkj^cwFo0Ff&2WE z8(Rcx?0Djg@iLFQ?=du!k&%7w@88eq%*3DXNi_{!kJFi*oh2GK^NG755eQH3q5=Hh z;vEe&we0)!J1e8=s;VvaIsl60bMS_82P{DKSz21s*3yC>(-QXm`}ZF$R%N_RZvaj| zp0M9PJdBHt-N5+#`EzYuU5gwM%iphofBl263uh}Rf*I-QvCNu=8iB{c`v4UwiM@Kt 
z`ogkbc6D@+3OHo0EeGLJZ+!ykj}oK|ATMogrep8y+uPe6_Io^<1_yaXM7$gQFT$&v zsQ0ef%LN&*vYc0Mvfg|9@(p0A{ryo5)Fz*z8Q>xHS~jXcmGfiGQZC9WE-lrrb~0B} zi*=(;m5q*$4&o!}BJG^y%-gks6%Kb8AXZvhy8X$Ln*_)gmua_(ii#Ed?!NRpSy@@r z=zuO!`=QiSK~l06^vGrf;%uzKL6aX>Kqk%0&)<*E&&y+^rj{tWsEpcl)zRJc!pgZS z4rFaPxbpJy$mqnxygoCRpJNQx5-(o70994Y^{0-UTvy98Lg0hjm0>0_ilJv~ntPOjbypWzQLEiZ=^dc+ver@u0dm6Y36TU=aRZaD;Wb9NqHtC$#c z^uk}i`7!8c@FuLStvfz_3WANWv{E_wlFxSH)vYHcJug|TB}z(4*cy&DY<)IKVeey; z@>)&QYOZqCE2N6nxU7VPhB~>p=z$6exN|H=ffB?rKR*N?A0O4m+S)Ln#(VdIii(O- zQc%$K_U+rvJV98bg33!6gN4X6a5%+>ls8*MCj98MUh;xOoNw6_lLEP{k5 z4VPIt>MYNcP=XYGk+=6$yrNKLR`Bg^k_MkR4HZ}ku@4dA&sNtWpWU$$C;MXB?w}$a3>o5f;9Tu4W zEaoCRzpA1W?f<%Y{H@I}G(6X)jgrzJMcoi}WETGT!P32T=REk=hzoS0@bQ1{7W_9Z zLxBQq`hEJ=aB|P>AF7t;2W~)#UUw{q-c6ts!a4;v3~C4zX&)P#pUMg%1QZnA_Q~Z> zz+W*tbM%_;MY->o4sF!j?5r?w_oLL9m>3%y8-R^anTpl!k>C7Xk_S^9_HKCo`J6Pe z793DWBv*y;q$fK&8=3@EM_4GYU(*+=$snbqq|ngN01bm21LQp`HC6eFSZurTYQt%L zYHF%ETN^%hAqs)6B88j?Ad*y>wWp`& zhjRB1G;zX5znAPQYNFB||6ZAm-^Dh-^!WH#Mn-184Jhfv!~{sXm;r`Rrl7!+wG9jo z{%{iHVrbc&^z9+)Uv%<_aCC5BF489}R?F||>?|lMvNboCQcxhwqe=&LimP=G>z0Fq z!_m>v)vH(i$?@>;3QtB#JrKQj9{k2B{Of%*Yn1JwC9SQkuV23g;q!vR-26O^h^DXY3VQGq`% zFAugYH%VA&=_X)mMn*>9o$t*0(m#Jzs%QUIeKjTwlYY#rs;DsQRy*~)6d@)ihQ54N zEXy_Gby3l3R}x=Gl}F3uWvcz8$&VkX=E&Zw=BfPlM)2L->KA96_P zLJX<5mHV;bXoaj1&g{^Y7+a7|HI0R_Sd zH~!*vf=(K=pV>5xx-E4VTS?sh_%HHaP;vauJ_lR%fBWHdEE` zTc}X9!(PYTy^5xWh-L!ZQV-!w{FS)qX#6WzK&zRVpRWYzh04Ol2E4VMwYB@XDh37y z)0At%;HOkpR%#Val$ql-WaZ|XZ~uUYy*?ZHcdWuB0baO>8szP>)~N}KJCjf3fDY@a$h zdXfd;&hH|DWZ!mcG+LKz@(69Ey{uO&gd6bm>CnIc4ILexqLfr{pPGN$zg3(Nv1HJC z?WHmbiN-t3l8^Mr+Ai+<=T6uxJ-^sCT2G$#^n)xS_W|q0UffVntz^5rH2vmgkL;xC24jofYPEG*8cs(jBN=`*tv`n<{wwcmIV~2mY;W%A>gm|XK1uVg-LRklZ38eXsM_{9wl9xc#Iw);3pD~ zhZBnZ0W;B)_sr;k4ueKUck|}n-d-pvAFF_XCMeo9uC}0Mcs&B7>HqX8)*QG#GRW!v zYtVyUzI@4A$Suc%NsXtz!tMC&ty=}mKE^aDF~QCgY8j}((iN; z-u}UXhtJe4*nlD)Dxe{I?e&h19)8JH_Id;_9~K|h_9heFbQ=`Qfq{XeR;Xp2ot=-) 
zoTsIw#groxaPU8=k^W#$X0&{?ziC|T^Lt@_SSnOnXc_zwj8moKGxv$0HC{f4xS|Crf$kMGoQ$a0CQ)9e0u~TxNf5vJzUlY71vV)!n8M1E=ZJ+N&!u84|? z%E_6BhyC%v@%v(mjFMnY_yOA5q*ty0;oEZgT49}}RR{%T7~DO8o@3+VVQ-XzT=}Zw z`uJN^!5_Uu%FE2k+WiGL_{k7;r(In+W~cj~u!hrQdCSX|Ix3CQgJeDH2BZ%FfZn|6 z&px=O|F4QF-t&~}Is%ab%y|p*BO?MKND^xqRBSbXM_fmz2CapgB$Vfh-u88JD9M74 z7aH&kn}cvQH8lsrF$BrJ3ttgECoUK-gYI+|mlmnuuZqczQ{;i7w}>GnaoZ1*y9O)t zP%oJ?@q3wqR;e)%e&K%6=pLf8cz^s-yN`76OV2A|F0?;a0GLSvUukqB6&}3oyX}Ar zm56O5P;3^KF80*-8fS30&@vxa-F(l8u_Y_Hc@e&f|6^g$bE6$vBFjJTTO$kl1LETK zzkd`b@>mXrQi|#AZT9${l*-c9cpvhfni?3~@SP0l^9Bz*?1VefJ+uTwuJh-g?qFO( zJU9nIl>CbqFIM>+dq_x}Ih}|8KI^c**q^g6#-?9yWob!DMrP%tPe>TYH{tdq2ng)V z)Rck2vw(mA$aOF{`m1@ z*+P0~Kd5elZ2r}30$6|;tshEX5kb7~`Sk;Qjt|0!IHot|I*?B5(|4e@5iqHFfrZM# z!h&>y)(kO$O&UN;XVB5DtgL>cPh>hCCKTpXc3Cj&z;+v4wTRd;P0GcGRf>C>k| zf`YJN-)NQRm6W_F+5rg_!Xt#NI#*ULiO!&`Gw49T%%EEpYFA$E18sF@ql0a;2khfq zl|0~eGNZPD%s3ugI_~v!DlgG1w-~@;t9?)82_}X58=bFXW6RUi4sdy`t&(7OfHu^s z0nPMwT2OFsZeE_WoE%L$paFocCmR0eLreZs!(y8@p+DGD<5L7`R9i*7}}^I(=N)&eAKq<#WLXO`%lWgnF!L@b%p>o2dozpLJyw#5c z6=7>8LhLdT`}^+h?(y-IAaLo!2DAGY_cpIXlwfzv5t zVkhenP+)!qLcslTg$^;@Xkw^Q<5HNBF#}p#(Ch4M6Oe$Vvo^-69D76$m%*5>Hg1n} z(UHVNjN=0I(ddV|0kz5v6m4K0K0x}B_<#dJ;f2TySk&F^?ZH7-*4E$`(#giNdu%O0 z?F8ZsYB}U#U=ag91x-grUOu)%uD%txGJp8y?(Y2Q*6uDaD$!H7wQnCh0=k-ei}b~) zu5O$QxQ%=58i@EP7L_94XadrJ^^Yrvh5BM?v*4jWL(QQT*7(Y&k})}btI z2dE-~PV~3{7S`L*aEWQDObpp|{J}OlNx$<8275;&NpXN3| zYuoVkaC5U7tzfx%vw3J}sJq){q|AJpQWFpp=7kG(j*isnkpEa7DQDv3{QUK+J9IA; z^o8hHCwHCgq-cyZkJsMXt5>h!)h9MQY@*-fSeWYZ{_^EZ?cVgW*lj2yXZMb|sgA;R zY#*2WBmP43KV;Kaq$|6|4{p~F<}}1Qc4qlzDumR?HG|x8i3g=^y{l~bAy9Az5Lc2ZcO0H zx$1Q3rd8eT3%iZ6kOB{u2$zgZ772@MYjePD%03@s?>wm3kc+E>K*?W~OZ-zNeGh3F z86J|bcP3QQva**hTqrInS^uSf;x%wUwVPb{UG3~?LQBBf2gnOCi2B?&R*~CTLsWj(mTkTXV3plCP9& zXlUqvB4GDT96$;gIk~>aVix2=?6m_$vI+BObTJVrA!tSmPbZohOJHZ>N|Sh789ai3 zO)>=H-n?OjKU7rdVEuv=M^8@=!qC_lm`OEOUpW;NdQbq_`rk0~pDZ1!UX1h{paBaD z3ne8be9Su!AJQ}sw%Okn{Xit-ec%L?8|*S@Lq_c{I9nm)7XZDA5Fg+8;X{mb=USVa 
zgRpEz${3(@GBM?YzQN7y1Tiy{9jbJYWfa3Lr>{MvaV$VH?ju6|aVQpG_DSfBG>WzQ+?|0zIGYP z3O&G`p&?iBL7{ell425GR#BnG8i!Q_v97~|gAgnnGJbXefdt>=q$JOU-Wyz=xP5=o z9rj${k?+?*jpfqU*SBYf7Z#^Pw4x*d+g!48<}Y_d!IAaOc`0^3`gW1u8t$9>^v(HC z9IBG_Z);RoeFdFJAe?K2+h`dI56V*%aG0%i-`vA7YSoaI{sbT!R##g%dD8Re*Oz%7 zeu{=-1s#8pTprf(RMS&B@G`B2iyuEmSH7DV7k3v4>+dL4?075ccG?v}HG2HpSFf{| z$6YDxYxAm^uA6nJ;wNgN=wBjkLhmG^;!ITi_WN4DX<;Isv9|_+5}zrDF}rbE+x&+{ ztF833DZxFUqAp?(ZV86GUF>GSoauCBE`8d&H{_J9D2jT zz|h_Uim$Jam)8-*en5DKmsbkN+n_11ZsQZvQ7*Q97g?J$8VA6llOfp`n11X8Bopc${I!eE9Gov;<6E+M73t>cZ~uKmQLcVcHOjkTo6{3 zzW9u-{xyUfQc|d?sZoX04oq!uhal-dPDTde6O_rWeEWmCz}Wwg0x7M`P&+iWVnmUkXS+}ENCkbCAM6PZjAJPou4Mn=@C0kF0~hR7Sq zj7droa$4w>C&F4Evd>e=18?H_^D93_xX`KzfnxyDaUyW2(rNU198_&3YQVyO2E8{8 z8iOCf>wzBtm0f&lfTsvv{!LSo9u#MZA)LYbkyU$VEPx-yYJOkR#~)^hvAAVGb4j1L22P)nUGy z93rD9CBLoAKC~AVVOIkEO9-xAqW~YOxcGkJ6*O_Bck(e8hL(0{b?pk^hXA`djPN~@ z7`{qMdf3G`sZ;CLw;>JJ0n+8jz~wMH1FNfQ0~n_0PRhUl0XJwU%gA&cy-Lb984b{hY!V>dz$Mr z|Ip!?E-x3T08-PWJQ~^;oDvFvcaj#^KW(IN{R8%0OLvwlZ_;GqhMvH+J zphl1pLQ5d!r&FjFK`s=_u>*!KB-H^R0{ense|&sAXf7zfoRO9F8e^q!1^B48@BPrA z8v&&OF&7~5>j}4DSH2Meum1uN-LgHdAZxr=xIW7^77JB2k@d_(mH=QbvjTjF3Qnsv4H&9SXkyLbF#ABJw2aM@Bge>O-@b* z&0?@fpOTa`Uxg76hwJh%2PAA-S$^CYhAdrVbo5PbP!^48v)ECWI5sw>=J~RS z?)vptMkktYmrwd9{LTj5yZ3Rr<=IeyX8+ijmHT;gCk7}51yZ>x-7iHv;nhrs zK}7jYVWG3V{rcJ(ggRiYAMHT0{Ddo>S%$VVdNwLJGV-dyvuDp_oSk`2Z@?YKym)bg z6=ZMI&iM8H`P9j3XG_?kD9Q&&4J6pM9Q?m~ugoHDPliD(PfbW*I7&!Ku{P1?N}-N( z&2i<0hr6_N2+_uqwI&>hEi4TdmbxGjD_>Id+z=w4Q%|;XK%^|vsoMD7l>}D@BKOfG z3=#m;ObUWm4gM%O0fF=CL>*|mEkMwb$oO38)kNtG_{$)A9v;<&|LHa_Yh2;fp7Mz?*8l%yxEm_wxsU zITz*T_F=uv%lp*Y`W>nrwNXKR{lwtl&i0T#;Y)}N{#}6!U}uhy%jvfUaMRHtlXhP= zk$bT>O#+tkG|?7o5BD-z9lD8ASHgRuQ7Flv)j-0e>{eWgObU)`|JA8C(-UWn#$(9E ze7pcaWJrchpUQ$p4yZa6^TqwRl#3V`7^vwGU#2TgSh_B(&Z5bC2){Se@StU4ncF{p zguHhZJP~c%x^mDN!6Ndut4yu274r>U3H*)H>R9FdZKU+TKU~NhQ!fC+#5*w1! 
z?<&#b3#N3BC?d+v|EqsK8j3N!tsahj$)NTuY)sJ3u8K?l=Z{{&Rt>1-Po6v}KT{ID z$Jbm_^wJ(5Q3vv!-{8M`Nw3PI-(HFnvY%YtQQhpPqEp6wG>?nWQ41mUMMcF$`GBk~ zCN9qPKYRh!<;%1%uEKV(Z&ADUZUFu02@rgkn$UY^m-bf!<Pu7>CF;{X2c(FJ$H&^syBAw$<%}r6N2<7ncgV>kuplXR5xwzSN`(X62&gbGRHc%gf6F zG!Bah2z(tHQrB^V7XvN&Xa<^*#M8?OU!MilpPqQO5}jK2Pq%@);8g>r){$PS_f7&N zJAUyh8HP}oT;hM+5OO4X8YH3b%9wTNjd$MxHDkN>Y0kmtq5*Vws8@6R-DdrvCD4e% zp&-tFe1S>^pPul${eMRR35gFAoToS@-?*_=>9AgIEpFTLXe~x?sBy4*aAAh# zeK|g2xLmchxbKkuAuI~?B75rwvMbvl0Yn>K91Jj4>PUxVfqGAdB zE4&M1V`DNfaVKB*nE?|3V^10W3h>*VXV)$caRaI}gsB2RSb#~UJs^P%b~hjmK8V$G zxweAp_0gD9^v?D4+S*#!4Z&DjAPT>F<>w1OtL|aPHJKgG8R)u4PXKmIQC)m#5V5#2 zYJrAWFoSM7(u$HTm6VlvNV>XHgdklUcQ%pX`UNa3?HU(rh+g>kfKa|UGc(f`N*om% zi_#+>nwsK}@-D}y>rE+G2U$YCeuf{?5`}7@=(|u^SXq5pbV^H0qf>ZkWnuBQqy%yn zP@a$Uz@3FYGg)jP0XGb+q|F{2bC8a)FJCqZ)va~ofCScpHFM*m&X*!WjEp}ZWDKF8 z5TKGlL01U1-`N=eUzC^chIbA4sE6)s;m?PXkeh-_1OPMXHWL-C3O!gA{m`&d=4=9{t|yHgD3>3}Cnm zb_zU0ptgXKKmp}(&h+SlnpqEuBIwj#zY1`2DnWY_{xYzSbev7MzW>@x18qGz5i$&ds1ptN8Hx7pTk-c^`k(A zdy&`p{kut)5`(Js=TBW-Fz{q%dRb2W_$ zU3wRbjwnzlR0Fv38G6)v>JR^=PF8QoE_^C^bR#qlkq=qe13+MwD_0QJl&El%m?MBb z@IN(#F#Ud>hY;xd)79m#rI2bXbHPW9Uj-M(@ATVdu$pgCxx}KAU>zIb`2|)WbBHU> zVL&vYeL@p0AUz{~h_a0q828WOW@TriREEKD^j9KwOIWxzXg}oG2jAoMoXyzxSN$&K z?7Yq9v;Is$?3kOL{`IEz)141BA*gHX&38`O4hE0|HUtdq0Y`LFMls0UADH|8@;RW6 z41@FmKn4aH7>_s+^JqcnxCK%)kH4Bjr-1^`?kMcZqFVymW2 z@Pk0}CSc`N zQ_iX(KXQ2f?*-%kbyfp_$M0}QnGK*=&|OnpC(PUG0n`H6A8{ix0_FuFl+S9T_JZkn z)kc>@KsiGAsj;_S#lr(<)(*rz%eD>5e_3zbs^6fZfJ8;!=hQYZX72I+hLapT5`@-z z6ONd%iHQ$P!N5!uO5rPSx%9iF58Dyy3dCywI}>2Sz$j#L-8?)%_m|S?`O_zhfSjxf;{eR|27f$hh?dSO=A8WU zrOaa+BJ(TXzh5ID$QrO{QN=ia9`6O1bjMyI`t2+XV1QD7`E@S9m<8I&hu=|NHl245 z>IL)wm`G;e4q5$hi;9fg3;Uf)BT#Q7dKMmj`O+n3CZ-uC?#cbwX7l0XJ_+6t&+i!l z6$bB#4$^kPLhac|cyB)jiskO|*CwJ-py^;~vA($}EL;n+Gzh0bP~buJA;7#ND=Q1V z!@`0Q7ZSSjv$Jbm-jH;Ir~rsLzd9a3@fotVU{XvRoQTlS+0#%UIzd4wiMF#7^2#BA zYVXo~G5=8#@MpZYx>|6v%Nukrn3~z#+QO&2Y}C214wIn&h_Xrhu!Ya~dp+iG0f(#J z`w+Bil&7F_T1k?V@LhI0cuzB6j=NgB8U8K}ydZG# 
z`FMFZd=LI2XtxApNi%>9(#1X0+q<^jMtL3sLu7w8%5!Hqx3F*^ziVL~?3aIpR{mdv zY>(qfi)52Tt^vAf(=bRU&}RP1oVXJF0j$m%VGDPdHm>@fde*g!S5A1yyq*PI(l`TM zwRITCEA&S$huM$74HmQTLrXxNE`l=%z{D6FtN?+>Yz}}dil5ba?)tM}2HYNdt;0%q z_aURvH@5BKTEDe*^I@_E|6&j=;5$Rqm7B|nMfCn>CEMBI?@D$PH4onJ*E4a0=%)=n zE^!y*UtiFaEHIcw$Z$f%UGXgh(HJuFM|TU>E;0(UvH>xk1JBqX6smMn^yAaE16vpd zDeVzuDpS(dK5WKQNEY!_8!M}+@k9~ocOYhSaVZ}a7_`2B>U47CuFMdSfG(L-V?QGa zCWL*tydp{G=d?>UpWxSkLUjJ>CHjYfTk1X+qfV>vl9`qGpAnZN%+G(vM(S*d z3=guaK#qacff}%ah7I8rPxvW&%B0hYVH8F3Iat`(BJLZ;%F0oIT$FOlDl6xA9h{uV zAk7CtFz|2)4GM(I$jPD963CPpErZAb{_uC(RqoJ!GsjpmPTvdJ>EjS*b#^YFVErA2 z0o#TL&!EU;B{^G097qi`D@}epwKqD%BqWRzjrL>DZ%9nI&0w2Y!8morKQYSEaVj}6 z@#42{_m!2YFSia34$Av;sQp#tcCdFhS!*~PwQX1duYGaR{o8vqA5RE&!(khs;i-xj zo!jg}NknaJIK-T@fI&TYNGaINFe^teOi9w;W88y$&s}DE!Yv;eO z2@!8aj(@RiHwJAU9X1+Mw?7Ou0{+_t$ErknDdO?pK@GW_7hRromb8=aS`9a?x zY%NaLXHgFP1V-!nAgWwQF5~%e&f((bMXDc?`~DQkXdIXTPQ>Y|0Cqz-{k8<^Pa!#$ zFrzU+V6l#b|HFJx2e4s6#EdCJOaMe}ZW8S8S8>=FscNgXe&Yj!f8}qgMQZcc)^?=* zBNG1gT?I;Im_sNBx9q&{SG8q(pfyo-~V)G7B3rxcM!$+ z8yC5LeWAwLeSi0NVIdbPqkiEL16pxyh=4$tuE`4>#*tLvU>3hY;x0PzOK>M!lBiNy zF5zc$ZXcQdUpIjG{3mqy%ji;XHJ~C0;cTr=>Zje`J2*g1l7iIA0CUw)Pe(_jZEW83 z>6>t#s`NDdp>RLXtk4^9exQqjGR{Ty2Nn?Vf<_HfdoQT(cBhIpm~@eV2;%-bdDCak zga0o{dxISz7#&s)iCUX)kbr=)Ld;`*z6IrU3n#l(`FAwYl@nLvN9en zu?bED#X0e(jf@zGGKiEog9Hg`?@=lsXpjz&U&hA3>jmtcrj!fj2mrAF-Eaz#A1f<` zYWWcLZnibIu+XaWDAy`SSZ>%j0RKQ5Pr@+=43wQG=yUWgB0>R7Uets&q`g3#66NQQ4+5%nDW7NL zjX}AI1$b|u?ZXiPPR`D{GakA&8f*s7m?VrA6&QzshxbI#?Elv7C#s$E zug^^_e!1<003~ z)%tJoQW2+xE2r`Q3Yhj>B{KS-1E!F{2bmJ^3S0w!cs}3(h`X=j^#KCW#t!CC)6`sq zDR~Iz$R+VE2BfvA)C3MB=B54euu+4)zO=t=Y^_KbcOQW$=E1=MpBVWHf;d*Arwhc`Z)r`%c@jpKk3Cr&OdY}Xs< zfKo3;?DyCP+`gel1?>+rU>c$(S9d;r{MakQ0D}e(EyVEO-?#ytT$e-0383kf+;ZAvf^+ZXE5X}?bDbrek zME{(_J`n!x@W#gHL`ER2f*pIjUF2H_hv%FyD5tJxkMI->b}|OY(&9O!fIG@1L*hXv z3IRXx8HtPpQ^LilGT;V)l*8tSR_hIkHWRAf_Yh}_-V0O-!e{0u&#rlJfximto`Z^N z8ki^afJyJ&@zbN8QzR03r4MW-n0bPs2iQ2pppig?Hz^lp*sUPU1*uF32ZJ{g5fSm9 
z^7)TYI{hC~{PI{;f20S@>YJG@K-e!SF%c9A;1n=5Nb}T0gP_fxt#0cxXvM(Tz(RDK zZoZht?o0kUKGy|1pGWmcR8&7)9ZaP`>vV+*Gd!$OZ`AdTY3d&m3U@zFc=)Nl=Pm{Q z!|hYFqs?WiIE-s{2L9qoDy$h75?U|4nkM{&MUwVvQ60N5y76X*NvCW-ZAxqHo!lt* z8!Wb(i3z;fW>3E9%5d0oAzkor^C_a1;8Syz`exGKJ<$9$Fw-($~$jRnUYxWhF8kM;wr7E z82l6&SQZ6peBOWs1liz>8af&p$k>+7yx4)G3}CLK8m4t*sN=e?+BMDxqMZz1#64M% zF*U@u?BBjt}pJVomNAr<>cH*|`g&x{zNB!U-{@A>K2g&6u#ioW8KzIP9Jf!`Kr zE{I}0_(7TXRY5MmqQP*|dkvXjWB*8WK^_$t19rG2g_c5|2OvQ6RVG4Q6)xe7j^rKx zj&?yjrV>#&BBG{Q!vlH9b31=hGz~esA2Xz7}vtGnj6UbB(G~Jc)7Ty#4hn|4$?GsxOTrGkn~UnYuHw0!M2o zTY^(u>i_PCSzqLB_O{VcPsrXwF4pXA)}u5{0B(RptgH&O3L&WjYBb1fAXPvom#l;F z2W^tbsWb=@-?{?n4oGd9RoDZR2s`&+L}2MAEZ6opE_!BWl(Gt^Za_FbR|WhD$d>{d^c`!`1dQwrxOl@iU->)E6GaL^k(p>F5&^`hC4G3eQ+GL&Q&{iM#o42mO zj(}|C>0PzE_XPT`RaEeKS3}Ckmj;f@yFyGXg=CD$d;R*Ov<$$N-FkQ&K-I6StquCv)3XW}Cns+n#T?N2^8QATm$&yZXc|Eu2L^Z; z7|daK7RHP*10DxK6WsQwa}pNjxVrDqRHg|a^1*}axX=}J8+^v4Ph%;j-5bjKjm-*e zU_eU-2_ACh)&qBUfx~2SE|bu$J+Kjfx&M+W(Xp{4qH!>wVCLc$=#C1UW__JrT$Dn7 zre;!q!|feAY<_!@|Lvt=j{@ruT!p7{^>g6%-a^8$9ejclFt-gf_DUZl36YB2hf+Ll zYY(U#?Cndq8(k6|Sag-*v-HLSOiNI`M{<^IAGV*Z{q>@0GcrZ7IRMctk5y9PtlK<+}d%I$o7vtiJ z3W8HDQ0M!3Urn41ihS(*eZDh60ttPx0YXJn-{6}Mx0miBLCts({{KSQJ*0Zn&^;DDJ9raXb z<&$5liHq9@sKaBBTX2k))=LCy zI6VWuY5lu*8fo`oglA}QkfdA~=Gk$vi8+yi6nEqB;mA8oTnJb9493~GNcHVRr4iJS z3pm`wJwDp(38XHn-N3}H+ZnM$MyICQI(b1%OH!n{wH5QR-ApUCf`S4VvM`p@Kh>_P z4ak-f{u%~)_J&fvDRwUOqXFH|EXB6_XD2)RAuMNO;c0IJH~Dvio3 zzmc=*+1lQwq3TbaDg-sdB$WUcw*-bwKmzBsWe1lAWIISE#8NObFhFs*q}?=M0>M&W z-%}X$f;BO&lmvG^^yLejnNkE=h$IeF3m7hhEEWiKq)#WW@|*hfXUloi`M~5KOwdZQ zfWZ3v#fz!=c~i^Y6+X`$D=NdBMwSdCx+f`+0lAF?)in?% zL6lb%b#%s{EV2}`@q8tPCJt#g$W}v=-@Fv+B5ogmJAQQrq@po#Y<#>~?qxvC*HPeD z^VD{vUFqZUMVk3e2;@Bv;wK#AH!`lHQFsa*5Nb%NM5Iy7ZL7`p-jMNY`1n^Ku*i5g z4+|lf9*zvm>4y_qz%FsAg+AZjo{M^}3a1Mh5et)(BQOUqg#;HayD7>t=JxVJniJXK z`7z^yeWT{DH|Et*X%z_bg3DH}Weul;!jL3P7H9R;K@fOnX&-VhVH7`z7=}L4w7^*a zK#iK!QZt{WBEJBXV8_x(0@n{hbl0{~<3)3E`V-}r>QK3QmJ+=`UTK6K3V!+`)i8*@ z)KpYR$JAS~8{$Pn5SYd51I%hrQ34*QWQMs#_wpQ61jp|K7S7`0h*N%+J7Z<20)WiF 
zg>&f=G$~=0IYx#NH_kG{N;sGf76w4D20Us*{B&?;~rq+(}J4;@v; zZGHTfTTjk`8VWmB5g&|4$fg3mmy0WU$jl(htET2N8J%g7iK1bsu6as?j2XPN*_$eX}|5@rcA)KOO61V<2cilfy$FYd+1&!eMJ zI$*|x)&En>)&4bEhS4ZfGzBUY4G?4vHVtf9%8Qf02jtyYiZF7(5D)^a%2#p03K$k>{9{MjJ}%OqAwd_TAn-y^DCnTNnE!UeS&-ME~AZT zG`CtDQeN0R5HjF%CKdPn@1gCV;yV*Jr!2af`%|%*8xN1%W%T%$)9&7d)`rI6g3QbX zz{%LMWq2}>5~!U>rt>p=2;wNuy>fEc^q>Khr;@iP!U6Vqc-34?G%bY}Ym9Wz>q9dFouwIv!6Fqt;jeFCE(q)pAY2Kjt zT7dTzQ*#AVx)%X~qwf4KTiSgt-yQM~j)>?^sD-_Xog7b;_V^|rj�HMD1<(>8^g zLCeIZ7PE^^DFI}4g^SIxQ_W@}Lc-{^xn{RP1$8zT7L9#KlHN68tleD)Fm2s1a59k? zfv7(qp|Vmciy6b04AEw8qC=cOCB>IaZR^OLfut}b6NnEo`0U$}KCFydk_hO=`W zDuTD=-oBXBlI-+q9^rdCc%#;tZ<6nD_Zj04-K~;jZWCuSfjx7qP1C+7EBig+tzO{I zs0lyGt`Y{12iea^dW*(b-ejA=!%KiHjHd{?f z2$nP1!NJ%Ak!vaY+L6-`#aBgLg%yVZ+ss*JZDLceQa%qBY@o)Qsn?dYx;#dkq{#58 z3&wZA-h+GbKwW*FvMIqEo?V!ek&+POtjwO=DK#lrIF%(7k{+Big9h+eNMkTgTF1G| zAvi-;U{P@~cFZf3)A#V!oH&j5$Ho$>>K{x@aKggkdDp}`Gllrq#?Wx#Ny@3?)KXQs z5O@n^7o;AP=P1HW%Ov5m`l_r^TX|+@?X5sTuWl&pLTF=5xLH^_0 zyNeOO0mBnaq8}vOV8sEi>CPyBtf;JnJLWHB5Bx?33rg0fDXT=PqSH)$)U;vWsAO-k zC~W@XrAv46{gLD8|NWb`pNjWYl*V-x#90_7$L?f}SLAZ1BjVgRi~fu+*t&q<9#dm< z;`abk>27kmd$J1~fd-!T1DKX9EG)F!1He^#6X=8Yj8?kW2K-lPt=U2So2*i|7#0M~c}=jBQD>;z(29TEn9QNfrKw01mN4 zTL=rN?D4fv;KI|(PLKKSKSAsj$%FA2?jriJk z^qQ=syK=}%oRV+<@)=|3^w6*XV(7qQQQ+p}Qo2LG?4ksfyKi27)bID9XDwWxR;g-x z&j)1y>;)VF_6K{S*e(gkR5rNnECGL2wD|aI>hG*qJW3u_?bzl8M;0 zkSRtm=;gfgkwSMKJB{(7mRW3V0BT|nie)Mcw@5$e;G6rGfu*ag`~n3HKneM(Kn>_X zkaz$wML-DzF$CL?6Yy*%E*&0w__d)%G@U?U#Ff^_!7M%;!MooPwU#%`S5ihJ<3M*QJyKVT-A3dC&*7uf^XdOI@ ze4PovK7J4zv*SCBFNg$+lhXujlbE<-zZ-Nh@l?I4%dU>kP8`40Rr{>7V2#kHEJRNN zH7Rq7=QVCOY!zG@npQ9G$q)#P?Ob{HPZUwR^RbnI2NlE(xZOD(NO8h86*Wkry7@Jp zI!h#q*}vn3EysXJVB4hVEsJ5|z#<_-kYnS%>j#ncphA(>4Wc_R4$U3Z$23GHiQ5k2 zkRe>C!8`@;g&$ORzhcz}oT6$7e$KJWmdv@*-|q&Xgwex0eEfoDE(WHD8bHgbZAJ(P z3e;1a4DxxSSvBqC-9h`#^P|4$JL_q5HGMxk6O3mNXP?s8F9~6FbfI-n zGL(BxGb5jernCuKHcD$7^Hc$so~PY;w1GM2mP!E0om7w;9?3yu8T|Myo=%?1y%Ih(RyV=wod|AmK>NA8JtC5LS@ lxu1VS5)o?Vf9EA;-2HnpXR1clQ}Ktd_<98q7d<17{0m{`pfLad diff --git 
a/doc/source/img/engine_states.svg b/doc/source/img/engine_states.svg new file mode 100644 index 00000000..497c31ef --- /dev/null +++ b/doc/source/img/engine_states.svg @@ -0,0 +1,8 @@ + + + + + +Engines statesRESUMINGSCHEDULINGWAITINGSUCCESSSUSPENDEDREVERTEDANALYZINGstart + diff --git a/doc/source/img/engine_states.txt b/doc/source/img/engine_states.txt deleted file mode 100644 index 3b33255e..00000000 --- a/doc/source/img/engine_states.txt +++ /dev/null @@ -1,13 +0,0 @@ -# Created using web sequence diagrams. -# -# https://www.websequencediagrams.com/ - -note over RESUMING -Running starts here -end note - -RESUMING->SCHEDULING: Resumes and \nschedules initial tasks. -SCHEDULING->WAITING: Waits for any \nfuture to complete. -WAITING->WAITING: Continue waiting for \nfuture to complete. -WAITING->ANALYZING: Analyze future results. -ANALYZING->SCHEDULING: Schedules next set of tasks. diff --git a/doc/source/img/flow_states.png b/doc/source/img/flow_states.png deleted file mode 100644 index 8ee0cb2a4200bfe10bdbaa7a2f4e77f6ccdf5d30..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 25297 zcma&NbzD{9`z=U!NOy=J9RkwbigbDC?v(Bjq(eFmARyfwN;;(BkkXyvp^>h;@jJh{ zHFIbF@T2Z{_Zv^FXFcmgsw&H4V~}IO!NFm_Q;<=IgM%Lf{-mQJ09UHbIdFi#-nz+t zbTe~u_O`dMbc2((aJ2Yj;$~q^YwAsF?dIkz#L4MwZ{q0Y?qJX1>g4{(%)-*?la&RB zqlKH0l@@iW(p(Bo9Ea8W4+x^FL%Da-1ADrvw=k?!e! 
z?MGGY*6jZ#7QpN>qT;ya>EQl0;l?=FL0t07^urrbQlg(6eajz=8bZx8KhOkfENi(XAU|E{pPUMM zB1)%`~=}g%Fsa*73lFJ1r-0EFC@;mU;NQ3 zR3{@KAfTjNF4ek54lF7xG-z>)+?1xnvwFwD%*a?A`1F{4fYzl|{)>>|c`W>r=H_N) z%1jW$w!LyI;pg`61Pw4=;-J|**NuY z$z#XH#$@!Khp8(s*Qplzozzg=;eW?M7&SGej%mxB_&l1q-uuG3B--|~VOHCQ#Sq=P z`u|*g^X5&D00lXDW@@UXB>Tm$L|~PP4xs)ap>i?*TY3hD!s22(Uh-^ZD=yNKtDxuQ z+ovFsBqt^&g28M_Ll_ts>4k+}|6WT6hh;Q;Tr4YNDVS6^rT-cMp`X0PzP&ueWW@_D(Z}aH_SonNJJSa4&~LIuD%RH4LN7<9{!+#~&k#J3%&Z~sa>NG<4n#gD z`~1H0Z(tt{8|^`B9sX2*hjd#ozs1C0Wfn{PyNOmAkgc4Sl5%kis}BG2<#K;|AVkM& zzPVYz(kvs%BB9W`>EDrruFGw_Q&d#^W$Ui1OHPWBI4mRzUp8F%vjvmX+=hzm`4B!y zefvIXUH^@k>+Kb~n-7ABQlq+h`3dH#1dQ(AHn~dmu7&8v$|1rqo_D3_BYI#+%{4A? z-!JKT_oq{5Lv*v1byQX1pkgTOHV^R;L1`2V_wpf)Ctg(I-E+xgF%KIzKiPsa*c@Chzy);;}3%^;-v zx0&z6!Hx)e7ZgMe0!uYov1^xrdtL9Qj>#TOR#ybUtbk8`W2 zCf(YW_7ECUPdX(x|`U( zO&KD5@o%d^;$Z9K6+W3s2QTgT8aomWbqNK)KHvH6h!bOr>ru5vNl8ARRMUsw`>!=X z4#3pZa2~=joG)d3IzG}wY)#cpCo2JO3=RC9zMv>h4!fh!|P-{ifl&z|yDm_Bp z6vyOLKF_Tb*8htJrV~(fL;&x#gA`vK5l|U0_6HIs3_bk%?oy!9+uvf;PpPxE+43;R z#%jiwndTiZZyEQzU2EkNnF$Q9xBHq;&6mGa#HmhF5OwTkiipBSyDrjHd1<9>gY`Ue2zUX3jAXqSf-zRmcMeDQ1=y5K`M#g$gi=m^rOiG=B0_h`22@KBMdU4jagA6s!8 z?LHUaw|4izh#7oqX2)*lUGvgW=mc`>?_1nqFt)t6NT29jig>%<@(n>_#R|aQ$6r1yELvz1I131aH_Uag4QcMRiJ6J8-1ZP;PhFYzichSX`+n zF`W9Ap*LR?y683lE^fL}|IahI0f>)tI#qt6VgYf8P=tcZ`--vY(_L1bj_n^P{~p0W zgBrGKKhL&UWl#8bZnltny(qL%T$IK3pwYpJzzR5de3#4IYAegCs7UYrg;SJEWpOu! 
z)Ai5(r<<`9MjK3=DzE>=Q1QKr&yS!HJp40-tW(HD`_(Fs-R$+E?vKy4Mu#f=oFLT< zKHHh&R(Dv`D-l0u$D{Cn0h8VUd9Y3IKl9B*FsCBB`Jw`NFUYyZ~ ztW^)$G)Jr)M$J;IEdTjb9h69mr3HJOhWFyE)KomkzIPTE9@IBsjRMN}AP1x*U=GKy ziOcL^-yW^@-f`lill>!vU2~#%<+NfeE~urs`GnBg^EB_cGd>s%^Om6wAgB{fR%f`e zaE&yf{a4gr4R1FNW*cQgQ@oo^cpkI-#}82T+%Y?gwrD38Y?V>69K3!Ccf=2y?b*m# zdX;qGVt;-+oPLuTuuCp=ajc?uYfff9Jw071jBQOL_s?A$>g&HrVz&aBkap?o*PF$q z-0)G&Ri3G~N^o1CsZ!6sVu#OJ$x6rm%Wi+T9lZbG7EBh>27ZMd;n?e zl|$xIBf7=YHoWBPkuEIk?AV>3^*{M8olq|p^*1!T#80hO@9+M0xrutbyPqtv`K9RH3{%H{0vsIS~`OX$bjcRZh8X|%EH zTiu3O)P=tu4&-^m7(k^inZN*~7JS*tZsU^DQf7AcFb2)?UvO1D`IYbL0=CBN52;z6 znriEOVI!eBptZDwLVw!FJozh3U<{0+H!Vg9^)LbavkWqV$76BJ23-gfxq zO>LVs-I0=(^T3?B4}pk*Wz-oQ$3pt64D@@riqY-t`ixb?b1|&qu4gkFV^QEIuSWZ` ze8mA}jmBr|iku=|CGHAqNB+^lG5L$Ber;xU->LNeP^tCH=0FU%;ANlm?!O+^el`b1TQSqYKZ5w;R|5>0P2{X)$p?k2&9__K zeOVHE`hOZQNtlnH6EiNFSgrGk{ev8G0RPb~0vOr3@MuDbq|8hwxbcEDsoJPTDnsEp zJhJolU&(U7Ib`S|S)EQZR^0>H*CL6;=2n(kW!Pj+3R1eq;X1 zNJB#0Wm)YVC5z4b?b&#LCEl6(p`)sFvbQz=}G$1_7FRe&@j|?mm zA2I+}xto*`Mu<_g^-d62#QM>bp@GUIEilO$vvuN?=G%gR0tD(#ZcDNSKVUc+QIw;p z3q>6q&b7E}%BVG|m(SY`JjxMVgSuOd=nzj|G=0MPeDGjkL)7Epi#aIB-PIIbLm0Ca zrlGf>i-izirEmCw11D zu0V?XZJ)!jx0Y*SP;WW&xHRpKuE)$qzggkr%T%vbI?x5PePbYSofZoiONwV~!iw0> z9*{-ThVXH;z}KR}7XWtcC<8Ea{aK!;1xtBl49F6`ynV}0mTHmG%N2mc%rw01=>m$k zB>qNo3BV?a&ZIVzpAU}|x99EicSsW}ZUbQ;pPug0EAflAC?hn{@dzh-DZRKrKtbDT zhG7w2Ly`l4y)7j=0EQFOYu;L>&Pa|o%bL^aA00mwhfk|KB2Y`KNwHQ0W9w5r&?fbh zDrg&P+|%yr!E7bAAK`+2Ur!s)Cs$V@vSRARjYW6Kzb`ie1n;a`v{ z5ZB$&8KIg3D8JpKwr3B`&cAEJ5>=MTH|E->yzIx1A3(u=mL@>K#6rb??4^#=Cz)xg ze7U~a3JH>(iY|UJ5P)>tRr->`NZM_SSC?E^0>KnpxfU zBE&VdwDvE2q}W|%a|@$&-}^EsXvcc*a6#l8GIJ$UsZdkfKF{m~fGei=pU3O`b^4w1 z&zpog-GQOFi@r%@a69T*l{C*lXT_xT^8-&azaV?R{QLck<|OhGD(XJ>|IRnOP#ml) zaK2Y0yOKBmUTD|wT>F3So>*S~aH9;EUW$?-U{=|iw4ev)7WA<{w{OcI(jlmG6_Vsh zbN=-I*aKTz+Zef5Al=Q1+~4MwCV+?X^2iT!r~C?$w8Hefeq*J=)buKr`S>-EM*l+~ z{KM7)Fx(b<>zTjNZK|%xQZFsurX27eV>Qa}4uEC+U3NAgm*+1t 
z0tW064y`n34&Gcx{l7pr#oIquMuFKD-t7;H82=TE6QcKc(i%gQ}RGvDTfL+YC5LLjMChyW?_Lj`Zui;sHj)2Q3=*uWC@U+!d#A=k*w@$R z)1LDwR{+)vM74REGzFIvMe$a@n_OyNdpKOHQ`glt5NwX~)1^_|+H|?jcr-~TEfn7h zQ8(6GNU2ldqb^36xXUbE{enu)=+<_{kmlKhbf@{p?I)5w-wMtc0l-%(s6Q`1YCub_ z)S2$5#&bmVcs&j#4i0b+TpC^XmX_}Ac82G+w)Wm5>0WaGJ{n1w|EHa`8-Zy&?(ana8ceAPFr?Pq5^n~Kkat5GcFe+p9vL31ojp`E- zoX3oP>1=*_rt!n+u=nb6pKYLAVA&4Cdzo8c1|hD#L<=orP}8 zA~VrZRlVRHdzVwj)b2_+RuQ(!L`1TLupuyRxXKtj~38*NgR>scUq17N3{ z^ExT8ifY1-8*;L~sT5Ob?eUwR1xP=O)o2IfcU-HY>eTq`PDIbI`-bH9JOn;W5n+Ty zm_Af;Wm(U#9%CU>Zb;wczR_(VT=$0f#Ia?yq1iXwAdTuiwp4o^by-<>;-ooUhXPi8 zp+(tc0z{8u)ren+h{uta@bgiNBg`<2z^E8wYZSd`DMMgWZCT3QZ5gA1^9)+-J#OEl zNoc=q@^9_UNpr{f+TIW)sr@=>vL)gth(^O=wC>Nm79lDLaxr=*I(JAs1D*?jW{)R) zb^l)dxqTJ6Z=W6vv2H|%t(ci+lwVxngu42gFG%8@9rf;)M06;si!t^H`p5LlUR^;acnQD53 z+`T1A=Y4o+YgE^5+?E|}iV^wa);Ghr>6V{BLhpY}t%x{k!vnQr zu<~8D!9%AGdy8Yno$;}x;_rIWe4?T&Ez?1lCiKwv1_0Q~m;C|8j}fP2Wg0rckRJZ5 zV`)%src-(sWAe&8>-}Y`JL|X9RO0%gjSGNNHg~F24{!hWM|e>rX_y4ZB7WjvpqOEC05^X7$r=D4sFu7e_yq^=A^foaS4?9mZy()`YCGfEx8-{ZR;UyarZ(6p z(f~6a$$tfxC8jgyygojZHu-S8B56EZh7B-`U-Y3*l$XV-7f8nVon5=qV3 zMNt&U@f9pn1y~KbKM>*MVB5*%{ho-TexT(ERCQbJ%D66+R;`0av?`o z@erqjb*pK7uqFHvOWSYv0go(REbX!rQv-kYyW>h6J?%>c97$l0+@H<*+FH!?o9aBh zW$!>)ega9bBI(Bg%x11OgaP|F?eOC(I@`%%4kjkmQcbdQ@ln6^QmUlKuMlN?C zLB?fT=i>cbTt;9jfHFsaLSNQVznWS#oPHGD=j0qZAbM2M^O=jNzd4@2tD1RBV|htF z^bIVw;P*5&#`kChmC6_5)6#{M`m94~@-G^vPg< z-pr6=p`4r5b~>tyaLHXjg}4;EOJ}I+guk$Om@nQJ9~{*LBGkjUKtJ^ke6 zt@11{hfge`N6$F~6#4b%T3m@U!0_pwuF%h(GD;V7%CyT(%m9M!D-wMuEpSLJ9V@8@ z0404v`Vj`0_Dy{sfS|iUsG>D+wE+bou{#Kc<3w6MgtQE~{NESfe*`v;sIGqun2T8PMm}7NoEtm-VHf%*lEkCE z!*uUSWV|Np?oZujPk!Su2b=n%ErJ-A@94;n>IbcbtI`#8nvj~BTDbx|^p}J9R)zkZ z85~_95A7{EQYJ6imPOHgy>2{wa|pd0u0Qr&ewmbIzj)cE%a}(`cND$vST>hSR+h{j92&NK|fEIGm{^K4IlD^z7*5&sfIcTE?9hZ;9^7Us&#w zhi$CXh~a(F(Mk7sG+Ur<`D-4M90IG6;*979M$OLhwOi>KG(dirOSL;$^YV-s)cpeWxWiwBiHdVjJy8Etk zMg`pJ@)@T3}u&j3~K84lg~k(mE7u!}iWJU$>H(kE_r=7m)u2cgTd z6K~&ax(1359buRJ9R+nN&Grfb^+@WVP$Ao;;MHPL7Mdf~J7N!^&Cde!e$7qiBq|wm 
zEo02BZU?*}-VpaJbJx8t2kFIX)6vUK!0EW1x5~Uk1e9wzs!F)#RRx)^z7PPgj488u z4Z%;1gPz6`Lv_MQv;HO9W-qlvt(I|+uF;w{RjAZ?A;5Cz%sM)WnHpm`I9e0ZGl!Q%4p2 zF5#G7S3S1Lf&geIJ5Qp9W<6!UE)OqMlq)bhdW=L}F585`U?n>1kZpETQKr69#X0GK8gO1%fzi)=Le=1@GWj0o@-zhu#%F8GlniMP=Qw(;jPcAxe$l7 zc>zFTW&J1U9a`TTxZ-($^?5aqv4wbU@sh13dX_1R5EFX4V2*t)d@Vy?spP6xbD~BS zrwkvVmeg>XWlbp{1_L@uRs1!oWoiX*f8F8!zDuQj0i8fn(qL{OOy>Z1#6xHT23g6Tr9^|F(7(&N04NI7uY z^{m~Xr&Ug>WJw2>+xe-_(fk{~PaNz?A}>9N(q{4oWKbff;{DENg>svXbE(7S!55-= zg;>9l292djTka(%0pka=m&qK4WKptPFF!qs8j_YG?j|^U)@P2q=G@$$T_d2jgG9rL zkvz6YkFaf~I}%<1m}k=Yw757E*#78?K;)hvkfoXH;{#da9C7A2Q2ReDb#cF?c|+Sy z=P}rbCk8GaV0K#nko5}ngib84*AYIUR+>_n;Xy9!V=sFI&#;f9~$-d->e) zK)2Lg9Rf+P#_%Sb_hxufc4J$Uz9z8Px8 zY6v&eM7jB#FMN-*ZI<=bBwwZ!Afr06QReZjR@`ZBuYy3$#nOs=0DRP1CGs$Lx0MjR zy54Zpcqg-*bMny>*Gx(;$7<;1%nohG=lnsQr7ZslVIeJ*?=~B^rMSTYtR3^@r2!ed zUIe@-M4nh9Evyx`N4`&JHH;%gb-E|N0q4X08+*cgwedIv>~ zR3X}Lt&lHXzOa%dfS?3i5~9Uj0GvT)ZDzpNu+I2}zAL}}@lZ6cPbLx*Nmhmx7j)cw z?uR{n6*;ej4}M9G1<(9o)>{xjIt<#c*CBAxrpyyLjq(0Vw1HzHJ#mf#6-i7ty#7f8 zm@48}`nOEUGECHQ4+}{+D_=%0%aGbHKgPl=EBY25eExnOO^gt@0=W~?)4xB#|N0X& zH4acrSpZ!q&V^(ahr;cMH9xO0Y-)ByVqX{{ktGpR^iFLsI_1rX$O1Mtsf&kb-Wkk! 
zhrL;0+bmqg;wuE$a`us+AJRaR(1ii87ewzo&dmx%sYyw_aoi&65H3fDTQE)BluQ*%r3#n~qsBE`SOLS0}-)!V_(ka4<9~oO>627jPw%eoYMf?|SF=c+?WIOQAEyw2&b!WfNo-Gwra-l!kdr zTC8{VazB$)2^V=dFLXu^poRrqpxKw0;!LIR=Ywca3oEBaWvdqxv2ke5mVW`n;Wdjz zphJ6iH1R`ygk-aV{}^=$C%rFh!^4yZN9e#+W(Z0{7<+a1a6%5rXz#VRbfFyzN}iFc zJRNUtUsy~kUn_CGWCcpvWX~^Wq`!!#k@+jr=1mmw-fbPc2WmfvaoG2TuSi)ajUhEb zXyffj6NF)HAH^J8Xk$z&?$G6!c;lk8-*jWo8Erj0o=Ny7bX{T*Ra(xw7Etm?*@<&u}sCv%>NZgHAh0F`x=z=J> zjW)Fbnrl6y8^}n0t?XHQ+18}qr-Dc<_~KbPir^jm1C()u3u*#VH6SXE_g3*qC25=_ zZr2FX3a1~j4rCmt^O!jKO5L@VL}i>t{Q9x-3%uu3m-gbr#=&1tk3=a7z|Mvt6h~uv z(u8dSk?Vkq0VG!^Xf{J&M5`H~7*1!;=njQ3KGF~aqhDH(@W(?4K2ZD@$-FwXjFa%H zd@mv~Vt(r#d6P$tJ|C~p{6Ts9q%>x~?D41|3*Kr^tKDSY(GZ2ew7B*XE%Z~pXfb=L z0s%^;;)_l?A<3?E)MhpC$bx;_FAeQ{Dz0Dm*ClHo4Q9$O?6h;)mt7p=*jKoNJ}gy2FB>b5_iN-VOX4fC`RTtPj+``%|2YgS30P5=irz^<%^0*G~V2>;^}jd;LSUzL2( zU~9Pc;iAK{`XmWfvZL=E^o(27QY|Ew(8My=rrw(*zkv;$Oro3p1j%E!;CK?bC#hDr z$anX*82VDU4xgyqk2}{R77>Syemg?&9gy`8-mIF)e)XuTrByfwGmtq1T9N?yv>hnE z%J!b0!MDH9#-?*6HMUHb&XYkkx<6yQQdbe&PziXY;R3t9429i~?CXIp{@s+`QSt_x zpBktG!3@W=?|;xR0;&!YHQ0)S;W<*UtY0>6~r{ zQ%S5?nT5GmHN+)RQ4AJf3i#q~8-!cN1%e7Dnt#uG1PQG_a2 z8t4&tXmv7*%=Vi7m=Myqpgx?r)*b-5I6>GhWo(wHv6Hgx@97Uum?)pr`+;)aW?#3m zv8-&^n2VR6A9L_ky}>ofljzqXqN`H2j%>)5p?Z7$2Y{dw3)S7U*E8Vb1*pvZA#MDi zL1I~=rq8*ZCWA3Tpo2Atrf{7&#nIj`3zkT}_IO(85b}Ih(z?t=p zou$6)@F+@DQ9%lon$^{8YD4C z!HMNNKGOOCCYn0#70+37C+z75YLDM>b(lOOg3beUgxWPgD~Ze`*=iBNH06Jl^FS?= z0NV92j~0l6Q5~j~zK#8YU*H|P^k7p$9t~6S?0~w&RQU6mc($jk^BZqOD}=BkV)h}4+GCZCs)yVSHHFR;t(6~BUp`-i4%e!6gy+Stv zd-*bc{vP9!&o7|bP}FLSJaAp2XbjNsQ2kfy;(w3_{qL;{A)=FG7&&@|^A9Hmvum8k;(28k;%_nWxvn>LM?m|6g+-lF z3F9~cLX`suIx`YZ&dlPmYp&E%=KvahWLVcODer*9Z)A5&@u>4j4AZs}P%izwz0K}? 
zbf1W$IyPw4j*YIUT8003?yP-zsp~vDVz1wE-tl()t-$l zJ8P#ec1afg6mdhuIli91uQF(kSdz))sry)*%UjI#!YWYv7nrE3I&}MEi9JBhO8!}sMw4DxdZtz@N>D~3U z&sqIX2j)b?N|OKa&=wBwN@*B@|At0!J+sip2DEWC{5tU`$Pj_fcezET47ONpQ#Vz=o;n|Kp30TA{ZODeb^on4>1>|a%LuY5(6#N~w)1@uuLy82iV~qU~ zB<6wEombx}bBJK{D;`&^52HNLIQ|;XTcDjZL*Ru+5{ak-gNNoL?5+`G4Ws95ABEQ+ zs8JMS1C0U};}6l{YSFBJZ!yl~jCq?q066xcA{viwW>wNqf4^YHE*kGGX~;WTCh>;E@}G+X?J_(px8w zO#=-8eL(L>(=+jrjPEJ(7$Psn>g4%bAGi8p^`v$TG=BBK2c6m6Q_rqUbVoAm8RVu3rc6gq&jvmdFTK3sxRo`7cib$?4WP z#Wgn?-C)_)JD}Qy@&j2i;YSX$Y)w6QOH0+b&L=K;G3kL1=-kS@8s|w8iG=V0f|!FD zIYe-7=^xR|Ks`|xy?1}a%=GPbr&=Pw^4;kq7LsP$WEmlXf_l zxn9?Ik}ArmhcjRUPK?k#|KT-8WuUw*+}e%QLWJO9{;Dl+6aEmdgh;SM4(Q!b2MV`j z%kJccd_sr#{WdYIz8YEgf9l9!$AfJkShAGw&?xEw4V|YWVM9VTJp7$ui0LLHt!UWw@LJ0Z1_*+@a&(Ru^-?Q}w-Wze1~Upv^j`AsDs1tcH=9TX1L8xZ|mP zy4G1I^t_WT_OTHx4zHEBn#%C|mG|cCUWx~P08~#UvoM(IkoEiGlr9wtg6f6HH=B6G zM_N>+3cX+>AP{@nA@#o6IBQ0_VYe_hN7LK81Ne<6Gao`7tET=JvdWcSH2?x(^s#Yj zgepBppxy>4V;AC8jpURzT#nUHk6btHme4gxX_cS2_CnL?;2>lk|vS1 zVski`)XKeGpQJonC`J><47I-{-l6R=|)FpndC>8-q`E{=?y0CL^}j)?G~2jQm)jz>aGPv?!O_t_^02i4 z?E8tcw748lx=-Pxa~xSU0Z5YU2A!%DCTp>;2VtCDpji2Kmk z#Y$7?lbt;Ou61XVVSNF_8oW_FBy4M^*I1zV8Y-a8^sh12(X|yOTbE;U#;+sfPOMiO z2J70{IJfF}d2;sXRC!Q^SGT5|+WA;ze_nhR^TAsfL~FPIMk3+`Q!yXWo;)Lk{d(Wj zjwYuAz+L1Y55mL%iOsY>W03NpVQ({?s6X058iPb$`TOkEkj5P`#uVGe`>nFH^>rCQOaTjWSa+H_sjzg9b(wnu>mpvQe+wjz8oiB1|#HO%} zEFoHf5fns>fS$oGAwPUmEV`#;*@^rW*26Lj?zZq#Jiidf`FRJPDH|-1a5=&$0V0(t zb_U^s8U9p@OAM9lC*`_x1gq@3D%la9WAd-Jf#pfac1xG_zxlZX9^7_2W0i8-nSid@ zXczZOG&E!cI=DK1J=F1C+^F3veM2Jx%-JjmJJTtXy`1`|7b={pD`lB3de2A0_3>RYLDrVEcu zv#Yszv&LM24n8?GwP&F8mu-z)H{(s4MNq}&6F!lYMZo59+x*qA`1Qw)x|5e@8&8;v zEa4B;mcJ~K;PWITkR(!xSRVVDB+MVbq908I5`zssuc9&t_WSEA(WH>~17ogNWo7|T zP%5nFAO{c(HY$hs66%%TM0BSRee2d(z646()0g3!P4jjXA+Ld7J^|e+9GnPAU$P43 zUDegP*eFAlT_$Ftb;6TaEm^)^oP#^e_I&80yY^KTV8n8?quuS~@S+bHe#Uk=Q7ay?HT zm&=ng%Wd+x;aBn$psrLhrX;XyLq>G6!aFm?$TLzcO)QY8ypI|P8`Z!R$^Rqwb&*PlgujOC!-)>uHHKP?eiukbS^QS=HQdV< zR+*{c*PQAX)Ui{Wnmpb{+X&FTROQ2$yJVy;m+iMdZ`T}>#hDiU%QvohtTup5vyFUc 
zolO7kP+{5RRkCX-|8jlJB^P<~xQX#fri5Qd#KR)Tr&QS?B}L4~XBC(EEZaKeSA?~n zLl(?feAcd>-}*N*OoEXxAOeOnfd4dG@?T6{vs;<`U0(#-qJ1L`maGZyKmg^)F<~8i z{pq$$4Tl|Z0H4H>pF|W5;nx$!h8N&>ungmC0FGO-KiFD0z-tVb7T2ku%UjRIaS0y6 zxw$Au?aJ<`d(|Z_h}pJ#nY%eN#lm#E&i%fz`U7RXmxr(PY{EUDx{#ob!m+}wU{coB zAB^f-$f_i$4%TfF(>7h%0s2Y7?pYt?IZhah|53k(ZkNzu3W20OsOjfff0$!Px75D- z037bqHd{=VhKAZ0EBqH|$~t%v=T|@>hbHL>9>Dl^6MC_#D94hl3`LEAFl|?;2)XNhsS0vl_>0?kVq&9tYT^bAg81Hl4lIG|Lekyn zKM@>UwO^V1nK%W=Hm#>Ob}J&3C9)4Twwrs0qM)EJx|&kd>m*r!;b+%jCz!ApDe64A zJ;Cfl)K_EubbnZKGK7IPrN13H2f>-*U-t5c1PJZxBZdH`f(b9XIodgfV{XEueW2?F z@O|qj{hWqmui+UnJ6cP>vT^`aCe-l^%a)zp+F(XqQwmV0JY6PbgSVuWEs1@+Vl@Nu zi1$Da#7;r{fAdl-*y1EttYVsZe%{AIPBfh1!A&pF`pDb)s2M&=+!X^HLQybiod?lA2xFXuVb0aCtbvP+xVkQ}JRO|INFsU*+U^oHBFj z+aR&cCPKA$xd>#x5Y*(wbA9UW+8(lv&2yRz2A@9G(V-T1#}Z)yUx>SfR=usL$YsfG zJgYlTD#9sKN!&niN?aD>LSgjBj`IU_l!4Ei8+y~u83r1wWGtrB6V(FIpN4*Tql_Z! z@`7bjBN8~)v6^ToDB|-c{C)=29k~y-_%;OcyQJm;%ts;60W)>SCx^ebHO~zw=y?sx z_UT_Ia;bXz=l1q?^?i24O5|fNXqkNxe%d>R$hA%k2{aE+sQpprQl@W;wdYB-PiX}? 
zXCZRRA1s?8*&fzw=71nzy`8x;j`>y|Km=bQKpuh=vDm8Gh#}dT-FjV8hZhI)|E(0n zWOJueZaATV<}+jaZmK8~6Lz`xT+ns``k02m;ZWgOy=V#FZ>?R3;C64jPZ+8&G95ct z6fU%AAHq`^cuPXDf>l=g1q|GBK&o3zK(?Rw$p|94v5Q}BG^rxs3;ddia2jU`lG&a%sI(ezDMIY-rc1R~2LU--;8Iky)8q8@{+eeA51{k( zw7xaQQ%(1`&Me${lq#1>KK&Zd1DYwPs?rPQetCL?K=_luMt7c-a2V)Ex(kHDIbGgo z4mVqg?3#=AE@WwIRILd8PX7U)Lu_~X0Q{OQM~XlxOJxqu$LWY8x>AQ4zP*3m$GBup5IBTsn!UPXc8S>RjD&rK^3T{d;9eUM1iv$IJF z!gVEnTS53L<@y~~iIKa7~r|Lc?=HoxBJWFj>82~`AT8@S8R^}KBrOBres9GTtu z#8rFzXW{%?jT@K^!q{v%8@aKGz4Zp#^Tn&9wTl&O;f-|eHGv(UmtZ84w3J&oJ&xoDi0DvFR8@z01ZbXk8X<_(>IVHoChEz;Kro4?tbl_zuapnf@yj zN#7%Uq#Ed#pY@TYmq_VX{PIhOG?N9f`)c%o5GP0qH62E6&-DD=ZF{?@E29&gII&UF^UP)$JpBbE zukG@BSaqxJb*npU(Tq_UN;797LQY9kOi8ndX7-lE5^VOgco?fd z#8a(Kllggyi%r;mQn0O;c60eKX&>4=4j=AXZ4|i07RAQ;aEO}R_0|iyP6GevMKVu3 zwwFB*4OZY;1#D7|PkvkB=a5e}B$d3~-#2$Jl9)voMyl(?ZO#c%^)c{niJe8~NO3@& z+fcSRns{uM87X^XEP7To_I6!G6UA{wbWo|wg~7-#5t6#X3zBQJ`2pu)Dw$fGzR4|d z52$o8P{-wzo9|p$u)g-8f}RgQPE|n9ZwXaZU0E9g-0-b2_H-}u5|BBaG!{^pAB+Pe z)`1HG_vGq6Kut!~F(Ok?epy1o_Ew2QG@B-#$MRY(RI6p?#5R@9TIMG55F8sUTPkI+S1ZZaEcn$t)i2p>In|Q2y_}L0_$e)s zGk;?qDJ&{c6E|~CaHMC|@|vrv>~<%GDhF+9BipJ@(P>pz24!a$49b>X>?C7%1RRSl zQ#JX8Ks?axW@nudP%dCHvRxoZOR1_(YjD7*VfSupb*zJ!x)vcr`mO|`O>N1bMbS}B z%c1t)qd%$`s8|wjCOLc|;zwL!ECOgJK#Q;!jTysJ{miwb5WlL}taxXuW{MKgVV|Rb#FbDW zc>@GuG%{#cU~z5L@J-DjvM^lDQ5z@S)8wAkl}KqbkLIMiSN9et+Hax-?_+C4Za}I0 zZfxj%6ohz&TVlN|S+vJX=qDz%FZ@km8|w#;yM&6?-W1Qcnx@7B0M)L{eM5P4-a5d^ zj&>R#Y3KxcOSqyR%UELmE|E>CI(UQ6|HT3g$G0))Q!ign zvXt*$wAE7xKGt9zF;f5@*DGT9Q5^CCrY|48Ee;XZHi1|;lJat~;&VhCD^QBRSoZn1osoa875vFK$dN+ z0A1nV7%wy2J!5Va{r@=v$3BHflxlbiaHTP zdH<3FYFp;k)K^#HMc}71JCj`83G0M9JZ z;)9!&RaGowS&)sqntYrcO#nyv9h&#t#=$~19BPfzCdHqQrwSdQ;A~h}w(QmQvvwrq z&~jG$O9rh2Se=)g&Y2Zsc2xmR6p6pg%*R4-^~r3EJ?cEgy~`QIc%M40&|w}Y-wUB=sdA%ujABAt;zXE~OQ+daFEHeAh{GFk(~0#Ek8jDa{1z6ye1{Z#KI3bYwd z)BSFGgqvqLi`p61mlG!g|65sS9uH;vws8>^5?M;wrAD@ree8QF@<`SUg^_)1Q5j2? 
zB5R@STiMsKuaS~%q(Xy`Wy&71Z|^yJe(&@A-uJ&g&2V4$HP?O3c^&6*d~LogzDIjW zC(@^Ti*dsH)d*Rt{kkQ(X?!pAW11f=ap|ga?n8|9@xK<8{p%%D3yAJN6Ez7d8zQ`W zMuBd}TwitUe$}9!yk(JEP=7hk`D5}3eG4;p#KEW249!MEVcGe=tdeE%EH~iMCU^AT zIYJ&^Yqb?#fQp&^^5g4}BCYkr4}bTq$kn57i*}zQt}GMSG03Ro2m!H7nom$j0{=}2M7ZOgBB+Vno5>0>yIb*USiwesk%@Vtzn;idw3z8 zOm%KAA#K)rvTxxOAuZ3Ho+s0;JKJDUTH;v{*;s=IUoQvP4VeRuYK%|X-!b#rD3cW1 zc|SPQvmEnJ+k3_WQT!Itg9rgQ&1{B+P%JATQdaw5c7@RT?*$oG**Y7?iA9MH?Bx1Y zBBI*g&}GLaqjJ}tSwT+VOX(tF88OI?b$RgRimWQRa#IGx8rTbW5Rte)AuY@l#Pi*C zi~5R;yephrtK~#dvf&P9DAr2~m3skwy3`Et=z3*0A!hC6E@PI}@I1Fx1Qb+GExwX& z8W%`MEyp~);}y`{H!vgK<$a6()3K;6Wxr?<`<)8ya0+A4G7HNQmO;7+s}Xjno!_4i z>`65se@VM2@29!^5ue-m9kFi+P@6;aBL0DH2I|t(jfNMPDr+WbrnT3Cg73Oc zNYnIZOv_#AMY8pE_Fp>uSenTRt8Of{6fq+HvZb91MH^L>whqU6D{)bbR0kWO>7COS zAHQI0%=`L473&W&!8d$z#ZHA4>OC4Gh9=4x+{meOt@3zt>BI^p_L;^=zDeju=BJ^I ze2*T=sNN1#{?)#jG`k`sszDqnHgAm>@h+K(;$NCi)35b7O-uFcvga!e(gec!{qi^T z)!{ejqFslSlt~7hi`7iDX(yM$;V48J{YxA~=R%dO*X?kXJAUVlF$O>J3_jVpxu0fM z{BeF`M7l3E2{^DOI@_M0n_NdxrS(1~uf8NznmeXtAo}5$@|i1$@H4Mz5(I|Yh}X|7 z*EkH6k@-;~=>#tt-22nSwjd~CTp4Y@wG*IZo46I!$0xYlPpj-FcI>_vx_*>)DS^J4 zgZg6Y>9X)Owiz9rHffWl|BayqSW&#|x|Z!OfVKn5gPzLyZShCfxpV~q@o+p*3CqaS zuYy5fLv0ef=uTyX-WN|bZwhZ-6tNoOjEaga?6vKo2apa(M%!tbRYyV-?n&F3oppD~ z_d!Q`Ab|WH{GFtcU`rK_19g$)QS0x>FwcDAqqmIz3&?6OS^=luRb1< zP=KsEVj`O+ga+T*xQw&-fe$pXho-b{^6~M3jGk)cH;uOxU2KUOJ1gioJZbMcaf;bq zR~Z8(JCse$#F!)B?Xyg#hwBHkbsP6KXYwVAOv=2skT~lwr@bSjW+-ZIWMOm}Nr>+1 zFHo2WN7nqF?q%$E9#uSx=CEujQsT&b^$K*pB<*19K*M)Y3XZcYICN-w`uo+NJ?uG$ z_j}dmUOrno72Bl_gN;|C5W>4Hn~kw3Wo3?%jU&XUpkOBGtG!Y4nCTN2bNh%-TBsFq zly5RR!0M-xxAU_{!zc8l{O^w9=8*q-7X7E8J-Ev_J#%k&vtJ46Q5PKAvUby8q1Z?-gjq6WG~f3r^x_`; zYHHx)8|-}hI@hlTPR>8r7-$@aTg?ipW#k?AKDhT>?5~qK(309c6pUSjtL&{W)?N3rfy->ZdNPD z=~1qx5g$}vmd*lX28*k-47L7@a)BDXSPu>(Z$Ax)@e>xKF8=`5}FKCAW&iAd~S zR1VE5PPQ`QXIk;_jlql^9R0wH*v^W6-KF=ZMN*aTG-kLwdh4={g%3Caw<_OaL^FB- zFT-5<`t_KBnU^qR385>hHqIUxgAK}+mMs8QtqMIir zIhqoEP(de2h+@@jjR!r5+J*(H^WuX4PI$V_RF+GL4{25;nZRRb;L+yR)}8)ZdA$^; 
z;A|R18+5Cw=~%kNTR+sVb7&*`<&DC1qmQaG*w{;seJ40M70_RcHRkFYAskt-r*B-t zUM@%1cg!XNCv~&%A$!Wu`^UGcm-uu7D1ve1qRN5_b_;pPhqtJ)9-lIQb$05{uCXJB z6QnL)B|{HBcQ{ZPfk=}AL%u+*WzB#>XvpuTLtCZD&r1Fo>F;?yN|~1@Drc_zsQs#d zbf31F`rYmFzUwt->yx8LrTcnMKD?GtlB?-C`CB$St4tPKrkKub^k>kJD*YV$1o4+rJ_fD5dvHJJDwB?e=iSq zd?BjwXN*BI4m%m)lh~YmNNQBjhd^u!H%ya4>7Mgcjk8G<65b6s`rd6w_xLH9!@<6V zP;w&&6*}A&>enD^+fZ?Hex;QX|NdIu4nlAiQ{_AVtv2n5^*5cc=HNl*g_dr~k*9a} z0{l{NQqHV>MZT07>N^c!u8?Yed*}xddvcWY5XE9jrDoECu`i=bhZadwbtvu(1HSo2 z+Sy_(WjT#mS^uQ1R}{|P^|r?I>o>ZLFwTo-FG_1IYfR=zQX|5dBrPx*UX>gP2l<}j zu+j^CIi&-)Ya&5V8jYf=kUsrkCXMaNRj~+*;q4sXF-;63i{yffL~37Dl+(JZBGtyE zjHq@@uganc2>dQ3nGiVKlMP8u=A}8s+x>y*JxxeLH)hrPO@3&0)q_mmbS8XKT zKop{C(x>GHRtl2tCOOTlFeWG?s6I4^LvN58&((OOr!vKR;s(OpT6~9xl;R7- zNm95fD3zTx7LYOURKhGvgHlu8HoCTA+$e@aL>e_>PYGG=R|{@CS7HdAyX{Y|f8n!- z2#vrM)(r1=gx#;*?elEHBfp-6)}>Z$@Sq8=2?HY=a}8rlfdc!riY~C5vXn^{@H41v z>1lO*#j1pGvlTmbL2fUIvb}4*_=i=J3PcERvvNIaGc_8C7$d68@EE%lX8#3buHDp} zI34D#YaHVCJM?LKv|%j!WmFYsSK7%vQ6B_M?(vGrbl1ds%t|XMqlsMZ5Bde==?cZa zGn#K>R&x0r<;(DEjkF_#20bsGs@!{3oV5PCt>r~8p6S!Y3+r^#3p{#wQT6b<-KxeH zDvO>>J_yKk!f`IZA!(Ei-KD357b8Z9^JeT9sze=~PmRc5)m}e3gtM0 zt=1>h!59MMvX68x96R|LOWdT#>|I@PWln+E$n1&S9>izce69h%*ry6L#jiMdX09WAF1FY7zGI|@mV-eo(Ip-yBqZegWz-wm znX?7@)5Rxlcxxn}_12ftW*Opl*4?a3{l)nOStEzepIB>sUA58PFy`UvprKB*bMoO3 z)${7sM73u;P@kvu8kTjOioNH0rzzIsv9td4<2a_!n}%$1XoUG4NIukIr$T;6b=Y2b z)C9&%%12!?;?e!yWp}1*p%R!pvamJ8ZBj|7Ib6|Are#MCy*kPS(VW(~t{74W{H2qo zkfYf|{+y|nw)UPmPUpEtg4e_of1z^vg6-=S8+sOL>kWn6(0>orVmjUL7|Qgf<%-WpKqx4wQJXEURiR+@o{T1MF$-Wp`@8=6>{%c>a11I zY$&<<7iO7f|HoaLSm;9lNCAyz$92QW*Dk$nIUkzu{y_=dR2Z$%duXajo);2@fM6&S z1IwoflWF7=Xc)f|G8N9Lf~^ayH$YKmj%21*tbi_APl~s6mywnwBxqp>ckl8~A1pTx zo4L7O6?yQ?!8WDzNU<~5O${{_9EpYXeRKyezv~K3T#5LkO_^|siMA9nW}6TtUT}?7 ztEjFf*odV|yqRo%FkTbt4Z}F@Kf$F7d0B*vU?ykfcG%_i2A#=*uV?;@$NxK{3{koCrn?z7Ls|E zP6eZ0FNw6-t{g0tyBqqqM8pbmjE4l;uwh8->RBuXSG zW6&x5CRP>*>>huxCMN-{2fQIY=Xgo4r{1(Jj0N0{n!>F{%$bXFZ~-vq9rF^fk}<0AD+bynX45ls6hWLx-u_rgE%)D7jp8G4Pfgmh@Xu<7s36&P0d 
znZO2P!20kv1GuPdP61E`bpXcEn#d*Ptzg^xR7<+D4yUzD%BN2lK@u~Tq}kpogfIS?LUN_GP%PQx7OerAOI;rRSxH3i&77;qdpg$48B zqh-I_$`NSP6Z!nIKwG#_3@ImAEH!SkWlpygfHl<8n+xHQ7kPYms#4)EY*uo`;DJgD zUt^|#3qB#*f))x_nEwbEoLkL(pm#4FQX4;!atRWl(KgwE9lzcvs*Tx?X1{a$4n0tC zgJ5BX0w3JEOK&gOp6?03 z9bku~ojo^JLC%AP0hxH@OQWA?Pq>E`xAJ1{d6Id0Z^&+^ed1f1YtKo5Z;+YQ*S=tZ zNj+Cs@y^Xp>hvhkOhB1RWxpU-x%V7Y5bWo*D}W&S(%YOoN-WtxWqB|=$lrE;Ly*|x zyIhAJ1!I(n^#f>?h8!?E@<_BdNeN7KN_JdWXZjnKHU9kA-nb*&lfL6|X z0@#jbD_odYjgL*AW;I%?gf#^5 z3|Al1F@CEOf$?SFX+_t{&_hpiJd5zmgO0E~k+=P0cYHbm=%K8Zy^Wq%K8r-l><*p3 zz;s5g%2N+=ZQuu6!~wd*tor_{EF}cIqC>zI>?zBS24@9T>#D}hZs#nC6a4iny4A<7 z6{22VW$<`*w*c<7*YIX{6f9eIfLnQ!!@_iu%7E3jK<9NNh)JTanUJ)mCM7;S8q640 zIv9iiM&C4px{k4`?lQyMCt?Nf%3#fsTM|wR+6u!8Xqa}6@Xq|Q+fS=?OmTQG;ro|! z6ACP0xR1WC%&8eq(F4ZCm)T4lX(JmBA5dEvY!=ad;IFx6RSpQsBpJMA+|51ABwk*p zB+YpH+_b`t(dUd)8+dHS=-d9d=jv2Rf3a|yHF%>Y{GrPNpdaWQbf_QZx@&0+Pw`al z#iF{01E=G$<8v$Zo1de4T~#lu&Muw_(-HiPo}wl*1Q+iC6D^)GvOc&+Ew4NWzz_b` zNz#q8IdqlM8^p!wbh82!ZqxpIIA|QZ_t^&P8bqzJj(bOl7&>nrprl<1tMDl*bXqqo z2lKT%<-|dq^C=Z4sa7qmzXLUEw zcOkw!`6~y46WE#3W-pG-6yy?#M8Hwp)7XW6AsOO=F+&nrW^)OErf5D)OP;@KRW3H~ z55I)-;<*RTi-QNW3_CEdFizhiSxfQU@KqG1o>5h1HGV*`xBZ2cLv~32-Eh()St*kt zY@r>?)e+9wxOKaqS8w8*N}k8uKH&NCN!#!0u(6d4>sMfC!ePT%e>Qs_6U$@>wm@Sh zQQpW}ngLEw07Gh(?}e`e(;~*K=!cR}-T9dVz6w^44nV`&vO8a!KQtlYq)jzA4EYs% zU!cvVdb6+bR#ZsS33|249$6o^Ow#VBD(51?q^ABzTNhG1|G+%((a{o_~2H+s$gBTmFf9ywN$b{?p+;jZ3@VZQLrUi7?3u_Do@6+ zA`-!k9f7oYDm<;9p4*jSJ{$V7@@N}C8}j#(8|*%9i;R485fhJNIYh$3X+NIp(ub0h zyO)S@?mE?S4&WT3FY{AVQ(wP+ZzFc!yqJVzTjG4#ZrczN`(G0&erkBUm`wY9i=?JQ#q z8wq1~7TujJ6cTc3FhgSvIsfxG%W9Q6&aZwrM=wiap{#Cfl<74b4b@a>@7jF0yIi9d z+pj_rvY2Nb*0;^-@8jdeci;G31z_ry*n=!7;{QXh@{#azPuh%pMdh*h$&MP3rgF12 zlf?MzRPY|2kV5&J$NFYi!^kW}>Y|{4UBNYcDe9Nd-zt^hkQ*4qwK1JWP@py$Y4!Ux znWsjp0Bo>jLXbA~j(xe$pO8c3;@=?$v%!Bt4${(tyG*T#K)g&zOiYZ9PQQ)_2@PGj zFZZ9jS5Y}6WGuEd(I$(9@w5Z7YN26^IwIv=x7V_D0jlxCMYM(PLPKq^UShdCR6%gv zx;DapX)8(Mc6B|5U%AojiOm81(px}Jv^?+iGYw@{w;o7=ufVj@$%nP(?n70hHKez? 
z`S(sVK8{GaWdAi|wYax>c@Cf4ys_CHQ#9_%=FEbc$WQstrOq@FBshM7v|&ukPxr{; z1a1Df-a*$@kotl->Eyj-MwFnnkdu)#mjZf6A_;*wJTVL6g^^kE!}0e94l??2f2dIT zI%EeEr7qGINy)2SKw~WbU|j{qkHVJ#d|%Zk_Sz~?(>$mHai;d(g2dgws#ZP=`7(2` zY&VmnfRd-w#rHxe$d`U+2LX-O`hws;LIPeI+ehU8!(tWgsWVCTs$A+l4Z9?iaj7=! ztgWqSEvn11fn6!E3B$hc3n*fVXu88)6{Phu-W_TOrHd`@7PSmjn{^F!os|M#Ei z7|VV*z>~WA>%jtprAW$o{6@qP>q}exE3~DeFQ4|Qk!Xd5)4;vDN3v#1L?Y=Mc4m41 z`(#U^55e1bvZt=z*)_am3oQWvE&EB3e}8%wm=ms=4Wgd@_TQ{&EG?{+3D%B%IT^Tp474|9*{o)&Ua193LH!_v#g; z(Czhde}8g zq6j2YtY`i(VsJZ0mRE0Bo8^P?eOh{!gTb97CZ|j9r}(R9#lrTFX$MdzZ9aYa^b7fs z_ZS)XOHILkSu^qOgMg^tYVcxV9c#$ob(RBJU& zxefoH^A&O7P118R`slW8 + + + + +Flow statesPENDINGRUNNINGRESUMINGFAILURESUCCESSREVERTEDSUSPENDINGSUSPENDEDstart + diff --git a/doc/source/img/retry_states.png b/doc/source/img/retry_states.png deleted file mode 100644 index ccab5c6b916fbe079886645cc81a12a60907c28e..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 12571 zcmZv@byOT(vptNv1`9qw@BqOEcPChY;1Fc+!3lxjK0$(8a1R<7G(fNfcMXGUAi>?? 
z*LmLeKHt4-eSh=~)POa@_qqn2|1$;&$vew5uFD}*nkeEBxBWX!Lyb(8VxpX~W21dk zR3+6;S-K4&ZcFN0{hO->YW&8gI2W6T48&!5s%leg_#P(ai)9%N_;+?OBRiB%cg*@h zJ|@-|@Xzb%q&>19H+VEliwb_ok~~!z$QYkQQ2&Q?svc6hGGT`BEx7}TV zCAMo$2(?sY(IfcX1Op4(%EHSh(J)wim$6~s%uWp@Wk>7-ChrOGQPUb#IUmN#ISMAC zPdvDN>LjTv2dUcwZwS@kFS|%jkN~@B8B#=;${}k&%&A zdPNZg4tZ9d8*PtXH4z#YqA-GB=CmnB4!`>i#3pCU&)q6yi)T1aj_w0eq(LIAL?Otb zK`_`4WKts16r+i*AktEDryfkJLmVYd50b>JXRHH0dn-*+?cYaz_P)(N>^ICx-tO(6 zaiXAvn68G&2L%Q7NBrmb_m{sXfIrOfMU9*rQfBMPbFCFxZcRxTsZoZ;v-I`I8`ah* zV?@67R44!bWlBTzR?u()ag3CJA3V{_z@zsu3%yEjjj{;mu}G4ky%===%PofNck7bz z5L4PF0b%gT&=Tq&%S(?qcNo+JseTAvLpH94i+)(1Qt&7)etP9b_Hrujb!6~cZ_VIt z{>_z?a~KqB*m8bgT%oi6)7AvxVU`WgNIl713p6eWPyg`r)_Q+`|MGA}2_okzj+N=< zyhY_a9;0^oNs~0jpTdJ&mu^_AVumJ%Xx z)X~vOaD@mlhriWtNL1?9>pQLaj_RcNz75{?X!h#W#Q1opL%T=>w5O+sQ@@&q-$rdL zMDHTsC6ZJQO$z_jIXO6aLO}Ul|E~i8H@<|CRJ5e zbAv#g14xUDi%&>M=q28V!Q{D&j7yI2EYIhd6l|XyN@zT3lg`(w`Q+looGH)vd#W@p zi9M(X)t-?%L%vZdkK>NSnLkz;y^B5y6BDLjuUh=%P6f6yzpy}zsI;aECT3D3;@Au+nK<40FY$N$zb9Z<5=Z`~!(MU^#1uMLvzCI{=2SQ*ooQ^E{ z{rYtKEAQS*v3XU;y!!^yqSIjLPd)+}Yn^rtHFfor0SGfQ^Qszrv>EXMN=!_wVbhI3 z`0UPBf%FnxVCM67T#~r^hrnbLLP8y6t*7){)HF26qkC4>LfouOD=i4&!G=tVv?JkP zzI-v^X1$Jx$@~goVn#H5iOyZg+#a+2vzfF5)?s~f%Yuw-l`@#U8M^w^9hNkpgoRe< zi6ck%b5>POwrwqfoBP|hHnWkzdHHbYdO8{?Q~6nl945cEga_sA4Cw`j#Uy8M-cSQG0_7D2&ga2jCSvmfmANz=wA@xlMkZk?Im1e1h6 z>S9gZshYL*O_oTPZhu6TN=T#p`$VEiCirhgKEBBT?Pn}3CB?Ea8TD_06s*Lq_?gY$(7P7@ki4aef8h3W=j%ctp^j^K{5*5m3;H5?qZ`%EN8uJ6H z*gw0k4jRc6l3=%Iznkeu6}EHaqyz7vhZu=U{a&O=7&&&{{75TiuwTOe0|*CF4D{Hc z(Phh6>#48U&pDTij_qVOft(Tfin(`2jPNac6nNU|6+XlZ3 zlDe`Pbh!vTMx(Diyue$>}Ws?N-UL> zpb5(57b;-oCj-6x{Y?W|^G571$oa&?#-uqoNf_Z1*h6LxeSLjQ@CoI{f&%{^-rv6R z8aD~$Yk=36g1j_*zV?qDs#Byb$NTyBZT@mrf)q|QXNZ|P{+TZGrXWJJpc9rlt{saJ zA=qRX6M||B`s=0c)(Am5=2Q_Dc0onPM-GhcPL>6~iZ2dAVNl)U^Pg{Y))s0kzIz=m ztH0pl;=;$jr$z~stYyyoy7P#^ zvF23vv*Vsz{znkQus?Pqa4hdhZh5eWMxn zae2tbYEOvNJ?nZ17C}rZA_Xc+awX`3$r!7stE*3-+~Ep@zF9>(J7x>-SFcmv4acVz?ydhwuR(X} zL+?RuUI9+zCIsw=Zz!JNcLNKEpB)=Wtc-m1O&;Z4SgJx^$mmDRK9s`drn~05OJwgA 
z?5p(#h##_r_>K4UlR+6I3<(lfY$NP5Z3UyCKu!*hRo41dLWTNeJ4q_ip4wQ!qJszT zMX!D39N6gu9E<#ua6E+oG3a$D@PwWERaVIOXR&QiQ^<>VLd0eNGw*vW)+Np13+v60w$IRlurDwS?4p;u~i+bibgul0I#I^>0_qJFlK z;9g-p@(ont){eBg)uR1G9T1odmXEGygo`XU2XEkylJPnr%`;C(GmHjXWea6sFu*z~ zeyq+S?{3FR@RL*XDSW4QJeU%!4TeqSal6K((knr%7*{$`q^-iSSg+Z`NHR`S4WPz^0Z5qJxK9!S zzi}_I`L@(~@sp+CWOv#f`G@nT(dW5rMkgugm%)4_Hk81x(igmnZUcm2>Isc;&5hM*gN&z-RPn zbf*Ydf3H?MF+XM}sWdKAD?JmtQYk=nd=5 z4laqizAb9bl9Q2Yw-F{&suFBP8s#|!5F-qvMQSj{wcjp7915#z1a`yAj+%lcAwFsY zev%-U$&fpCTup8j5FgKssqnkz8l&JQ0h%cnn*dz$O=Y_1zW zrW(V8A0DeKmE0A90YGh0(OvkvV?WJTrV1_c3(Gc}(rAF^C0!zzkvh<=^#mU`(lm0Kk%=y{cnyJGk{E` z4YESeb&dFN7`K**iqLX3c^{}T!jB17CFZAt1wt~zx4wS2J(>MGG#l0DYzB7g)I|F_ z=+oAM_wL_;SnoOiftZQE{d9BW2@G34cc1y+T@6Vw6)Fw#%VX}IK200nC-SeX;wq-mF}cQ zG1#rg*P$Z~k=vmIT(S3JA-#7})*O3{nGv{aD*rIFth*3l%~|~tr!xZ*PHRJg@HDI1 ziI3IiG27|B#Dq6I-s>w8Qx}3p#gYg>3q3ys_K$g;;0@%t-`W&H`mMNCA6sM>29c+n zv#YHN{V{&(KUV0-gNKe^>TnWOT14tbLiLk8R?}-;!qV~f(WN>iYc5B~&bn!G3ir~} z)i3s*+0lpjGxcM1)!J0#=WGypJb!@&YRN?3lHb%}OYi$5Lbs(&`iH_9N6 z>Z-(^t+T$GY;6KXjeOPX+?vR=M{2zj1Wc8U?>H`tsO^sD{Lk1e9t0AiBNydD9o(r| zw*`@+Z;M>r`78nxN&RXD`rh`+BMOz=Auid|OB^XTE7n=$xGR&3ynd4%!R@5lm*c}} zJBJs0x%0f=GqZnSbeNzSt`s4`ulI+K;c#s2XKgnyWz1cgV5gAP*kas(=|1eQ+iIdQ zP$GTE*(rGt{2aGgX*CSROKp4qA*hq&BV0nvSBxI~+OisvM8Rk-}EId^hi3-g?tg z688=ez7B$lo?3a!|4m}2V!^5&TYHtClP7|{xW%-^yAkmR0Y5j{>?b*CqOj2n=)PY3 zs#X0p)Q3WBnns7(XOOMqU|TjXtExLN`#-qkZee*7Pb0&5xd-``eiGS*p<20!=4Y_ z;JqzC-6Q{Orx-4Q!e{br>@aPjf$V*ntK@c&EA$yXcLP0DF(e);pX&*ro5pDD$#1_% z(5+T_vd~j7`KKKk&efx8>=%g$80cR}j{8h8(|>HBZ!t(>ULg5lPeVe$klQv)iM>&v%j6P zb$=NZ6%`SI5xtP)4taVRCfCpUJq6?1wc*}iB`T|hws^+xu7K1{Q~PyAM@aeB0dcu5 z$I!fvM^02lcd#x+>#XXvYwNuM0vLPRn8q$1Y9Q(bnZ)n(jY~q~(PAS_ZD57XHgnL( zN@}Ma&-OAZJy6H;q{=Zxg}a@FH;zQM}CKWn`I=T#QPN4IMb@vGz~*$|KS zQSJVwC@8&VUlR+HG((0duoQnU%G@TJeS1&vbQNr?;rcqIXu$)|l%T9pkLt-fKC_$K z@BOWF2fbMq4$CKf&w|qB852ynBcD?EAr;(cl3%-%o5X0&Ji0HMtX*Ay^Lg+q3CJkZ zAHUMPNF!uttc+Y8tX3Q9pPkfxN=LW(XP)BhVB@q!wsxcl24YVUQ;MTQZg>jb3zv&* 
z=gV>V%ue;bKHo38Se_LD3%$J@va+IR1Mr7jH{)rk`su*18sDU4z=L6f@8zGa&_JaV z^;;Ubw4Xn1zg-n5y}yZwB;*8&c3r&^RI4%=ws8`gC8IwQma7Uj(U88E|KuOjggpKiDvdAGHovs-kvdp@ z?jf>2Tu!ItXZf9LwQ~Z4mAQUECqSZWRbeA7DJoJfo-8O}bZu@3N6Hpj_Q+cAX%e)( zAfmavy*k)(tjCR1MK6PmKAbQpBulViX{3tcYM^o5Y#H$@5GU~Qa*OU~0WeRYz~zhw zD5w7GH_pF)%j>wi*^MhR_!pu*Wu^Ay0^pLDuSQc^=&8^4C_A+v3qm?@e@nt$DQxD8 zCitcB?ySFqys$2ZvVtc;!WaoL{?S6!F*pi$nGquR-4Ioeng`63h}zBWB-#61+GY| z-3d4RDqtou0ke1Sw@!b^@JU%WtL3p3%+40XNd@$M-|(@MTAdBKReB{qtz!M(Ps1<- zdz>;vAdk(#i1MqR!l6rM1gt9n0OYW)>4dBJYJz4f2Cp|3DDV9^etqfq`^&NEik`+O?S(#Ug)K_icu8yZcta_Zl-M+N zWV}%QS6v)*)lkN6V%l?Bc^Jd2Ils3k(8t?*JQ&r{&PC01C_iq*^HgGwxYC=UNK!-j zxuJEnYhGyBQMTQIt8@!eJQa1Ew3Mo;q+$TP!w;*(Vk{;F|42;ynfw~1Ln;jKnom(4 zEy1s;HY49JX{Mep%nBI+Qh7$r#Iyo_Conm_)zF|lP6YAt$_duhW2tZFO^|a!ce&gD zq+E87(uuMbFGpITANcgiy80DsXQcgMMIrwDf z$DN|Qt{FGVdP-_Qg%lOSsXgD}pEOfU8c`lrWC=D;9`yxxdW=cnw!5q2J5Rp(*`7tz z1hhg{9pp~VX_GQrm#BD&XN2l|BGJ3qIp&qx(Lzg5xllENI^OsZmq~fgRr#qsKs2w% z85xV+OXJ3NR~aw%clrEVzFgFVF%TylPOWlasq1LZQjQ5tlj2LvD!LZftQqVRB4Awr$aCI+0;4n2Z{i-(1lKAaRM+vj6yV-eV_5xb?}}m^@_2i1<*l zc7QM|(g$tb@`H8k2g2Mr119<$KT93qx9#ZK>uY_58?|Mxgg+Y3re7T$e-=Dh8|f=d zRsQg1h%w8MywI&7$;)st{i^192yp2;9BXFcjUfv&9oN-VNutFliV0EhyRP-#A6w{w zIoWpK{F*yfH3M;rf20-f9hh+?-;WY{oOdV{59!6S8JPYD_HS_jSfs)Cu;_` z48@+q+y0J45a{)>Rq!hs1Ie3D-hquuK_z2mZRCMCwFb~x+rXP{_N+VJv{d@Ei_?Nv zk|qvPMai2TO^sxf3kHSfil-BKY!}WkNY>m0=@l9Ak^*Q8ZJEi$p2{Flouu8Np77wn zt3|+79jT#A!p5~{v^qg@>TFSfNC%H_Om(jR`K3cX0;b$#+M zYAMdFZmei;Z$Ce~Qi<;58J6@o!i}JpGib6VWzQYnX0eV4bjH1|X}aHe`Jq}55(5R= zpqN8g$s=Q9m{P9~fDT@~6lY`Ykn8#2arCgk(+Vz-wqiH8L8ERa!&-kEBj3{f9DIbn zfBT5K&N>>(Mz7aIudRysGFDPcCX%t21=_3l{2573#rrd`mrel_EU4nW$1<<&`ExT3 zMkcVx;Vjm83*t^)eGjsh{}eDp&d|{0YAIyD{KrpaK|+wO!F1D#lej9X@5a09&HHdD zCi`Ejoh0Za*vGlD`9ZBDamX%YAJ@8sxvp9qCC%ROWOIjcON->!hehIVMOrzg{%mY= z&ASUlIm*U#&FoSGi%)@dOQ1%5TI2m?fBJ%Kr>|$dC&08u+LsnXZ=s%-xv4VM#x)Xld}Kd z>RqARx>8r9t9s2N!UrV>{y8H_fhc7Xt#5aY)5JAqsv0%}xCPP;M)+Y7k-_-krR0XO zA!ll^ceyc-ZFz1i{Nf=Wx3(xIFxgm>$p`V6OC70=Z?Ph!4{TQ;nzxnxHZ`GsJ5MIo 
z40E@KAL0G~SWNwZCyYOa>Mmpud%{%)f24T+_Bc_3-c?{VX?UA;<6AG;sL_NpeO{K9 z5nEmCr-c15xjc9JobA(Bp(Ex(*9JTV&*Rt|lr#l|Q2_t{)>kUDxdzwRSX*#1n3Ax& zWLJyvozV7wg)3cf%2G(PgopSj6983xTsu|Q@`+duGz=91a0LeX6s?cgsE@X+5Gupn z5PfgYOEX1!o@bFBobP6O_Y@#?M4h~#*9&DHKT0{7b;y&X?kA^>=vk*%R3mcZTQR-r z7uW01@|fAgU&qzg zB@6PF7N%&lM95rY{>prV(vU*r@wdlqkKi!eU4mLslUh0vxzbQKIY=S?R)XUM2+{-q z>WkkngJnH>*_g#OeuNpWO#=Rn%Mm$I!&%qGE02oOaio?CmQVKNA+h z4c4od^hATQ(nC$xj)MtxF6609a74Mvz!K_2a=&wem>)}p?fR$aK>b+{E1JZ7$Ss-r zJRe?rEL||6kCz3bZTWY3c)p5pMBe=XJ-yZo8}5Z|IlgSD-DE0*NgAnL;7Om;;(awc z0DZY_iINsTe}Qdo9hjaqZ5GaJ6wMbK&!3`asl5#vQ%Oh|BGb_WN_8{R;t1AI&Y zlwCW%^dX~1HmMOWU)Y>MpVdi*wE!oMCGM*L!GIlH2I4LkfC790;3bh?CJ9Im78)+Z zUbmK(RJP03{MpcySJXT%2_T2J)Z-y;8xH$H(7gA;L3!D}7+Lv{)=yb-?@6&l+sPxh z6U-$9D#qW{FNWlOtJ^?MxZXMVwws7@72R(`0<=~7wh5DBnHHF?N?SOyDvd_Re@i!( z_hiD@e*)nKbZuk^gv_@1lcD3WGAq5oqr>FFtuJaNHTnn2$=XEp=N;Spw6qXn70H3` z(_VO&UV)899FJtdk&hwgG<9$oG4MQ=CVv;+jzJ7lSaPZ z+-$~Wy!@B|f+NU@NKS3(^1ia&@8W2T&R`VAO7WA+He^X@Y@4}so51gN--TP9YT3(w zCtzpounqLSB}f-UAW3ON?4HHri{>{@ewtmc6DYAM=9n6xc$oAxzW>o1Tb4KM<-K5o z+we(Ex-nv3TcdoA0}e#vV0S8~c<4Z0YBBrNNUN}U*UaYk=QvLQnC3JcAzY|eQZ^?G zUy9Gw`iK`Q$|fGFyOs+;wI$wK^2S2blE4o>|RUXkjnwKg8J^XrLV0=u5S#YS3epzHh#a~(}eV;gf?T>#z$1nkPi!py{{BLkVYE3MmgLMcn z-qTbdd*y42V{=uE?+9yNXyn6}6Y~h8Dx2q9Cl4odTnp+)@>K--wUiLl$ef+KtGpNh zRu583;2!S2cH1HA zHi)YB=nh~wf9vLlRfOs)isP3Jd8-_ugv)lRwF6n+VABGYn)$;A+5ialW;XoQXk`C%W6PyYarasI*o z#mvLJ&T$9JLpvMiM;ElX)mwE&v;~%`5J%3c;K_P`A=DyKH(Zfj`acYz{foiol!v!H zZ&iZPDNYQl2ww3r!-=Ql0MzXL8T+%~w8X!tIS1p+Z*?}Qcdffc?xQI&nk9HO^PhXB zUk}T}MI;nf+PMP)Tymsx%aSzL$Me7Z8pfXOkdyzRd!_Q|$8`Db55dwe$CV=eFFB|f z%`%kb<LjBP5rmy`9-R5lAFlaTRcRN}DR^X|ayR zN%MaIw~*(*fO|-H*bYvNNl35P+rvc#aJ22DuxW%(@6_q#&oA5dw%r}^*iyf9UG5&1 zBzrd1|36%8o9Gh8QX{C6huuA*#eJvvOEM$<|c_R$>{obsrI|e;CBaEjQe;`Df^2G$g*#Z<-p#~j}O(! 
z0>>u{iPhFKAeG$Hq!x9^v}y#V1Z*hj9zVkt8(QjMfMN;0@`d(k_l zGJOH*2NjGemQ&Hup#?uLdbgYbCD*J!bg>==JElsVr*kW6*@*ZudWAeL z8EwB9HDqhT7C(LZm?&~|hKf0R1cT1Il_!+z>6L`WJ@irshM;=*6p&ypycf_mCh>86 zx7kK4b7g)`{j?s>8)ywxGB?dt=o-TAaWvg}mD=g&{op&cK$n$zpxDdFDxU# zm?FR9MSI~m9ZP-rpM9$F_VO?^9R1Js_I79o=V?^G=cU!HkSS%gS>#GOmoaHY6{3O9Yo#+s7tg@#$ddv7tObY7A7=>pHK@hoGpTNDt+C>(A1M-WD>Hf# zld6I^s~;hk>RI)NpxM4?82s72zxL(cc4Aa^<=1pAysKs+oz&!H=}S=BQ2v>ADrz|r zS@0j};yj0#CAzV<#7OqlG))&h?xG9#E>{T79ARB`}u^ZK~Z*3+rb2#il?>X4>24r8IeD!aj$@pib!jVJH5Y!zl>Z2 zz!8?NjySt44>KB%oH*ejWGMlu7~SIY=V`5&pBg5T{rmMO09f$-9^e=|@&SrY#QoyF zap5mNi3P?)Q||LGgx)g_RZjE+=ul+S8pKE);&goV%61^Rr~Jq+lRyI{mp$P74JQ>y zX7vRCzVu(%IL{=GfSEHB(&hmp;<8FZTt{b6Aq?2x*Eyn|=K&8cp?-OZ19dwE(ABii zM>&YxswAi~5&z6(RA(ZS~|3g|~uj4#VF@LWx%y;0!g?6eLrifZdCcbZZx^;rE)^muZ&)<*k4(Iw?#L zOuJt5k=9_$%Hj(L#$HOtSx*vqFd8=!G$Ei>e>>Tzvk4$)0r1(tB*(68J*5W* zM4aH$v;kE0ujTDe$|}xf9(6T~SLWEi&d4vHedv#n{@?=CHtx*mmToxYRhF}%wp z%%Ud-c4shnRZ|Q&?+zW!^w~=136h0cwWYgHG@3>d{@LT2JlWHF01i8*I?q(Lw%8>l zQ~-o4=Tm-)1bpOU_3upp9od=TQQ?)*hGx>&Ce@MlTl9Fd-Wraa1kcHhk>H=bQr`Gm zA@VW+@gj8XW#JFE3H)CcbT;aGml#1rB(M=B z&2N8rY+hKsCnSNo3sjCkk74s-mgNILjd36Nm&5`AheDJR%q~XRGCkgV6h%>*(S~gx zwUxU(BOLEK!0)o;=pYyn;InD;r;ZJ<*&G2d?$nJIN=hSLzBorY8vz@}gk6NpuUU9p zIIECtpNysv67c!B1K~!n=!6LbuT0}XYc{2M*^$}CF&)Z?SkR+X|?BS*SWCneW`Br)MDO{7r2#5V^c4>Jv*qo%l(HcntLViuD}( zm<#>4P@EN>%F6z-5#85k@4R6we4d1#qnE}627y3K$EctGtFDCoA9bbG|58`}kc^i2 zOZfNJC*?;6+5ZEpB0h} zIn)rbw0NW$K=kv$!XO|Z;O*N6qpHR_py>mM5&&R7E^3T3`BkRTkuow*l>H5BNlD2d z1fH1q#Fb?qx(Iwni<0B9p)wIBK$K79C;Hs4AJBvp37ob_RK7>;F|+GB^(SY!%Z9!AF`zq8jFcwvjo4eMf>5> zoPIa)KXSSSA|##ut3A2>w;Bsn>WtJ=cUW(we_hI7+2IZeHQKL&x{a>dPBt*NRKQiZ zblV}CpT+YBQb0lIFCNut_g`gPYOG*@aKb{9&qbJg(>F!2H58-<2r!Sve*i&8dIX zbX%xh!u&rPxED*!GGVCIX{yEAY^383AAaG!BBGHC?ajbsBTJ421aI~K2;R);S2`u7 zbt10*>fMG>4Z;7ktYg5l(todzg!U`3BoUXO8hPNHA{8^@@g$R|MmNUDnlrC}ew6{T zX@HLRU;xTM!I%FS##hAxn&<^1M8pOIO&1qlK#M#Ag^KseX8coh`rjx29U#AO$5@l& zZqAuU8#-ZUPM4>v1!iNI@1#Qxegg=Qr2ihP + + + + +Retries 
statesPENDINGRUNNINGFAILURESUCCESSREVERTINGRETRYINGREVERTEDstart + diff --git a/doc/source/img/task_states.png b/doc/source/img/task_states.png deleted file mode 100644 index 6654ded37a93a251e614d21650075587766bb231..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 10359 zcmZX4by$?$w>BNp9YYLAx5QAwkW$i!bPO%s-7u7NNrSWsNTYOvNOyyzbcfV=e9wD+ z-#OR!2iIKeXYE>ht+m&^?j5D3B8T&g@)-gG0*->bj0OS%A_MS;f`J6I;0mt90zU$7 zvN~>Nj!xco7M5-ZauyC2E~ai4=3tmN*xJp_Nrao*$zL>jb z684e0M7Pt9=qg8rT7FGR!YW9m=81URhoUTrKij<7Oz)<}2^#+OODx^<4vTH^)t<ux!)-dzhs3pIU7WK7usRX-1Cs0*Er3S_W_$f01+km+sS~YO%@$S_9yoDrvj1cOhvc1 zs2B>?1;`IJ^9Tq`X9_ZsT3!o>{#XV?Zx;ggijDH$>AT%>EY?>%%{(0cnb+1IN1~^X zG%@6^pPERuKh)OO*XK5VQ>Ilx|LZ}UyB-B;@U=WP8a5!r$R$Z2TdiU1W+M@;AJd>X zJ$;N*SbIBQpY*!x=ca$aPQT-apIg~GVxuj`qUo+O*Z3G=a$u=v_!z_>q+pmd7@q}# zg47F_$e+WDu3?7$LeYVDVfCQANXyOv_Fga|EcRVv9__19HdjTdyh{>#_WQhBkH-* z$oZE|R6YVz^%O}`V^bOdG&RX~XzCIwTv>Dk4Qag)aMqDT@G6RVBj*u#M>%i3XL zV&aEuj3}2*N^Zj;z$ixUP#2Zk0ouC_5KNRemc=5nPE z#l*zy@9m9Q0$Y6}KuxVeGuv#0<>B(e=ZxPC%A3}EQG?01{)d}kQYR}$Knk& zHbK7kA_n@dd>i)(<^PhOkI+-1#kR#16ewwVLWCO=1Sk+JEZ=eABW8`+gLd?J4&|gu zr8G3pse=|JI#z%TT@R@`xNPq=!c&M~Xx>->U=4OiJ#b6FKGp30HP8aURR6VJHM{?q z?|(ki0Sf>BKmYCWk9q%}&wob*PV3~29jDbLq-W;nE#M5>FGor&DP4v*qASWbZ@%I1 z);&Y&?ZXYIa6}58wohy%S`4o<(0PNGFy2-(>fqXAZTSGHIJ zrt#6eY<5nG(Y&JcSN|yK``6hvjt)hit*WO?OKFbs{ zP7at(v(x^bK+TXmzP?91Sm%`)(9@wO5%XNaE;Uf~l@jppp>yr2qN}dE=Ag6o4@ORzVi&8BiUFx0PLK$+tOiN17z_wCf}UJ%QlG_>@XF2N=SznO6U z%bcus?|fu|P1~VTt~D%Syc_RhsRi-TlkVonjS?esV>@V>B7bN}$G2(3#D%$Q+u0(1 zQcFW|8K(xnMt?Ke`+jfa!h#XD_LIya!=rr-Xc?|8n(`KbaN5oMy7u5{ofI*ennAiPN_dm@kXyO#aor;CnhEq7JeuS0y0qu1I|QH zsXZu1jrkk9DS_XcAB+7;F@MCuyTiMUnMKgG15)jI$PJD)QdEi1go&YRervymNufyl zP`gX|#CoBu?@XBR^3{MpmQQPg=JDX=$55Zvwr}6&e?OvlXLbw}+zit4hLZ%K%m?cH zmQCwLqu?~yLs@%J9;*M2dBiW~#R$}Zu!TT_V_Etb zPn%fUc58+%vrq2^cgE^kxO?>~X1+a6B~+2H39G$JN^2g7|9Tf6(<*Pyx{zCi8@c(r z^?9=?!tDu$?v(Gl`h{`x%mBcHo zbVXhPN*oy^d~fNJp%&(?8@Xp6s95BfT6dm7Lyn=97ay-+g=iS%hRO6=nJSgK$T-AR 
ztBUwCl9HWKM9kP|$lWoGYlFcq4(C3lx2J^B*+;U6y)y>dIC3@|KMC2C$!f19kSEiN zEzQkYQh!WLXkZ|d8?~US;->s*H5MI?PgmP?<9^58P?EIKCn!W!UWTdwJ3G&qdiMwv1l;FNe5WQ>@-@Zah;(`i&8xA-Su~p43E{MNzgv zOi?l)HOj&L;p9Q@9v>vzM9_1U2YH@cmegIkx{O5p^-PVdwsGnnl6P(kSm{Mjb5~28 zRP177Wvi>Q-JiRyX+CAFeRxU5x(F zbKlqA$`S2nW3QE{er-*kR*9OqOO8Njkcs&2Msb6=VmZ`3P(NUQ&wXbgn2nJ+5al5# zYR>M&EL{Ze+)OdCi+OYI%VuC!OTkbW{qAs76kq2rwc85x;$hrJ?_LCYZ5^C{J~cS|*zYXx8DXGaaqbEueb z%%yVvD)Gl}kPQ`8h3ne-Kzu;!Tt@FE<@%!mlX@!oK6K?}*E`_7h`2?E6Opf%g2#_{k$@ic1!Mn9D$DW1`q^ z8y0GWXqs|c@TKB^j!h*p7767CwkzajamfHlUBJTE7FN6ajQnWjsAwkIb(J6_urQq> z2{{Nqt|8B`dfqz!u%UKn^Z{Y;G->&e1WqgdY>#GJ*a2&Ww5y~eAuyv80MHt?qN zhq}6ecjDetB_`K)>h+3v9`$6?f}rjg(7xH4tMp_;xdwlIVaW0Cc8SNn1bR(Z>2SF? zDp)#w!^^CC%|*je;PN&{EtmCwKm-YcWY%Xyl5dXyBX0KOwrJ&3?=E2XlfJ%{E6q&b2*`?*)-Zu+)$a z&E;e8ddGarsK4~c`VD*5k6bO8IH=v<-@doIEN|PKC&Gx1)%iozGft9~o2+>yV^`lk zT>8mJ{o@ONgf$f53(japrIn~S+>&2JW8GnxbYJ1ZP)NWT4>ah!S?ijW64H$sEkUP7 zRHoXTy^1DFLPCgdq&q*m?fIOuvT^41U$f1fgw-b2jx+K&IW_Ot1RODQtTw@fGw6dd;PGMv~E}F08yUg6St}7o~!|{c3d#%A(*m=@y z{Xh!mHh+EgblxJk_60oSx~0-0k+vvOfxC+=@kvvqxP+4X=VH4{B$J~r!Mt!l%Z{x1 zj!)vOKTH$<6tYe$sc13c&g6L0(?=4}vPqQn@suyeWDpBVL#^;WPd2H@HD?ecPU9X< zTXpL-MUmEGs}p8+P83^kwP3H}$>c~*+6nYC)d?fQ%oFWTBEz|v{PKn{2YIkVs0IfR zpu@YFv`qo7%fOC2xoq=6eRxfCK!N{Cl?fH7ka3ZbQkNK$fysqwXq52sdL(bOlyw6%sktX5$v8`?#46JzB_qRT?}3_ftb|$^)!0t+kuK9y^VRr03k`KeJD#MVHF+ ztb(Dg^`5XXAzYf#k*Oh2R?(t}!aB}+_IL?gl*`0ylXF@=V)cc3l3d+4jMh`VPAGhr z1|@KwaUPi?>cA))Oo&Q`d7GzK0WZ`n4YnppTkm0D*O|EzH;T{qNXKtLxC~p+Ab?EK zrWsNgElzGqJlw&P^*xgvLbGn1$z-E!BcB8lFN6%N6gKgV^UZ`9d&+I_z?R4vlRFA| zRE|`jgZU_!U{EFJ6xAMHg-J29H=L@_FfZprwRP&8y%R0;D4&IWw<@XW78c*_o+g8P zib_L7uQhBso zT1hjG!&ZcQw0VHpgrJ?$^}p-E&ywQO(bIcYqg}4$q%8~r`-BtbM69a5ZjMWETw$~# zQBy7BoT7M#d+F(41655z1~a#)wp~J98atQozse`RDDc^S<1N?m-|N!N?=(S>>>lih z=+ccbd}W%3bhdnV9hl35mOTHMh<=V0J&!y^^bX4%o>?mpTVXX&kZ`?DCQKyG;Q@Nt zfu~P=n02Ob^m8;?6sfY7!6lA&j6WN%$8d+24Pwsn#ICr)DSzvA6MS+_5+Bdl9LJJb z^EFOPLad-c%}UVy`E{anfeUj6?U_T);@XP2P}h3c&)L|(OL 
zg0YAZ0?&jo)y55H+&XLh#<%cFg|{Gss{=JGJj;Sy&hBg6)mWwr||_Ln3|al;)RFh;7lIY7TVKM9YO#qySV2nZ>Bj99>p!E#T2$T|$Q=bmtu3k9 zeJP+15MBwZi#+z+p^8;HTiBM4P84+FUDfa8tnR!7eMC)36TS(UUA!c(@#$nD24(SV zrcpo337_EYKin4_;CkL746hTaM~EKwUi$8%ye|5z*n$99y72{$(VUa~^c3O?q$Y08 zUwYvw{(W^2V_BPw4f&rfftF;xrroNHM`ObVtU+)ffn!{E1%HsS$tZN0*{dzjFq%8L zhqjL=RLNF!KUSZu&XFASCzdd?K+sjdb9oPH9uu})*ogR-zB@5Ym7hs*4L!Vkt z+|4_C5)6nzZsnnp#~ zsvml$OA(u8b4Tk+`ZMeHp7CJ#OL68mJj3fB=T3?_ZFwFxM?q>rc4(`s5u4xtX=-9% z=YQB#oojkD^aqqF^^Sn77tU=zzZ;k^_N?B$eHZk4vi3Wt3t|i2pnEkiE(8H~SKp?P zplmDrSc9xnxIt+QT8;Rkt%XN*4AZ^z31{k3FGo_1i6|{_N@!lx`k=Le{%YY9Oq)F-*&}_$n@N3eYR+ zeBBZvk)+)l*ID%{1Htq+J2=@$wqd6%d4;ghA##I76sH%i=y9V870RSoH8$bg!wH_$ z<$ECA#|79ht#K)pp}is+Jn<+T(05v`4{$78iu`i0ZOj@QIinoa*o2&?(z#auJ{G@M z(|!#^u8_tJYXd^D0QN3oCmap*9*i0rW-|MN(mJ$a|MLYHoki&q2}nlqW+F{M;yvfU z_`v1wyJJhrGxqCp;80d{2@*fn`0AW@ z^+%QGkI;B6Y~$80aDp4Y54%Wa&M{X1H`Z1|ml;>!RQ(5t-?w7f+~?u?g`8mG$f97AoJ!~>$M-09)mx@a8BD1yQ$*P-(p`jr)`eok_ z{>c@~8C?DsntwGIYb;{TU}W(=vuJBPW%3KP1*`-X`qhXU zpLHrr3qp_M-I!QYc0Kh@K=4wVpnWNIw!ZaWsi9Lraj#P;GpdY>+&beXt)%jCYw%pq z8cmYTQRHFXd~YT8-A=vbAwCfw;UW-S`y6j(io6OKN9>L8?8*KwcJ|C(1Y%lEGy3v( zpW!I~*7+~gFxV@YE!SuG0#G>dE@0fd4#2TjHBQU?rw*y4q(nEdmg35OH`Jw6BgO1- zVOi@D8ATBl>mzn?M%RVKiC6(|u;S9qNlRl&Riv8;Vy6wetaBBrg{k((VIT>E01QYC zO!HIur)+f;O}<>bgQ+QZj(CN2V{TurSe|MgjYU9MQ`6~5^6eEQj=KgWW?va^qTV)r zrPaRx_stt#cv=EeC8Cl%17WHnsttb_1^i)^#Wxn7C*tR)6v;4+_(DY{$&a7C^{vU; zn#W~Px@u06oE&;LVzaxi5XG{e#H?uK*}b`5U7|G3=duh1S4KR*q!z=ZwQ`-(zhX}#@NZ~;kLc@WSez1?EcW0l>|OfMM8>kM ziP>G;rLymE7zlN{;oM`fXY3X&pXLyO-gq3yXY|OD)Dg2&k%=~QmwaJV@2`ssw{HeU1_szRAiy1>NoZS6y1*dq z+uI#pXuJ4qrN8tYg%j`Bq^LIw)T<=bo!+$GJrRkFx!!MPAYUknZwpObR@Jj92Ty>i z-w$y!svM`#cFSVY?U9@|=Z!88sAP!5TZsW!A!0x_F7p@E{qvpndduPdsk4RpbMf0v z<;6vP(Kdn4I->H;?;4v^GSm4!zg>Mo7#Gx$r`4PYH?|Zow07X~{_~re#YUIkx!DzS ztX2R*D*GyKU~t}Kad7wY4jp?J&lG`JCn%6Yq}Ze1v$r-l=Yvl}K1EG#XH9@v)MH|w z_yX(6o!FhwWiG?q!1#-zTU%`(#EF)ko{SRPj(kCf`%ncd{E)ePa_sPjyX)cwyTWn+ zW+n_sc^}qS*i= z4$CxfLP`-1Zb!bn9(a{)BC9@!UI>ZqBip-g@8a)*Js@_*qTgK>|C 
z`bSf78}@*gdkRDwUoo!QGni)`HWKHKmhP3Q@y}!05Ql#V5N!lOA#$O=1){OvKVR%1 zt44g6 zM82=Bfh#=t8V`;n->PPBMCwN6c3uf8$qR_zgxD!MF?qbQ1N%f;cTP*ulxu%3NYiqs z+Y7g_Mi_LJAj+rH!pBn%wHU#Lj%+qCbkij-JIv%56ZyYdWPVB}d1cIRU32pKV3WVS z1RlJPs73v2xeguwoXSl)Uww?<8myY?aoj2431kV>4m0mZHHt=Ge{I&CIT5tLi)Z*A zoAUGawF#3)0Fv(l&zd*gMEK97?^+Uvlis-2NbbTBuOughL@kJ=v!67)d3Kj^1wWg6 zeB?h-76l#NNAJ?eSm@TsN%6KVF)|Rv7|dtzV)ry9Xr`s5b!|x&Ye*h$y+r3nq(mAH z{%0HtdRHnh@wi)aK?h#;+bbCCpxL7#;~I6G{o>IuHbO3GoiE%RmKbv<{Q%D5C&zX& z4N?r{hA)tvQ;nfmWALL>_HNk(WNiExr?kj>flZ6tH27t&oeK{01-;pQU9-sUl9zB- zh;~KeZ{qviG>*rc2c6`YNX-I9b%Ghq(D1qERJd zb&Ua6e21odcYARyw|b8~(_tY+&LmOoO^~S=_xYaw1A6j-6e!C{9iS%>r-p^p6;xO` zXrJYv)`y3GSMD44a)1~~R?~UG_fWGwtY(;#<V>Zv^LVe_HZ7PtM?}WX#0yiN)tAt56{VNBI!o3cFWm#g(;9`|$X=>{ACF(ekQoel zbXzSX0w#TeoFKY|lYOxCdPxO&#OtBi2M#Kn!|w`)K-T4dzJx!2T@wQ2%NeU#z*54H zF&59B?)!kG{4c?^Ua&cArn*8!sDBxvOqC%;(F-?0Pmf{K3LwB$=HH^Io@O{C>^lq~ z2K|DTEz-|oXU;76k@Ui0(oNI}U;t%p9ii*uF2}z7X00Fb5?iL4ZYid@@i-+e>vOEK z)Bdo+7+<%e&ha`9VOWs!-JjSlQ$E1AjMRBTE^hsuN?kiVc|y3oaC~uag~k=dWI~n2!vOAY^j`SMv-^CJFDKOQTh=(u5!1s4 zw+}~Da8P<&);?}0u=i}~Wm4fAK@VDwJjEqv%U&8!1}}j5Yj=hS5SFC$ifQD+&7BVJlokTiQL` zg2UMMcCS4s)(1xe0TwZC4+p6QfZbGLBeGHOT#X$U_3*S*&;OZ%>^_ zgz5kr{=8$)AJ_b6Tmb-Mkm!HEpZ8 z-facS4m6YwcPbl_4wwitU8qNGu zijF{4tD5IB@qOfH%Wpgy`A5hYo5-H4+hOdCyKLVPokP^lUm7NnKvJn14Lfu@)5s>y z4jeKzXs315Og(Eo!f&mm0JDLPB^VotySIsef9R3U2FtF4fa2o5RAy$D<_f^qzdjB9 zB*SMNeOe+fQ3SbqSP)>14cAS_BcNhkwO`dCcyeCpur{;YB{NE{i?L0TuBJOc_OZX6 z3LlG#K{6J!W_W6IKR8-u8%fLibQQcez!kg~WIc*~^&TeuwWRL(3)B-#t=> zc=X4|w$ zQQgRyC_7tJT|J2#XH7h76=$XF)n6UmBb|#WHW&B6Ea{w7htnmE@85yGQ z0dePRH2mJh$rdd(tX;8CaXM+VfX>@Ex?a!zzrQ9{48KK}ak*c4-ffcP=M~1WCswbw z9P9(^^b37IR0?z_zMA?fwX4jt=%K4P>2>TYDC%lzPQQ9ELp#?@cYtW4^U76M;9{h&i^CM4p|5DHRsPgWy?>8gtf33k z_d1Ira)4lFN&#Tx0#74x84PK=F??&4?WAcN8yk&s;t&NVhlFpyLx6kytlg)si_)E3 z&`Bv&Y|>%F18WO=r2z?^-2z8i3Hb8*4z{>JudAVCw}-kB|}t{yO92}8X} z=NxT-^`ZmZA4V^{Bl|Gm7+@d`0Yb8#ib7bbKxasC1kLMA;Ylg|%y4iXI7UhD27qp` zsF|K}(VqDPaE00U`a2^z3lAujghg4&wTNOM;8hIA@GxUOk!A$)6GVVykyQWb(SI~= 
zwD@Uy|N8C@>d^M0r}*nuEa*w18hA|c1|-OpEp{g`DG>N{XRc$TZ>N;jf}F@~jhVK; zj~B!jCO|3}7XxZHlcWFpP~|hF$VL3t7Hm?$!os*)(bk5%m2e7m1^Y3)JuiULF+e|@ z4WLc{#?@>Dj!%qo`<*nS1{K&l70MO;S^1RKhVp)C-LB7;GznN z!Jp|bd>(aLE1v}1F-;hq*fCCa0UuwO>No$@9Ty7Enrf}Kf*URa^2PuJa7rrF)rkK? zQ~;sa3F^v+JK*yjOcb4 z67j&Pil-auD5d{W3w{|o8~xAcw=}I14Ux9mK}scd1*vEbIOgzfdazJzS{kXmb z|Ev2Kk0UbRPDHPLcMZixm*TF_{tRTAvl`1}HDDb$uCfcB8haTiwN%pHlp=yw?nKpb zjW(ZM`Z4(m0d^Ez3Hqmc-4`W*e?JM5fhF9sq$>V!mvB_sp~=@y*$DTMvUmQ$g2H`? zCOisI`HD(PorYyvY`eoQM~LrG-9ss*fMT@}1${`m55T0nAje)~$tRMwLu>u^!8b@x zaycKarkcG?*6BMKTon|6tx_2GZ?jOa7}B~SU~qq5mr3c|7|(6YtvF3bd;lH-A}GkJ K$W%(1g!~r{xXcj% diff --git a/doc/source/img/task_states.svg b/doc/source/img/task_states.svg new file mode 100644 index 00000000..f40501ac --- /dev/null +++ b/doc/source/img/task_states.svg @@ -0,0 +1,8 @@ + + + + + +Tasks statesPENDINGRUNNINGFAILURESUCCESSREVERTINGREVERTEDstart + diff --git a/doc/source/states.rst b/doc/source/states.rst index 34841f4d..02fcaf15 100644 --- a/doc/source/states.rst +++ b/doc/source/states.rst @@ -7,46 +7,40 @@ States Engine ====== -.. image:: img/engine_states.png - :height: 265px - :align: right +.. image:: img/engine_states.svg + :width: 660px + :align: left :alt: Action engine state transitions -Executing ---------- +**RESUMING** - Prepares flow & atoms to be resumed. -**RESUMING** - Prepare flow to be resumed. +**SCHEDULING** - Schedules and submits atoms to be worked on. -**SCHEDULING** - Schedule nodes to be worked on. +**WAITING** - Wait for atoms to finish executing. -**WAITING** - Wait for nodes to finish executing. +**ANALYZING** - Analyzes and processes result/s of atom completion. -**ANALYZING** - Analyze and process result/s of node completion. +**SUCCESS** - Completed successfully. -End ---- - -**SUCCESS** - Engine completed successfully. - -**REVERTED** - Engine reverting was induced and all nodes were not completed +**REVERTED** - Reverting was induced and all atoms were **not** completed successfully. 
-**SUSPENDED** - Engine was suspended while running.. +**SUSPENDED** - Suspended while running. Flow ==== -.. image:: img/flow_states.png - :height: 400px - :align: right +.. image:: img/flow_states.svg + :width: 660px + :align: left :alt: Flow state transitions **PENDING** - A flow starts its life in this state. **RUNNING** - In this state flow makes a progress, executes and/or reverts its -tasks. +atoms. -**SUCCESS** - Once all tasks have finished successfully the flow transitions to +**SUCCESS** - Once all atoms have finished successfully the flow transitions to the SUCCESS state. **REVERTED** - The flow transitions to this state when it has been reverted @@ -57,14 +51,14 @@ after the failure. **SUSPENDING** - In the RUNNING state the flow can be suspended. When this happens, flow transitions to the SUSPENDING state immediately. In that state -the engine running the flow waits for running tasks to finish (since the engine -can not preempt tasks that are active). +the engine running the flow waits for running atoms to finish (since the engine +can not preempt atoms that are active). -**SUSPENDED** - When no tasks are running and all results received so far are +**SUSPENDED** - When no atoms are running and all results received so far are saved, the flow transitions from the SUSPENDING state to SUSPENDED. Also it may -go to the SUCCESS state if all tasks were in fact ran, or to the REVERTED state -if the flow was reverting and all tasks were reverted while the engine was -waiting for running tasks to finish, or to the FAILURE state if tasks were run +go to the SUCCESS state if all atoms were in fact ran, or to the REVERTED state +if the flow was reverting and all atoms were reverted while the engine was +waiting for running atoms to finish, or to the FAILURE state if atoms were run or reverted and some of them failed. **RESUMING** - When the flow is interrupted 'in a hard way' (e.g. 
server @@ -79,24 +73,25 @@ From the SUCCESS, FAILURE or REVERTED states the flow can be ran again (and thus it goes back into the RUNNING state). One of the possible use cases for this transition is to allow for alteration of a flow or flow details associated with a previously ran flow after the flow has finished, and client code wants -to ensure that each task from this new (potentially updated) flow has its +to ensure that each atom from this new (potentially updated) flow has its chance to run. .. note:: The current code also contains strong checks during each flow state - transition using the model described above and raises the InvalidState - exception if an invalid transition is attempted. This exception being - triggered usually means there is some kind of bug in the engine code or some - type of misuse/state violation is occurring, and should be reported as such. + transition using the model described above and raises the + :py:class:`~taskflow.exceptions.InvalidState` exception if an invalid + transition is attempted. This exception being triggered usually means there + is some kind of bug in the engine code or some type of misuse/state violation + is occurring, and should be reported as such. Task ==== -.. image:: img/task_states.png - :height: 265px - :align: right +.. image:: img/task_states.svg + :width: 660px + :align: left :alt: Task state transitions **PENDING** - When a task is added to a flow, it starts in the PENDING state, @@ -105,7 +100,8 @@ on to complete. The task transitions to the PENDING state after it was reverted and its flow was restarted or retried. **RUNNING** - When flow starts to execute the task, it transitions to the -RUNNING state, and stays in this state until its execute() method returns. +RUNNING state, and stays in this state until its +:py:meth:`execute() ` method returns. **SUCCESS** - The task transitions to this state after it was finished successfully. @@ -115,20 +111,20 @@ error. 
When the flow containing this task is being reverted, all its tasks are walked in particular order. **REVERTING** - The task transitions to this state when the flow starts to -revert it and its revert() method is called. Only tasks in the SUCCESS or -FAILURE state can be reverted. If this method fails (raises exception), task -goes to the FAILURE state. +revert it and its :py:meth:`revert() ` method +is called. Only tasks in the SUCCESS or FAILURE state can be reverted. If this +method fails (raises exception), the task goes to the FAILURE state. -**REVERTED** - The task that has been reverted appears it this state. +**REVERTED** - A task that has been reverted appears in this state. Retry ===== -.. image:: img/retry_states.png - :height: 275px - :align: right - :alt: Task state transitions +.. image:: img/retry_states.svg + :width: 660px + :align: left + :alt: Retry state transitions Retry has the same states as a task and one additional state. @@ -138,7 +134,8 @@ on to complete. The retry transitions to the PENDING state after it was reverted and its flow was restarted or retried. **RUNNING** - When flow starts to execute the retry, it transitions to the -RUNNING state, and stays in this state until its execute() method returns. +RUNNING state, and stays in this state until its +:py:meth:`execute() ` method returns. **SUCCESS** - The retry transitions to this state after it was finished successfully. @@ -148,14 +145,12 @@ error. When the flow containing this retry is being reverted, all its tasks are walked in particular order. **REVERTING** - The retry transitions to this state when the flow starts to -revert it and its revert() method is called. Only retries in SUCCESS or FAILURE -state can be reverted. If this method fails (raises exception), task goes to -the FAILURE. +revert it and its :py:meth:`revert() ` method is +called. Only retries in SUCCESS or FAILURE state can be reverted. 
If this +method fails (raises exception), the retry goes to the FAILURE state. -**REVERTED** - The retry that has been reverted appears it this state. +**REVERTED** - A retry that has been reverted appears in this state. **RETRYING** - If flow that is managed by the current retry was failed and -reverted, the retry prepares it for the next run and transitions to the +reverted, the engine prepares it for the next run and transitions to the RETRYING state. - - diff --git a/tools/generate_states.sh b/tools/generate_states.sh new file mode 100755 index 00000000..2da75817 --- /dev/null +++ b/tools/generate_states.sh @@ -0,0 +1,32 @@ +#!/bin/bash + +set -u +xsltproc=`which xsltproc` +if [ -z "$xsltproc" ]; then + echo "Please install xsltproc before continuing." + exit 1 +fi + +set -e +if [ ! -d "$PWD/.diagram-tools" ]; then + git clone "https://github.com/vidarh/diagram-tools.git" "$PWD/.diagram-tools" +fi + +script_dir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" +img_dir="$script_dir/../doc/source/img" + +echo "---- Updating task state diagram ----" +python $script_dir/state_graph.py -t -f /tmp/states.svg +$xsltproc $PWD/.diagram-tools/notugly.xsl /tmp/states.svg > $img_dir/task_states.svg + +echo "---- Updating flow state diagram ----" +python $script_dir/state_graph.py -f /tmp/states.svg +$xsltproc $PWD/.diagram-tools/notugly.xsl /tmp/states.svg > $img_dir/flow_states.svg + +echo "---- Updating engine state diagram ----" +python $script_dir/state_graph.py -e -f /tmp/states.svg +$xsltproc $PWD/.diagram-tools/notugly.xsl /tmp/states.svg > $img_dir/engine_states.svg + +echo "---- Updating retry state diagram ----" +python $script_dir/state_graph.py -r -f /tmp/states.svg +$xsltproc $PWD/.diagram-tools/notugly.xsl /tmp/states.svg > $img_dir/retry_states.svg diff --git a/tools/state_graph.py b/tools/state_graph.py old mode 100644 new mode 100755 index f6a2057d..77b85636 --- a/tools/state_graph.py +++ b/tools/state_graph.py @@ -1,5 +1,6 @@ #!/usr/bin/env python 
+import optparse import os import sys @@ -7,41 +8,13 @@ top_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir)) sys.path.insert(0, top_dir) -import optparse -import subprocess -import tempfile +import networkx as nx + +# To get this installed you may have to follow: +# https://code.google.com/p/pydot/issues/detail?id=93 (until fixed). +import pydot from taskflow import states -from taskflow.types import graph as gr - - -def mini_exec(cmd, ok_codes=(0,)): - stdout = subprocess.PIPE - stderr = subprocess.PIPE - proc = subprocess.Popen(cmd, stdout=stdout, stderr=stderr, stdin=None) - (stdout, stderr) = proc.communicate() - rc = proc.returncode - if rc not in ok_codes: - raise RuntimeError("Could not run %s [%s]\nStderr: %s" - % (cmd, rc, stderr)) - return (stdout, stderr) - - -def make_svg(graph, output_filename, output_format): - # NOTE(harlowja): requires pydot! - gdot = graph.export_to_dot() - if output_format == 'dot': - output = gdot - elif output_format in ('svg', 'svgz', 'png'): - with tempfile.NamedTemporaryFile(suffix=".dot") as fh: - fh.write(gdot) - fh.flush() - cmd = ['dot', '-T%s' % output_format, fh.name] - output, _stderr = mini_exec(cmd) - else: - raise ValueError('Unknown format: %s' % output_filename) - with open(output_filename, "wb") as fh: - fh.write(output) def main(): @@ -52,6 +25,14 @@ def main(): action='store_true', help="use task state transitions", default=False) + parser.add_option("-r", "--retries", dest="retries", + action='store_true', + help="use retry state transitions", + default=False) + parser.add_option("-e", "--engines", dest="engines", + action='store_true', + help="use engine state transitions", + default=False) parser.add_option("-T", "--format", dest="format", help="output in given format", default='svg') @@ -60,20 +41,90 @@ def main(): if options.filename is None: options.filename = 'states.%s' % options.format - g = gr.DiGraph(name="State transitions") - if not options.tasks: - source = 
states._ALLOWED_FLOW_TRANSITIONS + types = [options.engines, options.retries, options.tasks] + if sum([int(i) for i in types]) > 1: + parser.error("Only one of task/retry/engines may be specified.") + + disallowed = set() + start_node = states.PENDING + if options.tasks: + source = list(states._ALLOWED_TASK_TRANSITIONS) + source_type = "Tasks" + disallowed.add(states.RETRYING) + elif options.retries: + source = list(states._ALLOWED_TASK_TRANSITIONS) + source_type = "Retries" + elif options.engines: + # TODO(harlowja): place this in states.py + source = [ + (states.RESUMING, states.SCHEDULING), + (states.SCHEDULING, states.WAITING), + (states.WAITING, states.ANALYZING), + (states.ANALYZING, states.SCHEDULING), + (states.ANALYZING, states.WAITING), + ] + for u in (states.SCHEDULING, states.ANALYZING): + for v in (states.SUSPENDED, states.SUCCESS, states.REVERTED): + source.append((u, v)) + source_type = "Engines" + start_node = states.RESUMING else: - source = states._ALLOWED_TASK_TRANSITIONS + source = list(states._ALLOWED_FLOW_TRANSITIONS) + source_type = "Flow" + + transitions = nx.DiGraph() for (u, v) in source: - if not g.has_node(u): - g.add_node(u) - if not g.has_node(v): - g.add_node(v) - g.add_edge(u, v) - make_svg(g, options.filename, options.format) + if u not in disallowed: + transitions.add_node(u) + if v not in disallowed: + transitions.add_node(v) + for (u, v) in source: + if not transitions.has_node(u) or not transitions.has_node(v): + continue + transitions.add_edge(u, v) + + graph_name = "%s states" % source_type + g = pydot.Dot(graph_name=graph_name, rankdir='LR', + nodesep='0.25', overlap='false', + ranksep="0.5", size="11x8.5", + splines='true', ordering='in') + node_attrs = { + 'fontsize': '11', + } + nodes = {} + nodes_order = [] + edges_added = [] + for (u, v) in nx.bfs_edges(transitions, source=start_node): + if u not in nodes: + nodes[u] = pydot.Node(u, **node_attrs) + g.add_node(nodes[u]) + nodes_order.append(u) + if v not in nodes: + 
nodes[v] = pydot.Node(v, **node_attrs) + g.add_node(nodes[v]) + nodes_order.append(v) + for u in nodes_order: + for v in transitions.successors_iter(u): + if (u, v) not in edges_added: + g.add_edge(pydot.Edge(nodes[u], nodes[v])) + edges_added.append((u, v)) + start = pydot.Node("__start__", shape="point", width="0.1", + xlabel='start', fontcolor='green', **node_attrs) + g.add_node(start) + g.add_edge(pydot.Edge(start, nodes[start_node], style='dotted')) + + print("*" * len(graph_name)) + print(graph_name) + print("*" * len(graph_name)) + print(g.to_string().strip()) + + g.write(options.filename, format=options.format) print("Created %s at '%s'" % (options.format, options.filename)) + # To make the svg more pretty use the following: + # $ xsltproc ../diagram-tools/notugly.xsl ./states.svg > pretty-states.svg + # Get diagram-tools from https://github.com/vidarh/diagram-tools.git + if __name__ == '__main__': main() From 6dff5d3b48794d7e34b180e730267b29549d7b1a Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Sun, 6 Jul 2014 16:27:05 -0700 Subject: [PATCH 147/188] Remove pbr as a runtime dependency The pbr package is only a build time dependency and as such should not be in the requirements.txt requirements file. Change-Id: I5ee90767850c2caa4534f69b967f6dce6eca0de1 --- requirements.txt | 1 - 1 file changed, 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 87e82ad3..21fc544a 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,5 +1,4 @@ # Packages needed for using this library. -pbr>=0.6,!=0.7,<1.0 anyjson>=0.3.3 iso8601>=0.1.9 # Python 2->3 compatibility library. From c5b1b5f15b8a3a46a754186ca1d1ee8264eb5e9f Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Tue, 8 Jul 2014 18:44:23 -0700 Subject: [PATCH 148/188] Add the arch/big picture omnigraffle diagram To avoid losing this diagram again let's keep it in the source tree so that others can modify it and extend it as they desire (and also prevents it from disappearing). 
Will need omnigraffle to view: https://www.omnigroup.com/omnigraffle Change-Id: I44eb15722c9d6f2c63d965ff8f849efbdf15bbfb --- doc/diagrams/core.graffle | 8023 +++++++++++++++++++++++++++++++++++++ 1 file changed, 8023 insertions(+) create mode 100644 doc/diagrams/core.graffle diff --git a/doc/diagrams/core.graffle b/doc/diagrams/core.graffle new file mode 100644 index 00000000..a570fe59 --- /dev/null +++ b/doc/diagrams/core.graffle @@ -0,0 +1,8023 @@ + + + + + ActiveLayerIndex + 0 + ApplicationVersion + + com.omnigroup.OmniGrafflePro + 139.18.0.187838 + + AutoAdjust + + BackgroundGraphic + + Bounds + {{0, 0}, {1152, 2199}} + Class + SolidGraphic + ID + 2 + Style + + shadow + + Draws + NO + + stroke + + Draws + NO + + + + BaseZoom + 0 + CanvasOrigin + {0, 0} + ColumnAlign + 1 + ColumnSpacing + 36 + CreationDate + 2014-07-08 20:47:01 +0000 + Creator + Joshua Harlow + DisplayScale + 1 0/72 in = 1.0000 in + ExportShapes + + + InspectorGroup + 255 + ShapeImageRect + {{2, 2}, {22, 22}} + ShapeName + 33C70F48-B008-4466-BD81-E84D73C055CA-438-0000056AF6035FFB + ShouldExport + YES + StrokePath + + elements + + + element + MOVETO + point + {0.40652500000000003, 0.088786000000000004} + + + control1 + {0.39769700000000002, -0.059801} + control2 + {0.312282, -0.20657200000000001} + element + CURVETO + point + {0.15027599999999999, -0.32002000000000003} + + + control1 + {-0.028644599999999999, -0.44531500000000002} + control2 + {-0.26560600000000001, -0.50519099999999995} + element + CURVETO + point + {-0.5, -0.49964799999999998} + + + element + LINETO + point + {-0.5, -0.25638699999999998} + + + control1 + {-0.358902, -0.262291} + control2 + {-0.21507999999999999, -0.22622900000000001} + element + CURVETO + point + {-0.10728, -0.148201} + + + control1 + {-0.0160971, -0.082201999999999997} + control2 + {0.033605599999999999, 0.0024510600000000001} + element + CURVETO + point + {0.041826200000000001, 0.088786000000000004} + + + element + LINETO + point + {-0.043046000000000001, 
0.088786000000000004} + + + element + LINETO + point + {0.22847700000000001, 0.5} + + + element + LINETO + point + {0.5, 0.088786000000000004} + + + element + LINETO + point + {0.40652500000000003, 0.088786000000000004} + + + element + CLOSE + + + element + MOVETO + point + {0.40652500000000003, 0.088786000000000004} + + + + TextBounds + {{0, 0}, {1, 1}} + + + GraphDocumentVersion + 8 + GraphicsList + + + Class + LineGraphic + ID + 1169 + Points + + {148.34850886899716, 1297.778564453125} + {148.34850886899716, 1565.8355233257191} + + Style + + stroke + + HeadArrow + 0 + Legacy + + LineType + 1 + Pattern + 1 + TailArrow + 0 + + + + + Bounds + {{108.29570600619962, 1459.9910998882619}, {30, 14}} + Class + ShapedGraphic + FitText + YES + Flow + Resize + FontInfo + + Font + Helvetica + Size + 12 + + ID + 1167 + Shape + Rectangle + Style + + fill + + Draws + NO + + shadow + + Draws + NO + + stroke + + Draws + NO + + + Text + + Pad + 0 + Text + {\rtf1\ansi\ansicpg1252\cocoartf1265\cocoasubrtf200 +\cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} +{\colortbl;\red255\green255\blue255;} +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\pardirnatural\qc + +\f0\i\fs24 \cf0 Emits} + VerticalPad + 0 + + Wrap + NO + + + Class + LineGraphic + ID + 1166 + Points + + {172.01007495190493, 1463.7899284362793} + {108.29570625246291, 1489.3205401648188} + + Style + + stroke + + HeadArrow + FilledArrow + Legacy + + LineType + 1 + Pattern + 1 + TailArrow + 0 + + + + + Bounds + {{28, 1489.3205331673671}, {108.00000616531918, 60.376010894775391}} + Class + ShapedGraphic + ID + 1165 + Shape + Cloud + Style + + Text + + Text + {\rtf1\ansi\ansicpg1252\cocoartf1265\cocoasubrtf200 +\cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} +{\colortbl;\red255\green255\blue255;} +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\pardirnatural\qc + +\f0\fs24 \cf0 Board\ +Notifications} + VerticalPad + 0 + + 
+ + Class + LineGraphic + ID + 1161 + Points + + {16.938813712387287, 1214.6957778930664} + {550.61227271339396, 1214.6957778930664} + + Style + + stroke + + HeadArrow + 0 + Legacy + + LineType + 1 + Pattern + 1 + TailArrow + 0 + + + + + Class + Group + Graphics + + + Bounds + {{177.05329513549805, 1254.1663719071589}, {82, 22}} + Class + ShapedGraphic + FitText + YES + Flow + Resize + ID + 1163 + Shape + Rectangle + Style + + fill + + Draws + NO + + shadow + + Draws + NO + + stroke + + Draws + NO + + + Text + + Pad + 0 + Text + {\rtf1\ansi\ansicpg1252\cocoartf1265\cocoasubrtf200 +\cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} +{\colortbl;\red255\green255\blue255;} +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\pardirnatural\qc + +\f0\i\b\fs36 \cf0 (optional)} + VerticalPad + 0 + + Wrap + NO + + + Bounds + {{56.053289698640896, 1231.7786193741999}, {116, 66}} + Class + ShapedGraphic + FitText + YES + Flow + Resize + FontInfo + + Font + Helvetica-BoldOblique + Size + 18 + + ID + 1164 + Shape + Rectangle + Style + + fill + + Draws + NO + + shadow + + Draws + NO + + stroke + + Draws + NO + + + Text + + Pad + 0 + Text + {\rtf1\ansi\ansicpg1252\cocoartf1265\cocoasubrtf200 +\cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} +{\colortbl;\red255\green255\blue255;} +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\pardirnatural\qc + +\f0\i\b\fs36 \cf0 Posting & \ +Consumption\ +Phase} + VerticalPad + 0 + + Wrap + NO + + + ID + 1162 + + + Bounds + {{560.82414838901218, 1484.9621440334549}, {71, 28}} + Class + ShapedGraphic + FitText + YES + Flow + Resize + FontInfo + + Font + Helvetica + Size + 12 + + ID + 1159 + Shape + Rectangle + Style + + fill + + Draws + NO + + shadow + + Draws + NO + + stroke + + Draws + NO + + + Text + + Pad + 0 + Text + {\rtf1\ansi\ansicpg1252\cocoartf1265\cocoasubrtf200 +\cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} 
+{\colortbl;\red255\green255\blue255;} +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\pardirnatural\qc + +\f0\i\fs24 \cf0 Consumption\ +Loop} + VerticalPad + 0 + + Wrap + NO + + + Class + Group + Graphics + + + Bounds + {{521.16725664085266, 1494.1828820625792}, {27.016406012875592, 38.542124503311257}} + Class + ShapedGraphic + ID + 1155 + Magnets + + {0.15027599999999999, -0.32002000000000003} + {-0.5, -0.49964799999999998} + {-0.5, -0.25638699999999998} + {-0.10728, -0.148201} + {0.041826500000000003, 0.088786000000000004} + {-0.043045800000000002, 0.088786000000000004} + {0.22847700000000001, 0.5} + {0.5, 0.088786000000000004} + {0.40652500000000003, 0.088786000000000004} + {0.40652500000000003, 0.088786000000000004} + {0.40652500000000003, 0.088786000000000004} + + Rotation + 90 + Shape + Bezier + ShapeData + + UnitPoints + + {0.406219, 0.101163} + {0.39736100000000002, -0.042958700000000002} + {0.31166700000000003, -0.185311} + {0.149117, -0.29534500000000002} + {-0.030395499999999999, -0.41686299999999998} + {-0.261517, -0.50514099999999995} + {-0.49668800000000002, -0.49976700000000002} + {-0.496693, -0.49976500000000001} + {-0.062913899999999995, -0.36058899999999999} + {-0.062913899999999995, -0.36058899999999999} + {-0.062918699999999994, -0.36058899999999999} + {-0.5, -0.21609700000000001} + {-0.5, -0.21609600000000001} + {-0.35843000000000003, -0.22182399999999999} + {-0.217449, -0.204378} + {-0.10928400000000001, -0.12870000000000001} + {-0.017806099999999998, -0.064687300000000003} + {0.032062500000000001, 0.0174179} + {0.040309900000000003, 0.101163} + {0.040309900000000003, 0.101163} + {-0.044847499999999998, 0.101163} + {-0.044847499999999998, 0.101163} + {-0.044847499999999998, 0.101163} + {0.22758200000000001, 0.5} + {0.22758200000000001, 0.5} + {0.22758200000000001, 0.5} + {0.5, 0.101163} + {0.5, 0.101163} + {0.5, 0.101163} + {0.406219, 0.101163} + + + + + Bounds + {{501.82414838901218, 
1486.2594805049087}, {26.999999999999996, 38.288223134554855}} + Class + ShapedGraphic + ID + 1156 + Magnets + + {0.15027599999999999, -0.32002000000000003} + {-0.5, -0.49964799999999998} + {-0.5, -0.25638699999999998} + {-0.10728, -0.148201} + {0.041826500000000003, 0.088786000000000004} + {-0.043045800000000002, 0.088786000000000004} + {0.22847700000000001, 0.5} + {0.5, 0.088786000000000004} + {0.40652500000000003, 0.088786000000000004} + {0.40652500000000003, 0.088786000000000004} + {0.40652500000000003, 0.088786000000000004} + + Rotation + 180 + Shape + 33C70F48-B008-4466-BD81-E84D73C055CA-438-0000056AF6035FFB + + + Bounds + {{509.94240895873349, 1465.3378642343948}, {27.016406012875589, 38.264972185430459}} + Class + ShapedGraphic + ID + 1157 + Magnets + + {0.15027599999999999, -0.32002000000000003} + {-0.5, -0.49964799999999998} + {-0.5, -0.25638699999999998} + {-0.10728, -0.148201} + {0.041826500000000003, 0.088786000000000004} + {-0.043045800000000002, 0.088786000000000004} + {0.22847700000000001, 0.5} + {0.5, 0.088786000000000004} + {0.40652500000000003, 0.088786000000000004} + {0.40652500000000003, 0.088786000000000004} + {0.40652500000000003, 0.088786000000000004} + + Rotation + 270 + Shape + 33C70F48-B008-4466-BD81-E84D73C055CA-438-0000056AF6035FFB + + + Bounds + {{528.82414838901218, 1473.3774855848621}, {27.000000000000004, 38.288223134554862}} + Class + ShapedGraphic + ID + 1158 + Magnets + + {0.15027599999999999, -0.32002000000000003} + {-0.5, -0.49964799999999998} + {-0.5, -0.25638699999999998} + {-0.10728, -0.148201} + {0.041826500000000003, 0.088786000000000004} + {-0.043045800000000002, 0.088786000000000004} + {0.22847700000000001, 0.5} + {0.5, 0.088786000000000004} + {0.40652500000000003, 0.088786000000000004} + {0.40652500000000003, 0.088786000000000004} + {0.40652500000000003, 0.088786000000000004} + + Shape + 33C70F48-B008-4466-BD81-E84D73C055CA-438-0000056AF6035FFB + + + ID + 1154 + + + Class + Group + Graphics + + + Bounds + 
{{302.01802465549162, 1480.6049629105769}, {37, 28}} + Class + ShapedGraphic + FitText + YES + Flow + Resize + ID + 1150 + Shape + Rectangle + Style + + fill + + Draws + NO + + shadow + + Draws + NO + + stroke + + Draws + NO + + + Text + + Align + 0 + Pad + 0 + Text + {\rtf1\ansi\ansicpg1252\cocoartf1265\cocoasubrtf200 +\cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} +{\colortbl;\red255\green255\blue255;} +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\pardirnatural + +\f0\fs24 \cf0 - wait()\ +....} + VerticalPad + 0 + + Wrap + NO + + + Bounds + {{230.34967062106779, 1480.6049629105769}, {63, 42}} + Class + ShapedGraphic + FitText + YES + Flow + Resize + ID + 1151 + Shape + Rectangle + Style + + fill + + Draws + NO + + shadow + + Draws + NO + + stroke + + Draws + NO + + + Text + + Align + 0 + Pad + 0 + Text + {\rtf1\ansi\ansicpg1252\cocoartf1265\cocoasubrtf200 +\cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} +{\colortbl;\red255\green255\blue255;} +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\pardirnatural + +\f0\fs24 \cf0 - abandon()\ +- iterjobs()\ +} + VerticalPad + 0 + + Wrap + NO + + + Bounds + {{177.68130514255216, 1480.6049629105769}, {44, 42}} + Class + ShapedGraphic + FitText + YES + Flow + Resize + ID + 1152 + Shape + Rectangle + Style + + fill + + Draws + NO + + shadow + + Draws + NO + + stroke + + Draws + NO + + + Text + + Align + 0 + Pad + 0 + Text + {\rtf1\ansi\ansicpg1252\cocoartf1265\cocoasubrtf200 +\cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} +{\colortbl;\red255\green255\blue255;} +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\pardirnatural + +\f0\fs24 \cf0 - post()\ +- claim()\ +} + VerticalPad + 0 + + Wrap + NO + + + Bounds + {{170.82414838901212, 1475.3192573441706}, {175.99597549438477, 38.571445465087891}} + Class + ShapedGraphic + ID + 1153 + Shape + Rectangle + Style + + stroke 
+ + Pattern + 1 + + + + + ID + 1149 + + + Class + LineGraphic + ID + 1148 + Points + + {290.10982343784025, 1540.438820465416} + {289.2121722540532, 1513.7478166474543} + + Style + + stroke + + HeadArrow + UMLInheritance + Legacy + + LineType + 1 + TailArrow + 0 + + + Tail + + ID + 1147 + + + + Bounds + {{245.10982343784025, 1540.438820465416}, {90, 36}} + Class + ShapedGraphic + ID + 1147 + Magnets + + {0, 1} + {0, -1} + {1, 0} + {-1, 0} + + Shape + Rectangle + Style + + Text + + Text + {\rtf1\ansi\ansicpg1252\cocoartf1265\cocoasubrtf200 +\cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} +{\colortbl;\red255\green255\blue255;} +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\qc + +\f0\i\b\fs24 \cf0 Zookeeper\ +Jobboard} + VerticalPad + 0 + + + + Bounds + {{435.40182134738615, 1470.9621696472168}, {58, 56}} + Class + ShapedGraphic + FitText + YES + Flow + Resize + FontInfo + + Font + Helvetica + Size + 12 + + ID + 1146 + Shape + Rectangle + Style + + fill + + Draws + NO + + shadow + + Draws + NO + + stroke + + Draws + NO + + + Text + + Align + 0 + Pad + 0 + Text + {\rtf1\ansi\ansicpg1252\cocoartf1265\cocoasubrtf200 +\cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} +{\colortbl;\red255\green255\blue255;} +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\pardirnatural + +\f0\i\fs24 \cf0 - Claim job\ +- Load job\ +- Translate\ +- Activate} + VerticalPad + 0 + + Wrap + NO + + + Class + Group + Graphics + + + Class + Group + Graphics + + + Bounds + {{539.9018270694321, 1424.6444211854182}, {33, 12}} + Class + ShapedGraphic + FitText + YES + Flow + Resize + ID + 1117 + Shape + Rectangle + Style + + fill + + Draws + NO + + shadow + + Draws + NO + + stroke + + Draws + NO + + + Text + + Pad + 0 + Text + {\rtf1\ansi\ansicpg1252\cocoartf1265\cocoasubrtf200 +\cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} +{\colortbl;\red255\green255\blue255;} 
+\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\pardirnatural\qc + +\f0\fs20 \cf0 Worker} + VerticalPad + 0 + + Wrap + NO + + + Class + Group + Graphics + + + Bounds + {{530.40181944003757, 1370.6049924744807}, {52, 72}} + Class + ShapedGraphic + FitText + YES + Flow + Resize + ID + 1119 + Shape + Rectangle + Style + + fill + + Draws + NO + + shadow + + Draws + NO + + stroke + + Draws + NO + + + Text + + Pad + 0 + Text + {\rtf1\ansi\ansicpg1252\cocoartf1265\cocoasubrtf200 +\cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} +{\colortbl;\red255\green255\blue255;} +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\pardirnatural\qc + +\f0\fs20 \cf0 Specialized\ +\ +\ +\ +\ +} + VerticalPad + 0 + + Wrap + NO + + + Class + Group + Graphics + + + Bounds + {{545.72859289858161, 1420.3446371019186}, {19.299808229718884, 19.299808879032174}} + Class + ShapedGraphic + ID + 1121 + Shape + Rectangle + Style + + fill + + Color + + b + 0.4 + g + 1 + r + 1 + + Draws + NO + FillType + 2 + GradientAngle + 90 + GradientColor + + b + 0.4 + g + 1 + r + 1 + + MiddleColor + + b + 0.4 + g + 1 + r + 1 + + TrippleBlend + YES + + shadow + + Beneath + YES + Draws + NO + Fuzziness + 2.5038185119628906 + ShadowVector + {0, 1} + + stroke + + Draws + NO + Width + 1.5 + + + VFlip + YES + Wrap + NO + + + Class + LineGraphic + ID + 1122 + Points + + {545.72859289858161, 1402.9748103444847} + {565.02840112830063, 1402.9748103444847} + {565.02840112830063, 1402.9748103444847} + + Style + + shadow + + Beneath + YES + Draws + YES + Fuzziness + 2.5038185119628906 + ShadowVector + {0, 1} + + stroke + + CornerRadius + 1 + HeadArrow + 0 + Legacy + + LineType + 1 + TailArrow + 0 + Width + 1.5 + + + + + Class + LineGraphic + ID + 1123 + Points + + {555.37849701344112, 1410.6947336363717} + {545.7285925739252, 1420.3446377512312} + + Style + + shadow + + Beneath + YES + Draws + YES + Fuzziness + 2.5038185119628906 + 
ShadowVector + {0, 1} + + stroke + + CornerRadius + 1 + HeadArrow + 0 + Legacy + + LineType + 1 + TailArrow + 0 + Width + 1.5 + + + + + Class + LineGraphic + ID + 1124 + Points + + {555.37849701344112, 1410.6947336363719} + {565.02840112830063, 1420.7306576742712} + + Style + + shadow + + Beneath + YES + Draws + YES + Fuzziness + 2.5038185119628906 + ShadowVector + {0, 1} + + stroke + + CornerRadius + 1 + HeadArrow + 0 + Legacy + + LineType + 1 + TailArrow + 0 + Width + 1.5 + + + + + Class + LineGraphic + ID + 1125 + Points + + {555.37849701344112, 1397.1848678755687} + {555.37849701344112, 1410.6947352596553} + + Style + + shadow + + Beneath + YES + Draws + YES + Fuzziness + 2.5038185119628906 + ShadowVector + {0, 1} + + stroke + + CornerRadius + 1 + HeadArrow + 0 + Legacy + + LineType + 1 + TailArrow + 0 + Width + 1.5 + + + + + Bounds + {{549.58855454452544, 1385.6049829377375}, {11.57988428851805, 11.57988428851805}} + Class + ShapedGraphic + ID + 1126 + Shape + Circle + Style + + fill + + Color + + b + 0.4 + g + 1 + r + 1 + + Draws + NO + FillType + 2 + GradientAngle + 90 + GradientColor + + b + 0.4 + g + 1 + r + 1 + + MiddleColor + + b + 0.4 + g + 1 + r + 1 + + TrippleBlend + YES + + shadow + + Beneath + YES + Fuzziness + 2.5038185119628906 + ShadowVector + {0, 1} + + stroke + + CornerRadius + 1 + Width + 1.5 + + + + + ID + 1120 + + + ID + 1118 + + + ID + 1116 + + + Class + Group + Graphics + + + Bounds + {{492.40181944003757, 1394.5655408753596}, {47, 12}} + Class + ShapedGraphic + FitText + YES + Flow + Resize + ID + 1128 + Shape + Rectangle + Style + + fill + + Draws + NO + + shadow + + Draws + NO + + stroke + + Draws + NO + + + Text + + Pad + 0 + Text + {\rtf1\ansi\ansicpg1252\cocoartf1265\cocoasubrtf200 +\cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} +{\colortbl;\red255\green255\blue255;} +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\pardirnatural\qc + +\f0\fs20 \cf0 Conductor} + VerticalPad + 0 + + 
Wrap + NO + + + Class + Group + Graphics + + + Bounds + {{505.22859289858167, 1444.305185502797}, {19.299808229718884, 19.299808879032174}} + Class + ShapedGraphic + ID + 1130 + Shape + Rectangle + Style + + fill + + Color + + b + 0.4 + g + 1 + r + 1 + + Draws + NO + FillType + 2 + GradientAngle + 90 + GradientColor + + b + 0.4 + g + 1 + r + 1 + + MiddleColor + + b + 0.4 + g + 1 + r + 1 + + TrippleBlend + YES + + shadow + + Beneath + YES + Draws + NO + Fuzziness + 2.5038185119628906 + ShadowVector + {0, 1} + + stroke + + Draws + NO + Width + 1.5 + + + VFlip + YES + Wrap + NO + + + Class + LineGraphic + ID + 1131 + Points + + {505.22859289858161, 1426.9353587453638} + {524.52840112830063, 1426.9353587453638} + {524.52840112830063, 1426.9353587453638} + + Style + + shadow + + Beneath + YES + Draws + YES + Fuzziness + 2.5038185119628906 + ShadowVector + {0, 1} + + stroke + + CornerRadius + 1 + HeadArrow + 0 + Legacy + + LineType + 1 + TailArrow + 0 + Width + 1.5 + + + + + Class + LineGraphic + ID + 1132 + Points + + {514.87849701344112, 1434.6552820372506} + {505.2285925739252, 1444.3051861521101} + + Style + + shadow + + Beneath + YES + Draws + YES + Fuzziness + 2.5038185119628906 + ShadowVector + {0, 1} + + stroke + + CornerRadius + 1 + HeadArrow + 0 + Legacy + + LineType + 1 + TailArrow + 0 + Width + 1.5 + + + + + Class + LineGraphic + ID + 1133 + Points + + {514.87849701344112, 1434.6552820372508} + {524.52840112830063, 1444.6912060751501} + + Style + + shadow + + Beneath + YES + Draws + YES + Fuzziness + 2.5038185119628906 + ShadowVector + {0, 1} + + stroke + + CornerRadius + 1 + HeadArrow + 0 + Legacy + + LineType + 1 + TailArrow + 0 + Width + 1.5 + + + + + Class + LineGraphic + ID + 1134 + Points + + {514.87849701344112, 1421.1454162764476} + {514.87849701344112, 1434.6552836605342} + + Style + + shadow + + Beneath + YES + Draws + YES + Fuzziness + 2.5038185119628906 + ShadowVector + {0, 1} + + stroke + + CornerRadius + 1 + HeadArrow + 0 + Legacy + + LineType + 
1 + TailArrow + 0 + Width + 1.5 + + + + + Bounds + {{509.08855454452544, 1409.5655313386164}, {11.57988428851805, 11.57988428851805}} + Class + ShapedGraphic + ID + 1135 + Shape + Circle + Style + + fill + + Color + + b + 0.4 + g + 1 + r + 1 + + Draws + NO + FillType + 2 + GradientAngle + 90 + GradientColor + + b + 0.4 + g + 1 + r + 1 + + MiddleColor + + b + 0.4 + g + 1 + r + 1 + + TrippleBlend + YES + + shadow + + Beneath + YES + Fuzziness + 2.5038185119628906 + ShadowVector + {0, 1} + + stroke + + CornerRadius + 1 + Width + 1.5 + + + + + ID + 1129 + + + ID + 1127 + + + Class + Group + Graphics + + + Bounds + {{454.40181944003757, 1373.5655408753596}, {47, 12}} + Class + ShapedGraphic + FitText + YES + Flow + Resize + ID + 1137 + Shape + Rectangle + Style + + fill + + Draws + NO + + shadow + + Draws + NO + + stroke + + Draws + NO + + + Text + + Pad + 0 + Text + {\rtf1\ansi\ansicpg1252\cocoartf1265\cocoasubrtf200 +\cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} +{\colortbl;\red255\green255\blue255;} +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\pardirnatural\qc + +\f0\fs20 \cf0 Conductor} + VerticalPad + 0 + + Wrap + NO + + + Class + Group + Graphics + + + Bounds + {{467.22859289858161, 1423.305185502797}, {19.299808229718884, 19.299808879032174}} + Class + ShapedGraphic + ID + 1139 + Shape + Rectangle + Style + + fill + + Color + + b + 0.4 + g + 1 + r + 1 + + Draws + NO + FillType + 2 + GradientAngle + 90 + GradientColor + + b + 0.4 + g + 1 + r + 1 + + MiddleColor + + b + 0.4 + g + 1 + r + 1 + + TrippleBlend + YES + + shadow + + Beneath + YES + Draws + NO + Fuzziness + 2.5038185119628906 + ShadowVector + {0, 1} + + stroke + + Draws + NO + Width + 1.5 + + + VFlip + YES + Wrap + NO + + + Class + LineGraphic + ID + 1140 + Points + + {467.22859289858161, 1405.9353587453638} + {486.52840112830063, 1405.9353587453638} + {486.52840112830063, 1405.9353587453638} + + Style + + shadow + + Beneath + YES + Draws + YES + 
Fuzziness + 2.5038185119628906 + ShadowVector + {0, 1} + + stroke + + CornerRadius + 1 + HeadArrow + 0 + Legacy + + LineType + 1 + TailArrow + 0 + Width + 1.5 + + + + + Class + LineGraphic + ID + 1141 + Points + + {476.87849701344112, 1413.6552820372506} + {467.2285925739252, 1423.3051861521101} + + Style + + shadow + + Beneath + YES + Draws + YES + Fuzziness + 2.5038185119628906 + ShadowVector + {0, 1} + + stroke + + CornerRadius + 1 + HeadArrow + 0 + Legacy + + LineType + 1 + TailArrow + 0 + Width + 1.5 + + + + + Class + LineGraphic + ID + 1142 + Points + + {476.87849701344112, 1413.6552820372508} + {486.52840112830063, 1423.6912060751501} + + Style + + shadow + + Beneath + YES + Draws + YES + Fuzziness + 2.5038185119628906 + ShadowVector + {0, 1} + + stroke + + CornerRadius + 1 + HeadArrow + 0 + Legacy + + LineType + 1 + TailArrow + 0 + Width + 1.5 + + + + + Class + LineGraphic + ID + 1143 + Points + + {476.87849701344112, 1400.1454162764476} + {476.87849701344112, 1413.6552836605342} + + Style + + shadow + + Beneath + YES + Draws + YES + Fuzziness + 2.5038185119628906 + ShadowVector + {0, 1} + + stroke + + CornerRadius + 1 + HeadArrow + 0 + Legacy + + LineType + 1 + TailArrow + 0 + Width + 1.5 + + + + + Bounds + {{471.08855454452544, 1388.5655313386164}, {11.57988428851805, 11.57988428851805}} + Class + ShapedGraphic + ID + 1144 + Shape + Circle + Style + + fill + + Color + + b + 0.4 + g + 1 + r + 1 + + Draws + NO + FillType + 2 + GradientAngle + 90 + GradientColor + + b + 0.4 + g + 1 + r + 1 + + MiddleColor + + b + 0.4 + g + 1 + r + 1 + + TrippleBlend + YES + + shadow + + Beneath + YES + Fuzziness + 2.5038185119628906 + ShadowVector + {0, 1} + + stroke + + CornerRadius + 1 + Width + 1.5 + + + + + ID + 1138 + + + ID + 1136 + + + Bounds + {{428.40181944003757, 1349.4199200524531}, {175.99597549438477, 117.33065032958984}} + Class + ShapedGraphic + ID + 1145 + Shape + Cloud + Style + + Text + + Text + {\rtf1\ansi\ansicpg1252\cocoartf1265\cocoasubrtf200 
+\cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} +{\colortbl;\red255\green255\blue255;} +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\pardirnatural\qc + +\f0\i\fs24 \cf0 Workers} + VerticalPad + 0 + + TextPlacement + 0 + TextRelativeArea + {{0.14999999999999999, -0.15000001192092893}, {0.69999999999999996, 0.69999999999999996}} + + + ID + 1115 + + + Class + LineGraphic + Head + + ID + 968 + + ID + 1065 + Points + + {440.82414838901212, 1414.6049619569026} + {387.27826521030119, 1414.6049619569026} + + Style + + stroke + + HeadArrow + FilledArrow + Legacy + + LineType + 1 + TailArrow + FilledArrow + + + + + Bounds + {{93.246466708152184, 1417.5425142326339}, {50, 42}} + Class + ShapedGraphic + FitText + YES + Flow + Resize + ID + 996 + Shape + Rectangle + Style + + fill + + Draws + NO + + shadow + + Draws + NO + + stroke + + Draws + NO + + + Text + + Pad + 0 + Text + {\rtf1\ansi\ansicpg1252\cocoartf1265\cocoasubrtf200 +\cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} +{\colortbl;\red255\green255\blue255;} +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\pardirnatural\qc + +\f0\fs24 \cf0 Receives\ +Job\ +} + VerticalPad + 0 + + Wrap + NO + + + Bounds + {{91.82415492531527, 1366.6049531974777}, {50, 42}} + Class + ShapedGraphic + FitText + YES + Flow + Resize + ID + 995 + Shape + Rectangle + Style + + fill + + Draws + NO + + shadow + + Draws + NO + + stroke + + Draws + NO + + + Text + + Pad + 0 + Text + {\rtf1\ansi\ansicpg1252\cocoartf1265\cocoasubrtf200 +\cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} +{\colortbl;\red255\green255\blue255;} +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\pardirnatural\qc + +\f0\fs24 \cf0 Posts\ +Workflow\ +} + VerticalPad + 0 + + Wrap + NO + + + Class + LineGraphic + ID + 994 + Points + + {110.33829994431926, 1405.0729529199277} + {164.08388984446532, 1405.5} + + Style + + 
stroke + + HeadArrow + FilledArrow + Legacy + + LineType + 1 + TailArrow + FilledArrow + + + + + Class + Group + Graphics + + + Bounds + {{50.1444289081536, 1351.4198986563467}, {35, 28}} + Class + ShapedGraphic + FitText + YES + Flow + Resize + ID + 986 + Shape + Rectangle + Style + + fill + + Draws + NO + + shadow + + Draws + NO + + stroke + + Draws + NO + + + Text + + Pad + 0 + Text + {\rtf1\ansi\ansicpg1252\cocoartf1265\cocoasubrtf200 +\cocoascreenfonts1{\fonttbl\f0\fnil\fcharset0 GillSans;} +{\colortbl;\red255\green255\blue255;} +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\pardirnatural\qc + +\f0\fs24 \cf0 Library\ +User} + VerticalPad + 0 + + Wrap + NO + + + Class + Group + Graphics + + + Bounds + {{53.471203982158727, 1435.4435211691641}, {28.346457481384277, 28.346458435058594}} + Class + ShapedGraphic + ID + 988 + Shape + Rectangle + Style + + fill + + Color + + b + 0.4 + g + 1 + r + 1 + + Draws + NO + FillType + 2 + GradientAngle + 90 + GradientColor + + b + 0.4 + g + 1 + r + 1 + + MiddleColor + + b + 0.4 + g + 1 + r + 1 + + TrippleBlend + YES + + shadow + + Beneath + YES + Draws + NO + Fuzziness + 2.5038185119628906 + ShadowVector + {0, 1} + + stroke + + Draws + NO + Width + 1.5 + + + VFlip + YES + Wrap + NO + + + Class + LineGraphic + ID + 989 + Points + + {53.471203982158727, 1409.9317103895926} + {81.817661463543004, 1409.9317103895926} + {81.817661463543004, 1409.9317103895926} + + Style + + shadow + + Beneath + YES + Draws + YES + Fuzziness + 2.5038185119628906 + ShadowVector + {0, 1} + + stroke + + CornerRadius + 1 + HeadArrow + 0 + Legacy + + LineType + 1 + TailArrow + 0 + Width + 1.5 + + + + + Class + LineGraphic + ID + 990 + Points + + {67.644432722850866, 1421.2702933821463} + {53.471203505321569, 1435.4435221228384} + + Style + + shadow + + Beneath + YES + Draws + YES + Fuzziness + 2.5038185119628906 + ShadowVector + {0, 1} + + stroke + + CornerRadius + 1 + HeadArrow + 0 + Legacy + + LineType + 1 + 
TailArrow + 0 + Width + 1.5 + + + + + Class + LineGraphic + ID + 991 + Points + + {67.644432722850866, 1421.2702933821463} + {81.817661463543004, 1436.0104861675161} + + Style + + shadow + + Beneath + YES + Draws + YES + Fuzziness + 2.5038185119628906 + ShadowVector + {0, 1} + + stroke + + CornerRadius + 1 + HeadArrow + 0 + Legacy + + LineType + 1 + TailArrow + 0 + Width + 1.5 + + + + + Class + LineGraphic + ID + 992 + Points + + {67.644432722850866, 1401.4277731451773} + {67.644432722850866, 1421.2702957663321} + + Style + + shadow + + Beneath + YES + Draws + YES + Fuzziness + 2.5038185119628906 + ShadowVector + {0, 1} + + stroke + + CornerRadius + 1 + HeadArrow + 0 + Legacy + + LineType + 1 + TailArrow + 0 + Width + 1.5 + + + + + Bounds + {{59.140495478435582, 1384.4198986563467}, {17.00787353515625, 17.00787353515625}} + Class + ShapedGraphic + ID + 993 + Shape + Circle + Style + + fill + + Color + + b + 0.4 + g + 1 + r + 1 + + Draws + NO + FillType + 2 + GradientAngle + 90 + GradientColor + + b + 0.4 + g + 1 + r + 1 + + MiddleColor + + b + 0.4 + g + 1 + r + 1 + + TrippleBlend + YES + + shadow + + Beneath + YES + Fuzziness + 2.5038185119628906 + ShadowVector + {0, 1} + + stroke + + CornerRadius + 1 + Width + 1.5 + + + + + ID + 987 + + + ID + 1001 + + + Bounds + {{237.44151899448087, 1414.6049619569026}, {54, 36}} + Class + ShapedGraphic + ID + 961 + Shape + Rectangle + Text + + Text + {\rtf1\ansi\ansicpg1252\cocoartf1265\cocoasubrtf200 +\cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} +{\colortbl;\red255\green255\blue255;} +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\pardirnatural\qc + +\f0\fs24 \cf0 Job} + + + + Bounds + {{228.44151899448087, 1405.6049619569026}, {54, 36}} + Class + ShapedGraphic + ID + 1000 + Shape + Rectangle + Text + + Text + {\rtf1\ansi\ansicpg1252\cocoartf1265\cocoasubrtf200 +\cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} +{\colortbl;\red255\green255\blue255;} 
+\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\pardirnatural\qc + +\f0\fs24 \cf0 Job1} + + + + Class + Group + Graphics + + + Class + LineGraphic + Head + + ID + 968 + + ID + 967 + Points + + {273.94151899473252, 1414.6017734596319} + {332.27826523991331, 1414.5950095848621} + + Style + + stroke + + HeadArrow + 0 + Legacy + + LineType + 1 + Pattern + 1 + TailArrow + FilledArrow + + + Tail + + ID + 969 + + + + Bounds + {{332.77826521030119, 1396.6049619569026}, {54, 36}} + Class + ShapedGraphic + ID + 968 + Shape + Rectangle + Style + + stroke + + Pattern + 1 + + + Text + + Text + {\rtf1\ansi\ansicpg1252\cocoartf1265\cocoasubrtf200 +\cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} +{\colortbl;\red255\green255\blue255;} +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\pardirnatural\qc + +\f0\fs24 \cf0 Owner} + + + + Bounds + {{219.44151899448087, 1396.6049619569026}, {54, 36}} + Class + ShapedGraphic + ID + 969 + Shape + Rectangle + Text + + Text + {\rtf1\ansi\ansicpg1252\cocoartf1265\cocoasubrtf200 +\cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} +{\colortbl;\red255\green255\blue255;} +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\pardirnatural\qc + +\f0\fs24 \cf0 Job1} + + + + ID + 966 + + + Class + Group + Graphics + + + Class + LineGraphic + Head + + ID + 972 + + ID + 971 + Points + + {264.94151899473252, 1405.6017734596319} + {323.27826523991331, 1405.5950095848621} + + Style + + stroke + + HeadArrow + 0 + Legacy + + LineType + 1 + Pattern + 1 + TailArrow + FilledArrow + + + Tail + + ID + 973 + + + + Bounds + {{323.77826521030119, 1387.6049619569026}, {54, 36}} + Class + ShapedGraphic + ID + 972 + Shape + Rectangle + Style + + stroke + + Pattern + 1 + + + Text + + Text + {\rtf1\ansi\ansicpg1252\cocoartf1265\cocoasubrtf200 +\cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} 
+{\colortbl;\red255\green255\blue255;} +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\pardirnatural\qc + +\f0\fs24 \cf0 Owner} + + + + Bounds + {{210.44151899448087, 1387.6049619569026}, {54, 36}} + Class + ShapedGraphic + ID + 973 + Shape + Rectangle + Text + + Text + {\rtf1\ansi\ansicpg1252\cocoartf1265\cocoasubrtf200 +\cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} +{\colortbl;\red255\green255\blue255;} +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\pardirnatural\qc + +\f0\fs24 \cf0 Job1} + + + + ID + 970 + + + Class + Group + Graphics + + + Class + LineGraphic + Head + + ID + 976 + + ID + 975 + Points + + {255.94151899473252, 1396.6017734596319} + {314.27826523991337, 1396.5950095848621} + + Style + + stroke + + HeadArrow + 0 + Legacy + + LineType + 1 + Pattern + 1 + TailArrow + FilledArrow + + + Tail + + ID + 977 + + + + Bounds + {{314.77826521030119, 1378.6049619569026}, {54, 36}} + Class + ShapedGraphic + ID + 976 + Shape + Rectangle + Style + + stroke + + Pattern + 1 + + + Text + + Text + {\rtf1\ansi\ansicpg1252\cocoartf1265\cocoasubrtf200 +\cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} +{\colortbl;\red255\green255\blue255;} +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\pardirnatural\qc + +\f0\fs24 \cf0 Owner} + + + + Bounds + {{201.44151899448087, 1378.6049619569026}, {54, 36}} + Class + ShapedGraphic + ID + 977 + Shape + Rectangle + Text + + Text + {\rtf1\ansi\ansicpg1252\cocoartf1265\cocoasubrtf200 +\cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} +{\colortbl;\red255\green255\blue255;} +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\pardirnatural\qc + +\f0\fs24 \cf0 Job1} + + + + ID + 974 + + + Class + Group + Graphics + + + Class + LineGraphic + Head + + ID + 980 + + ID + 979 + Points + + {246.94151899473252, 1387.6017734596319} + 
{305.27826523991337, 1387.5950095848621} + + Style + + stroke + + HeadArrow + 0 + Legacy + + LineType + 1 + Pattern + 1 + TailArrow + FilledArrow + + + Tail + + ID + 981 + + + + Bounds + {{305.77826521030119, 1369.6049619569026}, {54, 36}} + Class + ShapedGraphic + ID + 980 + Shape + Rectangle + Style + + stroke + + Pattern + 1 + + + Text + + Text + {\rtf1\ansi\ansicpg1252\cocoartf1265\cocoasubrtf200 +\cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} +{\colortbl;\red255\green255\blue255;} +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\pardirnatural\qc + +\f0\fs24 \cf0 Owner} + + + + Bounds + {{192.44151899448087, 1369.6049619569026}, {54, 36}} + Class + ShapedGraphic + ID + 981 + Shape + Rectangle + Text + + Text + {\rtf1\ansi\ansicpg1252\cocoartf1265\cocoasubrtf200 +\cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} +{\colortbl;\red255\green255\blue255;} +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\pardirnatural\qc + +\f0\fs24 \cf0 Job1} + + + + ID + 978 + + + Bounds + {{170.82414838901212, 1345.6049695862971}, {236.99999999999997, 168}} + Class + ShapedGraphic + FitText + Vertical + Flow + Resize + ID + 983 + Shape + Rectangle + Style + + fill + + GradientCenter + {-0.29411799999999999, -0.264706} + + + Text + + Align + 0 + Text + {\rtf1\ansi\ansicpg1252\cocoartf1265\cocoasubrtf200 +\cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} +{\colortbl;\red255\green255\blue255;} +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720 + +\f0\fs24 \cf0 \ +\ +\ +\ +\ +\ +\ +\ +\ +\ +\ +} + VerticalPad + 0 + + TextPlacement + 0 + + + Bounds + {{170.82414838901212, 1331.6049695862971}, {236.99999999999997, 14}} + Class + ShapedGraphic + FitText + Vertical + Flow + Resize + ID + 984 + Shape + Rectangle + Style + + fill + + GradientCenter + {-0.29411799999999999, -0.264706} + + + Text + + Text + 
{\rtf1\ansi\ansicpg1252\cocoartf1265\cocoasubrtf200 +\cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} +{\colortbl;\red255\green255\blue255;} +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\qc + +\f0\i\b\fs24 \cf0 Jobboard} + VerticalPad + 0 + + TextPlacement + 0 + + + Class + LineGraphic + ID + 861 + Points + + {470.1300977351811, 156.79728666398489} + {409.22449458705552, 177.09915438002673} + + Style + + stroke + + HeadArrow + FilledArrow + Legacy + + LineType + 1 + Pattern + 1 + TailArrow + 0 + + + + + Bounds + {{476.1300977351811, 138.79728666398486}, {41, 28}} + Class + ShapedGraphic + FitText + YES + Flow + Resize + FontInfo + + Font + Helvetica + Size + 12 + + ID + 860 + Shape + Rectangle + Style + + fill + + Draws + NO + + shadow + + Draws + NO + + stroke + + Draws + NO + + + Text + + Pad + 0 + Text + {\rtf1\ansi\ansicpg1252\cocoartf1265\cocoasubrtf200 +\cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} +{\colortbl;\red255\green255\blue255;} +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\pardirnatural\qc + +\f0\i\fs24 \cf0 Nested\ +subflow} + VerticalPad + 0 + + Wrap + NO + + + Class + LineGraphic + ID + 859 + Points + + {382.65871206690008, 221.8325309753418} + {382.65871206690008, 249.83253047325724} + + Style + + stroke + + HeadArrow + FilledArrow + Legacy + + LineType + 1 + TailArrow + 0 + + + + + Bounds + {{359.27167431166708, 255.11224365234375}, {47, 47}} + Class + ShapedGraphic + HFlip + YES + ID + 855 + Shape + Circle + Style + + Text + + Text + {\rtf1\ansi\ansicpg1252\cocoartf1265\cocoasubrtf200 +\cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} +{\colortbl;\red255\green255\blue255;} +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\pardirnatural\qc + +\f0\fs24 \cf0 Flow} + VerticalPad + 0 + + + + Bounds + {{355.77167171239853, 251.61224365234375}, {54, 54}} + Class + ShapedGraphic + HFlip + YES 
+ ID + 856 + Shape + Circle + Style + + Text + + Text + {\rtf1\ansi\ansicpg1252\cocoartf1265\cocoasubrtf200 +\cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} +{\colortbl;\red255\green255\blue255;} +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\pardirnatural\qc + +\f0\fs24 \cf0 Retry} + VerticalPad + 0 + + TextPlacement + 0 + TextRelativeArea + {{0.099999999999999978, 1.0000000238418578}, {0.80000000000000004, 0.69999999999999996}} + TextRotation + 305.1478271484375 + + + Bounds + {{290.73464965820312, 1032.5300847720423}, {27, 28}} + Class + ShapedGraphic + FitText + YES + Flow + Resize + FontInfo + + Font + Helvetica + Size + 12 + + ID + 839 + Shape + Rectangle + Style + + fill + + Draws + NO + + shadow + + Draws + NO + + stroke + + Draws + NO + + + Text + + Pad + 0 + Text + {\rtf1\ansi\ansicpg1252\cocoartf1265\cocoasubrtf200 +\cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} +{\colortbl;\red255\green255\blue255;} +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\pardirnatural\qc + +\f0\i\fs24 \cf0 Run\ +Loop} + VerticalPad + 0 + + Wrap + NO + + + Class + LineGraphic + ID + 838 + Points + + {16.938772201538086, 784.51440811157227} + {550.61223120254476, 784.51440811157227} + + Style + + stroke + + HeadArrow + 0 + Legacy + + LineType + 1 + Pattern + 1 + TailArrow + 0 + + + + + Bounds + {{478.53062537152402, 1122.9011524936079}, {63.714366912841797, 31.333333333333332}} + Class + ShapedGraphic + ID + 837 + Shape + Rectangle + Style + + Text + + Text + {\rtf1\ansi\ansicpg1252\cocoartf1265\cocoasubrtf200 +\cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} +{\colortbl;\red255\green255\blue255;} +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\pardirnatural\qc + +\f0\fs20 \cf0 Completer} + VerticalPad + 0 + + + + Bounds + {{478.53062537152402, 1079.8705891391157}, {63.714366912841797, 31.333333333333332}} + Class + 
ShapedGraphic + ID + 836 + Shape + Rectangle + Style + + Text + + Text + {\rtf1\ansi\ansicpg1252\cocoartf1265\cocoasubrtf200 +\cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} +{\colortbl;\red255\green255\blue255;} +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\pardirnatural\qc + +\f0\fs20 \cf0 Scheduler} + VerticalPad + 0 + + + + Bounds + {{372.92606544494629, 1123.8570556640625}, {61, 56}} + Class + ShapedGraphic + FitText + YES + Flow + Resize + ID + 834 + Shape + Rectangle + Style + + fill + + Draws + NO + + shadow + + Draws + NO + + stroke + + Draws + NO + + + Text + + Align + 0 + Pad + 0 + Text + {\rtf1\ansi\ansicpg1252\cocoartf1265\cocoasubrtf200 +\cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} +{\colortbl;\red255\green255\blue255;} +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\pardirnatural + +\f0\fs24 \cf0 - run()\ +- suspend()\ +...\ +} + VerticalPad + 0 + + Wrap + NO + + + Bounds + {{390.8163413254731, 1078.5120424153899}, {63.714366912841797, 31.333333333333332}} + Class + ShapedGraphic + ID + 832 + Shape + Rectangle + Style + + Text + + Text + {\rtf1\ansi\ansicpg1252\cocoartf1265\cocoasubrtf200 +\cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} +{\colortbl;\red255\green255\blue255;} +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\pardirnatural\qc + +\f0\fs20 \cf0 Compiler} + VerticalPad + 0 + + + + Bounds + {{209.22450065612793, 852.73572444915771}, {80, 28}} + Class + ShapedGraphic + FitText + YES + Flow + Resize + FontInfo + + Font + Helvetica + Size + 12 + + ID + 831 + Shape + Rectangle + Style + + fill + + Draws + NO + + shadow + + Draws + NO + + stroke + + Draws + NO + + + Text + + Pad + 0 + Text + {\rtf1\ansi\ansicpg1252\cocoartf1265\cocoasubrtf200 +\cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} +{\colortbl;\red255\green255\blue255;} 
+\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\pardirnatural\qc + +\f0\i\fs24 \cf0 States, results,\ +progress...} + VerticalPad + 0 + + Wrap + NO + + + Bounds + {{195.91839599609375, 1080.2736424160523}, {156, 70}} + Class + ShapedGraphic + FitText + YES + Flow + Resize + FontInfo + + Font + Helvetica + Size + 12 + + ID + 828 + Shape + Rectangle + Style + + fill + + Draws + NO + + shadow + + Draws + NO + + stroke + + Draws + NO + + + Text + + Align + 0 + Pad + 0 + Text + {\rtf1\ansi\ansicpg1252\cocoartf1265\cocoasubrtf200 +\cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} +{\colortbl;\red255\green255\blue255;} +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\pardirnatural + +\f0\i\fs24 \cf0 - PENDING -> RUNNING\ +- RUNNING -> SUCCESS\ +- SUSPENDED -> RUNNING\ +- FAILURE -> REVERTING\ +....} + VerticalPad + 0 + + Wrap + NO + + + Bounds + {{179.01475524902344, 1044.5300637912073}, {30, 14}} + Class + ShapedGraphic + FitText + YES + Flow + Resize + FontInfo + + Font + Helvetica + Size + 12 + + ID + 827 + Shape + Rectangle + Style + + fill + + Draws + NO + + shadow + + Draws + NO + + stroke + + Draws + NO + + + Text + + Pad + 0 + Text + {\rtf1\ansi\ansicpg1252\cocoartf1265\cocoasubrtf200 +\cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} +{\colortbl;\red255\green255\blue255;} +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\pardirnatural\qc + +\f0\i\fs24 \cf0 Emits} + VerticalPad + 0 + + Wrap + NO + + + Class + LineGraphic + ID + 826 + Points + + {228.92846501504124, 1022.9387556204747} + {165.21409631559922, 1048.4693673490142} + + Style + + stroke + + HeadArrow + FilledArrow + Legacy + + LineType + 1 + Pattern + 1 + TailArrow + 0 + + + + + Bounds + {{84.918390063136314, 1048.4693603515625}, {108.00000616531918, 60.376010894775391}} + Class + ShapedGraphic + ID + 9 + Shape + Cloud + Style + + Text + + Text + 
{\rtf1\ansi\ansicpg1252\cocoartf1265\cocoasubrtf200 +\cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} +{\colortbl;\red255\green255\blue255;} +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\pardirnatural\qc + +\f0\fs24 \cf0 State\ +Transition\ +Notifications} + VerticalPad + 0 + + + + Class + Group + Graphics + + + Bounds + {{253.38389312817009, 1036.7868945785101}, {27.016406012875592, 38.542124503311257}} + Class + ShapedGraphic + ID + 93 + Magnets + + {0.15027599999999999, -0.32002000000000003} + {-0.5, -0.49964799999999998} + {-0.5, -0.25638699999999998} + {-0.10728, -0.148201} + {0.041826500000000003, 0.088786000000000004} + {-0.043045800000000002, 0.088786000000000004} + {0.22847700000000001, 0.5} + {0.5, 0.088786000000000004} + {0.40652500000000003, 0.088786000000000004} + {0.40652500000000003, 0.088786000000000004} + {0.40652500000000003, 0.088786000000000004} + + Rotation + 90 + Shape + Bezier + ShapeData + + UnitPoints + + {0.406219, 0.101163} + {0.39736100000000002, -0.042958700000000002} + {0.31166700000000003, -0.185311} + {0.149117, -0.29534500000000002} + {-0.030395499999999999, -0.41686299999999998} + {-0.261517, -0.50514099999999995} + {-0.49668800000000002, -0.49976700000000002} + {-0.496693, -0.49976500000000001} + {-0.062913899999999995, -0.36058899999999999} + {-0.062913899999999995, -0.36058899999999999} + {-0.062918699999999994, -0.36058899999999999} + {-0.5, -0.21609700000000001} + {-0.5, -0.21609600000000001} + {-0.35843000000000003, -0.22182399999999999} + {-0.217449, -0.204378} + {-0.10928400000000001, -0.12870000000000001} + {-0.017806099999999998, -0.064687300000000003} + {0.032062500000000001, 0.0174179} + {0.040309900000000003, 0.101163} + {0.040309900000000003, 0.101163} + {-0.044847499999999998, 0.101163} + {-0.044847499999999998, 0.101163} + {-0.044847499999999998, 0.101163} + {0.22758200000000001, 0.5} + {0.22758200000000001, 0.5} + {0.22758200000000001, 0.5} + {0.5, 0.101163} + 
{0.5, 0.101163} + {0.5, 0.101163} + {0.406219, 0.101163} + + + + + Bounds + {{234.04078487632972, 1028.8634930208395}, {26.999999999999996, 38.288223134554855}} + Class + ShapedGraphic + ID + 94 + Magnets + + {0.15027599999999999, -0.32002000000000003} + {-0.5, -0.49964799999999998} + {-0.5, -0.25638699999999998} + {-0.10728, -0.148201} + {0.041826500000000003, 0.088786000000000004} + {-0.043045800000000002, 0.088786000000000004} + {0.22847700000000001, 0.5} + {0.5, 0.088786000000000004} + {0.40652500000000003, 0.088786000000000004} + {0.40652500000000003, 0.088786000000000004} + {0.40652500000000003, 0.088786000000000004} + + Rotation + 180 + Shape + 33C70F48-B008-4466-BD81-E84D73C055CA-438-0000056AF6035FFB + + + Bounds + {{242.15904544605092, 1007.9418767503257}, {27.016406012875589, 38.264972185430459}} + Class + ShapedGraphic + ID + 95 + Magnets + + {0.15027599999999999, -0.32002000000000003} + {-0.5, -0.49964799999999998} + {-0.5, -0.25638699999999998} + {-0.10728, -0.148201} + {0.041826500000000003, 0.088786000000000004} + {-0.043045800000000002, 0.088786000000000004} + {0.22847700000000001, 0.5} + {0.5, 0.088786000000000004} + {0.40652500000000003, 0.088786000000000004} + {0.40652500000000003, 0.088786000000000004} + {0.40652500000000003, 0.088786000000000004} + + Rotation + 270 + Shape + 33C70F48-B008-4466-BD81-E84D73C055CA-438-0000056AF6035FFB + + + Bounds + {{261.04078487632967, 1015.981498100793}, {27.000000000000004, 38.288223134554862}} + Class + ShapedGraphic + ID + 96 + Magnets + + {0.15027599999999999, -0.32002000000000003} + {-0.5, -0.49964799999999998} + {-0.5, -0.25638699999999998} + {-0.10728, -0.148201} + {0.041826500000000003, 0.088786000000000004} + {-0.043045800000000002, 0.088786000000000004} + {0.22847700000000001, 0.5} + {0.5, 0.088786000000000004} + {0.40652500000000003, 0.088786000000000004} + {0.40652500000000003, 0.088786000000000004} + {0.40652500000000003, 0.088786000000000004} + + Shape + 
33C70F48-B008-4466-BD81-E84D73C055CA-438-0000056AF6035FFB + + + ID + 92 + + + Bounds + {{396.52035685550777, 974.46163584936892}, {142, 14}} + Class + ShapedGraphic + FitText + YES + Flow + Resize + FontInfo + + Font + Helvetica-Bold + Size + 12 + + ID + 457 + Shape + Rectangle + Style + + fill + + Draws + NO + + shadow + + Draws + NO + + stroke + + Draws + NO + + + Text + + Pad + 0 + Text + {\rtf1\ansi\ansicpg1252\cocoartf1265\cocoasubrtf200 +\cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} +{\colortbl;\red255\green255\blue255;} +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\pardirnatural\qc + +\f0\i\b\fs24 \cf0 ActionEngine (one impl.)} + VerticalPad + 0 + + Wrap + NO + + + Bounds + {{390.8163413254731, 1038.2328676107024}, {63.714366912841797, 31.333333333333332}} + Class + ShapedGraphic + ID + 450 + Shape + Rectangle + Style + + Text + + Text + {\rtf1\ansi\ansicpg1252\cocoartf1265\cocoasubrtf200 +\cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} +{\colortbl;\red255\green255\blue255;} +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\pardirnatural\qc + +\f0\fs20 \cf0 Runner} + VerticalPad + 0 + + + + Bounds + {{478.53062537152402, 1038.2328900119185}, {63.714366912841797, 31.333333333333332}} + Class + ShapedGraphic + ID + 449 + Shape + Rectangle + Style + + Text + + Text + {\rtf1\ansi\ansicpg1252\cocoartf1265\cocoasubrtf200 +\cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} +{\colortbl;\red255\green255\blue255;} +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\pardirnatural\qc + +\f0\fs20 \cf0 Runtime} + VerticalPad + 0 + + + + Bounds + {{478.5306334878843, 994.19207080251738}, {63.714366912841797, 31.333333333333332}} + Class + ShapedGraphic + ID + 447 + Shape + Rectangle + Style + + Text + + Text + {\rtf1\ansi\ansicpg1252\cocoartf1265\cocoasubrtf200 +\cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 
Helvetica;} +{\colortbl;\red255\green255\blue255;} +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\pardirnatural\qc + +\f0\fs20 \cf0 Executor} + VerticalPad + 0 + + + + Bounds + {{390.81631892425446, 994.19204840129885}, {63.714366912841797, 31.333333333333332}} + Class + ShapedGraphic + ID + 446 + Shape + Rectangle + Style + + Text + + Text + {\rtf1\ansi\ansicpg1252\cocoartf1265\cocoasubrtf200 +\cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} +{\colortbl;\red255\green255\blue255;} +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\pardirnatural\qc + +\f0\fs20 \cf0 Analyzer} + VerticalPad + 0 + + + + Class + LineGraphic + Head + + ID + 444 + + ID + 445 + Points + + {304.30400417385465, 1005.6686926988394} + {365.81839492659333, 1029.0751702876107} + + Style + + stroke + + HeadArrow + FilledArrow + Legacy + + LineType + 1 + Pattern + 1 + TailArrow + 0 + + + Tail + + ID + 423 + + + + Class + LineGraphic + Head + + ID + 10 + + ID + 433 + Points + + {437.73468537749687, 869.13090571936129} + {473.25508692784757, 868.8206769098332} + + Style + + stroke + + HeadArrow + FilledArrow + Legacy + + LineType + 1 + TailArrow + FilledArrow + + + + + Bounds + {{473.75506787377572, 840.81631016602194}, {63.714366912841797, 56}} + Class + ShapedGraphic + ID + 10 + Magnets + + {0, 1} + {0, -1} + {1, 0} + {-1, 0} + + Shape + Cylinder + Style + + Text + + Text + {\rtf1\ansi\ansicpg1252\cocoartf1265\cocoasubrtf200 +\cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} +{\colortbl;\red255\green255\blue255;} +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\qc + +\f0\fs20 \cf0 Persistence\ +Backend} + VerticalPad + 0 + + + + Class + LineGraphic + ID + 428 + OrthogonalBarAutomatic + + OrthogonalBarPoint + {0, 0} + OrthogonalBarPosition + -1 + Points + + {258.38771438598633, 947} + {308.12470245361328, 886} + + Style + + stroke + + HeadArrow + 
FilledArrow + Legacy + + LineType + 2 + TailArrow + FilledArrow + + + + + Class + TableGroup + Graphics + + + Bounds + {{310.93862753220276, 826.66148410306198}, {126, 14}} + Class + ShapedGraphic + FitText + Vertical + Flow + Resize + ID + 426 + Shape + Rectangle + Style + + fill + + GradientCenter + {-0.29411799999999999, -0.264706} + + + Text + + Text + {\rtf1\ansi\ansicpg1252\cocoartf1265\cocoasubrtf200 +\cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} +{\colortbl;\red255\green255\blue255;} +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\qc + +\f0\b\fs24 \cf0 Storage} + VerticalPad + 0 + + TextPlacement + 0 + + + Bounds + {{310.93862753220276, 840.66148410306198}, {126, 28}} + Class + ShapedGraphic + FitText + Vertical + Flow + Resize + ID + 43 + Shape + Rectangle + Style + + fill + + GradientCenter + {-0.29411799999999999, -0.264706} + + + Text + + Align + 0 + Text + {\rtf1\ansi\ansicpg1252\cocoartf1265\cocoasubrtf200 +\cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} +{\colortbl;\red255\green255\blue255;} +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720 + +\f0\fs24 \cf0 - flow_name\ +- flow_uuid} + VerticalPad + 0 + + TextPlacement + 0 + + + Bounds + {{310.93862753220276, 868.66148410306198}, {126, 56}} + Class + ShapedGraphic + FitText + Vertical + Flow + Resize + ID + 427 + Shape + Rectangle + Style + + fill + + GradientCenter + {-0.29411799999999999, -0.264706} + + + Text + + Align + 0 + Text + {\rtf1\ansi\ansicpg1252\cocoartf1265\cocoasubrtf200 +\cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} +{\colortbl;\red255\green255\blue255;} +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720 + +\f0\fs24 \cf0 - save()\ +- get()\ +- get_failures()\ +...} + VerticalPad + 0 + + TextPlacement + 0 + + + GridH + + 426 + 43 + 427 + + + ID + 425 + + + Bounds + {{207.28567728426299, 974.78645878243321}, {105.10203552246094, 
36}} + Class + ShapedGraphic + ID + 421 + Shape + Cloud + Style + + Text + + Text + {\rtf1\ansi\ansicpg1252\cocoartf1265\cocoasubrtf200 +\cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} +{\colortbl;\red255\green255\blue255;} +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\pardirnatural\qc + +\f0\fs24 \cf0 Compilation} + VerticalPad + 0 + + + + Bounds + {{240.83671598660843, 957.79548143397199}, {38, 14}} + Class + ShapedGraphic + FitText + YES + Flow + Resize + ID + 422 + Shape + Rectangle + Style + + fill + + Draws + NO + + shadow + + Draws + NO + + stroke + + Draws + NO + + + Text + + Pad + 0 + Text + {\rtf1\ansi\ansicpg1252\cocoartf1265\cocoasubrtf200 +\cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} +{\colortbl;\red255\green255\blue255;} +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\pardirnatural\qc + +\f0\fs24 \cf0 Engine} + VerticalPad + 0 + + Wrap + NO + + + Bounds + {{215.83669066280191, 952.49403624989395}, {88, 72.509323120117188}} + Class + ShapedGraphic + ID + 423 + Shape + Rectangle + + + Class + LineGraphic + ID + 418 + Points + + {175.01475125757293, 858.46545582024169} + {175.01475125757293, 1126.5224146928358} + + Style + + stroke + + HeadArrow + 0 + Legacy + + LineType + 1 + Pattern + 1 + TailArrow + 0 + + + + + Bounds + {{56.053440093994141, 802.0387135699907}, {88, 44}} + Class + ShapedGraphic + FitText + YES + Flow + Resize + FontInfo + + Font + Helvetica-BoldOblique + Size + 18 + + ID + 414 + Shape + Rectangle + Style + + fill + + Draws + NO + + shadow + + Draws + NO + + stroke + + Draws + NO + + + Text + + Pad + 0 + Text + {\rtf1\ansi\ansicpg1252\cocoartf1265\cocoasubrtf200 +\cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} +{\colortbl;\red255\green255\blue255;} +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\pardirnatural\qc + +\f0\i\b\fs36 \cf0 Activation\ +Phase} + VerticalPad + 0 + 
+ Wrap + NO + + + Bounds + {{105.08388984446533, 1003.3409264674543}, {59, 28}} + Class + ShapedGraphic + FitText + YES + Flow + Resize + ID + 413 + Shape + Rectangle + Style + + fill + + Draws + NO + + shadow + + Draws + NO + + stroke + + Draws + NO + + + Text + + Pad + 0 + Text + {\rtf1\ansi\ansicpg1252\cocoartf1265\cocoasubrtf200 +\cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} +{\colortbl;\red255\green255\blue255;} +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\pardirnatural\qc + +\f0\fs24 \cf0 Results/\ +Exceptions} + VerticalPad + 0 + + Wrap + NO + + + Class + LineGraphic + ID + 412 + Points + + {109.26527080670799, 991.99397346428293} + {192.17343756180355, 991.99397346428293} + + Style + + stroke + + HeadArrow + 0 + Legacy + + LineType + 1 + TailArrow + FilledArrow + + + + + Bounds + {{115.18593484369178, 915.30610463461369}, {49, 56}} + Class + ShapedGraphic + FitText + YES + Flow + Resize + ID + 411 + Shape + Rectangle + Style + + fill + + Draws + NO + + shadow + + Draws + NO + + stroke + + Draws + NO + + + Text + + Pad + 0 + Text + {\rtf1\ansi\ansicpg1252\cocoartf1265\cocoasubrtf200 +\cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} +{\colortbl;\red255\green255\blue255;} +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\pardirnatural\qc + +\f0\fs24 \cf0 Run/\ +Resume/\ +Revert/\ +Suspend} + VerticalPad + 0 + + Wrap + NO + + + Class + LineGraphic + ID + 410 + Points + + {113.34690338032949, 979.62663303122656} + {203.34690321589053, 979.62663303122656} + + Style + + stroke + + HeadArrow + FilledArrow + Legacy + + LineType + 1 + TailArrow + 0 + + + + + Class + Group + Graphics + + + Bounds + {{59.15303234416389, 922.95317062424022}, {35, 28}} + Class + ShapedGraphic + FitText + YES + Flow + Resize + ID + 402 + Shape + Rectangle + Style + + fill + + Draws + NO + + shadow + + Draws + NO + + stroke + + Draws + NO + + + Text + + Pad + 0 + Text + 
{\rtf1\ansi\ansicpg1252\cocoartf1265\cocoasubrtf200 +\cocoascreenfonts1{\fonttbl\f0\fnil\fcharset0 GillSans;} +{\colortbl;\red255\green255\blue255;} +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\pardirnatural\qc + +\f0\fs24 \cf0 Library\ +User} + VerticalPad + 0 + + Wrap + NO + + + Class + Group + Graphics + + + Bounds + {{62.479807418169017, 1006.9767931370576}, {28.346457481384277, 28.346458435058594}} + Class + ShapedGraphic + ID + 404 + Shape + Rectangle + Style + + fill + + Color + + b + 0.4 + g + 1 + r + 1 + + Draws + NO + FillType + 2 + GradientAngle + 90 + GradientColor + + b + 0.4 + g + 1 + r + 1 + + MiddleColor + + b + 0.4 + g + 1 + r + 1 + + TrippleBlend + YES + + shadow + + Beneath + YES + Draws + NO + Fuzziness + 2.5038185119628906 + ShadowVector + {0, 1} + + stroke + + Draws + NO + Width + 1.5 + + + VFlip + YES + Wrap + NO + + + Class + LineGraphic + ID + 405 + Points + + {62.479807418169017, 981.46498235748606} + {90.826264899553294, 981.46498235748606} + {90.826264899553294, 981.46498235748606} + + Style + + shadow + + Beneath + YES + Draws + YES + Fuzziness + 2.5038185119628906 + ShadowVector + {0, 1} + + stroke + + CornerRadius + 1 + HeadArrow + 0 + Legacy + + LineType + 1 + TailArrow + 0 + Width + 1.5 + + + + + Class + LineGraphic + ID + 406 + Points + + {76.653036158861156, 992.80356535003978} + {62.479806941331859, 1006.9767940907319} + + Style + + shadow + + Beneath + YES + Draws + YES + Fuzziness + 2.5038185119628906 + ShadowVector + {0, 1} + + stroke + + CornerRadius + 1 + HeadArrow + 0 + Legacy + + LineType + 1 + TailArrow + 0 + Width + 1.5 + + + + + Class + LineGraphic + ID + 407 + Points + + {76.653036158861156, 992.80356535003978} + {90.826264899553294, 1007.5437581354097} + + Style + + shadow + + Beneath + YES + Draws + YES + Fuzziness + 2.5038185119628906 + ShadowVector + {0, 1} + + stroke + + CornerRadius + 1 + HeadArrow + 0 + Legacy + + LineType + 1 + TailArrow + 0 + Width + 1.5 + + + + + 
Class + LineGraphic + ID + 408 + Points + + {76.653036158861156, 972.96104511307078} + {76.653036158861156, 992.80356773422557} + + Style + + shadow + + Beneath + YES + Draws + YES + Fuzziness + 2.5038185119628906 + ShadowVector + {0, 1} + + stroke + + CornerRadius + 1 + HeadArrow + 0 + Legacy + + LineType + 1 + TailArrow + 0 + Width + 1.5 + + + + + Bounds + {{68.149098914445872, 955.95317062424022}, {17.00787353515625, 17.00787353515625}} + Class + ShapedGraphic + ID + 409 + Shape + Circle + Style + + fill + + Color + + b + 0.4 + g + 1 + r + 1 + + Draws + NO + FillType + 2 + GradientAngle + 90 + GradientColor + + b + 0.4 + g + 1 + r + 1 + + MiddleColor + + b + 0.4 + g + 1 + r + 1 + + TrippleBlend + YES + + shadow + + Beneath + YES + Fuzziness + 2.5038185119628906 + ShadowVector + {0, 1} + + stroke + + CornerRadius + 1 + Width + 1.5 + + + + + ID + 403 + + + ID + 401 + + + Class + LineGraphic + ID + 399 + Points + + {450.39306747989752, 692.4796011495929} + {451.28062907089827, 609.48823926071918} + + Style + + stroke + + HeadArrow + UMLInheritance + Legacy + + LineType + 1 + TailArrow + 0 + + + Tail + + ID + 398 + Info + 2 + + + + Bounds + {{405.3877204726607, 692.97957255114432}, {90, 36}} + Class + ShapedGraphic + ID + 398 + Magnets + + {0, 1} + {0, -1} + {1, 0} + {-1, 0} + + Shape + Rectangle + Style + + Text + + Text + {\rtf1\ansi\ansicpg1252\cocoartf1265\cocoasubrtf200 +\cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} +{\colortbl;\red255\green255\blue255;} +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\qc + +\f0\b\fs24 \cf0 Distributed\ +Engine} + VerticalPad + 0 + + + + Class + LineGraphic + Head + + ID + 395 + + ID + 397 + Points + + {515.69384736152347, 643.69388126880108} + {479.18127560660326, 607.7427600751555} + + Style + + stroke + + HeadArrow + UMLInheritance + Legacy + + LineType + 1 + TailArrow + 0 + + + Tail + + ID + 396 + Info + 2 + + + + Bounds + {{470.69384736152347, 643.69388126880108}, {90, 
36}} + Class + ShapedGraphic + ID + 396 + Magnets + + {0, 1} + {0, -1} + {1, 0} + {-1, 0} + + Shape + Rectangle + Style + + Text + + Text + {\rtf1\ansi\ansicpg1252\cocoartf1265\cocoasubrtf200 +\cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} +{\colortbl;\red255\green255\blue255;} +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\qc + +\f0\b\fs24 \cf0 No-Thread\ +Engine} + VerticalPad + 0 + + + + Class + LineGraphic + Head + + ID + 395 + + ID + 27 + Points + + {387.9411398922191, 643.33613420977974} + {422.69414909838434, 607.74967407906797} + + Style + + stroke + + HeadArrow + UMLInheritance + Legacy + + LineType + 1 + TailArrow + 0 + + + Tail + + ID + 11 + + + + Bounds + {{342.59180028217145, 643.69385172515479}, {90, 36}} + Class + ShapedGraphic + ID + 11 + Magnets + + {0, 1} + {0, -1} + {1, 0} + {-1, 0} + + Shape + Rectangle + Style + + Text + + Text + {\rtf1\ansi\ansicpg1252\cocoartf1265\cocoasubrtf200 +\cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} +{\colortbl;\red255\green255\blue255;} +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\qc + +\f0\i\b\fs24 \cf0 K +\i0 -Threaded\ +Engine} + VerticalPad + 0 + + + + Class + TableGroup + Graphics + + + Bounds + {{405.38771609711887, 495.39195656369293}, {90, 14}} + Class + ShapedGraphic + FitText + Vertical + Flow + Resize + ID + 393 + Shape + Rectangle + Style + + fill + + GradientCenter + {-0.29411799999999999, -0.264706} + + + Text + + Text + {\rtf1\ansi\ansicpg1252\cocoartf1265\cocoasubrtf200 +\cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} +{\colortbl;\red255\green255\blue255;} +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\qc + +\f0\i\b\fs24 \cf0 Engine} + VerticalPad + 0 + + TextPlacement + 0 + + + Bounds + {{405.38771609711887, 509.39195656369293}, {90, 42}} + Class + ShapedGraphic + FitText + Vertical + Flow + Resize + ID + 394 + Shape + Rectangle + 
Style + + fill + + GradientCenter + {-0.29411799999999999, -0.264706} + + + Text + + Align + 0 + Text + {\rtf1\ansi\ansicpg1252\cocoartf1265\cocoasubrtf200 +\cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} +{\colortbl;\red255\green255\blue255;} +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720 + +\f0\fs24 \cf0 - notifier\ +- atom_notifier\ +- storage} + VerticalPad + 0 + + TextPlacement + 0 + + + Bounds + {{405.38771609711887, 551.39195656369293}, {90, 56}} + Class + ShapedGraphic + FitText + Vertical + Flow + Resize + ID + 395 + Shape + Rectangle + Style + + fill + + GradientCenter + {-0.29411799999999999, -0.264706} + + + Text + + Align + 0 + Text + {\rtf1\ansi\ansicpg1252\cocoartf1265\cocoasubrtf200 +\cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} +{\colortbl;\red255\green255\blue255;} +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720 + +\f0\fs24 \cf0 - compile()\ +- prepare()\ +- run()\ +- suspend()} + VerticalPad + 0 + + TextPlacement + 0 + + + GridH + + 393 + 394 + 395 + + + ID + 392 + + + Bounds + {{324.43479725203395, 600.47359077973181}, {35, 14}} + Class + ShapedGraphic + FitText + YES + Flow + Resize + FontInfo + + Font + Helvetica + Size + 12 + + ID + 390 + Shape + Rectangle + Style + + fill + + Draws + NO + + shadow + + Draws + NO + + stroke + + Draws + NO + + + Text + + Pad + 0 + Text + {\rtf1\ansi\ansicpg1252\cocoartf1265\cocoasubrtf200 +\cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} +{\colortbl;\red255\green255\blue255;} +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\pardirnatural\qc + +\f0\i\fs24 \cf0 Load()} + VerticalPad + 0 + + Wrap + NO + + + Class + LineGraphic + ID + 389 + Points + + {299.19385094739675, 622.37332926589443} + {349.19384944761669, 622.37332926589443} + + Style + + stroke + + HeadArrow + FilledArrow + Legacy + + LineType + 1 + TailArrow + FilledArrow + + + + + Class + 
LineGraphic + ID + 388 + Points + + {315.73465810741618, 475.31037359821562} + {315.73465810741618, 743.36733247080952} + + Style + + stroke + + HeadArrow + 0 + Legacy + + LineType + 1 + Pattern + 1 + TailArrow + 0 + + + + + Bounds + {{183.17344081803969, 484.69386811754907}, {72, 42}} + Class + ShapedGraphic + FitText + YES + Flow + Resize + FontInfo + + Font + Helvetica + Size + 12 + + ID + 387 + Shape + Rectangle + Style + + fill + + Draws + NO + + shadow + + Draws + NO + + stroke + + Draws + NO + + + Text + + Pad + 0 + Text + {\rtf1\ansi\ansicpg1252\cocoartf1265\cocoasubrtf200 +\cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} +{\colortbl;\red255\green255\blue255;} +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\pardirnatural\qc + +\f0\i\fs24 \cf0 Workflow +\ +Runtime\ +Configuration} + VerticalPad + 0 + + Wrap + NO + + + Bounds + {{192.17344567816366, 658.51018444452041}, {54, 36}} + Class + ShapedGraphic + ID + 386 + Shape + Rectangle + Text + + Text + {\rtf1\ansi\ansicpg1252\cocoartf1265\cocoasubrtf200 +\cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} +{\colortbl;\red255\green255\blue255;} +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\pardirnatural\qc + +\f0\fs24 \cf0 Storage\ +Config} + + + + Bounds + {{192.17343756180355, 606.36734600615216}, {54, 36}} + Class + ShapedGraphic + ID + 385 + Shape + Rectangle + Text + + Text + {\rtf1\ansi\ansicpg1252\cocoartf1265\cocoasubrtf200 +\cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} +{\colortbl;\red255\green255\blue255;} +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\pardirnatural\qc + +\f0\fs24 \cf0 Engine\ +Config} + + + + Bounds + {{192.1734294454435, 554.22448349078456}, {54, 36}} + Class + ShapedGraphic + ID + 1 + Shape + Rectangle + Text + + Text + {\rtf1\ansi\ansicpg1252\cocoartf1265\cocoasubrtf200 +\cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 
Helvetica;} +{\colortbl;\red255\green255\blue255;} +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\pardirnatural\qc + +\f0\fs24 \cf0 Flow} + + + + Bounds + {{161.23465842199704, 531.90531247737556}, {126, 182}} + Class + ShapedGraphic + ID + 15 + Shape + NoteShape + Style + + Text + + VerticalPad + 0 + + + + Bounds + {{81.034519768316954, 649.04956274775338}, {43, 28}} + Class + ShapedGraphic + FitText + YES + Flow + Resize + ID + 384 + Shape + Rectangle + Style + + fill + + Draws + NO + + shadow + + Draws + NO + + stroke + + Draws + NO + + + Text + + Pad + 0 + Text + {\rtf1\ansi\ansicpg1252\cocoartf1265\cocoasubrtf200 +\cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} +{\colortbl;\red255\green255\blue255;} +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\pardirnatural\qc + +\f0\fs24 \cf0 Returns\ +Engine} + VerticalPad + 0 + + Wrap + NO + + + Class + LineGraphic + ID + 383 + Points + + {83.544734716513915, 635.76110575309315} + {129.28172189203261, 635.76110575309315} + + Style + + stroke + + HeadArrow + 0 + Legacy + + LineType + 1 + TailArrow + FilledArrow + + + + + Bounds + {{80.054942197373691, 597.00601059533471}, {47, 14}} + Class + ShapedGraphic + FitText + YES + Flow + Resize + ID + 382 + Shape + Rectangle + Style + + fill + + Draws + NO + + shadow + + Draws + NO + + stroke + + Draws + NO + + + Text + + Pad + 0 + Text + {\rtf1\ansi\ansicpg1252\cocoartf1265\cocoasubrtf200 +\cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} +{\colortbl;\red255\green255\blue255;} +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\pardirnatural\qc + +\f0\fs24 \cf0 Provides} + VerticalPad + 0 + + Wrap + NO + + + Class + LineGraphic + ID + 381 + Points + + {87.626367290135391, 623.39376532003678} + {133.36335446565408, 623.39376532003678} + + Style + + stroke + + HeadArrow + FilledArrow + Legacy + + LineType + 1 + TailArrow + 0 + + + + + 
Class + Group + Graphics + + + Bounds + {{33.432496253969788, 566.72030291305043}, {35, 28}} + Class + ShapedGraphic + FitText + YES + Flow + Resize + ID + 373 + Shape + Rectangle + Style + + fill + + Draws + NO + + shadow + + Draws + NO + + stroke + + Draws + NO + + + Text + + Pad + 0 + Text + {\rtf1\ansi\ansicpg1252\cocoartf1265\cocoasubrtf200 +\cocoascreenfonts1{\fonttbl\f0\fnil\fcharset0 GillSans;} +{\colortbl;\red255\green255\blue255;} +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\pardirnatural\qc + +\f0\fs24 \cf0 Library\ +User} + VerticalPad + 0 + + Wrap + NO + + + Class + Group + Graphics + + + Bounds + {{36.759271327974915, 650.74392542586781}, {28.346457481384277, 28.346458435058594}} + Class + ShapedGraphic + ID + 375 + Shape + Rectangle + Style + + fill + + Color + + b + 0.4 + g + 1 + r + 1 + + Draws + NO + FillType + 2 + GradientAngle + 90 + GradientColor + + b + 0.4 + g + 1 + r + 1 + + MiddleColor + + b + 0.4 + g + 1 + r + 1 + + TrippleBlend + YES + + shadow + + Beneath + YES + Draws + NO + Fuzziness + 2.5038185119628906 + ShadowVector + {0, 1} + + stroke + + Draws + NO + Width + 1.5 + + + VFlip + YES + Wrap + NO + + + Class + LineGraphic + ID + 376 + Points + + {36.759271327974915, 625.23211464629628} + {65.105728809359192, 625.23211464629628} + {65.105728809359192, 625.23211464629628} + + Style + + shadow + + Beneath + YES + Draws + YES + Fuzziness + 2.5038185119628906 + ShadowVector + {0, 1} + + stroke + + CornerRadius + 1 + HeadArrow + 0 + Legacy + + LineType + 1 + TailArrow + 0 + Width + 1.5 + + + + + Class + LineGraphic + ID + 377 + Points + + {50.932500068667053, 636.57069763884999} + {36.759270851137757, 650.74392637954213} + + Style + + shadow + + Beneath + YES + Draws + YES + Fuzziness + 2.5038185119628906 + ShadowVector + {0, 1} + + stroke + + CornerRadius + 1 + HeadArrow + 0 + Legacy + + LineType + 1 + TailArrow + 0 + Width + 1.5 + + + + + Class + LineGraphic + ID + 378 + Points + + 
{50.932500068667053, 636.57069763884999} + {65.105728809359192, 651.31089042421991} + + Style + + shadow + + Beneath + YES + Draws + YES + Fuzziness + 2.5038185119628906 + ShadowVector + {0, 1} + + stroke + + CornerRadius + 1 + HeadArrow + 0 + Legacy + + LineType + 1 + TailArrow + 0 + Width + 1.5 + + + + + Class + LineGraphic + ID + 379 + Points + + {50.932500068667053, 616.728177401881} + {50.932500068667053, 636.57070002303578} + + Style + + shadow + + Beneath + YES + Draws + YES + Fuzziness + 2.5038185119628906 + ShadowVector + {0, 1} + + stroke + + CornerRadius + 1 + HeadArrow + 0 + Legacy + + LineType + 1 + TailArrow + 0 + Width + 1.5 + + + + + Bounds + {{42.42856282425177, 599.72030291305043}, {17.00787353515625, 17.00787353515625}} + Class + ShapedGraphic + ID + 380 + Shape + Circle + Style + + fill + + Color + + b + 0.4 + g + 1 + r + 1 + + Draws + NO + FillType + 2 + GradientAngle + 90 + GradientColor + + b + 0.4 + g + 1 + r + 1 + + MiddleColor + + b + 0.4 + g + 1 + r + 1 + + TrippleBlend + YES + + shadow + + Beneath + YES + Fuzziness + 2.5038185119628906 + ShadowVector + {0, 1} + + stroke + + CornerRadius + 1 + Width + 1.5 + + + + + ID + 374 + + + ID + 372 + + + Bounds + {{56.053440093994141, 454.08162381538807}, {97, 44}} + Class + ShapedGraphic + FitText + YES + Flow + Resize + FontInfo + + Font + Helvetica-BoldOblique + Size + 18 + + ID + 371 + Shape + Rectangle + Style + + fill + + Draws + NO + + shadow + + Draws + NO + + stroke + + Draws + NO + + + Text + + Pad + 0 + Text + {\rtf1\ansi\ansicpg1252\cocoartf1265\cocoasubrtf200 +\cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} +{\colortbl;\red255\green255\blue255;} +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\pardirnatural\qc + +\f0\i\b\fs36 \cf0 Translation\ +Phase} + VerticalPad + 0 + + Wrap + NO + + + Class + LineGraphic + ID + 370 + Points + + {239.72452365274654, 129.59183421248156} + {249.59182766764883, 184.82352424792543} + + Style + + 
stroke + + HeadArrow + FilledArrow + Legacy + + LineType + 1 + Pattern + 1 + TailArrow + 0 + + + + + Bounds + {{203.06122053766794, 96.734692512775737}, {75, 28}} + Class + ShapedGraphic + FitText + YES + Flow + Resize + FontInfo + + Font + Helvetica + Size + 12 + + ID + 369 + Shape + Rectangle + Style + + fill + + Draws + NO + + shadow + + Draws + NO + + stroke + + Draws + NO + + + Text + + Pad + 0 + Text + {\rtf1\ansi\ansicpg1252\cocoartf1265\cocoasubrtf200 +\cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} +{\colortbl;\red255\green255\blue255;} +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\pardirnatural\qc + +\f0\i\fs24 \cf0 Explicit\ +dependencies} + VerticalPad + 0 + + Wrap + NO + + + Class + LineGraphic + ID + 368 + Points + + {16.938771677235714, 434.69386909068618} + {550.61223067824244, 434.69386909068618} + + Style + + stroke + + HeadArrow + 0 + Legacy + + LineType + 1 + Pattern + 1 + TailArrow + 0 + + + + + Bounds + {{56.053440093994141, 39.83673387962002}, {112, 44}} + Class + ShapedGraphic + FitText + YES + Flow + Resize + FontInfo + + Font + Helvetica-BoldOblique + Size + 18 + + ID + 367 + Shape + Rectangle + Style + + fill + + Draws + NO + + shadow + + Draws + NO + + stroke + + Draws + NO + + + Text + + Pad + 0 + Text + {\rtf1\ansi\ansicpg1252\cocoartf1265\cocoasubrtf200 +\cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} +{\colortbl;\red255\green255\blue255;} +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\pardirnatural\qc + +\f0\i\b\fs36 \cf0 Construction\ +Phase} + VerticalPad + 0 + + Wrap + NO + + + Bounds + {{100.22448568729375, 191.89795101130832}, {50, 56}} + Class + ShapedGraphic + FitText + YES + Flow + Resize + ID + 366 + Shape + Rectangle + Style + + fill + + Draws + NO + + shadow + + Draws + NO + + stroke + + Draws + NO + + + Text + + Pad + 0 + Text + {\rtf1\ansi\ansicpg1252\cocoartf1265\cocoasubrtf200 
+\cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} +{\colortbl;\red255\green255\blue255;} +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\pardirnatural\qc + +\f0\fs24 \cf0 Creates\ +\ +\ +Workflow} + VerticalPad + 0 + + Wrap + NO + + + Class + LineGraphic + ID + 364 + Points + + {110.31631892346081, 220.32652202282102} + {156.05330609897936, 220.32652202282102} + + Style + + stroke + + HeadArrow + FilledArrow + Legacy + + LineType + 1 + TailArrow + FilledArrow + + + + + Class + TableGroup + Graphics + + + Bounds + {{306.40872322346235, 337.6122433290239}, {126, 14}} + Class + ShapedGraphic + FitText + Vertical + Flow + Resize + ID + 361 + Shape + Rectangle + Style + + fill + + GradientCenter + {-0.29411799999999999, -0.264706} + + + Text + + Text + {\rtf1\ansi\ansicpg1252\cocoartf1265\cocoasubrtf200 +\cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} +{\colortbl;\red255\green255\blue255;} +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\qc + +\f0\i\b\fs24 \cf0 Retry (Atom)} + VerticalPad + 0 + + TextPlacement + 0 + + + Bounds + {{306.40872322346235, 351.6122433290239}, {126, 56}} + Class + ShapedGraphic + FitText + Vertical + Flow + Resize + ID + 362 + Shape + Rectangle + Style + + fill + + GradientCenter + {-0.29411799999999999, -0.264706} + + + Text + + Align + 0 + Text + {\rtf1\ansi\ansicpg1252\cocoartf1265\cocoasubrtf200 +\cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} +{\colortbl;\red255\green255\blue255;} +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720 + +\f0\fs24 \cf0 - execute()\ +- revert()\ +- on_failure()\ +...} + VerticalPad + 0 + + TextPlacement + 0 + + + GridH + + 361 + 362 + + + ID + 360 + + + Class + TableGroup + Graphics + + + Bounds + {{165.22448860432195, 337.6122433290239}, {126, 14}} + Class + ShapedGraphic + FitText + Vertical + Flow + Resize + ID + 42 + Shape + Rectangle + Style + + fill + 
+ GradientCenter + {-0.29411799999999999, -0.264706} + + + Text + + Text + {\rtf1\ansi\ansicpg1252\cocoartf1265\cocoasubrtf200 +\cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} +{\colortbl;\red255\green255\blue255;} +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\qc + +\f0\i\b\fs24 \cf0 Task (Atom)} + VerticalPad + 0 + + TextPlacement + 0 + + + Bounds + {{165.22448860432195, 351.6122433290239}, {126, 56}} + Class + ShapedGraphic + FitText + Vertical + Flow + Resize + ID + 44 + Shape + Rectangle + Style + + fill + + GradientCenter + {-0.29411799999999999, -0.264706} + + + Text + + Align + 0 + Text + {\rtf1\ansi\ansicpg1252\cocoartf1265\cocoasubrtf200 +\cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} +{\colortbl;\red255\green255\blue255;} +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720 + +\f0\fs24 \cf0 - execute()\ +- revert()\ +- update_progress()\ +...} + VerticalPad + 0 + + TextPlacement + 0 + + + GridH + + 42 + 44 + + + ID + 352 + + + Class + LineGraphic + Head + + ID + 840 + + ID + 842 + Points + + {381.22447887295158, 117.34693649161716} + {381.85914273540726, 165.11464199273692} + + Style + + stroke + + HeadArrow + FilledArrow + Legacy + + LineType + 1 + Pattern + 1 + TailArrow + 0 + + + + + Class + LineGraphic + ID + 347 + Points + + {395.51019288062668, 111.79728682683641} + {394.92858042750709, 139.79728698730469} + + Style + + stroke + + HeadArrow + FilledArrow + Legacy + + LineType + 1 + Pattern + 1 + TailArrow + 0 + + + + + Bounds + {{302.34693227416039, 79.112244302955403}, {185, 28}} + Class + ShapedGraphic + FitText + YES + Flow + Resize + FontInfo + + Font + Helvetica + Size + 12 + + ID + 345 + Shape + Rectangle + Style + + fill + + Draws + NO + + shadow + + Draws + NO + + stroke + + Draws + NO + + + Text + + Pad + 0 + Text + {\rtf1\ansi\ansicpg1252\cocoartf1265\cocoasubrtf200 +\cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} 
+{\colortbl;\red255\green255\blue255;} +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\pardirnatural\qc + +\f0\i\fs24 \cf0 Workflow (declarative) structure\ +& code (not executed immediately)} + VerticalPad + 0 + + Wrap + NO + + + Class + LineGraphic + ID + 329 + Points + + {472.22448860432172, 249.61224332902393} + {411.31888545619614, 269.91411104506579} + + Style + + stroke + + HeadArrow + FilledArrow + Legacy + + LineType + 1 + Pattern + 1 + TailArrow + 0 + + + + + Class + LineGraphic + ID + 343 + Points + + {474.22448860432172, 205.97599760148498} + {409.22448860432172, 228.24848490451151} + + Style + + stroke + + HeadArrow + FilledArrow + Legacy + + LineType + 1 + Pattern + 1 + TailArrow + 0 + + + + + Bounds + {{478.22448860432172, 179.61224332902398}, {83, 42}} + Class + ShapedGraphic + FitText + YES + Flow + Resize + FontInfo + + Font + Helvetica + Size + 12 + + ID + 344 + Shape + Rectangle + Style + + fill + + Draws + NO + + shadow + + Draws + NO + + stroke + + Draws + NO + + + Text + + Pad + 0 + Text + {\rtf1\ansi\ansicpg1252\cocoartf1265\cocoasubrtf200 +\cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} +{\colortbl;\red255\green255\blue255;} +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\pardirnatural\qc + +\f0\i\fs24 \cf0 Dataflow\ +(symbol-based)\ +dependencies} + VerticalPad + 0 + + Wrap + NO + + + Class + LineGraphic + ID + 341 + Points + + {361.09295431543518, 212.90662923905811} + {315.35596770538763, 253.81785993690883} + + Style + + stroke + + HeadArrow + FilledArrow + Legacy + + LineType + 1 + TailArrow + 0 + + + + + Bounds + {{387.44897720864344, 229.36224332902393}, {30, 14}} + Class + ShapedGraphic + FitText + YES + Flow + Resize + ID + 336 + Shape + Rectangle + Style + + fill + + Draws + NO + + shadow + + Draws + NO + + stroke + + Draws + NO + + + Text + + Pad + 0 + Text + {\rtf1\ansi\ansicpg1252\cocoartf1265\cocoasubrtf200 
+\cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} +{\colortbl;\red255\green255\blue255;} +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\pardirnatural\qc + +\f0\fs24 \cf0 out/in} + VerticalPad + 0 + + Wrap + NO + + + Class + LineGraphic + ID + 334 + Points + + {239.72451559473222, 278.11224001641114} + {269.72448860432183, 278.11224001641114} + + Style + + stroke + + HeadArrow + 0 + Legacy + + LineType + 1 + TailArrow + FilledArrow + + + + + Class + LineGraphic + ID + 333 + Points + + {324.22448507715393, 278.11224001641114} + {354.2244580867436, 278.11224001641114} + + Style + + stroke + + HeadArrow + 0 + Legacy + + LineType + 1 + TailArrow + FilledArrow + + + + + Class + LineGraphic + ID + 332 + Points + + {325.22451559473205, 192.11224001641122} + {355.22448860432172, 192.11224001641122} + + Style + + stroke + + HeadArrow + FilledArrow + Legacy + + LineType + 1 + TailArrow + 0 + + + + + Class + LineGraphic + Head + + ID + 324 + + ID + 331 + Points + + {239.72450209952797, 192.61225527520028} + {269.72447510911758, 192.61225527520028} + + Style + + stroke + + HeadArrow + FilledArrow + Legacy + + LineType + 1 + TailArrow + 0 + + + Tail + + ID + 28 + + + + Bounds + {{474.22448860432172, 234.61224332902393}, {49, 42}} + Class + ShapedGraphic + FitText + YES + Flow + Resize + FontInfo + + Font + Helvetica + Size + 12 + + ID + 330 + Shape + Rectangle + Style + + fill + + Draws + NO + + shadow + + Draws + NO + + stroke + + Draws + NO + + + Text + + Pad + 0 + Text + {\rtf1\ansi\ansicpg1252\cocoartf1265\cocoasubrtf200 +\cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} +{\colortbl;\red255\green255\blue255;} +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\pardirnatural\qc + +\f0\i\fs24 \cf0 Nested\ +subflow\ +with retry} + VerticalPad + 0 + + Wrap + NO + + + Bounds + {{271.72448860432172, 251.61224332902393}, {54, 54}} + Class + ShapedGraphic + HFlip + YES + ID + 328 + 
Shape + Circle + Style + + Text + + Text + {\rtf1\ansi\ansicpg1252\cocoartf1265\cocoasubrtf200 +\cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} +{\colortbl;\red255\green255\blue255;} +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\pardirnatural\qc + +\f0\fs24 \cf0 Task} + VerticalPad + 0 + + + + Bounds + {{183.72451554562139, 251.61224332902393}, {54, 54}} + Class + ShapedGraphic + HFlip + YES + ID + 327 + Shape + Circle + Style + + Text + + Text + {\rtf1\ansi\ansicpg1252\cocoartf1265\cocoasubrtf200 +\cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} +{\colortbl;\red255\green255\blue255;} +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\pardirnatural\qc + +\f0\fs24 \cf0 Task} + VerticalPad + 0 + + + + Bounds + {{355.22448860432172, 165.61224332902398}, {54, 54}} + Class + ShapedGraphic + HFlip + YES + ID + 840 + Shape + Circle + Style + + Text + + Text + {\rtf1\ansi\ansicpg1252\cocoartf1265\cocoasubrtf200 +\cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} +{\colortbl;\red255\green255\blue255;} +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\pardirnatural\qc + +\f0\fs24 \cf0 Flow} + VerticalPad + 0 + + + + Bounds + {{270.22448860432183, 165.61224332902398}, {54, 54}} + Class + ShapedGraphic + HFlip + YES + ID + 324 + Shape + Circle + Style + + Text + + Text + {\rtf1\ansi\ansicpg1252\cocoartf1265\cocoasubrtf200 +\cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} +{\colortbl;\red255\green255\blue255;} +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\pardirnatural\qc + +\f0\fs24 \cf0 Task} + VerticalPad + 0 + + + + Bounds + {{185.22448860432195, 165.61224332902398}, {54, 54}} + Class + ShapedGraphic + HFlip + YES + ID + 28 + Shape + Circle + Style + + Text + + Text + {\rtf1\ansi\ansicpg1252\cocoartf1265\cocoasubrtf200 +\cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 
Helvetica;} +{\colortbl;\red255\green255\blue255;} +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\pardirnatural\qc + +\f0\fs24 \cf0 Task} + VerticalPad + 0 + + + + Class + TableGroup + Graphics + + + Bounds + {{165.224488604322, 153.79728666398492}, {269, 168}} + Class + ShapedGraphic + FitText + Vertical + Flow + Resize + ID + 35 + Shape + Rectangle + Style + + fill + + GradientCenter + {-0.29411799999999999, -0.264706} + + + Text + + Align + 0 + Text + {\rtf1\ansi\ansicpg1252\cocoartf1265\cocoasubrtf200 +\cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} +{\colortbl;\red255\green255\blue255;} +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720 + +\f0\fs24 \cf0 \ +\ +\ +\ +\ +\ +\ +\ +\ +\ +\ +} + VerticalPad + 0 + + TextPlacement + 0 + + + Bounds + {{165.224488604322, 139.79728666398492}, {269, 14}} + Class + ShapedGraphic + FitText + Vertical + Flow + Resize + ID + 34 + Shape + Rectangle + Style + + fill + + GradientCenter + {-0.29411799999999999, -0.264706} + + + Text + + Text + {\rtf1\ansi\ansicpg1252\cocoartf1265\cocoasubrtf200 +\cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} +{\colortbl;\red255\green255\blue255;} +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\qc + +\f0\i\b\fs24 \cf0 Flow (pattern)} + VerticalPad + 0 + + TextPlacement + 0 + + + GridH + + 34 + 35 + + + ID + 33 + + + Class + Group + Graphics + + + Bounds + {{56.122447887295152, 164.67346775924003}, {35, 28}} + Class + ShapedGraphic + FitText + YES + Flow + Resize + ID + 61 + Shape + Rectangle + Style + + fill + + Draws + NO + + shadow + + Draws + NO + + stroke + + Draws + NO + + + Text + + Pad + 0 + Text + {\rtf1\ansi\ansicpg1252\cocoartf1265\cocoasubrtf200 +\cocoascreenfonts1{\fonttbl\f0\fnil\fcharset0 GillSans;} +{\colortbl;\red255\green255\blue255;} 
+\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\pardirnatural\qc + +\f0\fs24 \cf0 Library\ +User} + VerticalPad + 0 + + Wrap + NO + + + Class + Group + Graphics + + + Bounds + {{59.449222961300279, 248.69709027205738}, {28.346457481384277, 28.346458435058594}} + Class + ShapedGraphic + ID + 63 + Shape + Rectangle + Style + + fill + + Color + + b + 0.4 + g + 1 + r + 1 + + Draws + NO + FillType + 2 + GradientAngle + 90 + GradientColor + + b + 0.4 + g + 1 + r + 1 + + MiddleColor + + b + 0.4 + g + 1 + r + 1 + + TrippleBlend + YES + + shadow + + Beneath + YES + Draws + NO + Fuzziness + 2.5038185119628906 + ShadowVector + {0, 1} + + stroke + + Draws + NO + Width + 1.5 + + + VFlip + YES + Wrap + NO + + + Class + LineGraphic + ID + 64 + Points + + {59.449222961300279, 223.18527949248588} + {87.795680442684557, 223.18527949248588} + {87.795680442684557, 223.18527949248588} + + Style + + shadow + + Beneath + YES + Draws + YES + Fuzziness + 2.5038185119628906 + ShadowVector + {0, 1} + + stroke + + CornerRadius + 1 + HeadArrow + 0 + Legacy + + LineType + 1 + TailArrow + 0 + Width + 1.5 + + + + + Class + LineGraphic + ID + 65 + Points + + {73.622451701992418, 234.52386248503961} + {59.449222484463121, 248.6970912257317} + + Style + + shadow + + Beneath + YES + Draws + YES + Fuzziness + 2.5038185119628906 + ShadowVector + {0, 1} + + stroke + + CornerRadius + 1 + HeadArrow + 0 + Legacy + + LineType + 1 + TailArrow + 0 + Width + 1.5 + + + + + Class + LineGraphic + ID + 66 + Points + + {73.622451701992418, 234.52386248503953} + {87.795680442684557, 249.26405527040947} + + Style + + shadow + + Beneath + YES + Draws + YES + Fuzziness + 2.5038185119628906 + ShadowVector + {0, 1} + + stroke + + CornerRadius + 1 + HeadArrow + 0 + Legacy + + LineType + 1 + TailArrow + 0 + Width + 1.5 + + + + + Class + LineGraphic + ID + 67 + Points + + {73.622451701992418, 214.68134224807059} + {73.622451701992418, 234.52386486922538} + + Style + + shadow + + 
Beneath + YES + Draws + YES + Fuzziness + 2.5038185119628906 + ShadowVector + {0, 1} + + stroke + + CornerRadius + 1 + HeadArrow + 0 + Legacy + + LineType + 1 + TailArrow + 0 + Width + 1.5 + + + + + Bounds + {{65.118514457577135, 197.67346775924003}, {17.00787353515625, 17.00787353515625}} + Class + ShapedGraphic + ID + 68 + Shape + Circle + Style + + fill + + Color + + b + 0.4 + g + 1 + r + 1 + + Draws + NO + FillType + 2 + GradientAngle + 90 + GradientColor + + b + 0.4 + g + 1 + r + 1 + + MiddleColor + + b + 0.4 + g + 1 + r + 1 + + TrippleBlend + YES + + shadow + + Beneath + YES + Fuzziness + 2.5038185119628906 + ShadowVector + {0, 1} + + stroke + + CornerRadius + 1 + Width + 1.5 + + + + + ID + 62 + + + ID + 60 + + + Class + Group + Graphics + + + Bounds + {{524.20410965184897, 903.84686831164879}, {90, 36}} + Class + ShapedGraphic + ID + 440 + Magnets + + {0, 1} + {0, -1} + {1, 0} + {-1, 0} + + Shape + RoundRect + Style + + Text + + Text + {\rtf1\ansi\ansicpg1252\cocoartf1265\cocoasubrtf200 +\cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} +{\colortbl;\red255\green255\blue255;} +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\qc + +\f0\fs20 \cf0 Zookeeper} + VerticalPad + 0 + + + + Bounds + {{524.20413205306681, 867.84684591043094}, {90, 36}} + Class + ShapedGraphic + ID + 441 + Magnets + + {0, 1} + {0, -1} + {1, 0} + {-1, 0} + + Shape + RoundRect + Style + + Text + + Text + {\rtf1\ansi\ansicpg1252\cocoartf1265\cocoasubrtf200 +\cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} +{\colortbl;\red255\green255\blue255;} +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\qc + +\f0\fs20 \cf0 Filesystem} + VerticalPad + 0 + + + + Bounds + {{524.20410965184885, 832.86723361478039}, {90, 36}} + Class + ShapedGraphic + ID + 442 + Magnets + + {0, 1} + {0, -1} + {1, 0} + {-1, 0} + + Shape + RoundRect + Style + + Text + + Text + 
{\rtf1\ansi\ansicpg1252\cocoartf1265\cocoasubrtf200 +\cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} +{\colortbl;\red255\green255\blue255;} +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\qc + +\f0\fs20 \cf0 Memory} + VerticalPad + 0 + + + + Bounds + {{524.20409341912841, 797.78565525800093}, {90, 36}} + Class + ShapedGraphic + ID + 443 + Magnets + + {0, 1} + {0, -1} + {1, 0} + {-1, 0} + + Shape + RoundRect + Style + + Text + + Text + {\rtf1\ansi\ansicpg1252\cocoartf1265\cocoasubrtf200 +\cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} +{\colortbl;\red255\green255\blue255;} +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\qc + +\f0\fs20 \cf0 SQLAlchemy} + VerticalPad + 0 + + + + ID + 439 + + + Bounds + {{366.28570556640625, 1120.9999961853027}, {75.71429443359375, 44}} + Class + ShapedGraphic + ID + 835 + Shape + Rectangle + Style + + stroke + + Pattern + 1 + + + + + Bounds + {{366.28571101041302, 969.38000187999}, {202.04080200195312, 196.6199951171875}} + Class + ShapedGraphic + ID + 444 + Shape + Rectangle + + + Bounds + {{379.54083251953125, 960.24970708018532}, {202.04080200195312, 196.6199951171875}} + Class + ShapedGraphic + ID + 1170 + Shape + Rectangle + + + Bounds + {{181.81308267749165, 1321.1440843224241}, {236.99999999999997, 168}} + Class + ShapedGraphic + FitText + Vertical + Flow + Resize + ID + 1172 + Shape + Rectangle + Style + + fill + + GradientCenter + {-0.29411799999999999, -0.264706} + + + Text + + Align + 0 + Text + {\rtf1\ansi\ansicpg1252\cocoartf1265\cocoasubrtf200 +\cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} +{\colortbl;\red255\green255\blue255;} +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720 + +\f0\fs24 \cf0 \ +\ +\ +\ +\ +\ +\ +\ +\ +\ +\ +} + VerticalPad + 0 + + TextPlacement + 0 + + + GridInfo + + GuidesLocked + NO + GuidesVisible + YES + HPages + 2 + ImageCounter + 1 + 
KeepToScale + + Layers + + + Lock + NO + Name + Layer 1 + Print + YES + View + YES + + + LayoutInfo + + Animate + NO + circoMinDist + 18 + circoSeparation + 0.0 + layoutEngine + dot + neatoSeparation + 0.0 + twopiSeparation + 0.0 + + LinksVisible + NO + MagnetsVisible + NO + MasterSheets + + ModificationDate + 2014-07-09 22:24:00 +0000 + Modifier + Joshua Harlow + NotesVisible + NO + Orientation + 2 + OriginVisible + NO + PageBreaks + YES + PrintInfo + + NSBottomMargin + + float + 41 + + NSHorizonalPagination + + coded + BAtzdHJlYW10eXBlZIHoA4QBQISEhAhOU051bWJlcgCEhAdOU1ZhbHVlAISECE5TT2JqZWN0AIWEASqEhAFxlwCG + + NSLeftMargin + + float + 18 + + NSPaperSize + + size + {612, 792} + + NSPrintReverseOrientation + + int + 0 + + NSRightMargin + + float + 18 + + NSTopMargin + + float + 18 + + + PrintOnePage + + ReadOnly + NO + RowAlign + 1 + RowSpacing + 36 + SheetTitle + Canvas 1 + SmartAlignmentGuidesActive + YES + SmartDistanceGuidesActive + YES + UniqueID + 1 + UseEntirePage + + VPages + 3 + WindowInfo + + CurrentSheet + 0 + ExpandedCanvases + + + name + Canvas 1 + + + Frame + {{77, 45}, {1067, 833}} + ListView + + OutlineWidth + 142 + RightSidebar + + ShowRuler + + Sidebar + + SidebarWidth + 120 + VisibleRegion + {{8.8235295767602651, 949.50982167692416}, {900.00001682954701, 665.68628695780228}} + Zoom + 1.0199999809265137 + ZoomValues + + + Canvas 1 + 1.0199999809265137 + 1 + + + + + From 0fae765bdce19e1ea8c567f7ea7d530f43390b98 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Wed, 9 Jul 2014 13:57:48 -0700 Subject: [PATCH 149/188] Make greenexecutor not keep greenthreads active Instead of keeping all greenthreads active even if they are not being used only keep the greenthreads active if there exists active work to complete. This saves resources (each greenthread takes up memory) and allows a pool to shrink and grow in a more dynamic fashion. 
Fixes bug 1339406 Change-Id: Idc8ab8447045915a0ffbaf21fa5c4bdb7a9e3593 --- taskflow/tests/unit/test_green_executor.py | 36 +++++++-- taskflow/utils/eventlet_utils.py | 89 +++++++++++++--------- 2 files changed, 80 insertions(+), 45 deletions(-) diff --git a/taskflow/tests/unit/test_green_executor.py b/taskflow/tests/unit/test_green_executor.py index 3524a9c2..eae523dc 100644 --- a/taskflow/tests/unit/test_green_executor.py +++ b/taskflow/tests/unit/test_green_executor.py @@ -31,7 +31,7 @@ class GreenExecutorTest(test.TestCase): called[name] += 1 for i in range(0, amount): - yield functools.partial(store_call, name=int(i)) + yield functools.partial(store_call, name=i) def test_func_calls(self): called = collections.defaultdict(int) @@ -44,20 +44,21 @@ class GreenExecutorTest(test.TestCase): self.assertEqual(1, called[1]) def test_no_construction(self): - self.assertRaises(AssertionError, eu.GreenExecutor, 0) - self.assertRaises(AssertionError, eu.GreenExecutor, -1) - self.assertRaises(AssertionError, eu.GreenExecutor, "-1") + self.assertRaises(ValueError, eu.GreenExecutor, 0) + self.assertRaises(ValueError, eu.GreenExecutor, -1) + self.assertRaises(ValueError, eu.GreenExecutor, "-1") def test_result_callback(self): called = collections.defaultdict(int) - def call_back(future): + def callback(future): called[future] += 1 funcs = list(self.make_funcs(called, 1)) with eu.GreenExecutor(2) as e: - f = e.submit(funcs[0]) - f.add_done_callback(call_back) + for func in funcs: + f = e.submit(func) + f.add_done_callback(callback) self.assertEqual(2, len(called)) @@ -87,6 +88,27 @@ class GreenExecutorTest(test.TestCase): result = fs[i].result() self.assertEqual(i, result) + def test_called_restricted_size(self): + called = collections.defaultdict(int) + + with eu.GreenExecutor(1) as e: + for f in self.make_funcs(called, 100): + e.submit(f) + self.assertEqual(99, e.amount_delayed) + + self.assertFalse(e.alive) + self.assertEqual(100, len(called)) + self.assertGreaterEqual(1, 
e.workers_created) + self.assertEqual(0, e.amount_delayed) + + def test_shutdown_twice(self): + e = eu.GreenExecutor(1) + self.assertTrue(e.alive) + e.shutdown() + self.assertFalse(e.alive) + e.shutdown() + self.assertFalse(e.alive) + def test_func_cancellation(self): called = collections.defaultdict(int) diff --git a/taskflow/utils/eventlet_utils.py b/taskflow/utils/eventlet_utils.py index 347fba31..cc26dfe1 100644 --- a/taskflow/utils/eventlet_utils.py +++ b/taskflow/utils/eventlet_utils.py @@ -15,15 +15,14 @@ # under the License. import logging -import threading from concurrent import futures try: - from eventlet.green import threading as green_threading + from eventlet.green import threading as greenthreading from eventlet import greenpool - from eventlet import patcher - from eventlet import queue + from eventlet import patcher as greenpatcher + from eventlet import queue as greenqueue EVENTLET_AVAILABLE = True except ImportError: EVENTLET_AVAILABLE = False @@ -33,10 +32,6 @@ from taskflow.utils import lock_utils LOG = logging.getLogger(__name__) -# NOTE(harlowja): this object signals to threads that they should stop -# working and rest in peace. -_TOMBSTONE = object() - _DONE_STATES = frozenset([ futures._base.CANCELLED_AND_NOTIFIED, futures._base.FINISHED, @@ -62,26 +57,29 @@ class _WorkItem(object): class _Worker(object): - def __init__(self, executor, work_queue, worker_id): + def __init__(self, executor, work, work_queue): self.executor = executor + self.work = work self.work_queue = work_queue - self.worker_id = worker_id def __call__(self): + # Run our main piece of work. try: + self.work.run() + finally: + # Consume any delayed work before finishing (this is how we finish + # work that was to big for the pool size, but needs to be finished + # no matter). 
while True: - work = self.work_queue.get(block=True) - if work is _TOMBSTONE: - # NOTE(harlowja): give notice to other workers (this is - # basically a chain of tombstone calls that will cause all - # the workers on the queue to eventually shut-down). - self.work_queue.put(_TOMBSTONE) + try: + w = self.work_queue.get_nowait() + except greenqueue.Empty: break else: - work.run() - except BaseException: - LOG.critical("Exception in worker %s of '%s'", - self.worker_id, self.executor, exc_info=True) + try: + w.run() + finally: + self.work_queue.task_done() class GreenFuture(futures.Future): @@ -93,8 +91,8 @@ class GreenFuture(futures.Future): # functions will correctly yield to eventlet. If this is not done then # waiting on the future never actually causes the greenthreads to run # and thus you wait for infinity. - if not patcher.is_monkey_patched('threading'): - self._condition = green_threading.Condition() + if not greenpatcher.is_monkey_patched('threading'): + self._condition = greenthreading.Condition() class GreenExecutor(futures.Executor): @@ -102,44 +100,59 @@ class GreenExecutor(futures.Executor): def __init__(self, max_workers=1000): assert EVENTLET_AVAILABLE, 'eventlet is needed to use a green executor' - assert int(max_workers) > 0, 'Max workers must be greater than zero' self._max_workers = int(max_workers) + if self._max_workers <= 0: + raise ValueError('Max workers must be greater than zero') self._pool = greenpool.GreenPool(self._max_workers) - self._work_queue = queue.LightQueue() - self._shutdown_lock = threading.RLock() + self._delayed_work = greenqueue.Queue() + self._shutdown_lock = greenthreading.Lock() self._shutdown = False + self._workers_created = 0 + + @property + def workers_created(self): + return self._workers_created + + @property + def amount_delayed(self): + return self._delayed_work.qsize() + + @property + def alive(self): + return not self._shutdown @lock_utils.locked(lock='_shutdown_lock') def submit(self, fn, *args, **kwargs): if 
self._shutdown: raise RuntimeError('cannot schedule new futures after shutdown') f = GreenFuture() - w = _WorkItem(f, fn, args, kwargs) - self._work_queue.put(w) - # Spin up any new workers (since they are spun up on demand and - # not at executor initialization). - self._spin_up() + work = _WorkItem(f, fn, args, kwargs) + if not self._spin_up(work): + self._delayed_work.put(work) return f - def _spin_up(self): - cur_am = (self._pool.running() + self._pool.waiting()) - if cur_am < self._max_workers and cur_am < self._work_queue.qsize(): - # Spin up a new worker to do the work as we are behind. - worker = _Worker(self, self._work_queue, cur_am + 1) - self._pool.spawn(worker) + def _spin_up(self, work): + alive = self._pool.running() + self._pool.waiting() + if alive < self._max_workers: + self._pool.spawn_n(_Worker(self, work, self._delayed_work)) + self._workers_created += 1 + return True + return False def shutdown(self, wait=True): with self._shutdown_lock: self._shutdown = True - self._work_queue.put(_TOMBSTONE) if wait: self._pool.waitall() + # NOTE(harlowja): Fixed in eventlet 0.15 (remove when able to use) + if not self._delayed_work.empty(): + self._delayed_work.join() class _GreenWaiter(object): """Provides the event that wait_for_any() blocks on.""" def __init__(self): - self.event = green_threading.Event() + self.event = greenthreading.Event() def add_result(self, future): self.event.set() From 9a239a0a2e93a1ecd757d46598393ab76bbdcaa4 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Fri, 11 Jul 2014 14:13:01 -0700 Subject: [PATCH 150/188] Cleanup some of the example code & docs This commit makes a set of small adjustments to examples. - Rework some of the comments to be more clear. - Add links to the original source tree file. - Rename some of the examples to make it clear the concept the example is intented to show. - Move some common example functionality to the example utility file. 
Change-Id: I858e0dbf72fe8cb40a05bfdbb0857720ffb71c7f --- doc/source/conf.py | 6 ++ doc/source/examples.rst | 112 +++++++++++++++++---- taskflow/examples/build_a_car.py | 38 +++---- taskflow/examples/buildsystem.py | 23 +++-- taskflow/examples/calculate_in_parallel.py | 16 +-- taskflow/examples/calculate_linear.py | 16 +-- taskflow/examples/example_utils.py | 6 ++ taskflow/examples/fake_billing.py | 8 +- taskflow/examples/graph_flow.py | 16 +-- taskflow/examples/persistence_example.py | 18 ++-- taskflow/examples/resume_from_backend.py | 17 +--- taskflow/examples/resume_vm_boot.py | 18 ++-- taskflow/examples/reverting_linear.py | 16 +-- taskflow/examples/simple_linear.py | 13 +-- taskflow/examples/wrapped_exception.py | 14 +-- taskflow/tests/test_examples.py | 2 +- 16 files changed, 198 insertions(+), 141 deletions(-) diff --git a/doc/source/conf.py b/doc/source/conf.py index 6ecddae8..3b0c35ce 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -11,6 +11,7 @@ sys.path.insert(0, os.path.abspath('../..')) extensions = [ 'sphinx.ext.autodoc', 'sphinx.ext.doctest', + 'sphinx.ext.extlinks', 'sphinx.ext.inheritance_diagram', 'sphinx.ext.intersphinx', 'sphinx.ext.viewcode', @@ -37,6 +38,7 @@ exclude_patterns = ['_build'] # General information about the project. project = u'TaskFlow' copyright = u'2013-2014, OpenStack Foundation' +source_tree = 'http://git.openstack.org/cgit/openstack/taskflow/tree' # If true, '()' will be appended to :func: etc. cross-reference text. add_function_parentheses = True @@ -51,6 +53,10 @@ pygments_style = 'sphinx' # Prefixes that are ignored for sorting the Python module index modindex_common_prefix = ['taskflow.'] +# Shortened external links. 
+extlinks = { + 'example': (source_tree + '/taskflow/examples/%s.py', ''), +} # -- Options for HTML output -------------------------------------------------- diff --git a/doc/source/examples.rst b/doc/source/examples.rst index 1891b425..40c0a2d2 100644 --- a/doc/source/examples.rst +++ b/doc/source/examples.rst @@ -1,32 +1,59 @@ -Linear phone calls +Making phone calls ================== +.. note:: + + Full source located at :example:`simple_linear`. + .. literalinclude:: ../../taskflow/examples/simple_linear.py :language: python :linenos: :lines: 16- - :emphasize-lines: 16-28 + :emphasize-lines: 16-29 -Linear phone calls (reverting) -============================== +Making phone calls (automatically reverting) +============================================ + +.. note:: + + Full source located at :example:`reverting_linear`. .. literalinclude:: ../../taskflow/examples/reverting_linear.py :language: python :linenos: :lines: 16- - :emphasize-lines: 17-32 + :emphasize-lines: 17-26 Building a car ============== +.. note:: + + Full source located at :example:`build_a_car`. + .. literalinclude:: ../../taskflow/examples/build_a_car.py :language: python :linenos: :lines: 16- - :emphasize-lines: 20-26 + :emphasize-lines: 22-28 -Task dependencies -================= +Linear equation solver (explicit dependencies) +============================================== + +.. note:: + + Full source located at :example:`calculate_linear`. + +.. literalinclude:: ../../taskflow/examples/calculate_linear.py + :language: python + :linenos: + :lines: 16- + :emphasize-lines: 17-27 + +Linear equation solver (inferred dependencies) +============================================== + +``Source:`` :example:`graph_flow.py` .. literalinclude:: ../../taskflow/examples/graph_flow.py :language: python @@ -34,8 +61,12 @@ Task dependencies :lines: 16- :emphasize-lines: 18-31 -Parallel calculations -===================== +Linear equation solver (in parallel) +==================================== + +.. 
note:: + + Full source located at :example:`calculate_in_parallel` .. literalinclude:: ../../taskflow/examples/calculate_in_parallel.py :language: python @@ -43,8 +74,12 @@ Parallel calculations :lines: 16- :emphasize-lines: 18-21 -Parallel pseudo-volume-create -============================= +Creating a volume (in parallel) +=============================== + +.. note:: + + Full source located at :example:`create_parallel_volume` .. literalinclude:: ../../taskflow/examples/create_parallel_volume.py :language: python @@ -52,8 +87,25 @@ Parallel pseudo-volume-create :lines: 16- :emphasize-lines: 21-23 -Suspended workflow reloaded -=========================== +Storing & emitting a bill +========================= + +.. note:: + + Full source located at :example:`fake_billing` + +.. literalinclude:: ../../taskflow/examples/fake_billing.py + :language: python + :linenos: + :lines: 16- + :emphasize-lines: 24-32 + +Suspending a workflow & resuming +================================ + +.. note:: + + Full source located at :example:`resume_from_backend` .. literalinclude:: ../../taskflow/examples/resume_from_backend.py :language: python @@ -61,8 +113,12 @@ Suspended workflow reloaded :lines: 16- :emphasize-lines: 22-39 -Resumable vm-pseudo-boot -======================== +Creating a virtual machine (resumable) +====================================== + +.. note:: + + Full source located at :example:`resume_vm_boot` .. literalinclude:: ../../taskflow/examples/resume_vm_boot.py :language: python @@ -70,8 +126,12 @@ Resumable vm-pseudo-boot :lines: 16- :emphasize-lines: 32-34 -Resumable volume-pseudo-create -============================== +Creating a volume (resumable) +============================= + +.. note:: + + Full source located at :example:`resume_volume_create` .. 
literalinclude:: ../../taskflow/examples/resume_volume_create.py :language: python @@ -79,8 +139,12 @@ Resumable volume-pseudo-create :lines: 16- :emphasize-lines: 28-30 -Running engines by iteration -============================ +Running engines via iteration +============================= + +.. note:: + + Full source located at :example:`run_by_iter` .. literalinclude:: ../../taskflow/examples/run_by_iter.py :language: python @@ -88,8 +152,12 @@ Running engines by iteration :lines: 16- :emphasize-lines: 24-27 -Retry controlling -================= +Controlling retries using a retry controller +============================================ + +.. note:: + + Full source located at :example:`retry_flow` .. literalinclude:: ../../taskflow/examples/retry_flow.py :language: python diff --git a/taskflow/examples/build_a_car.py b/taskflow/examples/build_a_car.py index 7367c348..1655f2a6 100644 --- a/taskflow/examples/build_a_car.py +++ b/taskflow/examples/build_a_car.py @@ -32,14 +32,16 @@ from taskflow.patterns import graph_flow as gf from taskflow.patterns import linear_flow as lf from taskflow import task +import example_utils as eu # noqa -# INTRO: This examples shows how a graph_flow and linear_flow can be used -# together to execute non-dependent tasks by going through the steps required -# to build a simplistic car (an assembly line if you will). It also shows -# how raw functions can be wrapped into a task object instead of being forced -# to use the more heavy task base class. This is useful in scenarios where -# pre-existing code has functions that you easily want to plug-in to taskflow, -# without requiring a large amount of code changes. + +# INTRO: This examples shows how a graph flow and linear flow can be used +# together to execute dependent & non-dependent tasks by going through the +# steps required to build a simplistic car (an assembly line if you will). 
It +# also shows how raw functions can be wrapped into a task object instead of +# being forced to use the more *heavy* task base class. This is useful in +# scenarios where pre-existing code has functions that you easily want to +# plug-in to taskflow, without requiring a large amount of code changes. def build_frame(): @@ -58,6 +60,9 @@ def build_wheels(): return '4' +# These just return true to indiciate success, they would in the real work +# do more than just that. + def install_engine(frame, engine): return True @@ -75,13 +80,7 @@ def install_wheels(frame, engine, engine_installed, wheels): def trash(**kwargs): - print_wrapped("Throwing away pieces of car!") - - -def print_wrapped(text): - print("-" * (len(text))) - print(text) - print("-" * (len(text))) + eu.print_wrapped("Throwing away pieces of car!") def startup(**kwargs): @@ -114,6 +113,9 @@ def task_watch(state, details): flow = lf.Flow("make-auto").add( task.FunctorTask(startup, revert=trash, provides='ran'), + # A graph flow allows automatic dependency based ordering, the ordering + # is determined by analyzing the symbols required and provided and ordering + # execution based on a functioning order (if one exists). gf.Flow("install-parts").add( task.FunctorTask(build_frame, provides='frame'), task.FunctorTask(build_engine, provides='engine'), @@ -141,7 +143,7 @@ flow = lf.Flow("make-auto").add( # the tasks should produce, in this example this specification will influence # what those tasks do and what output they create. Different tasks depend on # different information from this specification, all of which will be provided -# automatically by the engine. +# automatically by the engine to those tasks. 
spec = { "frame": 'steel', "engine": 'honda', @@ -164,7 +166,7 @@ engine = taskflow.engines.load(flow, store={'spec': spec.copy()}) engine.notifier.register('*', flow_watch) engine.task_notifier.register('*', task_watch) -print_wrapped("Building a car") +eu.print_wrapped("Building a car") engine.run() # Alter the specification and ensure that the reverting logic gets triggered @@ -177,8 +179,8 @@ engine = taskflow.engines.load(flow, store={'spec': spec.copy()}) engine.notifier.register('*', flow_watch) engine.task_notifier.register('*', task_watch) -print_wrapped("Building a wrong car that doesn't match specification") +eu.print_wrapped("Building a wrong car that doesn't match specification") try: engine.run() except Exception as e: - print_wrapped("Flow failed: %s" % e) + eu.print_wrapped("Flow failed: %s" % e) diff --git a/taskflow/examples/buildsystem.py b/taskflow/examples/buildsystem.py index c17628a5..38f03040 100644 --- a/taskflow/examples/buildsystem.py +++ b/taskflow/examples/buildsystem.py @@ -29,8 +29,11 @@ import taskflow.engines from taskflow.patterns import graph_flow as gf from taskflow import task +import example_utils as eu # noqa -# In this example we demonstrate use of TargetedFlow to make oversimplified + +# In this example we demonstrate use of a target flow (a flow that only +# executes up to a specified target) to make an *oversimplified* pseudo # build system. It pretends to compile all sources to object files and # link them into an executable. 
It also can build docs, but this can be # "switched off" via targeted flow special power -- ability to ignore @@ -75,7 +78,7 @@ class BuildDocsTask(task.Task): def make_flow_and_store(source_files, executable_only=False): - flow = gf.TargetedFlow('build flow') + flow = gf.TargetedFlow('build-flow') object_targets = [] store = {} for source in source_files: @@ -97,12 +100,12 @@ def make_flow_and_store(source_files, executable_only=False): return flow, store -SOURCE_FILES = ['first.c', 'second.cpp', 'main.cpp'] +if __name__ == "__main__": + SOURCE_FILES = ['first.c', 'second.cpp', 'main.cpp'] + eu.print_wrapped('Running all tasks:') + flow, store = make_flow_and_store(SOURCE_FILES) + taskflow.engines.run(flow, store=store) -print('Running all tasks:') -flow, store = make_flow_and_store(SOURCE_FILES) -taskflow.engines.run(flow, store=store) - -print('\nBuilding executable, no docs:') -flow, store = make_flow_and_store(SOURCE_FILES, executable_only=True) -taskflow.engines.run(flow, store=store) + eu.print_wrapped('Building executable, no docs:') + flow, store = make_flow_and_store(SOURCE_FILES, executable_only=True) + taskflow.engines.run(flow, store=store) diff --git a/taskflow/examples/calculate_in_parallel.py b/taskflow/examples/calculate_in_parallel.py index f179c3ee..76cfc370 100644 --- a/taskflow/examples/calculate_in_parallel.py +++ b/taskflow/examples/calculate_in_parallel.py @@ -31,20 +31,20 @@ from taskflow.patterns import linear_flow as lf from taskflow.patterns import unordered_flow as uf from taskflow import task -# INTRO: This examples shows how linear_flow and unordered_flow can be used -# together to execute calculations in parallel and then use the -# result for the next task. Adder task is used for all calculations -# and arguments' bindings are used to set correct parameters to the task. 
+# INTRO: This examples shows how a linear flow and a unordered flow can be +# used together to execute calculations in parallel and then use the +# result for the next task/s. The adder task is used for all calculations +# and argument bindings are used to set correct parameters for each task. # This task provides some values from as a result of execution, this can be # useful when you want to provide values from a static set to other tasks that # depend on those values existing before those tasks can run. # -# This method is *depreciated* in favor of a simpler mechanism that just -# provides those values on engine running by prepopulating the storage backend -# before your tasks are ran (which accomplishes a similar goal in a more -# uniform manner). +# NOTE(harlowja): this usage is *depreciated* in favor of a simpler mechanism +# that provides those values on engine running by prepopulating the storage +# backend before your tasks are ran (which accomplishes a similar goal in a +# more uniform manner). class Provider(task.Task): def __init__(self, name, *args, **kwargs): super(Provider, self).__init__(name=name, **kwargs) diff --git a/taskflow/examples/calculate_linear.py b/taskflow/examples/calculate_linear.py index 45c3f328..8d6f4c03 100644 --- a/taskflow/examples/calculate_linear.py +++ b/taskflow/examples/calculate_linear.py @@ -30,11 +30,11 @@ from taskflow.patterns import linear_flow as lf from taskflow import task -# INTRO: In this example linear_flow is used to group four tasks to calculate +# INTRO: In this example a linear flow is used to group four tasks to calculate # a value. A single added task is used twice, showing how this can be done # and the twice added task takes in different bound values. In the first case # it uses default parameters ('x' and 'y') and in the second case arguments -# are bound with ('z', 'd') keys from the engines storage mechanism. +# are bound with ('z', 'd') keys from the engines internal storage mechanism. 
# # A multiplier task uses a binding that another task also provides, but this # example explicitly shows that 'z' parameter is bound with 'a' key @@ -47,10 +47,10 @@ from taskflow import task # useful when you want to provide values from a static set to other tasks that # depend on those values existing before those tasks can run. # -# This method is *depreciated* in favor of a simpler mechanism that just -# provides those values on engine running by prepopulating the storage backend -# before your tasks are ran (which accomplishes a similar goal in a more -# uniform manner). +# NOTE(harlowja): this usage is *depreciated* in favor of a simpler mechanism +# that just provides those values on engine running by prepopulating the +# storage backend before your tasks are ran (which accomplishes a similar goal +# in a more uniform manner). class Provider(task.Task): def __init__(self, name, *args, **kwargs): @@ -89,8 +89,8 @@ class Multiplier(task.Task): # Note here that the ordering is established so that the correct sequences # of operations occurs where the adding and multiplying is done according -# to the expected and typical mathematical model. A graph_flow could also be -# used here to automatically ensure the correct ordering. +# to the expected and typical mathematical model. A graph flow could also be +# used here to automatically infer & ensure the correct ordering. flow = lf.Flow('root').add( # Provide the initial values for other tasks to depend on. 
# diff --git a/taskflow/examples/example_utils.py b/taskflow/examples/example_utils.py index 3da680e2..be08e53a 100644 --- a/taskflow/examples/example_utils.py +++ b/taskflow/examples/example_utils.py @@ -35,6 +35,12 @@ except ImportError: SQLALCHEMY_AVAILABLE = False +def print_wrapped(text): + print("-" * (len(text))) + print(text) + print("-" * (len(text))) + + def rm_path(persist_path): if not os.path.exists(persist_path): return diff --git a/taskflow/examples/fake_billing.py b/taskflow/examples/fake_billing.py index 16829226..76a893d1 100644 --- a/taskflow/examples/fake_billing.py +++ b/taskflow/examples/fake_billing.py @@ -70,7 +70,7 @@ class UrlCaller(object): # Since engines save the output of tasks to a optional persistent storage # backend resources have to be dealt with in a slightly different manner since -# resources are transient and can not be persisted (or serialized). For tasks +# resources are transient and can *not* be persisted (or serialized). For tasks # that require access to a set of resources it is a common pattern to provide # a object (in this case this object) on construction of those tasks via the # task constructor. @@ -149,9 +149,9 @@ class DeclareSuccess(task.Task): print("All data processed and sent to %s" % (sent_to)) -# Resources (db handles and similar) of course can't be persisted so we need -# to make sure that we pass this resource fetcher to the tasks constructor so -# that the tasks have access to any needed resources (the resources are +# Resources (db handles and similar) of course can *not* be persisted so we +# need to make sure that we pass this resource fetcher to the tasks constructor +# so that the tasks have access to any needed resources (the resources are # lazily loaded so that they are only created when they are used). 
resources = ResourceFetcher() flow = lf.Flow("initialize-me") diff --git a/taskflow/examples/graph_flow.py b/taskflow/examples/graph_flow.py index fd96d24a..99dfdd45 100644 --- a/taskflow/examples/graph_flow.py +++ b/taskflow/examples/graph_flow.py @@ -31,20 +31,20 @@ from taskflow.patterns import linear_flow as lf from taskflow import task -# In this example there are complex dependencies between tasks that are used to -# perform a simple set of linear equations. +# In this example there are complex *inferred* dependencies between tasks that +# are used to perform a simple set of linear equations. # # As you will see below the tasks just define what they require as input # and produce as output (named values). Then the user doesn't care about -# ordering the TASKS (in this case the tasks calculate pieces of the overall +# ordering the tasks (in this case the tasks calculate pieces of the overall # equation). # -# As you will notice graph_flow resolves dependencies automatically using the -# tasks requirements and provided values and no ordering dependency has to be -# manually created. +# As you will notice a graph flow resolves dependencies automatically using the +# tasks symbol requirements and provided symbol values and no orderin +# dependency has to be manually created. # -# Also notice that flows of any types can be nested into a graph_flow; subflows -# dependencies will be resolved too!! Pretty cool right! +# Also notice that flows of any types can be nested into a graph flow; showing +# that subflow dependencies (and associated ordering) will be inferred too. 
class Adder(task.Task): diff --git a/taskflow/examples/persistence_example.py b/taskflow/examples/persistence_example.py index a8112a3d..720914cd 100644 --- a/taskflow/examples/persistence_example.py +++ b/taskflow/examples/persistence_example.py @@ -35,7 +35,7 @@ from taskflow.persistence import logbook from taskflow import task from taskflow.utils import persistence_utils as p_utils -import example_utils # noqa +import example_utils as eu # noqa # INTRO: In this example we create two tasks, one that will say hi and one # that will say bye with optional capability to raise an error while @@ -49,12 +49,6 @@ import example_utils # noqa # the database during both of these modes (failing or not failing). -def print_wrapped(text): - print("-" * (len(text))) - print(text) - print("-" * (len(text))) - - class HiTask(task.Task): def execute(self): print("Hi!") @@ -84,7 +78,7 @@ def make_flow(blowup=False): # Persist the flow and task state here, if the file/dir exists already blowup # if not don't blowup, this allows a user to see both the modes and to see # what is stored in each case. -if example_utils.SQLALCHEMY_AVAILABLE: +if eu.SQLALCHEMY_AVAILABLE: persist_path = os.path.join(tempfile.gettempdir(), "persisting.db") backend_uri = "sqlite:///%s" % (persist_path) else: @@ -96,7 +90,7 @@ if os.path.exists(persist_path): else: blowup = True -with example_utils.get_backend(backend_uri) as backend: +with eu.get_backend(backend_uri) as backend: # Now we can run. engine_config = { 'backend': backend, @@ -108,17 +102,17 @@ with example_utils.get_backend(backend_uri) as backend: # did exist, assume we won't blowup (and therefore this shows the undo # and redo that a flow will go through). 
flow = make_flow(blowup=blowup) - print_wrapped("Running") + eu.print_wrapped("Running") try: eng = engines.load(flow, **engine_config) eng.run() if not blowup: - example_utils.rm_path(persist_path) + eu.rm_path(persist_path) except Exception: # NOTE(harlowja): don't exit with non-zero status code, so that we can # print the book contents, as well as avoiding exiting also makes the # unit tests (which also runs these examples) pass. traceback.print_exc(file=sys.stdout) - print_wrapped("Book contents") + eu.print_wrapped("Book contents") print(p_utils.pformat(engine_config['book'])) diff --git a/taskflow/examples/resume_from_backend.py b/taskflow/examples/resume_from_backend.py index ef20a160..69247382 100644 --- a/taskflow/examples/resume_from_backend.py +++ b/taskflow/examples/resume_from_backend.py @@ -33,7 +33,7 @@ from taskflow.patterns import linear_flow as lf from taskflow import task from taskflow.utils import persistence_utils as p_utils -import example_utils # noqa +import example_utils as eu # noqa # INTRO: In this example linear_flow is used to group three tasks, one which # will suspend the future work the engine may do. This suspend engine is then @@ -53,20 +53,13 @@ import example_utils # noqa # # python taskflow/examples/resume_from_backend.py \ # zookeeper://127.0.0.1:2181/taskflow/resume_from_backend/ -# # UTILITY FUNCTIONS ######################################### -def print_wrapped(text): - print("-" * (len(text))) - print(text) - print("-" * (len(text))) - - def print_task_states(flowdetail, msg): - print_wrapped(msg) + eu.print_wrapped(msg) print("Flow '%s' state: %s" % (flowdetail.name, flowdetail.state)) # Sort by these so that our test validation doesn't get confused by the # order in which the items in the flow detail can be in. 
@@ -106,7 +99,7 @@ def flow_factory(): # INITIALIZE PERSISTENCE #################################### -with example_utils.get_backend() as backend: +with eu.get_backend() as backend: logbook = p_utils.temporary_log_book(backend) # CREATE AND RUN THE FLOW: FIRST ATTEMPT #################### @@ -117,13 +110,13 @@ with example_utils.get_backend() as backend: backend=backend) print_task_states(flowdetail, "At the beginning, there is no state") - print_wrapped("Running") + eu.print_wrapped("Running") engine.run() print_task_states(flowdetail, "After running") # RE-CREATE, RESUME, RUN #################################### - print_wrapped("Resuming and running again") + eu.print_wrapped("Resuming and running again") # NOTE(harlowja): reload the flow detail from backend, this will allow us # to resume the flow from its suspended state, but first we need to search diff --git a/taskflow/examples/resume_vm_boot.py b/taskflow/examples/resume_vm_boot.py index 90756f18..1540b44c 100644 --- a/taskflow/examples/resume_vm_boot.py +++ b/taskflow/examples/resume_vm_boot.py @@ -43,7 +43,7 @@ from taskflow import task from taskflow.utils import eventlet_utils as e_utils from taskflow.utils import persistence_utils as p_utils -import example_utils # noqa +import example_utils as eu # noqa # INTRO: This examples shows how a hierarchy of flows can be used to create a # vm in a reliable & resumable manner using taskflow + a miniature version of @@ -61,12 +61,6 @@ def slow_down(how_long=0.5): time.sleep(how_long) -def print_wrapped(text): - print("-" * (len(text))) - print(text) - print("-" * (len(text))) - - class PrintText(task.Task): """Just inserts some text print outs in a workflow.""" def __init__(self, print_what, no_slow=False): @@ -77,10 +71,10 @@ class PrintText(task.Task): def execute(self): if self._no_slow: - print_wrapped(self._text) + eu.print_wrapped(self._text) else: with slow_down(): - print_wrapped(self._text) + eu.print_wrapped(self._text) class DefineVMSpec(task.Task): @@ 
-229,10 +223,10 @@ def create_flow(): PrintText("Instance is running!", no_slow=True)) return flow -print_wrapped("Initializing") +eu.print_wrapped("Initializing") # Setup the persistence & resumption layer. -with example_utils.get_backend() as backend: +with eu.get_backend() as backend: try: book_id, flow_id = sys.argv[2].split("+", 1) if not uuidutils.is_uuid_like(book_id): @@ -275,7 +269,7 @@ with example_utils.get_backend() as backend: engine_conf=engine_conf) # Make me my vm please! - print_wrapped('Running') + eu.print_wrapped('Running') engine.run() # How to use. diff --git a/taskflow/examples/reverting_linear.py b/taskflow/examples/reverting_linear.py index e6e5bb04..f912fa94 100644 --- a/taskflow/examples/reverting_linear.py +++ b/taskflow/examples/reverting_linear.py @@ -31,21 +31,15 @@ from taskflow.patterns import linear_flow as lf from taskflow import task # INTRO: In this example we create three tasks, each of which ~calls~ a given -# number (provided as a function input), one of those tasks fails calling a +# number (provided as a function input), one of those tasks *fails* calling a # given number (the suzzie calling); this causes the workflow to enter the # reverting process, which activates the revert methods of the previous two # phone ~calls~. # # This simulated calling makes it appear like all three calls occur or all # three don't occur (transaction-like capabilities). No persistence layer is -# used here so reverting and executing will not handle process failure. -# -# This example shows a basic usage of the taskflow structures without involving -# the complexity of persistence. Using the structures that taskflow provides -# via tasks and flows makes it possible for you to easily at a later time -# hook in a persistence layer (and then gain the functionality that offers) -# when you decide the complexity of adding that layer in is 'worth it' for your -# applications usage pattern (which some applications may not need). 
+# used here so reverting and executing will *not* be tolerant of process +# failure. class CallJim(task.Task): @@ -94,6 +88,6 @@ except Exception as e: # how to deal with multiple tasks failing while running. # # You will also note that this is not a problem in this case since no - # parallelism is involved; this is ensured by the usage of a linear flow, - # which runs serially as well as the default engine type which is 'serial'. + # parallelism is involved; this is ensured by the usage of a linear flow + # and the default engine type which is 'serial' vs being 'parallel'. print("Flow failed: %s" % e) diff --git a/taskflow/examples/simple_linear.py b/taskflow/examples/simple_linear.py index 17fa587e..495b9633 100644 --- a/taskflow/examples/simple_linear.py +++ b/taskflow/examples/simple_linear.py @@ -36,12 +36,13 @@ from taskflow import task # sequence (the flow) and then passing the work off to an engine, with some # initial data to be ran in a reliable manner. # -# This example shows a basic usage of the taskflow structures without involving -# the complexity of persistence. Using the structures that taskflow provides -# via tasks and flows makes it possible for you to easily at a later time -# hook in a persistence layer (and then gain the functionality that offers) -# when you decide the complexity of adding that layer in is 'worth it' for your -# applications usage pattern (which some applications may not need). +# NOTE(harlowja): This example shows a basic usage of the taskflow structures +# without involving the complexity of persistence. Using the structures that +# taskflow provides via tasks and flows makes it possible for you to easily at +# a later time hook in a persistence layer (and then gain the functionality +# that offers) when you decide the complexity of adding that layer in +# is 'worth it' for your applications usage pattern (which certain applications +# may not need). 
class CallJim(task.Task): diff --git a/taskflow/examples/wrapped_exception.py b/taskflow/examples/wrapped_exception.py index 17ae6322..07340082 100644 --- a/taskflow/examples/wrapped_exception.py +++ b/taskflow/examples/wrapped_exception.py @@ -36,6 +36,8 @@ from taskflow import task from taskflow.tests import utils from taskflow.utils import misc +import example_utils as eu # noqa + # INTRO: In this example we create two tasks which can trigger exceptions # based on various inputs to show how to analyze the thrown exceptions for # which types were thrown and handle the different types in different ways. @@ -54,12 +56,6 @@ from taskflow.utils import misc # that code to do further cleanups (if desired). -def print_wrapped(text): - print("-" * (len(text))) - print(text) - print("-" * (len(text))) - - class FirstException(Exception): """Exception that first task raises.""" @@ -112,18 +108,18 @@ def run(**store): misc.Failure.reraise_if_any(unknown_failures) -print_wrapped("Raise and catch first exception only") +eu.print_wrapped("Raise and catch first exception only") run(sleep1=0.0, raise1=True, sleep2=0.0, raise2=False) # NOTE(imelnikov): in general, sleeping does not guarantee that we'll have both # task running before one of them fails, but with current implementation this # works most of times, which is enough for our purposes here (as an example). -print_wrapped("Raise and catch both exceptions") +eu.print_wrapped("Raise and catch both exceptions") run(sleep1=1.0, raise1=True, sleep2=1.0, raise2=True) -print_wrapped("Handle one exception, and re-raise another") +eu.print_wrapped("Handle one exception, and re-raise another") try: run(sleep1=1.0, raise1=True, sleep2=1.0, raise2='boom') diff --git a/taskflow/tests/test_examples.py b/taskflow/tests/test_examples.py index 025fabcd..2631cd47 100644 --- a/taskflow/tests/test_examples.py +++ b/taskflow/tests/test_examples.py @@ -24,7 +24,7 @@ extension; then it will be checked that output did not change. 
When this module is used as main module, output for all examples are generated. Please note that this will break tests as output for most -examples is indeterministic. +examples is indeterministic (due to hash randomization for example). """ From 52b43f3e2f808602848aa9c56dbb8adadcd36a98 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Mon, 14 Jul 2014 18:42:03 -0700 Subject: [PATCH 151/188] Allow a jobs posted book to be none by default Not all users of jobs and jobboards are using the associated book that accompanies a job, instead they are fine with just using the name and the details that can be provided for usage in their application. Example: http://review.openstack.org/#/c/91763/ To allow the optional usage of books with jobs (which is already supported) by default set the book to none and allow it to be provided on a as needed basis. Change-Id: I69e370a733e44c45c62177008838c259fd9c9a7c --- taskflow/jobs/backends/impl_zookeeper.py | 2 +- taskflow/jobs/jobboard.py | 10 +++++----- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/taskflow/jobs/backends/impl_zookeeper.py b/taskflow/jobs/backends/impl_zookeeper.py index be305a12..0eacea46 100644 --- a/taskflow/jobs/backends/impl_zookeeper.py +++ b/taskflow/jobs/backends/impl_zookeeper.py @@ -432,7 +432,7 @@ class ZookeeperJobBoard(jobboard.NotifyingJobBoard): else: child_proc(request) - def post(self, name, book, details=None): + def post(self, name, book=None, details=None): def format_posting(job_uuid): posting = { diff --git a/taskflow/jobs/jobboard.py b/taskflow/jobs/jobboard.py index c93123a5..d7d0850f 100644 --- a/taskflow/jobs/jobboard.py +++ b/taskflow/jobs/jobboard.py @@ -118,14 +118,14 @@ class JobBoard(object): """ @abc.abstractmethod - def post(self, name, book, details=None): + def post(self, name, book=None, details=None): """Atomically creates and posts a job to the jobboard. This posting allowing others to attempt to claim that job (and - subsequently work on that job). 
The contents of the provided logbook - must provide *enough* information for others to reference to - construct & work on the desired entries that are contained in that - logbook. + subsequently work on that job). The contents of the provided logbook, + details dictionary, or name (or a mix of these) must provide *enough* + information for consumers to reference to construct and perform that + jobs contained work (whatever it may be). Once a job has been posted it can only be removed by consuming that job (after that job is claimed). Any entity can post/propose jobs From d7ac72980489f0be580946c6e120650e89b097a2 Mon Sep 17 00:00:00 2001 From: Christian Berendt Date: Wed, 16 Jul 2014 14:34:20 +0200 Subject: [PATCH 152/188] Bump hacking to version 0.9.2 Change-Id: Ia8272ed8086d13fe1f1853e6cff768e9b75a2793 --- test-requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test-requirements.txt b/test-requirements.txt index 4682acec..62fd2900 100644 --- a/test-requirements.txt +++ b/test-requirements.txt @@ -1,4 +1,4 @@ -hacking>=0.9.1,<0.10 +hacking>=0.9.2,<0.10 discover coverage>=3.6 mock>=1.0 From 6132647fb77b83cc99406b5da9ae3c63666cb447 Mon Sep 17 00:00:00 2001 From: Christian Berendt Date: Thu, 17 Jul 2014 09:05:16 +0200 Subject: [PATCH 153/188] Enabled hacking checks H305 and H307 * H305 imports not grouped correctly * H307 like imports should be grouped together Change-Id: If1dd9c89f65ede6959865a885777cb08c263eca0 --- taskflow/engines/action_engine/engine.py | 2 -- taskflow/engines/action_engine/runtime.py | 11 +++++------ taskflow/engines/worker_based/protocol.py | 3 +-- taskflow/engines/worker_based/proxy.py | 2 +- taskflow/examples/calculate_in_parallel.py | 1 - taskflow/examples/delayed_return.py | 1 - taskflow/examples/fake_billing.py | 3 +-- taskflow/examples/resume_from_backend.py | 1 - taskflow/examples/resume_many_flows/resume_all.py | 1 - taskflow/examples/resume_vm_boot.py | 9 +++------ taskflow/examples/resume_volume_create.py 
| 4 +--- taskflow/examples/reverting_linear.py | 1 - taskflow/examples/simple_linear_listening.py | 1 - taskflow/examples/wrapped_exception.py | 1 - .../versions/14b227d79a87_add_intention_column.py | 1 + .../versions/84d6e888850_add_task_detail_type.py | 1 + taskflow/persistence/backends/sqlalchemy/models.py | 1 - taskflow/test.py | 6 ++---- taskflow/tests/unit/jobs/base.py | 2 +- taskflow/tests/unit/jobs/test_zk_job.py | 6 ++---- taskflow/tests/unit/patterns/test_graph_flow.py | 1 - taskflow/tests/unit/patterns/test_linear_flow.py | 1 - taskflow/tests/unit/patterns/test_unordered_flow.py | 1 - .../tests/unit/persistence/test_zk_persistence.py | 3 +-- taskflow/tests/unit/test_action_engine.py | 11 ++++------- taskflow/tests/unit/test_action_engine_compile.py | 4 +--- taskflow/tests/unit/test_arguments_passing.py | 1 - taskflow/tests/unit/test_duration.py | 6 +++--- taskflow/tests/unit/test_engine_helpers.py | 3 +-- taskflow/tests/unit/test_flow_dependencies.py | 3 +-- taskflow/tests/unit/test_functor_task.py | 1 - taskflow/tests/unit/test_progress.py | 5 ++--- taskflow/tests/unit/test_retries.py | 6 ++---- taskflow/tests/unit/test_utils_async_utils.py | 3 +-- taskflow/tests/unit/test_utils_failure.py | 1 - taskflow/tests/unit/worker_based/test_executor.py | 2 +- taskflow/tests/unit/worker_based/test_protocol.py | 3 +-- taskflow/tests/unit/worker_based/test_proxy.py | 3 ++- taskflow/tests/unit/worker_based/test_server.py | 6 ++---- tox.ini | 2 +- 40 files changed, 42 insertions(+), 82 deletions(-) diff --git a/taskflow/engines/action_engine/engine.py b/taskflow/engines/action_engine/engine.py index fefcef0f..054c2ccf 100644 --- a/taskflow/engines/action_engine/engine.py +++ b/taskflow/engines/action_engine/engine.py @@ -20,13 +20,11 @@ from taskflow.engines.action_engine import compiler from taskflow.engines.action_engine import executor from taskflow.engines.action_engine import runtime from taskflow.engines import base - from taskflow import exceptions as exc 
from taskflow.openstack.common import excutils from taskflow import retry from taskflow import states from taskflow import storage as atom_storage - from taskflow.utils import lock_utils from taskflow.utils import misc from taskflow.utils import reflection diff --git a/taskflow/engines/action_engine/runtime.py b/taskflow/engines/action_engine/runtime.py index 3f5e2670..90913b99 100644 --- a/taskflow/engines/action_engine/runtime.py +++ b/taskflow/engines/action_engine/runtime.py @@ -14,17 +14,16 @@ # License for the specific language governing permissions and limitations # under the License. -from taskflow import exceptions as excp -from taskflow import retry as retry_atom -from taskflow import states as st -from taskflow import task as task_atom -from taskflow.utils import misc - from taskflow.engines.action_engine import analyzer as ca from taskflow.engines.action_engine import executor as ex from taskflow.engines.action_engine import retry_action as ra from taskflow.engines.action_engine import runner as ru from taskflow.engines.action_engine import task_action as ta +from taskflow import exceptions as excp +from taskflow import retry as retry_atom +from taskflow import states as st +from taskflow import task as task_atom +from taskflow.utils import misc class Runtime(object): diff --git a/taskflow/engines/worker_based/protocol.py b/taskflow/engines/worker_based/protocol.py index d8cab533..1eb43227 100644 --- a/taskflow/engines/worker_based/protocol.py +++ b/taskflow/engines/worker_based/protocol.py @@ -16,9 +16,8 @@ import abc -import six - from concurrent import futures +import six from taskflow.engines.action_engine import executor from taskflow.types import time diff --git a/taskflow/engines/worker_based/proxy.py b/taskflow/engines/worker_based/proxy.py index 3700501e..9850b376 100644 --- a/taskflow/engines/worker_based/proxy.py +++ b/taskflow/engines/worker_based/proxy.py @@ -14,11 +14,11 @@ # License for the specific language governing permissions and 
limitations # under the License. -import kombu import logging import socket import threading +import kombu import six LOG = logging.getLogger(__name__) diff --git a/taskflow/examples/calculate_in_parallel.py b/taskflow/examples/calculate_in_parallel.py index f179c3ee..5b996564 100644 --- a/taskflow/examples/calculate_in_parallel.py +++ b/taskflow/examples/calculate_in_parallel.py @@ -26,7 +26,6 @@ top_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), sys.path.insert(0, top_dir) import taskflow.engines - from taskflow.patterns import linear_flow as lf from taskflow.patterns import unordered_flow as uf from taskflow import task diff --git a/taskflow/examples/delayed_return.py b/taskflow/examples/delayed_return.py index cbdc66d5..46578621 100644 --- a/taskflow/examples/delayed_return.py +++ b/taskflow/examples/delayed_return.py @@ -35,7 +35,6 @@ sys.path.insert(0, self_dir) # while the function will have returned. import taskflow.engines - from taskflow.listeners import base from taskflow.patterns import linear_flow as lf from taskflow import states diff --git a/taskflow/examples/fake_billing.py b/taskflow/examples/fake_billing.py index 16829226..566fd4b1 100644 --- a/taskflow/examples/fake_billing.py +++ b/taskflow/examples/fake_billing.py @@ -28,10 +28,9 @@ top_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), sys.path.insert(0, top_dir) -from taskflow.openstack.common import uuidutils - from taskflow import engines from taskflow.listeners import printing +from taskflow.openstack.common import uuidutils from taskflow.patterns import graph_flow as gf from taskflow.patterns import linear_flow as lf from taskflow import task diff --git a/taskflow/examples/resume_from_backend.py b/taskflow/examples/resume_from_backend.py index ef20a160..2a6e0955 100644 --- a/taskflow/examples/resume_from_backend.py +++ b/taskflow/examples/resume_from_backend.py @@ -28,7 +28,6 @@ sys.path.insert(0, top_dir) sys.path.insert(0, self_dir) import taskflow.engines - 
from taskflow.patterns import linear_flow as lf from taskflow import task from taskflow.utils import persistence_utils as p_utils diff --git a/taskflow/examples/resume_many_flows/resume_all.py b/taskflow/examples/resume_many_flows/resume_all.py index 8be5f6d0..071fb616 100644 --- a/taskflow/examples/resume_many_flows/resume_all.py +++ b/taskflow/examples/resume_many_flows/resume_all.py @@ -30,7 +30,6 @@ sys.path.insert(0, example_dir) import taskflow.engines - from taskflow import states import example_utils # noqa diff --git a/taskflow/examples/resume_vm_boot.py b/taskflow/examples/resume_vm_boot.py index 90756f18..973089ab 100644 --- a/taskflow/examples/resume_vm_boot.py +++ b/taskflow/examples/resume_vm_boot.py @@ -31,15 +31,12 @@ top_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), sys.path.insert(0, top_dir) sys.path.insert(0, self_dir) -from taskflow.patterns import graph_flow as gf -from taskflow.patterns import linear_flow as lf - -from taskflow.openstack.common import uuidutils - from taskflow import engines from taskflow import exceptions as exc +from taskflow.openstack.common import uuidutils +from taskflow.patterns import graph_flow as gf +from taskflow.patterns import linear_flow as lf from taskflow import task - from taskflow.utils import eventlet_utils as e_utils from taskflow.utils import persistence_utils as p_utils diff --git a/taskflow/examples/resume_volume_create.py b/taskflow/examples/resume_volume_create.py index f6f90bbc..0fe502e4 100644 --- a/taskflow/examples/resume_volume_create.py +++ b/taskflow/examples/resume_volume_create.py @@ -31,12 +31,10 @@ top_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), sys.path.insert(0, top_dir) sys.path.insert(0, self_dir) +from taskflow import engines from taskflow.patterns import graph_flow as gf from taskflow.patterns import linear_flow as lf - -from taskflow import engines from taskflow import task - from taskflow.utils import persistence_utils as p_utils import example_utils 
# noqa diff --git a/taskflow/examples/reverting_linear.py b/taskflow/examples/reverting_linear.py index e6e5bb04..99e7b5df 100644 --- a/taskflow/examples/reverting_linear.py +++ b/taskflow/examples/reverting_linear.py @@ -26,7 +26,6 @@ top_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), sys.path.insert(0, top_dir) import taskflow.engines - from taskflow.patterns import linear_flow as lf from taskflow import task diff --git a/taskflow/examples/simple_linear_listening.py b/taskflow/examples/simple_linear_listening.py index 358f0ff2..04f9f14e 100644 --- a/taskflow/examples/simple_linear_listening.py +++ b/taskflow/examples/simple_linear_listening.py @@ -26,7 +26,6 @@ top_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), sys.path.insert(0, top_dir) import taskflow.engines - from taskflow.patterns import linear_flow as lf from taskflow import task diff --git a/taskflow/examples/wrapped_exception.py b/taskflow/examples/wrapped_exception.py index 17ae6322..3d3a7379 100644 --- a/taskflow/examples/wrapped_exception.py +++ b/taskflow/examples/wrapped_exception.py @@ -29,7 +29,6 @@ sys.path.insert(0, top_dir) import taskflow.engines - from taskflow import exceptions from taskflow.patterns import unordered_flow as uf from taskflow import task diff --git a/taskflow/persistence/backends/sqlalchemy/alembic/versions/14b227d79a87_add_intention_column.py b/taskflow/persistence/backends/sqlalchemy/alembic/versions/14b227d79a87_add_intention_column.py index b7bfe8d8..33541d0e 100644 --- a/taskflow/persistence/backends/sqlalchemy/alembic/versions/14b227d79a87_add_intention_column.py +++ b/taskflow/persistence/backends/sqlalchemy/alembic/versions/14b227d79a87_add_intention_column.py @@ -20,6 +20,7 @@ down_revision = '84d6e888850' from alembic import op import sqlalchemy as sa + from taskflow import states diff --git a/taskflow/persistence/backends/sqlalchemy/alembic/versions/84d6e888850_add_task_detail_type.py 
b/taskflow/persistence/backends/sqlalchemy/alembic/versions/84d6e888850_add_task_detail_type.py index d03b6528..756cf93a 100644 --- a/taskflow/persistence/backends/sqlalchemy/alembic/versions/84d6e888850_add_task_detail_type.py +++ b/taskflow/persistence/backends/sqlalchemy/alembic/versions/84d6e888850_add_task_detail_type.py @@ -28,6 +28,7 @@ down_revision = '1c783c0c2875' from alembic import op import sqlalchemy as sa + from taskflow.persistence import logbook diff --git a/taskflow/persistence/backends/sqlalchemy/models.py b/taskflow/persistence/backends/sqlalchemy/models.py index cad86628..4a78c5cb 100644 --- a/taskflow/persistence/backends/sqlalchemy/models.py +++ b/taskflow/persistence/backends/sqlalchemy/models.py @@ -25,7 +25,6 @@ from sqlalchemy import types as types from taskflow.openstack.common import jsonutils from taskflow.openstack.common import timeutils from taskflow.openstack.common import uuidutils - from taskflow.persistence import logbook from taskflow import states diff --git a/taskflow/test.py b/taskflow/test.py index aa2ffd42..4de61d3e 100644 --- a/taskflow/test.py +++ b/taskflow/test.py @@ -14,15 +14,13 @@ # License for the specific language governing permissions and limitations # under the License. +import fixtures import mock - +import six from testtools import compat from testtools import matchers from testtools import testcase -import fixtures -import six - from taskflow import exceptions from taskflow.tests import utils from taskflow.utils import misc diff --git a/taskflow/tests/unit/jobs/base.py b/taskflow/tests/unit/jobs/base.py index c75f424c..a178a8af 100644 --- a/taskflow/tests/unit/jobs/base.py +++ b/taskflow/tests/unit/jobs/base.py @@ -15,11 +15,11 @@ # under the License. 
import contextlib -import mock import threading import time from kazoo.recipe import watchers +import mock from taskflow import exceptions as excp from taskflow.openstack.common import uuidutils diff --git a/taskflow/tests/unit/jobs/test_zk_job.py b/taskflow/tests/unit/jobs/test_zk_job.py index 3d5f8228..7268a1a4 100644 --- a/taskflow/tests/unit/jobs/test_zk_job.py +++ b/taskflow/tests/unit/jobs/test_zk_job.py @@ -16,16 +16,14 @@ import six import testtools - from zake import fake_client from zake import utils as zake_utils from taskflow.jobs.backends import impl_zookeeper -from taskflow import states -from taskflow import test - from taskflow.openstack.common import jsonutils from taskflow.openstack.common import uuidutils +from taskflow import states +from taskflow import test from taskflow.tests.unit.jobs import base from taskflow.tests import utils as test_utils from taskflow.utils import kazoo_utils diff --git a/taskflow/tests/unit/patterns/test_graph_flow.py b/taskflow/tests/unit/patterns/test_graph_flow.py index 2a95ad2d..c7dad38e 100644 --- a/taskflow/tests/unit/patterns/test_graph_flow.py +++ b/taskflow/tests/unit/patterns/test_graph_flow.py @@ -17,7 +17,6 @@ from taskflow import exceptions as exc from taskflow.patterns import graph_flow as gf from taskflow import retry - from taskflow import test from taskflow.tests import utils diff --git a/taskflow/tests/unit/patterns/test_linear_flow.py b/taskflow/tests/unit/patterns/test_linear_flow.py index 7ff88860..a0dbd0d7 100644 --- a/taskflow/tests/unit/patterns/test_linear_flow.py +++ b/taskflow/tests/unit/patterns/test_linear_flow.py @@ -17,7 +17,6 @@ from taskflow import exceptions as exc from taskflow.patterns import linear_flow as lf from taskflow import retry - from taskflow import test from taskflow.tests import utils diff --git a/taskflow/tests/unit/patterns/test_unordered_flow.py b/taskflow/tests/unit/patterns/test_unordered_flow.py index 4759f8d2..a4043fe2 100644 --- 
a/taskflow/tests/unit/patterns/test_unordered_flow.py +++ b/taskflow/tests/unit/patterns/test_unordered_flow.py @@ -17,7 +17,6 @@ from taskflow import exceptions as exc from taskflow.patterns import unordered_flow as uf from taskflow import retry - from taskflow import test from taskflow.tests import utils diff --git a/taskflow/tests/unit/persistence/test_zk_persistence.py b/taskflow/tests/unit/persistence/test_zk_persistence.py index 354c2a71..609de21f 100644 --- a/taskflow/tests/unit/persistence/test_zk_persistence.py +++ b/taskflow/tests/unit/persistence/test_zk_persistence.py @@ -25,10 +25,9 @@ from taskflow.openstack.common import uuidutils from taskflow.persistence import backends from taskflow.persistence.backends import impl_zookeeper from taskflow import test -from taskflow.utils import kazoo_utils - from taskflow.tests.unit.persistence import base from taskflow.tests import utils as test_utils +from taskflow.utils import kazoo_utils TEST_PATH_TPL = '/taskflow/persistence-test/%s' _ZOOKEEPER_AVAILABLE = test_utils.zookeeper_available( diff --git a/taskflow/tests/unit/test_action_engine.py b/taskflow/tests/unit/test_action_engine.py index 533b5eef..d2fb0d43 100644 --- a/taskflow/tests/unit/test_action_engine.py +++ b/taskflow/tests/unit/test_action_engine.py @@ -15,28 +15,25 @@ # under the License. 
import contextlib -import testtools import threading from concurrent import futures - -from taskflow.patterns import graph_flow as gf -from taskflow.patterns import linear_flow as lf -from taskflow.patterns import unordered_flow as uf +import testtools import taskflow.engines - from taskflow.engines.action_engine import engine as eng from taskflow.engines.worker_based import engine as w_eng from taskflow.engines.worker_based import worker as wkr from taskflow import exceptions as exc +from taskflow.patterns import graph_flow as gf +from taskflow.patterns import linear_flow as lf +from taskflow.patterns import unordered_flow as uf from taskflow.persistence import logbook from taskflow import states from taskflow import task from taskflow import test from taskflow.tests import utils from taskflow.types import graph as gr - from taskflow.utils import eventlet_utils as eu from taskflow.utils import misc from taskflow.utils import persistence_utils as p_utils diff --git a/taskflow/tests/unit/test_action_engine_compile.py b/taskflow/tests/unit/test_action_engine_compile.py index 82075b04..ef268d9d 100644 --- a/taskflow/tests/unit/test_action_engine_compile.py +++ b/taskflow/tests/unit/test_action_engine_compile.py @@ -16,17 +16,15 @@ import string +from taskflow.engines.action_engine import compiler from taskflow import exceptions as exc from taskflow.patterns import graph_flow as gf from taskflow.patterns import linear_flow as lf from taskflow.patterns import unordered_flow as uf from taskflow import retry - from taskflow import test from taskflow.tests import utils as t_utils -from taskflow.engines.action_engine import compiler - def _make_many(amount): assert amount <= len(string.ascii_lowercase), 'Not enough letters' diff --git a/taskflow/tests/unit/test_arguments_passing.py b/taskflow/tests/unit/test_arguments_passing.py index 0281c1ff..5e9fc3a8 100644 --- a/taskflow/tests/unit/test_arguments_passing.py +++ b/taskflow/tests/unit/test_arguments_passing.py @@ -15,7 
+15,6 @@ # under the License. import taskflow.engines - from taskflow import exceptions as exc from taskflow import test from taskflow.tests import utils diff --git a/taskflow/tests/unit/test_duration.py b/taskflow/tests/unit/test_duration.py index 67d240cb..e1588eb2 100644 --- a/taskflow/tests/unit/test_duration.py +++ b/taskflow/tests/unit/test_duration.py @@ -15,17 +15,17 @@ # under the License. import contextlib -import mock import time -from taskflow import task -from taskflow import test +import mock import taskflow.engines from taskflow import exceptions as exc from taskflow.listeners import timing from taskflow.patterns import linear_flow as lf from taskflow.persistence.backends import impl_memory +from taskflow import task +from taskflow import test from taskflow.tests import utils as t_utils from taskflow.utils import persistence_utils as p_utils diff --git a/taskflow/tests/unit/test_engine_helpers.py b/taskflow/tests/unit/test_engine_helpers.py index da0a276b..30ff51c3 100644 --- a/taskflow/tests/unit/test_engine_helpers.py +++ b/taskflow/tests/unit/test_engine_helpers.py @@ -16,14 +16,13 @@ import mock +import taskflow.engines from taskflow import exceptions as exc from taskflow.patterns import linear_flow from taskflow import test from taskflow.tests import utils as test_utils from taskflow.utils import persistence_utils as p_utils -import taskflow.engines - class EngineLoadingTestCase(test.TestCase): def test_default_load(self): diff --git a/taskflow/tests/unit/test_flow_dependencies.py b/taskflow/tests/unit/test_flow_dependencies.py index 7700499d..3ddb95d9 100644 --- a/taskflow/tests/unit/test_flow_dependencies.py +++ b/taskflow/tests/unit/test_flow_dependencies.py @@ -14,11 +14,10 @@ # License for the specific language governing permissions and limitations # under the License. 
+from taskflow import exceptions from taskflow.patterns import graph_flow as gf from taskflow.patterns import linear_flow as lf from taskflow.patterns import unordered_flow as uf - -from taskflow import exceptions from taskflow import retry from taskflow import test from taskflow.tests import utils diff --git a/taskflow/tests/unit/test_functor_task.py b/taskflow/tests/unit/test_functor_task.py index 53db4c96..676c2c8b 100644 --- a/taskflow/tests/unit/test_functor_task.py +++ b/taskflow/tests/unit/test_functor_task.py @@ -15,7 +15,6 @@ # under the License. import taskflow.engines - from taskflow.patterns import linear_flow from taskflow import task as base from taskflow import test diff --git a/taskflow/tests/unit/test_progress.py b/taskflow/tests/unit/test_progress.py index f1fa15d3..f37d1132 100644 --- a/taskflow/tests/unit/test_progress.py +++ b/taskflow/tests/unit/test_progress.py @@ -16,12 +16,11 @@ import contextlib -from taskflow import task -from taskflow import test - import taskflow.engines from taskflow.patterns import linear_flow as lf from taskflow.persistence.backends import impl_memory +from taskflow import task +from taskflow import test from taskflow.utils import persistence_utils as p_utils diff --git a/taskflow/tests/unit/test_retries.py b/taskflow/tests/unit/test_retries.py index 6953b376..71ea70cb 100644 --- a/taskflow/tests/unit/test_retries.py +++ b/taskflow/tests/unit/test_retries.py @@ -14,13 +14,11 @@ # License for the specific language governing permissions and limitations # under the License. 
+import taskflow.engines +from taskflow import exceptions as exc from taskflow.patterns import graph_flow as gf from taskflow.patterns import linear_flow as lf from taskflow.patterns import unordered_flow as uf - -import taskflow.engines - -from taskflow import exceptions as exc from taskflow import retry from taskflow import states as st from taskflow import test diff --git a/taskflow/tests/unit/test_utils_async_utils.py b/taskflow/tests/unit/test_utils_async_utils.py index 8e9ab944..0abf4107 100644 --- a/taskflow/tests/unit/test_utils_async_utils.py +++ b/taskflow/tests/unit/test_utils_async_utils.py @@ -14,9 +14,8 @@ # License for the specific language governing permissions and limitations # under the License. -import testtools - from concurrent import futures +import testtools from taskflow import test from taskflow.utils import async_utils as au diff --git a/taskflow/tests/unit/test_utils_failure.py b/taskflow/tests/unit/test_utils_failure.py index 394abfd2..012e241a 100644 --- a/taskflow/tests/unit/test_utils_failure.py +++ b/taskflow/tests/unit/test_utils_failure.py @@ -19,7 +19,6 @@ import six from taskflow import exceptions from taskflow import test from taskflow.tests import utils as test_utils - from taskflow.utils import misc diff --git a/taskflow/tests/unit/worker_based/test_executor.py b/taskflow/tests/unit/worker_based/test_executor.py index 7faee1b7..36e6c0b7 100644 --- a/taskflow/tests/unit/worker_based/test_executor.py +++ b/taskflow/tests/unit/worker_based/test_executor.py @@ -14,12 +14,12 @@ # License for the specific language governing permissions and limitations # under the License. 
-import mock import threading import time from concurrent import futures from kombu import exceptions as kombu_exc +import mock from taskflow.engines.worker_based import executor from taskflow.engines.worker_based import protocol as pr diff --git a/taskflow/tests/unit/worker_based/test_protocol.py b/taskflow/tests/unit/worker_based/test_protocol.py index 27d6e00d..af7e6490 100644 --- a/taskflow/tests/unit/worker_based/test_protocol.py +++ b/taskflow/tests/unit/worker_based/test_protocol.py @@ -14,9 +14,8 @@ # License for the specific language governing permissions and limitations # under the License. -import mock - from concurrent import futures +import mock from taskflow.engines.worker_based import protocol as pr from taskflow import test diff --git a/taskflow/tests/unit/worker_based/test_proxy.py b/taskflow/tests/unit/worker_based/test_proxy.py index 8876c23c..2cbdc25a 100644 --- a/taskflow/tests/unit/worker_based/test_proxy.py +++ b/taskflow/tests/unit/worker_based/test_proxy.py @@ -14,10 +14,11 @@ # License for the specific language governing permissions and limitations # under the License. -import mock import socket import threading +import mock + from taskflow.engines.worker_based import proxy from taskflow import test diff --git a/taskflow/tests/unit/worker_based/test_server.py b/taskflow/tests/unit/worker_based/test_server.py index a4eab7a8..2a5ed9f1 100644 --- a/taskflow/tests/unit/worker_based/test_server.py +++ b/taskflow/tests/unit/worker_based/test_server.py @@ -14,11 +14,9 @@ # License for the specific language governing permissions and limitations # under the License. 
-import mock - -import six - from kombu import exceptions as exc +import mock +import six from taskflow.engines.worker_based import endpoint as ep from taskflow.engines.worker_based import protocol as pr diff --git a/tox.ini b/tox.ini index 82ba2343..29b000e6 100644 --- a/tox.ini +++ b/tox.ini @@ -68,7 +68,7 @@ commands = python setup.py testr --coverage --testr-args='{posargs}' commands = {posargs} [flake8] -ignore = H307,H305,H904 +ignore = H904 builtins = _ exclude = .venv,.tox,dist,doc,./taskflow/openstack/common,*egg,.git,build,tools From 2a8716885a66676e969ff580c1ee244b9bc166e7 Mon Sep 17 00:00:00 2001 From: OpenStack Proposal Bot Date: Tue, 22 Jul 2014 13:49:00 +0000 Subject: [PATCH 154/188] Updated from global requirements Change-Id: I7e075ff2afeb380f28ae1800bbb91389b772c598 --- test-requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test-requirements.txt b/test-requirements.txt index 4682acec..7082c274 100644 --- a/test-requirements.txt +++ b/test-requirements.txt @@ -5,7 +5,7 @@ mock>=1.0 python-subunit>=0.0.18 testrepository>=0.0.18 testtools>=0.9.34 -zake>=0.0.20 # Apache-2.0 +zake>=0.0.26 # Apache-2.0 # docs build jobs sphinx>=1.1.2,!=1.2.0,<1.3 oslosphinx From 75dc2e3b5e777eeea6540f286400fa381c6517b6 Mon Sep 17 00:00:00 2001 From: YAMAMOTO Takashi Date: Wed, 23 Jul 2014 12:14:10 +0900 Subject: [PATCH 155/188] README.rst: Avoid using non-ascii character Replace U+2019 with ascii 0x27. 
Change-Id: I16a1d900ebc708be5d958713f1cd8364fe0d9793 --- README.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.rst b/README.rst index f16b9688..d75518f3 100644 --- a/README.rst +++ b/README.rst @@ -20,7 +20,7 @@ Requirements Because TaskFlow has many optional (pluggable) parts like persistence backends and engines, we decided to split our requirements into two -parts: - things that are absolutely required by TaskFlow (you can’t use +parts: - things that are absolutely required by TaskFlow (you can't use TaskFlow without them) are put to ``requirements.txt``; - things that are required by some optional part of TaskFlow (you can use TaskFlow without them) are put to ``optional-requirements.txt``; if you want to From 9401b514193bf0e4c618a441e9990a5c2395b9ec Mon Sep 17 00:00:00 2001 From: Brian Jarrett Date: Wed, 23 Jul 2014 23:09:32 -0600 Subject: [PATCH 156/188] Fixes unsorted dicts and sets in doctests A few doctests were checking the values of dictionaries and sets without sorting them in any way. This was causing some of the doctests to fail when the order inside the dictionary or set changed and wasn't consistent. Now all of the doctests check against sorted values so the tests are consistent in their output every time. Change-Id: I52b30327fb4acb3f1ee57ae14b611e988a43576a Closes-Bug: 1347937 --- doc/source/arguments_and_results.rst | 4 ++-- doc/source/inputs_and_outputs.rst | 10 ++++++---- 2 files changed, 8 insertions(+), 6 deletions(-) diff --git a/doc/source/arguments_and_results.rst b/doc/source/arguments_and_results.rst index e5870545..e23a6375 100644 --- a/doc/source/arguments_and_results.rst +++ b/doc/source/arguments_and_results.rst @@ -53,8 +53,8 @@ the task. ... def execute(self, spam, eggs): ... return spam + eggs ... - >>> MyTask().requires - set(['eggs', 'spam']) + >>> sorted(MyTask().requires) + ['eggs', 'spam'] Inference from the method signature is the ''simplest'' way to specify task arguments. 
Optional arguments (with default values), and special arguments like diff --git a/doc/source/inputs_and_outputs.rst b/doc/source/inputs_and_outputs.rst index cc1fd2d0..34fb1bad 100644 --- a/doc/source/inputs_and_outputs.rst +++ b/doc/source/inputs_and_outputs.rst @@ -34,6 +34,7 @@ set of names of such values is available via ``provides`` property of the flow. from taskflow import task from taskflow.patterns import linear_flow from taskflow import engines + from pprint import pprint For example: @@ -118,10 +119,11 @@ of the engine helpers (:py:func:`~taskflow.engines.helpers.run` or >>> flo = linear_flow.Flow("cat-dog") >>> flo.add(CatTalk(), DogTalk(provides="dog")) - >>> engines.run(flo, store={'meow': 'meow', 'woof': 'woof'}) + >>> result = engines.run(flo, store={'meow': 'meow', 'woof': 'woof'}) meow woof - {'meow': 'meow', 'woof': 'woof', 'dog': 'dog'} + >>> pprint(result) + {'dog': 'dog', 'meow': 'meow', 'woof': 'woof'} You can also directly interact with the engine storage layer to add additional values, note that if this route is used you can't use @@ -154,8 +156,8 @@ For example: >>> eng.run() meow woof - >>> print(eng.storage.fetch_all()) - {'meow': 'meow', 'woof': 'woof', 'dog': 'dog'} + >>> pprint(eng.storage.fetch_all()) + {'dog': 'dog', 'meow': 'meow', 'woof': 'woof'} >>> print(eng.storage.fetch("dog")) dog From 2405bd80e90a7ca61c3f57d5f6ad022eef3d002f Mon Sep 17 00:00:00 2001 From: Christian Berendt Date: Thu, 24 Jul 2014 10:04:42 +0200 Subject: [PATCH 157/188] Enable hacking checks H305 and H307 in tox.ini template With the change If1dd9c89f65ede6959865a885777cb08c263eca0 we enabled the hacking checks H305 and H307. But we forgot to enable them in the tox.ini template as well. 
Change-Id: I67482951aa09ee73546715181701066b1d26343a --- tox-tmpl.ini | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/tox-tmpl.ini b/tox-tmpl.ini index 3e3e07ed..f0f3dd00 100644 --- a/tox-tmpl.ini +++ b/tox-tmpl.ini @@ -39,10 +39,8 @@ commands = python setup.py testr --coverage --testr-args='{posargs}' commands = {posargs} [flake8] -# H305 imports not grouped correctly -# H307 like imports should be grouped together # H904 Wrap long lines in parentheses instead of a backslash -ignore = H307,H305,H904 +ignore = H904 builtins = _ exclude = .venv,.tox,dist,doc,./taskflow/openstack/common,*egg,.git,build,tools From e3936120545c16c67b3c5964a814902dcf2198d5 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Sun, 20 Jul 2014 12:52:32 -0700 Subject: [PATCH 158/188] Use checked commit when committing kazoo transactions To avoid the case where the transaction appears to have committed successfully (when it actually has not) create a new checked_commit utility function and use it in the zookeeper persistence backend to ensure that the transaction has completed correctly (when it has not raise a new kazoo exception that contains the failures to the caller, which can then reraise that exception, examine the failures or do some other action). 
Change-Id: Ib169b36abb568a0e1516c4c786fccb30856661e1 --- .../persistence/backends/impl_zookeeper.py | 89 ++++++++++--------- taskflow/utils/kazoo_utils.py | 88 ++++++++++++++++++ taskflow/utils/reflection.py | 7 +- 3 files changed, 141 insertions(+), 43 deletions(-) diff --git a/taskflow/persistence/backends/impl_zookeeper.py b/taskflow/persistence/backends/impl_zookeeper.py index 024398d2..e60bad85 100644 --- a/taskflow/persistence/backends/impl_zookeeper.py +++ b/taskflow/persistence/backends/impl_zookeeper.py @@ -156,8 +156,10 @@ class ZkConnection(base.Connection): def update_atom_details(self, ad): """Update a atom detail transactionally.""" with self._exc_wrapper(): - with self._client.transaction() as txn: - return self._update_atom_details(ad, txn) + txn = self._client.transaction() + ad = self._update_atom_details(ad, txn) + k_utils.checked_commit(txn) + return ad def _update_atom_details(self, ad, txn, create_missing=False): # Determine whether the desired data exists or not. @@ -209,8 +211,10 @@ class ZkConnection(base.Connection): def update_flow_details(self, fd): """Update a flow detail transactionally.""" with self._exc_wrapper(): - with self._client.transaction() as txn: - return self._update_flow_details(fd, txn) + txn = self._client.transaction() + fd = self._update_flow_details(fd, txn) + k_utils.checked_commit(txn) + return fd def _update_flow_details(self, fd, txn, create_missing=False): # Determine whether the desired data exists or not @@ -306,19 +310,19 @@ class ZkConnection(base.Connection): return e_lb with self._exc_wrapper(): - with self._client.transaction() as txn: - # Determine whether the desired data exists or not. - lb_path = paths.join(self.book_path, lb.uuid) - try: - lb_data, _zstat = self._client.get(lb_path) - except k_exc.NoNodeError: - # Create a new logbook since it doesn't exist. - e_lb = _create_logbook(lb_path, txn) - else: - # Otherwise update the existing logbook instead. 
- e_lb = _update_logbook(lb_path, lb_data, txn) - # Finally return (updated) logbook. - return e_lb + txn = self._client.transaction() + # Determine whether the desired data exists or not. + lb_path = paths.join(self.book_path, lb.uuid) + try: + lb_data, _zstat = self._client.get(lb_path) + except k_exc.NoNodeError: + # Create a new logbook since it doesn't exist. + e_lb = _create_logbook(lb_path, txn) + else: + # Otherwise update the existing logbook instead. + e_lb = _update_logbook(lb_path, lb_data, txn) + k_utils.checked_commit(txn) + return e_lb def _get_logbook(self, lb_uuid): lb_path = paths.join(self.book_path, lb_uuid) @@ -380,35 +384,38 @@ class ZkConnection(base.Connection): txn.delete(lb_path) with self._exc_wrapper(): - with self._client.transaction() as txn: - _destroy_logbook(lb_uuid, txn) + txn = self._client.transaction() + _destroy_logbook(lb_uuid, txn) + k_utils.checked_commit(txn) def clear_all(self, delete_dirs=True): """Delete all data transactionally.""" with self._exc_wrapper(): - with self._client.transaction() as txn: + txn = self._client.transaction() - # Delete all data under logbook path. - for lb_uuid in self._client.get_children(self.book_path): - lb_path = paths.join(self.book_path, lb_uuid) - for fd_uuid in self._client.get_children(lb_path): - txn.delete(paths.join(lb_path, fd_uuid)) - txn.delete(lb_path) + # Delete all data under logbook path. + for lb_uuid in self._client.get_children(self.book_path): + lb_path = paths.join(self.book_path, lb_uuid) + for fd_uuid in self._client.get_children(lb_path): + txn.delete(paths.join(lb_path, fd_uuid)) + txn.delete(lb_path) - # Delete all data under flow detail path. - for fd_uuid in self._client.get_children(self.flow_path): - fd_path = paths.join(self.flow_path, fd_uuid) - for ad_uuid in self._client.get_children(fd_path): - txn.delete(paths.join(fd_path, ad_uuid)) - txn.delete(fd_path) + # Delete all data under flow detail path. 
+ for fd_uuid in self._client.get_children(self.flow_path): + fd_path = paths.join(self.flow_path, fd_uuid) + for ad_uuid in self._client.get_children(fd_path): + txn.delete(paths.join(fd_path, ad_uuid)) + txn.delete(fd_path) - # Delete all data under atom detail path. - for ad_uuid in self._client.get_children(self.atom_path): - ad_path = paths.join(self.atom_path, ad_uuid) - txn.delete(ad_path) + # Delete all data under atom detail path. + for ad_uuid in self._client.get_children(self.atom_path): + ad_path = paths.join(self.atom_path, ad_uuid) + txn.delete(ad_path) - # Delete containing directories. - if delete_dirs: - txn.delete(self.book_path) - txn.delete(self.atom_path) - txn.delete(self.flow_path) + # Delete containing directories. + if delete_dirs: + txn.delete(self.book_path) + txn.delete(self.atom_path) + txn.delete(self.flow_path) + + k_utils.checked_commit(txn) diff --git a/taskflow/utils/kazoo_utils.py b/taskflow/utils/kazoo_utils.py index 84f6b262..ae62e880 100644 --- a/taskflow/utils/kazoo_utils.py +++ b/taskflow/utils/kazoo_utils.py @@ -15,9 +15,11 @@ # under the License. from kazoo import client +from kazoo import exceptions as k_exc import six from taskflow import exceptions as exc +from taskflow.utils import reflection def _parse_hosts(hosts): @@ -33,6 +35,92 @@ def _parse_hosts(hosts): return hosts +def prettify_failures(failures, limit=-1): + """Prettifies a checked commits failures (ignores sensitive data...). + + Example input and output: + + >>> from taskflow.utils import kazoo_utils + >>> conf = {"hosts": ['localhost:2181']} + >>> c = kazoo_utils.make_client(conf) + >>> c.start(timeout=1) + >>> txn = c.transaction() + >>> txn.create("/test") + >>> txn.check("/test", 2) + >>> txn.delete("/test") + >>> try: + ... kazoo_utils.checked_commit(txn) + ... except kazoo_utils.KazooTransactionException as e: + ... print(kazoo_utils.prettify_failures(e.failures, limit=1)) + ... + RolledBackError@Create(path='/test') and 2 more... 
+ >>> c.stop() + >>> c.close() + """ + prettier = [] + for (op, r) in failures: + pretty_op = reflection.get_class_name(op, fully_qualified=False) + # Pick off a few attributes that are meaningful (but one that don't + # show actual data, which might not be desired to show...). + selected_attrs = [ + "path=%r" % op.path, + ] + try: + if op.version != -1: + selected_attrs.append("version=%s" % op.version) + except AttributeError: + pass + pretty_op += "(%s)" % (", ".join(selected_attrs)) + pretty_cause = reflection.get_class_name(r, fully_qualified=False) + prettier.append("%s@%s" % (pretty_cause, pretty_op)) + if limit <= 0 or len(prettier) <= limit: + return ", ".join(prettier) + else: + leftover = prettier[limit:] + prettier = prettier[0:limit] + return ", ".join(prettier) + " and %s more..." % len(leftover) + + +class KazooTransactionException(k_exc.KazooException): + """Exception raised when a checked commit fails.""" + + def __init__(self, message, failures): + super(KazooTransactionException, self).__init__(message) + self._failures = tuple(failures) + + @property + def failures(self): + return self._failures + + +def checked_commit(txn): + # Until https://github.com/python-zk/kazoo/pull/224 is fixed we have + # to workaround the transaction failing silently. 
+ if not txn.operations: + return [] + results = txn.commit() + failures = [] + for op, result in six.moves.zip(txn.operations, results): + if isinstance(result, k_exc.KazooException): + failures.append((op, result)) + if len(results) < len(txn.operations): + raise KazooTransactionException( + "Transaction returned %s results, this is less than" + " the number of expected transaction operations %s" + % (len(results), len(txn.operations)), failures) + if len(results) > len(txn.operations): + raise KazooTransactionException( + "Transaction returned %s results, this is greater than" + " the number of expected transaction operations %s" + % (len(results), len(txn.operations)), failures) + if failures: + raise KazooTransactionException( + "Transaction with %s operations failed: %s" + % (len(txn.operations), + prettify_failures(failures, limit=1)), failures) + return results + + def finalize_client(client): """Stops and closes a client, even if it wasn't started.""" client.stop() diff --git a/taskflow/utils/reflection.py b/taskflow/utils/reflection.py index c7f1a06a..b386dfa2 100644 --- a/taskflow/utils/reflection.py +++ b/taskflow/utils/reflection.py @@ -77,7 +77,7 @@ def get_member_names(obj, exclude_hidden=True): return [name for (name, _obj) in _get_members(obj, exclude_hidden)] -def get_class_name(obj): +def get_class_name(obj, fully_qualified=True): """Get class name for object. If object is a type, fully qualified name of the type is returned. 
@@ -88,7 +88,10 @@ def get_class_name(obj): obj = type(obj) if obj.__module__ in ('builtins', '__builtin__', 'exceptions'): return obj.__name__ - return '.'.join((obj.__module__, obj.__name__)) + if fully_qualified: + return '.'.join((obj.__module__, obj.__name__)) + else: + return obj.__name__ def get_all_class_names(obj, up_to=object): From a68d40bf02a69957ad4cd2e4eb0646e2b9c74575 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Wed, 18 Jun 2014 21:25:46 -0700 Subject: [PATCH 159/188] Use a common message dispatcher Instead of recreating a dispatcher in the server and executor objects use a common dispatcher that is shared between them. It will dispatch based on the message type received into a provided dict of dispatch handler callbacks. It also can generically requeue messages and can reject messages if they are missing key required message properties ('type' in the current case). Part of blueprint wbe-message-validation Change-Id: I8320f4707183f36e6a69f0552cf62f99a5467b7e --- taskflow/engines/worker_based/dispatcher.py | 96 +++++++++++++++++++ taskflow/engines/worker_based/executor.py | 32 ++----- taskflow/engines/worker_based/proxy.py | 14 ++- taskflow/engines/worker_based/server.py | 61 ++++-------- .../unit/worker_based/test_dispatcher.py | 77 +++++++++++++++ .../tests/unit/worker_based/test_executor.py | 51 +++------- .../unit/worker_based/test_message_pump.py | 80 ++++++++++++++++ .../tests/unit/worker_based/test_proxy.py | 7 +- .../tests/unit/worker_based/test_server.py | 70 +------------- 9 files changed, 308 insertions(+), 180 deletions(-) create mode 100644 taskflow/engines/worker_based/dispatcher.py create mode 100644 taskflow/tests/unit/worker_based/test_dispatcher.py create mode 100644 taskflow/tests/unit/worker_based/test_message_pump.py diff --git a/taskflow/engines/worker_based/dispatcher.py b/taskflow/engines/worker_based/dispatcher.py new file mode 100644 index 00000000..4983f6d5 --- /dev/null +++ 
b/taskflow/engines/worker_based/dispatcher.py @@ -0,0 +1,96 @@ +# -*- coding: utf-8 -*- + +# Copyright (C) 2014 Yahoo! Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import logging + +from kombu import exceptions as kombu_exc +import six + +LOG = logging.getLogger(__name__) + + +class TypeDispatcher(object): + """Receives messages and dispatches to type specific handlers.""" + + def __init__(self, type_handlers): + self._handlers = dict(type_handlers) + self._requeue_filters = [] + + def add_requeue_filter(self, callback): + """Add a callback that can *request* message requeuing. + + The callback will be activated before the message has been acked and + it can be used to instruct the dispatcher to requeue the message + instead of processing it. + """ + assert six.callable(callback), "Callback must be callable" + self._requeue_filters.append(callback) + + def _collect_requeue_votes(self, data, message): + # Returns how many of the filters asked for the message to be requeued. + requeue_votes = 0 + for f in self._requeue_filters: + try: + if f(data, message): + requeue_votes += 1 + except Exception: + LOG.exception("Failed calling requeue filter to determine" + " if message %r should be requeued.", + message.delivery_tag) + return requeue_votes + + def _requeue_log_error(self, message, errors): + # TODO(harlowja): Remove when http://github.com/celery/kombu/pull/372 + # is merged and a version is released with this change... 
+ try: + message.requeue() + except errors as exc: + # This was taken from how kombu is formatting its messages + # when its reject_log_error or ack_log_error functions are + # used so that we have a similar error format for requeuing. + LOG.critical("Couldn't requeue %r, reason:%r", + message.delivery_tag, exc, exc_info=True) + else: + LOG.debug("AMQP message %r requeued.", message.delivery_tag) + + def on_message(self, data, message): + """This method is called on incoming messages.""" + LOG.debug("Got message: %r", message.delivery_tag) + if self._collect_requeue_votes(data, message): + self._requeue_log_error(message, + errors=(kombu_exc.MessageStateError,)) + else: + try: + msg_type = message.properties['type'] + except KeyError: + message.reject_log_error( + logger=LOG, errors=(kombu_exc.MessageStateError,)) + LOG.warning("The 'type' message property is missing" + " in message %r", message.delivery_tag) + else: + handler = self._handlers.get(msg_type) + if handler is None: + message.reject_log_error( + logger=LOG, errors=(kombu_exc.MessageStateError,)) + LOG.warning("Unexpected message type: '%s' in message" + " %r", msg_type, message.delivery_tag) + else: + message.ack_log_error( + logger=LOG, errors=(kombu_exc.MessageStateError,)) + if message.acknowledged: + LOG.debug("AMQP message %r acknowledged.", + message.delivery_tag) + handler(data, message) diff --git a/taskflow/engines/worker_based/executor.py b/taskflow/engines/worker_based/executor.py index afea043f..8bd799c4 100644 --- a/taskflow/engines/worker_based/executor.py +++ b/taskflow/engines/worker_based/executor.py @@ -16,8 +16,6 @@ import logging -from kombu import exceptions as kombu_exc - from taskflow.engines.action_engine import executor from taskflow.engines.worker_based import cache from taskflow.engines.worker_based import protocol as pr @@ -75,36 +73,18 @@ class WorkerTaskExecutor(executor.TaskExecutorBase): self._topics = topics self._requests_cache = cache.RequestsCache() 
self._workers_cache = cache.WorkersCache() - self._proxy = proxy.Proxy(uuid, exchange, self._on_message, + handlers = { + pr.NOTIFY: self._process_notify, + pr.RESPONSE: self._process_response, + } + self._proxy = proxy.Proxy(uuid, exchange, handlers, self._on_wait, **kwargs) self._proxy_thread = None self._periodic = PeriodicWorker(tt.Timeout(pr.NOTIFY_PERIOD), [self._notify_topics]) self._periodic_thread = None - def _on_message(self, data, message): - """This method is called on incoming message.""" - LOG.debug("Got message: %s", data) - try: - # acknowledge message before processing - message.ack() - except kombu_exc.MessageStateError: - LOG.exception("Failed to acknowledge AMQP message.") - else: - LOG.debug("AMQP message acknowledged.") - try: - msg_type = message.properties['type'] - except KeyError: - LOG.warning("The 'type' message property is missing.") - else: - if msg_type == pr.NOTIFY: - self._process_notify(data) - elif msg_type == pr.RESPONSE: - self._process_response(data, message) - else: - LOG.warning("Unexpected message type: %s", msg_type) - - def _process_notify(self, notify): + def _process_notify(self, notify, message): """Process notify message from remote side.""" LOG.debug("Start processing notify message.") topic = notify['topic'] diff --git a/taskflow/engines/worker_based/proxy.py b/taskflow/engines/worker_based/proxy.py index 9850b376..aaa75c86 100644 --- a/taskflow/engines/worker_based/proxy.py +++ b/taskflow/engines/worker_based/proxy.py @@ -21,6 +21,9 @@ import threading import kombu import six +from taskflow.engines.worker_based import dispatcher + + LOG = logging.getLogger(__name__) # NOTE(skudriashev): A timeout of 1 is often used in environments where @@ -31,17 +34,20 @@ DRAIN_EVENTS_PERIOD = 1 class Proxy(object): """A proxy processes messages from/to the named exchange.""" - def __init__(self, topic, exchange_name, on_message, on_wait=None, + def __init__(self, topic, exchange_name, type_handlers, on_wait=None, **kwargs): 
self._topic = topic self._exchange_name = exchange_name - self._on_message = on_message self._on_wait = on_wait self._running = threading.Event() self._url = kwargs.get('url') self._transport = kwargs.get('transport') self._transport_opts = kwargs.get('transport_options') - + self._dispatcher = dispatcher.TypeDispatcher(type_handlers) + self._dispatcher.add_requeue_filter( + # NOTE(skudriashev): Process all incoming messages only if proxy is + # running, otherwise requeue them. + lambda data, message: not self.is_running) self._drain_events_timeout = DRAIN_EVENTS_PERIOD if self._transport == 'memory' and self._transport_opts: polling_interval = self._transport_opts.get('polling_interval') @@ -95,7 +101,7 @@ class Proxy(object): with kombu.connections[self._conn].acquire(block=True) as conn: queue = self._make_queue(self._topic, self._exchange, channel=conn) with conn.Consumer(queues=queue, - callbacks=[self._on_message]): + callbacks=[self._dispatcher.on_message]): self._running.set() while self.is_running: try: diff --git a/taskflow/engines/worker_based/server.py b/taskflow/engines/worker_based/server.py index d52c3dce..7f10113f 100644 --- a/taskflow/engines/worker_based/server.py +++ b/taskflow/engines/worker_based/server.py @@ -17,7 +17,7 @@ import functools import logging -from kombu import exceptions as kombu_exc +import six from taskflow.engines.worker_based import protocol as pr from taskflow.engines.worker_based import proxy @@ -26,54 +26,35 @@ from taskflow.utils import misc LOG = logging.getLogger(__name__) +def delayed(executor): + """Wraps & runs the function using a futures compatible executor.""" + + def decorator(f): + + @six.wraps(f) + def wrapper(*args, **kwargs): + return executor.submit(f, *args, **kwargs) + + return wrapper + + return decorator + + class Server(object): """Server implementation that waits for incoming tasks requests.""" def __init__(self, topic, exchange, executor, endpoints, **kwargs): - self._proxy = proxy.Proxy(topic, 
exchange, self._on_message, **kwargs) + handlers = { + pr.NOTIFY: delayed(executor)(self._process_notify), + pr.REQUEST: delayed(executor)(self._process_request), + } + self._proxy = proxy.Proxy(topic, exchange, handlers, + on_wait=None, **kwargs) self._topic = topic self._executor = executor self._endpoints = dict([(endpoint.name, endpoint) for endpoint in endpoints]) - def _on_message(self, data, message): - """This method is called on incoming message.""" - LOG.debug("Got message: %s", data) - # NOTE(skudriashev): Process all incoming messages only if proxy is - # running, otherwise requeue them. - if self._proxy.is_running: - # NOTE(skudriashev): Process request only if message has been - # acknowledged successfully. - try: - # acknowledge message before processing - message.ack() - except kombu_exc.MessageStateError: - LOG.exception("Failed to acknowledge AMQP message.") - else: - LOG.debug("AMQP message acknowledged.") - try: - msg_type = message.properties['type'] - except KeyError: - LOG.warning("The 'type' message property is missing.") - else: - if msg_type == pr.NOTIFY: - handler = self._process_notify - elif msg_type == pr.REQUEST: - handler = self._process_request - else: - LOG.warning("Unexpected message type: %s", msg_type) - return - # spawn new thread to process request - self._executor.submit(handler, data, message) - else: - try: - # requeue message - message.requeue() - except kombu_exc.MessageStateError: - LOG.exception("Failed to requeue AMQP message.") - else: - LOG.debug("AMQP message requeued.") - @staticmethod def _parse_request(task_cls, task_name, action, arguments, result=None, failures=None, **kwargs): diff --git a/taskflow/tests/unit/worker_based/test_dispatcher.py b/taskflow/tests/unit/worker_based/test_dispatcher.py new file mode 100644 index 00000000..4dae910d --- /dev/null +++ b/taskflow/tests/unit/worker_based/test_dispatcher.py @@ -0,0 +1,77 @@ +# -*- coding: utf-8 -*- + +# Copyright (C) 2014 Yahoo! Inc. All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from kombu import message +import mock + +from taskflow.engines.worker_based import dispatcher +from taskflow import test + + +def mock_acked_message(ack_ok=True, **kwargs): + msg = mock.create_autospec(message.Message, spec_set=True, instance=True, + channel=None, **kwargs) + + def ack_side_effect(*args, **kwargs): + msg.acknowledged = True + + if ack_ok: + msg.ack_log_error.side_effect = ack_side_effect + msg.acknowledged = False + return msg + + +class TestDispatcher(test.MockTestCase): + def test_creation(self): + on_hello = mock.MagicMock() + handlers = {'hello': on_hello} + dispatcher.TypeDispatcher(handlers) + + def test_on_message(self): + on_hello = mock.MagicMock() + handlers = {'hello': on_hello} + d = dispatcher.TypeDispatcher(handlers) + msg = mock_acked_message(properties={'type': 'hello'}) + d.on_message("", msg) + self.assertTrue(on_hello.called) + self.assertTrue(msg.ack_log_error.called) + self.assertTrue(msg.acknowledged) + + def test_on_rejected_message(self): + d = dispatcher.TypeDispatcher({}) + msg = mock_acked_message(properties={'type': 'hello'}) + d.on_message("", msg) + self.assertTrue(msg.reject_log_error.called) + self.assertFalse(msg.acknowledged) + + def test_on_requeue_message(self): + d = dispatcher.TypeDispatcher({}) + d.add_requeue_filter(lambda data, message: True) + msg = mock_acked_message() + d.on_message("", msg) + self.assertTrue(msg.requeue.called) + 
self.assertFalse(msg.acknowledged) + + def test_failed_ack(self): + on_hello = mock.MagicMock() + handlers = {'hello': on_hello} + d = dispatcher.TypeDispatcher(handlers) + msg = mock_acked_message(ack_ok=False, + properties={'type': 'hello'}) + d.on_message("", msg) + self.assertTrue(msg.ack_log_error.called) + self.assertFalse(msg.acknowledged) + self.assertFalse(on_hello.called) diff --git a/taskflow/tests/unit/worker_based/test_executor.py b/taskflow/tests/unit/worker_based/test_executor.py index 36e6c0b7..f837394c 100644 --- a/taskflow/tests/unit/worker_based/test_executor.py +++ b/taskflow/tests/unit/worker_based/test_executor.py @@ -18,7 +18,6 @@ import threading import time from concurrent import futures -from kombu import exceptions as kombu_exc import mock from taskflow.engines.worker_based import executor @@ -86,7 +85,7 @@ class TestWorkerTaskExecutor(test.MockTestCase): master_mock_calls = [ mock.call.Proxy(self.executor_uuid, self.executor_exchange, - ex._on_message, ex._on_wait, url=self.broker_url) + mock.ANY, ex._on_wait, url=self.broker_url) ] self.assertEqual(self.master_mock.mock_calls, master_mock_calls) @@ -94,21 +93,19 @@ class TestWorkerTaskExecutor(test.MockTestCase): response = pr.Response(pr.RUNNING) ex = self.executor() ex._requests_cache[self.task_uuid] = self.request_inst_mock - ex._on_message(response.to_dict(), self.message_mock) + ex._process_response(response.to_dict(), self.message_mock) self.assertEqual(self.request_inst_mock.mock_calls, [mock.call.set_running()]) - self.assertEqual(self.message_mock.mock_calls, [mock.call.ack()]) def test_on_message_response_state_progress(self): response = pr.Response(pr.PROGRESS, progress=1.0) ex = self.executor() ex._requests_cache[self.task_uuid] = self.request_inst_mock - ex._on_message(response.to_dict(), self.message_mock) + ex._process_response(response.to_dict(), self.message_mock) self.assertEqual(self.request_inst_mock.mock_calls, [mock.call.on_progress(progress=1.0)]) - 
self.assertEqual(self.message_mock.mock_calls, [mock.call.ack()]) def test_on_message_response_state_failure(self): failure = misc.Failure.from_exception(Exception('test')) @@ -116,75 +113,49 @@ class TestWorkerTaskExecutor(test.MockTestCase): response = pr.Response(pr.FAILURE, result=failure_dict) ex = self.executor() ex._requests_cache[self.task_uuid] = self.request_inst_mock - ex._on_message(response.to_dict(), self.message_mock) + ex._process_response(response.to_dict(), self.message_mock) self.assertEqual(len(ex._requests_cache), 0) self.assertEqual(self.request_inst_mock.mock_calls, [ mock.call.set_result(result=utils.FailureMatcher(failure)) ]) - self.assertEqual(self.message_mock.mock_calls, [mock.call.ack()]) def test_on_message_response_state_success(self): response = pr.Response(pr.SUCCESS, result=self.task_result, event='executed') ex = self.executor() ex._requests_cache[self.task_uuid] = self.request_inst_mock - ex._on_message(response.to_dict(), self.message_mock) + ex._process_response(response.to_dict(), self.message_mock) self.assertEqual(self.request_inst_mock.mock_calls, [mock.call.set_result(result=self.task_result, event='executed')]) - self.assertEqual(self.message_mock.mock_calls, [mock.call.ack()]) def test_on_message_response_unknown_state(self): response = pr.Response(state='') ex = self.executor() ex._requests_cache[self.task_uuid] = self.request_inst_mock - ex._on_message(response.to_dict(), self.message_mock) + ex._process_response(response.to_dict(), self.message_mock) self.assertEqual(self.request_inst_mock.mock_calls, []) - self.assertEqual(self.message_mock.mock_calls, [mock.call.ack()]) def test_on_message_response_unknown_task(self): self.message_mock.properties['correlation_id'] = '' response = pr.Response(pr.RUNNING) ex = self.executor() ex._requests_cache[self.task_uuid] = self.request_inst_mock - ex._on_message(response.to_dict(), self.message_mock) + ex._process_response(response.to_dict(), self.message_mock) 
self.assertEqual(self.request_inst_mock.mock_calls, []) - self.assertEqual(self.message_mock.mock_calls, [mock.call.ack()]) def test_on_message_response_no_correlation_id(self): self.message_mock.properties = {'type': pr.RESPONSE} response = pr.Response(pr.RUNNING) ex = self.executor() ex._requests_cache[self.task_uuid] = self.request_inst_mock - ex._on_message(response.to_dict(), self.message_mock) + ex._process_response(response.to_dict(), self.message_mock) self.assertEqual(self.request_inst_mock.mock_calls, []) - self.assertEqual(self.message_mock.mock_calls, [mock.call.ack()]) - - @mock.patch('taskflow.engines.worker_based.executor.LOG.warning') - def test_on_message_unknown_type(self, mocked_warning): - self.message_mock.properties = {'correlation_id': self.task_uuid, - 'type': ''} - ex = self.executor() - ex._on_message({}, self.message_mock) - self.assertTrue(mocked_warning.called) - - @mock.patch('taskflow.engines.worker_based.executor.LOG.warning') - def test_on_message_no_type(self, mocked_warning): - self.message_mock.properties = {'correlation_id': self.task_uuid} - ex = self.executor() - ex._on_message({}, self.message_mock) - self.assertTrue(mocked_warning.called) - - @mock.patch('taskflow.engines.worker_based.executor.LOG.exception') - def test_on_message_acknowledge_raises(self, mocked_exception): - self.message_mock.ack.side_effect = kombu_exc.MessageStateError() - self.executor()._on_message({}, self.message_mock) - self.assertTrue(mocked_exception.called) def test_on_wait_task_not_expired(self): ex = self.executor() @@ -222,7 +193,7 @@ class TestWorkerTaskExecutor(test.MockTestCase): self.message_mock.properties['type'] = pr.NOTIFY notify = pr.Notify(topic=self.executor_topic, tasks=[self.task.name]) ex = self.executor() - ex._on_message(notify.to_dict(), self.message_mock) + ex._process_notify(notify.to_dict(), self.message_mock) ex.execute_task(self.task, self.task_uuid, self.task_args) expected_calls = [ @@ -240,7 +211,7 @@ class 
TestWorkerTaskExecutor(test.MockTestCase): self.message_mock.properties['type'] = pr.NOTIFY notify = pr.Notify(topic=self.executor_topic, tasks=[self.task.name]) ex = self.executor() - ex._on_message(notify.to_dict(), self.message_mock) + ex._process_notify(notify.to_dict(), self.message_mock) ex.revert_task(self.task, self.task_uuid, self.task_args, self.task_result, self.task_failures) @@ -273,7 +244,7 @@ class TestWorkerTaskExecutor(test.MockTestCase): self.proxy_inst_mock.publish.side_effect = Exception('Woot!') notify = pr.Notify(topic=self.executor_topic, tasks=[self.task.name]) ex = self.executor() - ex._on_message(notify.to_dict(), self.message_mock) + ex._process_notify(notify.to_dict(), self.message_mock) ex.execute_task(self.task, self.task_uuid, self.task_args) expected_calls = [ diff --git a/taskflow/tests/unit/worker_based/test_message_pump.py b/taskflow/tests/unit/worker_based/test_message_pump.py new file mode 100644 index 00000000..ec15d24d --- /dev/null +++ b/taskflow/tests/unit/worker_based/test_message_pump.py @@ -0,0 +1,80 @@ +# -*- coding: utf-8 -*- + +# Copyright (C) 2014 Yahoo! Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import threading + +import mock + +from taskflow.engines.worker_based import protocol as pr +from taskflow.engines.worker_based import proxy +from taskflow import test + + +class TestMessagePump(test.MockTestCase): + def test_notify(self): + barrier = threading.Event() + + on_notify = mock.MagicMock() + on_notify.side_effect = lambda *args, **kwargs: barrier.set() + + handlers = {pr.NOTIFY: on_notify} + p = proxy.Proxy("test", "test", handlers, + transport='memory', + transport_options={ + 'polling_interval': 0.01, + }) + + t = threading.Thread(target=p.start) + t.daemon = True + t.start() + p.wait() + p.publish(pr.Notify(), 'test') + + barrier.wait(1.0) + self.assertTrue(barrier.is_set()) + p.stop() + t.join() + + self.assertTrue(on_notify.called) + on_notify.assert_called_with({}, mock.ANY) + + def test_response(self): + barrier = threading.Event() + + on_response = mock.MagicMock() + on_response.side_effect = lambda *args, **kwargs: barrier.set() + + handlers = {pr.RESPONSE: on_response} + p = proxy.Proxy("test", "test", handlers, + transport='memory', + transport_options={ + 'polling_interval': 0.01, + }) + + t = threading.Thread(target=p.start) + t.daemon = True + t.start() + p.wait() + resp = pr.Response(pr.RUNNING) + p.publish(resp, 'test') + + barrier.wait(1.0) + self.assertTrue(barrier.is_set()) + p.stop() + t.join() + + self.assertTrue(on_response.called) + on_response.assert_called_with(resp.to_dict(), mock.ANY) diff --git a/taskflow/tests/unit/worker_based/test_proxy.py b/taskflow/tests/unit/worker_based/test_proxy.py index 2cbdc25a..e2dc02e8 100644 --- a/taskflow/tests/unit/worker_based/test_proxy.py +++ b/taskflow/tests/unit/worker_based/test_proxy.py @@ -66,7 +66,6 @@ class TestProxy(test.MockTestCase): self.conn_inst_mock.Consumer.return_value.__exit__ = mock.MagicMock() # other mocking - self.on_message_mock = mock.MagicMock(name='on_message') self.on_wait_mock = mock.MagicMock(name='on_wait') self.master_mock.attach_mock(self.on_wait_mock, 
'on_wait') @@ -85,7 +84,7 @@ class TestProxy(test.MockTestCase): auto_delete=True, channel=self.conn_inst_mock), mock.call.connection.Consumer(queues=self.queue_inst_mock, - callbacks=[self.on_message_mock]), + callbacks=[mock.ANY]), mock.call.connection.Consumer().__enter__(), ] + calls + [ mock.call.connection.Consumer().__exit__(exc_type, mock.ANY, @@ -95,8 +94,8 @@ class TestProxy(test.MockTestCase): def proxy(self, reset_master_mock=False, **kwargs): proxy_kwargs = dict(topic=self.topic, exchange_name=self.exchange_name, - on_message=self.on_message_mock, - url=self.broker_url) + url=self.broker_url, + type_handlers={}) proxy_kwargs.update(kwargs) p = proxy.Proxy(**proxy_kwargs) if reset_master_mock: diff --git a/taskflow/tests/unit/worker_based/test_server.py b/taskflow/tests/unit/worker_based/test_server.py index 2a5ed9f1..2a64c960 100644 --- a/taskflow/tests/unit/worker_based/test_server.py +++ b/taskflow/tests/unit/worker_based/test_server.py @@ -14,7 +14,6 @@ # License for the specific language governing permissions and limitations # under the License. 
-from kombu import exceptions as exc import mock import six @@ -86,9 +85,9 @@ class TestServer(test.MockTestCase): # check calls master_mock_calls = [ mock.call.Proxy(self.server_topic, self.server_exchange, - s._on_message, url=self.broker_url) + mock.ANY, url=self.broker_url, on_wait=mock.ANY) ] - self.assertEqual(self.master_mock.mock_calls, master_mock_calls) + self.master_mock.assert_has_calls(master_mock_calls) self.assertEqual(len(s._endpoints), 3) def test_creation_with_endpoints(self): @@ -97,72 +96,11 @@ class TestServer(test.MockTestCase): # check calls master_mock_calls = [ mock.call.Proxy(self.server_topic, self.server_exchange, - s._on_message, url=self.broker_url) + mock.ANY, url=self.broker_url, on_wait=mock.ANY) ] - self.assertEqual(self.master_mock.mock_calls, master_mock_calls) + self.master_mock.assert_has_calls(master_mock_calls) self.assertEqual(len(s._endpoints), len(self.endpoints)) - def test_on_message_proxy_running_ack_success(self): - request = self.make_request() - s = self.server(reset_master_mock=True) - s._on_message(request, self.message_mock) - - # check calls - master_mock_calls = [ - mock.call.message.ack(), - mock.call.executor.submit(s._process_request, request, - self.message_mock) - ] - self.assertEqual(self.master_mock.mock_calls, master_mock_calls) - - def test_on_message_proxy_running_ack_failure(self): - self.message_mock.ack.side_effect = exc.MessageStateError('Woot!') - s = self.server(reset_master_mock=True) - s._on_message({}, self.message_mock) - - # check calls - master_mock_calls = [ - mock.call.message.ack() - ] - self.assertEqual(self.master_mock.mock_calls, master_mock_calls) - - def test_on_message_proxy_not_running_requeue_success(self): - self.proxy_inst_mock.is_running = False - s = self.server(reset_master_mock=True) - s._on_message({}, self.message_mock) - - # check calls - master_mock_calls = [ - mock.call.message.requeue() - ] - self.assertEqual(self.master_mock.mock_calls, master_mock_calls) - - def 
test_on_message_proxy_not_running_requeue_failure(self): - self.message_mock.requeue.side_effect = exc.MessageStateError('Woot!') - self.proxy_inst_mock.is_running = False - s = self.server(reset_master_mock=True) - s._on_message({}, self.message_mock) - - # check calls - master_mock_calls = [ - mock.call.message.requeue() - ] - self.assertEqual(self.master_mock.mock_calls, master_mock_calls) - - @mock.patch('taskflow.engines.worker_based.server.LOG.warning') - def test_on_message_unknown_type(self, mocked_warning): - self.message_mock.properties['type'] = '' - s = self.server() - s._on_message({}, self.message_mock) - self.assertTrue(mocked_warning.called) - - @mock.patch('taskflow.engines.worker_based.server.LOG.warning') - def test_on_message_no_type(self, mocked_warning): - self.message_mock.properties = {} - s = self.server() - s._on_message({}, self.message_mock) - self.assertTrue(mocked_warning.called) - def test_parse_request(self): request = self.make_request() task_cls, action, task_args = server.Server._parse_request(**request) From 9bce85dc66c5ef1be6ba0e504baae104903ed377 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Fri, 20 Jun 2014 18:28:49 -0700 Subject: [PATCH 160/188] Allow handlers to provide validation callables In order to reject messages before they are processed make it possible to provide a handler pair that will validate the incoming message and either reject or allow the message to be passed to the provided handler. 
Part of blueprint wbe-message-validation Change-Id: Ibd6ee40020c6b98283f40d5bd59e8950d63b7f71 --- taskflow/engines/worker_based/dispatcher.py | 44 ++++++++++++++------- taskflow/exceptions.py | 4 ++ 2 files changed, 34 insertions(+), 14 deletions(-) diff --git a/taskflow/engines/worker_based/dispatcher.py b/taskflow/engines/worker_based/dispatcher.py index 4983f6d5..9ff8ac10 100644 --- a/taskflow/engines/worker_based/dispatcher.py +++ b/taskflow/engines/worker_based/dispatcher.py @@ -19,6 +19,8 @@ import logging from kombu import exceptions as kombu_exc import six +from taskflow import exceptions as excp + LOG = logging.getLogger(__name__) @@ -66,6 +68,32 @@ class TypeDispatcher(object): else: LOG.debug("AMQP message %r requeued.", message.delivery_tag) + def _process_message(self, data, message, message_type): + handler = self._handlers.get(message_type) + if handler is None: + message.reject_log_error(logger=LOG, + errors=(kombu_exc.MessageStateError,)) + LOG.warning("Unexpected message type: '%s' in message" + " %r", message_type, message.delivery_tag) + else: + if isinstance(handler, (tuple, list)): + handler, validator = handler + try: + validator(data) + except excp.InvalidFormat as e: + message.reject_log_error( + logger=LOG, errors=(kombu_exc.MessageStateError,)) + LOG.warn("Message: %r, '%s' was rejected due to it being" + " in an invalid format: %s", + message.delivery_tag, message_type, e) + return + message.ack_log_error(logger=LOG, + errors=(kombu_exc.MessageStateError,)) + if message.acknowledged: + LOG.debug("AMQP message %r acknowledged.", + message.delivery_tag) + handler(data, message) + def on_message(self, data, message): """This method is called on incoming messages.""" LOG.debug("Got message: %r", message.delivery_tag) @@ -74,23 +102,11 @@ class TypeDispatcher(object): errors=(kombu_exc.MessageStateError,)) else: try: - msg_type = message.properties['type'] + message_type = message.properties['type'] except KeyError: message.reject_log_error( 
logger=LOG, errors=(kombu_exc.MessageStateError,)) LOG.warning("The 'type' message property is missing" " in message %r", message.delivery_tag) else: - handler = self._handlers.get(msg_type) - if handler is None: - message.reject_log_error( - logger=LOG, errors=(kombu_exc.MessageStateError,)) - LOG.warning("Unexpected message type: '%s' in message" - " %r", msg_type, message.delivery_tag) - else: - message.ack_log_error( - logger=LOG, errors=(kombu_exc.MessageStateError,)) - if message.acknowledged: - LOG.debug("AMQP message %r acknowledged.", - message.delivery_tag) - handler(data, message) + self._process_message(data, message, message_type) diff --git a/taskflow/exceptions.py b/taskflow/exceptions.py index 55c889ca..e5b9a9c2 100644 --- a/taskflow/exceptions.py +++ b/taskflow/exceptions.py @@ -129,6 +129,10 @@ class MultipleChoices(TaskFlowException): """Raised when some decision can't be made due to many possible choices.""" +class InvalidFormat(TaskFlowException): + """Raised when some object/entity is not in the expected format.""" + + # Others. class WrappedFailure(Exception): From 5237533a8cfa12153a85a76d1978544093f31a2b Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Mon, 30 Jun 2014 12:51:36 -0700 Subject: [PATCH 161/188] WBE notification message validation Add send and receive validation of the notify message that is sent between executors and workers to be more robust around invalid message formats being sent and received. 
Part of blueprint wbe-message-validation Change-Id: I7300d6f2d00e48c4f989c7f958a028bdff4afdd4 --- requirements.txt | 1 + taskflow/engines/worker_based/executor.py | 6 ++- taskflow/engines/worker_based/protocol.py | 45 +++++++++++++++++++++++ taskflow/engines/worker_based/server.py | 5 ++- 4 files changed, 55 insertions(+), 2 deletions(-) diff --git a/requirements.txt b/requirements.txt index 21fc544a..764c2ad2 100644 --- a/requirements.txt +++ b/requirements.txt @@ -10,3 +10,4 @@ Babel>=1.3 stevedore>=0.14 # Backport for concurrent.futures which exists in 3.2+ futures>=2.1.3 +jsonschema>=2.0.0,<3.0.0 diff --git a/taskflow/engines/worker_based/executor.py b/taskflow/engines/worker_based/executor.py index 8bd799c4..ba599e69 100644 --- a/taskflow/engines/worker_based/executor.py +++ b/taskflow/engines/worker_based/executor.py @@ -14,6 +14,7 @@ # License for the specific language governing permissions and limitations # under the License. +import functools import logging from taskflow.engines.action_engine import executor @@ -74,7 +75,10 @@ class WorkerTaskExecutor(executor.TaskExecutorBase): self._requests_cache = cache.RequestsCache() self._workers_cache = cache.WorkersCache() handlers = { - pr.NOTIFY: self._process_notify, + pr.NOTIFY: [ + self._process_notify, + functools.partial(pr.Notify.validate, response=True), + ], pr.RESPONSE: self._process_response, } self._proxy = proxy.Proxy(uuid, exchange, handlers, diff --git a/taskflow/engines/worker_based/protocol.py b/taskflow/engines/worker_based/protocol.py index 1eb43227..c107af65 100644 --- a/taskflow/engines/worker_based/protocol.py +++ b/taskflow/engines/worker_based/protocol.py @@ -17,9 +17,12 @@ import abc from concurrent import futures +import jsonschema +from jsonschema import exceptions as schema_exc import six from taskflow.engines.action_engine import executor +from taskflow import exceptions as excp from taskflow.types import time from taskflow.utils import misc from taskflow.utils import reflection @@ 
-78,12 +81,54 @@ class Notify(Message): """Represents notify message type.""" TYPE = NOTIFY + # NOTE(harlowja): the executor (the entity who initially requests a worker + # to send back a notification response) schema is different than the + # worker response schema (that's why there are two schemas here). + _RESPONSE_SCHEMA = { + "type": "object", + 'properties': { + 'topic': { + "type": "string", + }, + 'tasks': { + "type": "array", + "items": { + "type": "string", + }, + } + }, + "required": ["topic", 'tasks'], + "additionalProperties": False, + } + _SENDER_SCHEMA = { + "type": "object", + "additionalProperties": False, + } + def __init__(self, **data): self._data = data def to_dict(self): return self._data + @classmethod + def validate(cls, data, response): + if response: + schema = cls._RESPONSE_SCHEMA + else: + schema = cls._SENDER_SCHEMA + try: + jsonschema.validate(data, schema) + except schema_exc.ValidationError as e: + if response: + raise excp.InvalidFormat("%s message response data not of the" + " expected format: %s" + % (cls.TYPE, e.message), e) + else: + raise excp.InvalidFormat("%s message sender data not of the" + " expected format: %s" + % (cls.TYPE, e.message), e) + class Request(Message): """Represents request with execution results. 
diff --git a/taskflow/engines/worker_based/server.py b/taskflow/engines/worker_based/server.py index 7f10113f..1e86fb23 100644 --- a/taskflow/engines/worker_based/server.py +++ b/taskflow/engines/worker_based/server.py @@ -45,7 +45,10 @@ class Server(object): def __init__(self, topic, exchange, executor, endpoints, **kwargs): handlers = { - pr.NOTIFY: delayed(executor)(self._process_notify), + pr.NOTIFY: [ + delayed(executor)(self._process_notify), + functools.partial(pr.Notify.validate, response=False), + ], pr.REQUEST: delayed(executor)(self._process_request), } self._proxy = proxy.Proxy(topic, exchange, handlers, From 0eb9fb90ab942eb6be648b002d4356f9c6fa5602 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Mon, 30 Jun 2014 16:43:54 -0700 Subject: [PATCH 162/188] WBE response message validation Add receive validation of the response message sent by workers for progress updates and for result completion to be more robust around invalid message formats being sent and received. Part of blueprint wbe-message-validation Change-Id: I583b9d6344224724d147f2a5b10ddbd23d6a5fdc --- taskflow/engines/worker_based/executor.py | 5 +- taskflow/engines/worker_based/protocol.py | 61 ++++++++++++++++++++++- 2 files changed, 64 insertions(+), 2 deletions(-) diff --git a/taskflow/engines/worker_based/executor.py b/taskflow/engines/worker_based/executor.py index ba599e69..c2b52094 100644 --- a/taskflow/engines/worker_based/executor.py +++ b/taskflow/engines/worker_based/executor.py @@ -79,7 +79,10 @@ class WorkerTaskExecutor(executor.TaskExecutorBase): self._process_notify, functools.partial(pr.Notify.validate, response=True), ], - pr.RESPONSE: self._process_response, + pr.RESPONSE: [ + self._process_response, + pr.Response.validate, + ], } self._proxy = proxy.Proxy(uuid, exchange, handlers, self._on_wait, **kwargs) diff --git a/taskflow/engines/worker_based/protocol.py b/taskflow/engines/worker_based/protocol.py index c107af65..3c11c637 100644 --- 
a/taskflow/engines/worker_based/protocol.py +++ b/taskflow/engines/worker_based/protocol.py @@ -27,7 +27,8 @@ from taskflow.types import time from taskflow.utils import misc from taskflow.utils import reflection -# NOTE(skudriashev): This is protocol events, not related to the task states. +# NOTE(skudriashev): This is protocol states and events, which are not +# related to task states. WAITING = 'WAITING' PENDING = 'PENDING' RUNNING = 'RUNNING' @@ -35,6 +36,8 @@ SUCCESS = 'SUCCESS' FAILURE = 'FAILURE' PROGRESS = 'PROGRESS' +_ALL_STATES = (WAITING, PENDING, RUNNING, SUCCESS, FAILURE, PROGRESS) + # Remote task actions. EXECUTE = 'execute' REVERT = 'revert' @@ -222,6 +225,53 @@ class Request(Message): class Response(Message): """Represents response message type.""" TYPE = RESPONSE + _SCHEMA = { + "type": "object", + 'properties': { + 'state': { + "type": "string", + "enum": list(_ALL_STATES), + }, + 'data': { + "anyOf": [ + { + "$ref": "#/definitions/progress", + }, + { + "$ref": "#/definitions/completion", + }, + ], + }, + }, + "required": ["state", 'data'], + "additionalProperties": False, + "definitions": { + "progress": { + "type": "object", + "properties": { + 'progress': { + 'type': 'number', + }, + 'event_data': { + 'type': 'object', + }, + }, + "required": ["progress", 'event_data'], + "additionalProperties": False, + }, + "completion": { + "type": "object", + "properties": { + # This can be any arbitrary type that a task returns, so + # that's why we can't be strict about what type it is since + # any of the json serializable types are allowed. 
+ "result": {}, + }, + "required": ["result"], + "additionalProperties": False, + }, + }, + } def __init__(self, state, **data): self._state = state @@ -245,3 +295,12 @@ class Response(Message): def to_dict(self): return dict(state=self._state, data=self._data) + + @classmethod + def validate(cls, data): + try: + jsonschema.validate(data, cls._SCHEMA) + except schema_exc.ValidationError as e: + raise excp.InvalidFormat("%s message response data not of the" + " expected format: %s" + % (cls.TYPE, e.message), e) From b6c0a854425215356ff2171653a3412e158b3a15 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Mon, 30 Jun 2014 17:53:42 -0700 Subject: [PATCH 163/188] WBE request message validation Add receive validation of the request message sent by executors to workers to initiate work requests so that we can be more robust around invalid message formats being sent and received. Part of blueprint wbe-message-validation Change-Id: If7fc4c870252b6ef29e626a874e42c82d3879512 --- taskflow/engines/worker_based/protocol.py | 56 ++++++++++++++++++++++- taskflow/engines/worker_based/server.py | 5 +- 2 files changed, 58 insertions(+), 3 deletions(-) diff --git a/taskflow/engines/worker_based/protocol.py b/taskflow/engines/worker_based/protocol.py index 3c11c637..db693ecc 100644 --- a/taskflow/engines/worker_based/protocol.py +++ b/taskflow/engines/worker_based/protocol.py @@ -67,6 +67,12 @@ NOTIFY = 'NOTIFY' REQUEST = 'REQUEST' RESPONSE = 'RESPONSE' +# Special jsonschema validation types/adjustments. 
+_SCHEMA_TYPES = { + # See: https://github.com/Julian/jsonschema/issues/148 + 'array': (list, tuple), +} + @six.add_metaclass(abc.ABCMeta) class Message(object): @@ -121,7 +127,7 @@ class Notify(Message): else: schema = cls._SENDER_SCHEMA try: - jsonschema.validate(data, schema) + jsonschema.validate(data, schema, types=_SCHEMA_TYPES) except schema_exc.ValidationError as e: if response: raise excp.InvalidFormat("%s message response data not of the" @@ -140,6 +146,43 @@ class Request(Message): given timeout. """ TYPE = REQUEST + _SCHEMA = { + "type": "object", + 'properties': { + # These two are typically only sent on revert actions (that is + # why we are not including them in the required section). + 'result': {}, + 'failures': { + "type": "object", + }, + 'task_cls': { + 'type': 'string', + }, + 'task_name': { + 'type': 'string', + }, + 'task_version': { + "oneOf": [ + { + "type": "string", + }, + { + "type": "array", + }, + ], + }, + 'action': { + "type": "string", + "enum": list(six.iterkeys(ACTION_TO_EVENT)), + }, + # Keyword arguments that end up in the revert() or execute() + # method of the remote task. 
+ 'arguments': { + "type": "object", + }, + }, + 'required': ['task_cls', 'task_name', 'task_version', 'action'], + } def __init__(self, task, uuid, action, arguments, progress_callback, timeout, **kwargs): @@ -221,6 +264,15 @@ class Request(Message): def on_progress(self, event_data, progress): self._progress_callback(self._task, event_data, progress) + @classmethod + def validate(cls, data): + try: + jsonschema.validate(data, cls._SCHEMA, types=_SCHEMA_TYPES) + except schema_exc.ValidationError as e: + raise excp.InvalidFormat("%s message response data not of the" + " expected format: %s" + % (cls.TYPE, e.message), e) + class Response(Message): """Represents response message type.""" @@ -299,7 +351,7 @@ class Response(Message): @classmethod def validate(cls, data): try: - jsonschema.validate(data, cls._SCHEMA) + jsonschema.validate(data, cls._SCHEMA, types=_SCHEMA_TYPES) except schema_exc.ValidationError as e: raise excp.InvalidFormat("%s message response data not of the" " expected format: %s" diff --git a/taskflow/engines/worker_based/server.py b/taskflow/engines/worker_based/server.py index 1e86fb23..8b175783 100644 --- a/taskflow/engines/worker_based/server.py +++ b/taskflow/engines/worker_based/server.py @@ -49,7 +49,10 @@ class Server(object): delayed(executor)(self._process_notify), functools.partial(pr.Notify.validate, response=False), ], - pr.REQUEST: delayed(executor)(self._process_request), + pr.REQUEST: [ + delayed(executor)(self._process_request), + pr.Request.validate, + ], } self._proxy = proxy.Proxy(topic, exchange, handlers, on_wait=None, **kwargs) From 4ff2faa8b0b35717f7f78b54ac981e30d3c47ad6 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Tue, 1 Jul 2014 14:38:23 -0700 Subject: [PATCH 164/188] Add basic WBE validation sanity tests Add some basic tests that verify that our new message format validation is working as expected. 
Part of blueprint wbe-message-validation Change-Id: I64821407e52977a0e0ae0dff09f4b440717290cb --- .../tests/unit/worker_based/test_protocol.py | 63 +++++++++++++++++++ 1 file changed, 63 insertions(+) diff --git a/taskflow/tests/unit/worker_based/test_protocol.py b/taskflow/tests/unit/worker_based/test_protocol.py index af7e6490..f94d03d4 100644 --- a/taskflow/tests/unit/worker_based/test_protocol.py +++ b/taskflow/tests/unit/worker_based/test_protocol.py @@ -18,11 +18,74 @@ from concurrent import futures import mock from taskflow.engines.worker_based import protocol as pr +from taskflow import exceptions as excp +from taskflow.openstack.common import uuidutils from taskflow import test from taskflow.tests import utils from taskflow.utils import misc +class TestProtocolValidation(test.TestCase): + def test_send_notify(self): + msg = pr.Notify() + pr.Notify.validate(msg.to_dict(), False) + + def test_send_notify_invalid(self): + msg = { + 'all your base': 'are belong to us', + } + self.assertRaises(excp.InvalidFormat, + pr.Notify.validate, msg, False) + + def test_reply_notify(self): + msg = pr.Notify(topic="bob", tasks=['a', 'b', 'c']) + pr.Notify.validate(msg.to_dict(), True) + + def test_reply_notify_invalid(self): + msg = { + 'topic': {}, + 'tasks': 'not yours', + } + self.assertRaises(excp.InvalidFormat, + pr.Notify.validate, msg, True) + + def test_request(self): + msg = pr.Request(utils.DummyTask("hi"), uuidutils.generate_uuid(), + pr.EXECUTE, {}, None, 1.0) + pr.Request.validate(msg.to_dict()) + + def test_request_invalid(self): + msg = { + 'task_name': 1, + 'task_cls': False, + 'arguments': [], + } + self.assertRaises(excp.InvalidFormat, pr.Request.validate, msg) + + def test_request_invalid_action(self): + msg = pr.Request(utils.DummyTask("hi"), uuidutils.generate_uuid(), + pr.EXECUTE, {}, None, 1.0) + msg = msg.to_dict() + msg['action'] = 'NOTHING' + self.assertRaises(excp.InvalidFormat, pr.Request.validate, msg) + + def test_response_progress(self): + 
msg = pr.Response(pr.PROGRESS, progress=0.5, event_data={}) + pr.Response.validate(msg.to_dict()) + + def test_response_completion(self): + msg = pr.Response(pr.SUCCESS, result=1) + pr.Response.validate(msg.to_dict()) + + def test_response_mixed_invalid(self): + msg = pr.Response(pr.PROGRESS, progress=0.5, event_data={}, result=1) + self.assertRaises(excp.InvalidFormat, pr.Response.validate, msg) + + def test_response_bad_state(self): + msg = pr.Response('STUFF') + self.assertRaises(excp.InvalidFormat, pr.Response.validate, msg) + + class TestProtocol(test.TestCase): def setUp(self): From 630364b4c13ae6f1aa6cdf9a2219239593e639dd Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Fri, 20 Jun 2014 22:22:17 -0700 Subject: [PATCH 165/188] Improve WBE testing coverage Test more of the message pump process and verify that messages are received and sent correctly by using a latch type and waiting for the desired number of messages to be triggered. Test the full server and executor pipeline by setting up threads and a in-memory queue/exchange that is used by these threads for task execution. Change-Id: I62296f12eee6fa00559e84068ec5ee2a6d4bc0dc --- taskflow/engines/worker_based/executor.py | 36 ++++++- .../unit/worker_based/test_message_pump.py | 84 ++++++++++++++-- .../tests/unit/worker_based/test_pipeline.py | 98 +++++++++++++++++++ taskflow/types/latch.py | 69 +++++++++++++ 4 files changed, 278 insertions(+), 9 deletions(-) create mode 100644 taskflow/tests/unit/worker_based/test_pipeline.py create mode 100644 taskflow/types/latch.py diff --git a/taskflow/engines/worker_based/executor.py b/taskflow/engines/worker_based/executor.py index 8bd799c4..c4a669bb 100644 --- a/taskflow/engines/worker_based/executor.py +++ b/taskflow/engines/worker_based/executor.py @@ -15,6 +15,7 @@ # under the License. 
import logging +import threading from taskflow.engines.action_engine import executor from taskflow.engines.worker_based import cache @@ -73,6 +74,7 @@ class WorkerTaskExecutor(executor.TaskExecutorBase): self._topics = topics self._requests_cache = cache.RequestsCache() self._workers_cache = cache.WorkersCache() + self._workers_arrival = threading.Condition() handlers = { pr.NOTIFY: self._process_notify, pr.RESPONSE: self._process_response, @@ -91,7 +93,12 @@ class WorkerTaskExecutor(executor.TaskExecutorBase): tasks = notify['tasks'] # add worker info to the cache - self._workers_cache[topic] = tasks + self._workers_arrival.acquire() + try: + self._workers_cache[topic] = tasks + self._workers_arrival.notify_all() + finally: + self._workers_arrival.release() # publish waiting requests for request in self._requests_cache.get_waiting_requests(tasks): @@ -195,6 +202,33 @@ class WorkerTaskExecutor(executor.TaskExecutorBase): """Wait for futures returned by this executor to complete.""" return async_utils.wait_for_any(fs, timeout) + def wait_for_workers(self, workers=1, timeout=None): + """Waits for geq workers to notify they are ready to do work. + + NOTE(harlowja): if a timeout is provided this function will wait + until that timeout expires, if the amount of workers does not reach + the desired amount of workers before the timeout expires then this will + return how many workers are still needed, otherwise it will + return zero. 
+ """ + if workers <= 0: + raise ValueError("Worker amount must be greater than zero") + w = None + if timeout is not None: + w = tt.StopWatch(timeout).start() + self._workers_arrival.acquire() + try: + while len(self._workers_cache) < workers: + if w is not None and w.expired(): + return workers - len(self._workers_cache) + timeout = None + if w is not None: + timeout = w.leftover() + self._workers_arrival.wait(timeout) + return 0 + finally: + self._workers_arrival.release() + def start(self): """Starts proxy thread and associated topic notification thread.""" if not _is_alive(self._proxy_thread): diff --git a/taskflow/tests/unit/worker_based/test_message_pump.py b/taskflow/tests/unit/worker_based/test_message_pump.py index ec15d24d..10116c21 100644 --- a/taskflow/tests/unit/worker_based/test_message_pump.py +++ b/taskflow/tests/unit/worker_based/test_message_pump.py @@ -20,7 +20,14 @@ import mock from taskflow.engines.worker_based import protocol as pr from taskflow.engines.worker_based import proxy +from taskflow.openstack.common import uuidutils from taskflow import test +from taskflow.tests import utils as test_utils +from taskflow.types import latch + +TEST_EXCHANGE, TEST_TOPIC = ('test-exchange', 'test-topic') +BARRIER_WAIT_TIMEOUT = 1.0 +POLLING_INTERVAL = 0.01 class TestMessagePump(test.MockTestCase): @@ -31,19 +38,19 @@ class TestMessagePump(test.MockTestCase): on_notify.side_effect = lambda *args, **kwargs: barrier.set() handlers = {pr.NOTIFY: on_notify} - p = proxy.Proxy("test", "test", handlers, + p = proxy.Proxy(TEST_TOPIC, TEST_EXCHANGE, handlers, transport='memory', transport_options={ - 'polling_interval': 0.01, + 'polling_interval': POLLING_INTERVAL, }) t = threading.Thread(target=p.start) t.daemon = True t.start() p.wait() - p.publish(pr.Notify(), 'test') + p.publish(pr.Notify(), TEST_TOPIC) - barrier.wait(1.0) + barrier.wait(BARRIER_WAIT_TIMEOUT) self.assertTrue(barrier.is_set()) p.stop() t.join() @@ -58,10 +65,10 @@ class 
TestMessagePump(test.MockTestCase): on_response.side_effect = lambda *args, **kwargs: barrier.set() handlers = {pr.RESPONSE: on_response} - p = proxy.Proxy("test", "test", handlers, + p = proxy.Proxy(TEST_TOPIC, TEST_EXCHANGE, handlers, transport='memory', transport_options={ - 'polling_interval': 0.01, + 'polling_interval': POLLING_INTERVAL, }) t = threading.Thread(target=p.start) @@ -69,12 +76,73 @@ class TestMessagePump(test.MockTestCase): t.start() p.wait() resp = pr.Response(pr.RUNNING) - p.publish(resp, 'test') + p.publish(resp, TEST_TOPIC) - barrier.wait(1.0) + barrier.wait(BARRIER_WAIT_TIMEOUT) self.assertTrue(barrier.is_set()) p.stop() t.join() self.assertTrue(on_response.called) on_response.assert_called_with(resp.to_dict(), mock.ANY) + + def test_multi_message(self): + message_count = 30 + barrier = latch.Latch(message_count) + countdown = lambda data, message: barrier.countdown() + + on_notify = mock.MagicMock() + on_notify.side_effect = countdown + + on_response = mock.MagicMock() + on_response.side_effect = countdown + + on_request = mock.MagicMock() + on_request.side_effect = countdown + + handlers = { + pr.NOTIFY: on_notify, + pr.RESPONSE: on_response, + pr.REQUEST: on_request, + } + p = proxy.Proxy(TEST_TOPIC, TEST_EXCHANGE, handlers, + transport='memory', + transport_options={ + 'polling_interval': POLLING_INTERVAL, + }) + + t = threading.Thread(target=p.start) + t.daemon = True + t.start() + p.wait() + + for i in range(0, message_count): + j = i % 3 + if j == 0: + p.publish(pr.Notify(), TEST_TOPIC) + elif j == 1: + p.publish(pr.Response(pr.RUNNING), TEST_TOPIC) + else: + p.publish(pr.Request(test_utils.DummyTask("dummy_%s" % i), + uuidutils.generate_uuid(), + pr.EXECUTE, [], None, None), TEST_TOPIC) + + barrier.wait(BARRIER_WAIT_TIMEOUT) + self.assertEqual(0, barrier.needed) + p.stop() + t.join() + + self.assertTrue(on_notify.called) + self.assertTrue(on_response.called) + self.assertTrue(on_request.called) + + self.assertEqual(10, 
on_notify.call_count) + self.assertEqual(10, on_response.call_count) + self.assertEqual(10, on_request.call_count) + + call_count = sum([ + on_notify.call_count, + on_response.call_count, + on_request.call_count, + ]) + self.assertEqual(message_count, call_count) diff --git a/taskflow/tests/unit/worker_based/test_pipeline.py b/taskflow/tests/unit/worker_based/test_pipeline.py new file mode 100644 index 00000000..8809785e --- /dev/null +++ b/taskflow/tests/unit/worker_based/test_pipeline.py @@ -0,0 +1,98 @@ +# -*- coding: utf-8 -*- + +# Copyright (C) 2014 Yahoo! Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import threading + +from concurrent import futures + +from taskflow.engines.worker_based import endpoint +from taskflow.engines.worker_based import executor as worker_executor +from taskflow.engines.worker_based import server as worker_server +from taskflow.openstack.common import uuidutils +from taskflow import test +from taskflow.tests import utils as test_utils +from taskflow.utils import misc + + +TEST_EXCHANGE, TEST_TOPIC = ('test-exchange', 'test-topic') +WAIT_TIMEOUT = 1.0 +POLLING_INTERVAL = 0.01 + + +class TestPipeline(test.MockTestCase): + def _fetch_server(self, task_classes): + endpoints = [] + for cls in task_classes: + endpoints.append(endpoint.Endpoint(cls)) + server = worker_server.Server( + TEST_TOPIC, TEST_EXCHANGE, + futures.ThreadPoolExecutor(1), endpoints, + transport='memory', + transport_options={ + 'polling_interval': POLLING_INTERVAL, + }) + server_thread = threading.Thread(target=server.start) + server_thread.daemon = True + return (server, server_thread) + + def _fetch_executor(self): + executor = worker_executor.WorkerTaskExecutor( + uuidutils.generate_uuid(), + TEST_EXCHANGE, + [TEST_TOPIC], + transport='memory', + transport_options={ + 'polling_interval': POLLING_INTERVAL, + }) + return executor + + def _start_components(self, task_classes): + server, server_thread = self._fetch_server(task_classes) + executor = self._fetch_executor() + self.addCleanup(executor.stop) + self.addCleanup(server_thread.join) + self.addCleanup(server.stop) + executor.start() + server_thread.start() + server.wait() + return (executor, server) + + def test_execution_pipeline(self): + executor, server = self._start_components([test_utils.TaskOneReturn]) + self.assertEqual(0, executor.wait_for_workers(timeout=WAIT_TIMEOUT)) + + t = test_utils.TaskOneReturn() + f = executor.execute_task(t, uuidutils.generate_uuid(), {}) + executor.wait_for_any([f]) + + t2, _action, result = f.result() + + self.assertEqual(1, result) + self.assertEqual(t, t2) + + def 
test_execution_failure_pipeline(self): + task_classes = [ + test_utils.TaskWithFailure, + ] + executor, server = self._start_components(task_classes) + + t = test_utils.TaskWithFailure() + f = executor.execute_task(t, uuidutils.generate_uuid(), {}) + executor.wait_for_any([f]) + + _t2, _action, result = f.result() + self.assertIsInstance(result, misc.Failure) + self.assertEqual(RuntimeError, result.check(RuntimeError)) diff --git a/taskflow/types/latch.py b/taskflow/types/latch.py new file mode 100644 index 00000000..4451fc32 --- /dev/null +++ b/taskflow/types/latch.py @@ -0,0 +1,69 @@ +# -*- coding: utf-8 -*- + +# Copyright (C) 2014 Yahoo! Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import threading + +from taskflow.types import time as tt + + +class Latch(object): + """A class that ensures N-arrivals occur before unblocking.""" + + def __init__(self, count): + count = int(count) + if count <= 0: + raise ValueError("Count must be greater than zero") + self._count = count + self._cond = threading.Condition() + + @property + def needed(self): + """Returns how many decrements are needed before latch is released.""" + return max(0, self._count) + + def countdown(self): + """Decrements the internal counter due to an arrival.""" + self._cond.acquire() + try: + self._count -= 1 + if self._count <= 0: + self._cond.notify_all() + finally: + self._cond.release() + + def wait(self, timeout=None): + """Waits until the latch is released. 
+ + NOTE(harlowja): if a timeout is provided this function will wait + until that timeout expires, if the latch has been released before the + timeout expires then this will return True, otherwise it will + return False. + """ + w = None + if timeout is not None: + w = tt.StopWatch(timeout).start() + self._cond.acquire() + try: + while self._count > 0: + if w is not None: + if w.expired(): + return False + else: + timeout = w.leftover() + self._cond.wait(timeout) + return True + finally: + self._cond.release() From b6139b799df28f9f5dcf0060f382cac8325c3e13 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Wed, 16 Jul 2014 18:42:49 -0700 Subject: [PATCH 166/188] Use a check + create transaction when claiming a job To avoid the case where a secondary entity is able to consume and remove a job (and its associated lock) while another entity is in the process of claiming a job by getting that same lock we want to avoid this by having a transaction be used that will enforce the job still exists before the lock is created. This ensures that a lock is not created on a job that does not exist in the first place. 
Fixes bug 1343029 Change-Id: I6c45cbb9dfb3a479658f4e605d37a6d3ac04d1b8 --- taskflow/jobs/backends/impl_zookeeper.py | 49 +++++++++++++++++------- 1 file changed, 36 insertions(+), 13 deletions(-) diff --git a/taskflow/jobs/backends/impl_zookeeper.py b/taskflow/jobs/backends/impl_zookeeper.py index be305a12..4260ab91 100644 --- a/taskflow/jobs/backends/impl_zookeeper.py +++ b/taskflow/jobs/backends/impl_zookeeper.py @@ -476,6 +476,17 @@ class ZookeeperJobBoard(jobboard.NotifyingJobBoard): return job def claim(self, job, who): + def _unclaimable_try_find_owner(cause): + try: + owner = self.find_owner(job) + except Exception: + owner = None + if owner: + msg = "Job %s already claimed by '%s'" % (job.uuid, owner) + else: + msg = "Job %s already claimed" % (job.uuid) + return excp.UnclaimableJob(msg, cause) + _check_who(who) with self._wrap(job.uuid, job.path, "Claiming failure: %s"): # NOTE(harlowja): post as json which will allow for future changes @@ -483,21 +494,33 @@ class ZookeeperJobBoard(jobboard.NotifyingJobBoard): value = jsonutils.dumps({ 'owner': who, }) + # Ensure the target job is still existent (at the right version). + job_data, job_stat = self._client.get(job.path) + txn = self._client.transaction() + # This will abort (and not create the lock) if the job has been + # removed (somehow...) or updated by someone else to a different + # version... + txn.check(job.path, version=job_stat.version) + txn.create(job.lock_path, value=misc.binary_encode(value), + ephemeral=True) try: - self._client.create(job.lock_path, - value=misc.binary_encode(value), - ephemeral=True) - except k_exceptions.NodeExistsException: - # Try to see if we can find who the owner really is... 
- try: - owner = self.find_owner(job) - except Exception: - owner = None - if owner: - msg = "Job %s already claimed by '%s'" % (job.uuid, owner) + kazoo_utils.checked_commit(txn) + except k_exceptions.NodeExistsError as e: + raise _unclaimable_try_find_owner(e) + except kazoo_utils.KazooTransactionException as e: + if len(e.failures) < 2: + raise else: - msg = "Job %s already claimed" % (job.uuid) - raise excp.UnclaimableJob(msg) + if isinstance(e.failures[0], k_exceptions.NoNodeError): + raise excp.NotFound( + "Job %s not found to be claimed" % job.uuid, + e.failures[0]) + if isinstance(e.failures[1], k_exceptions.NodeExistsError): + raise _unclaimable_try_find_owner(e.failures[1]) + else: + raise excp.UnclaimableJob( + "Job %s claim failed due to transaction" + " not succeeding" % (job.uuid), e) @contextlib.contextmanager def _wrap(self, job_uuid, job_path, From f7daf2217735259622886917d7bb7c33b122408d Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Mon, 28 Jul 2014 15:15:53 -0700 Subject: [PATCH 167/188] Use checked_commit() around consume() and abandon() To ensure we reliably handle when a transaction fails we should use the checked_commit() helper function instead of the currently not fully exception handling kazoo transaction commit function. This should be addressed in the future with: - https://github.com/python-zk/kazoo/pull/224 - https://github.com/python-zk/kazoo/pull/225 Those have not merged yet (or been released) so we need to use a similar function in the meantime. 
Change-Id: Icf83b7d4955c11227e733287170a7bd3ab372bd2 --- taskflow/jobs/backends/impl_zookeeper.py | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/taskflow/jobs/backends/impl_zookeeper.py b/taskflow/jobs/backends/impl_zookeeper.py index 4260ab91..8416d305 100644 --- a/taskflow/jobs/backends/impl_zookeeper.py +++ b/taskflow/jobs/backends/impl_zookeeper.py @@ -581,9 +581,10 @@ class ZookeeperJobBoard(jobboard.NotifyingJobBoard): raise excp.JobFailure("Can not consume a job %s" " which is not owned by %s" % (job.uuid, who)) - with self._client.transaction() as txn: - txn.delete(job.lock_path, version=lock_stat.version) - txn.delete(job.path, version=data_stat.version) + txn = self._client.transaction() + txn.delete(job.lock_path, version=lock_stat.version) + txn.delete(job.path, version=data_stat.version) + kazoo_utils.checked_commit(txn) self._remove_job(job.path) def abandon(self, job, who): @@ -600,8 +601,9 @@ class ZookeeperJobBoard(jobboard.NotifyingJobBoard): raise excp.JobFailure("Can not abandon a job %s" " which is not owned by %s" % (job.uuid, who)) - with self._client.transaction() as txn: - txn.delete(job.lock_path, version=lock_stat.version) + txn = self._client.transaction() + txn.delete(job.lock_path, version=lock_stat.version) + kazoo_utils.checked_commit(txn) def _state_change_listener(self, state): LOG.debug("Kazoo client has changed to state: %s", state) From a84b3240fcabd030b2c970eed46aa9f20cf0afd2 Mon Sep 17 00:00:00 2001 From: Greg Hill Date: Wed, 6 Aug 2014 08:40:42 -0500 Subject: [PATCH 168/188] add pre/post execute/retry callbacks to tasks This enables us to execute code to set up or tear down global state in running tasks. 
Change-Id: Ib1e5d03ab46b3ce1d03fa83b91bf437fa950b758 Implements: blueprint task-callbacks --- taskflow/engines/action_engine/executor.py | 6 ++++ taskflow/task.py | 34 ++++++++++++++++++++++ 2 files changed, 40 insertions(+) diff --git a/taskflow/engines/action_engine/executor.py b/taskflow/engines/action_engine/executor.py index 816060f5..e28e863b 100644 --- a/taskflow/engines/action_engine/executor.py +++ b/taskflow/engines/action_engine/executor.py @@ -31,11 +31,14 @@ REVERTED = 'reverted' def _execute_task(task, arguments, progress_callback): with task.autobind('update_progress', progress_callback): try: + task.pre_execute() result = task.execute(**arguments) except Exception: # NOTE(imelnikov): wrap current exception with Failure # object and return it. result = misc.Failure() + finally: + task.post_execute() return (task, EXECUTED, result) @@ -45,11 +48,14 @@ def _revert_task(task, arguments, result, failures, progress_callback): kwargs['flow_failures'] = failures with task.autobind('update_progress', progress_callback): try: + task.pre_revert() result = task.revert(**kwargs) except Exception: # NOTE(imelnikov): wrap current exception with Failure # object and return it. result = misc.Failure() + finally: + task.post_revert() return (task, REVERTED, result) diff --git a/taskflow/task.py b/taskflow/task.py index 067613a2..cd470e72 100644 --- a/taskflow/task.py +++ b/taskflow/task.py @@ -47,6 +47,15 @@ class BaseTask(atom.Atom): # Map of events => lists of callbacks to invoke on task events. self._events_listeners = collections.defaultdict(list) + def pre_execute(self): + """Code to be run prior to executing the task. + + A common pattern for initializing the state of the system prior to + running tasks is to define some code in a base class that all your + tasks inherit from. In that class, you can define a pre_execute + method and it will always be invoked just prior to your tasks running. 
+ """ + @abc.abstractmethod def execute(self, *args, **kwargs): """Activate a given task which will perform some operation and return. @@ -65,6 +74,25 @@ class BaseTask(atom.Atom): or remote). """ + def post_execute(self): + """Code to be run after executing the task. + + A common pattern for cleaning up global state of the system after the + execution of tasks is to define some code in a base class that all your + tasks inherit from. In that class, you can define a post_execute + method and it will always be invoked just after your tasks execute, + regardless of whether they succeded or not. + + This pattern is useful if you have global shared database sessions + that need to be cleaned up, for example. + """ + + def pre_revert(self): + """Code to be run prior to reverting the task. + + This works the same as pre_execute, but for the revert phase. + """ + def revert(self, *args, **kwargs): """Revert this task. @@ -79,6 +107,12 @@ class BaseTask(atom.Atom): contain the failure information. """ + def post_revert(self): + """Code to be run after reverting the task. + + This works the same as post_execute, but for the revert phase. + """ + def update_progress(self, progress, **kwargs): """Update task progress and notify all registered listeners. From 2a041b326836d288e08f11a24ebb35d36664b63f Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Fri, 1 Aug 2014 18:49:36 -0700 Subject: [PATCH 169/188] Add a pformat() failure method and use it in the conductor When the conductors engine raises a wrapped failure (which may contain a single failure from a remote worker, or may contain many failures if there were more than one such failures...) it is very useful to show more than just the basic exception information (or at least provide methods that can print out the information in a nice manner). 
This commit adds a pformat() failure method that formats the failure in a nice and understandable manner and then uses it in the conductor dispatching routine so that the conductors LOGs are much more useful. Change-Id: I46afacf54c9b4cae885c76c09b51e61d71fe623a --- taskflow/conductors/single_threaded.py | 17 ++++++++++++----- taskflow/tests/unit/test_utils_failure.py | 23 +++++++++++++++++++++++ taskflow/utils/misc.py | 19 +++++++++++++++++-- 3 files changed, 52 insertions(+), 7 deletions(-) diff --git a/taskflow/conductors/single_threaded.py b/taskflow/conductors/single_threaded.py index d13d666c..6387938e 100644 --- a/taskflow/conductors/single_threaded.py +++ b/taskflow/conductors/single_threaded.py @@ -96,17 +96,24 @@ class SingleThreadedConductor(base.Conductor): engine.run() except excp.WrappedFailure as e: if all((f.check(*NO_CONSUME_EXCEPTIONS) for f in e)): - LOG.warn("Job execution failed (consumption being" - " skipped): %s", job, exc_info=True) consume = False - else: - LOG.warn("Job execution failed: %s", job, exc_info=True) + if LOG.isEnabledFor(logging.WARNING): + if consume: + LOG.warn("Job execution failed (consumption being" + " skipped): %s [%s failures]", job, len(e)) + else: + LOG.warn("Job execution failed (consumption" + " proceeding): %s [%s failures]", job, len(e)) + # Show the failure/s + traceback (if possible)... + for i, f in enumerate(e): + LOG.warn("%s. 
%s", i + 1, f.pformat(traceback=True)) except NO_CONSUME_EXCEPTIONS: LOG.warn("Job execution failed (consumption being" " skipped): %s", job, exc_info=True) consume = False except Exception: - LOG.warn("Job execution failed: %s", job, exc_info=True) + LOG.warn("Job execution failed (consumption proceeding): %s", + job, exc_info=True) else: LOG.info("Job completed successfully: %s", job) return consume diff --git a/taskflow/tests/unit/test_utils_failure.py b/taskflow/tests/unit/test_utils_failure.py index 012e241a..4958da62 100644 --- a/taskflow/tests/unit/test_utils_failure.py +++ b/taskflow/tests/unit/test_utils_failure.py @@ -42,6 +42,10 @@ class GeneralFailureObjTestsMixin(object): self.assertEqual(list(self.fail_obj), test_utils.RUNTIME_ERROR_CLASSES[:-2]) + def test_pformat_no_traceback(self): + text = self.fail_obj.pformat() + self.assertNotIn("Traceback", text) + def test_check_str(self): val = 'Exception' self.assertEqual(self.fail_obj.check(val), val) @@ -91,6 +95,10 @@ class ReCreatedFailureTestCase(test.TestCase, GeneralFailureObjTestsMixin): def test_no_exc_info(self): self.assertIs(self.fail_obj.exc_info, None) + def test_pformat_traceback(self): + text = self.fail_obj.pformat(traceback=True) + self.assertIn("Traceback (most recent call last):", text) + def test_reraises(self): exc = self.assertRaises(exceptions.WrappedFailure, self.fail_obj.reraise) @@ -103,6 +111,10 @@ class FromExceptionTestCase(test.TestCase, GeneralFailureObjTestsMixin): super(FromExceptionTestCase, self).setUp() self.fail_obj = misc.Failure.from_exception(RuntimeError('Woot!')) + def test_pformat_no_traceback(self): + text = self.fail_obj.pformat(traceback=True) + self.assertIn("Traceback not available", text) + class FailureObjectTestCase(test.TestCase): @@ -188,6 +200,17 @@ class FailureObjectTestCase(test.TestCase): self.assertNotEqual(captured, None) self.assertFalse(captured.matches(None)) + def test_pformat_traceback(self): + captured = _captured_failure('Woot!') + text = 
captured.pformat(traceback=True) + self.assertIn("Traceback (most recent call last):", text) + + def test_pformat_traceback_captured_no_exc_info(self): + captured = _captured_failure('Woot!') + captured = misc.Failure.from_dict(captured.to_dict()) + text = captured.pformat(traceback=True) + self.assertIn("Traceback (most recent call last):", text) + class WrappedFailureTestCase(test.TestCase): diff --git a/taskflow/utils/misc.py b/taskflow/utils/misc.py index 65f21e24..b716a469 100644 --- a/taskflow/utils/misc.py +++ b/taskflow/utils/misc.py @@ -707,8 +707,23 @@ class Failure(object): return None def __str__(self): - return 'Failure: %s: %s' % (self._exc_type_names[0], - self._exception_str) + return self.pformat() + + def pformat(self, traceback=False): + buf = six.StringIO() + buf.write( + 'Failure: %s: %s' % (self._exc_type_names[0], self._exception_str)) + if traceback: + if self._traceback_str is not None: + traceback_str = self._traceback_str.rstrip() + else: + traceback_str = None + if traceback_str: + buf.write('\nTraceback (most recent call last):\n') + buf.write(traceback_str) + else: + buf.write('\nTraceback not available.') + return buf.getvalue() def __iter__(self): """Iterate over exception type names.""" From 89c6c7435b33c6989607d077f66c0aac9ff4500d Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Wed, 9 Jul 2014 18:56:55 -0700 Subject: [PATCH 170/188] Avoid naming time type module the same as a builtin In order to avoid naming conflicts which can easily occur if we name our time module as a python builtin just pick a name that doesn't conflict (it still retains the same meaning). 
Change-Id: Ia19f3776689d2b1f31f7cbfaa15e53f99a3ee900 --- taskflow/conductors/single_threaded.py | 2 +- taskflow/engines/worker_based/executor.py | 2 +- taskflow/engines/worker_based/protocol.py | 4 ++-- taskflow/jobs/backends/impl_zookeeper.py | 4 ++-- taskflow/listeners/timing.py | 4 ++-- taskflow/tests/unit/test_types.py | 2 +- taskflow/types/latch.py | 2 +- taskflow/types/{time.py => timing.py} | 0 8 files changed, 10 insertions(+), 10 deletions(-) rename taskflow/types/{time.py => timing.py} (100%) diff --git a/taskflow/conductors/single_threaded.py b/taskflow/conductors/single_threaded.py index d13d666c..25bd53a0 100644 --- a/taskflow/conductors/single_threaded.py +++ b/taskflow/conductors/single_threaded.py @@ -20,7 +20,7 @@ import six from taskflow.conductors import base from taskflow import exceptions as excp from taskflow.listeners import logging as logging_listener -from taskflow.types import time as tt +from taskflow.types import timing as tt from taskflow.utils import lock_utils LOG = logging.getLogger(__name__) diff --git a/taskflow/engines/worker_based/executor.py b/taskflow/engines/worker_based/executor.py index 2b82f01a..f5afc72e 100644 --- a/taskflow/engines/worker_based/executor.py +++ b/taskflow/engines/worker_based/executor.py @@ -23,7 +23,7 @@ from taskflow.engines.worker_based import cache from taskflow.engines.worker_based import protocol as pr from taskflow.engines.worker_based import proxy from taskflow import exceptions as exc -from taskflow.types import time as tt +from taskflow.types import timing as tt from taskflow.utils import async_utils from taskflow.utils import misc from taskflow.utils import reflection diff --git a/taskflow/engines/worker_based/protocol.py b/taskflow/engines/worker_based/protocol.py index db693ecc..ea994272 100644 --- a/taskflow/engines/worker_based/protocol.py +++ b/taskflow/engines/worker_based/protocol.py @@ -23,7 +23,7 @@ import six from taskflow.engines.action_engine import executor from taskflow import 
exceptions as excp -from taskflow.types import time +from taskflow.types import timing as tt from taskflow.utils import misc from taskflow.utils import reflection @@ -194,7 +194,7 @@ class Request(Message): self._arguments = arguments self._progress_callback = progress_callback self._kwargs = kwargs - self._watch = time.StopWatch(duration=timeout).start() + self._watch = tt.StopWatch(duration=timeout).start() self._state = WAITING self.result = futures.Future() diff --git a/taskflow/jobs/backends/impl_zookeeper.py b/taskflow/jobs/backends/impl_zookeeper.py index be305a12..88f068f7 100644 --- a/taskflow/jobs/backends/impl_zookeeper.py +++ b/taskflow/jobs/backends/impl_zookeeper.py @@ -33,7 +33,7 @@ from taskflow.openstack.common import excutils from taskflow.openstack.common import jsonutils from taskflow.openstack.common import uuidutils from taskflow import states -from taskflow.types import time +from taskflow.types import timing as tt from taskflow.utils import kazoo_utils from taskflow.utils import lock_utils from taskflow.utils import misc @@ -587,7 +587,7 @@ class ZookeeperJobBoard(jobboard.NotifyingJobBoard): # Wait until timeout expires (or forever) for jobs to appear. 
watch = None if timeout is not None: - watch = time.StopWatch(duration=float(timeout)).start() + watch = tt.StopWatch(duration=float(timeout)).start() self._job_cond.acquire() try: while True: diff --git a/taskflow/listeners/timing.py b/taskflow/listeners/timing.py index 87240a36..e21dd642 100644 --- a/taskflow/listeners/timing.py +++ b/taskflow/listeners/timing.py @@ -21,7 +21,7 @@ import logging from taskflow import exceptions as exc from taskflow.listeners import base from taskflow import states -from taskflow.types import time +from taskflow.types import timing as tt STARTING_STATES = (states.RUNNING, states.REVERTING) FINISHED_STATES = base.FINISH_STATES + (states.REVERTED,) @@ -64,7 +64,7 @@ class TimingListener(base.ListenerBase): if state == states.PENDING: self._timers.pop(task_name, None) elif state in STARTING_STATES: - self._timers[task_name] = time.StopWatch().start() + self._timers[task_name] = tt.StopWatch().start() elif state in FINISHED_STATES: if task_name in self._timers: self._record_ending(self._timers[task_name], task_name) diff --git a/taskflow/tests/unit/test_types.py b/taskflow/tests/unit/test_types.py index e69cac0d..e17a9fe2 100644 --- a/taskflow/tests/unit/test_types.py +++ b/taskflow/tests/unit/test_types.py @@ -20,7 +20,7 @@ import networkx as nx from taskflow import test from taskflow.types import graph -from taskflow.types import time as tt +from taskflow.types import timing as tt from taskflow.types import tree diff --git a/taskflow/types/latch.py b/taskflow/types/latch.py index 4451fc32..9aa2622d 100644 --- a/taskflow/types/latch.py +++ b/taskflow/types/latch.py @@ -16,7 +16,7 @@ import threading -from taskflow.types import time as tt +from taskflow.types import timing as tt class Latch(object): diff --git a/taskflow/types/time.py b/taskflow/types/timing.py similarity index 100% rename from taskflow/types/time.py rename to taskflow/types/timing.py From 2b15d09dc10905bae5fee3cf997c588502fc91f3 Mon Sep 17 00:00:00 2001 From: Joshua 
Harlow Date: Thu, 14 Aug 2014 18:13:53 -0700 Subject: [PATCH 171/188] Update oslo-incubator to 037dee004c3e2239 This brought in/includes the following changes: ad248f6658d5 Specify namedtuple_as_object=False when using simplejson 3d90045d2d Backport code for i18n to check lazy at runtime Change-Id: I6ffb317f7e8fa57d79a7d0c6a9fc2517bc169e4b --- taskflow/openstack/common/gettextutils.py | 63 ++++++++--------------- taskflow/openstack/common/jsonutils.py | 10 ++++ 2 files changed, 32 insertions(+), 41 deletions(-) diff --git a/taskflow/openstack/common/gettextutils.py b/taskflow/openstack/common/gettextutils.py index 3fff6e30..20fc2543 100644 --- a/taskflow/openstack/common/gettextutils.py +++ b/taskflow/openstack/common/gettextutils.py @@ -23,7 +23,6 @@ Usual usage in an openstack.common module: """ import copy -import functools import gettext import locale from logging import handlers @@ -42,7 +41,7 @@ class TranslatorFactory(object): """Create translator functions """ - def __init__(self, domain, lazy=False, localedir=None): + def __init__(self, domain, localedir=None): """Establish a set of translation functions for the domain. :param domain: Name of translation domain, @@ -55,7 +54,6 @@ class TranslatorFactory(object): :type localedir: str """ self.domain = domain - self.lazy = lazy if localedir is None: localedir = os.environ.get(domain.upper() + '_LOCALEDIR') self.localedir = localedir @@ -75,16 +73,19 @@ class TranslatorFactory(object): """ if domain is None: domain = self.domain - if self.lazy: - return functools.partial(Message, domain=domain) - t = gettext.translation( - domain, - localedir=self.localedir, - fallback=True, - ) - if six.PY3: - return t.gettext - return t.ugettext + t = gettext.translation(domain, + localedir=self.localedir, + fallback=True) + # Use the appropriate method of the translation object based + # on the python version. 
+ m = t.gettext if six.PY3 else t.ugettext + + def f(msg): + """oslo.i18n.gettextutils translation function.""" + if USE_LAZY: + return Message(msg, domain=domain) + return m(msg) + return f @property def primary(self): @@ -147,19 +148,11 @@ def enable_lazy(): your project is importing _ directly instead of using the gettextutils.install() way of importing the _ function. """ - # FIXME(dhellmann): This function will be removed in oslo.i18n, - # because the TranslatorFactory makes it superfluous. - global _, _LI, _LW, _LE, _LC, USE_LAZY - tf = TranslatorFactory('taskflow', lazy=True) - _ = tf.primary - _LI = tf.log_info - _LW = tf.log_warning - _LE = tf.log_error - _LC = tf.log_critical + global USE_LAZY USE_LAZY = True -def install(domain, lazy=False): +def install(domain): """Install a _() function using the given translation domain. Given a translation domain, install a _() function using gettext's @@ -170,26 +163,14 @@ def install(domain, lazy=False): a translation-domain-specific environment variable (e.g. NOVA_LOCALEDIR). + Note that to enable lazy translation, enable_lazy must be + called. + :param domain: the translation domain - :param lazy: indicates whether or not to install the lazy _() function. - The lazy _() introduces a way to do deferred translation - of messages by installing a _ that builds Message objects, - instead of strings, which can then be lazily translated into - any available locale. 
""" - if lazy: - from six import moves - tf = TranslatorFactory(domain, lazy=True) - moves.builtins.__dict__['_'] = tf.primary - else: - localedir = '%s_LOCALEDIR' % domain.upper() - if six.PY3: - gettext.install(domain, - localedir=os.environ.get(localedir)) - else: - gettext.install(domain, - localedir=os.environ.get(localedir), - unicode=True) + from six import moves + tf = TranslatorFactory(domain) + moves.builtins.__dict__['_'] = tf.primary class Message(six.text_type): diff --git a/taskflow/openstack/common/jsonutils.py b/taskflow/openstack/common/jsonutils.py index acbf65d2..dec02a95 100644 --- a/taskflow/openstack/common/jsonutils.py +++ b/taskflow/openstack/common/jsonutils.py @@ -38,11 +38,13 @@ import inspect import itertools import sys +is_simplejson = False if sys.version_info < (2, 7): # On Python <= 2.6, json module is not C boosted, so try to use # simplejson module if available try: import simplejson as json + is_simplejson = True except ImportError: import json else: @@ -165,9 +167,17 @@ def to_primitive(value, convert_instances=False, convert_datetime=True, def dumps(value, default=to_primitive, **kwargs): + if is_simplejson: + kwargs['namedtuple_as_object'] = False return json.dumps(value, default=default, **kwargs) +def dump(obj, fp, *args, **kwargs): + if is_simplejson: + kwargs['namedtuple_as_object'] = False + return json.dump(obj, fp, *args, **kwargs) + + def loads(s, encoding='utf-8', **kwargs): return json.loads(strutils.safe_decode(s, encoding), **kwargs) From 5f0948bd1249cddabd9f5bd807bf2d2a52a61e50 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Fri, 15 Aug 2014 12:18:26 -0700 Subject: [PATCH 172/188] Split requirements into py2 and py3 files Since PBR now supports distinguishing between a requirements file for py2 and a different requirements file for py3 we should add the support for that separation so that it is easier to package taskflow. 
Change-Id: I3c5faa99a529bcb28aa0276b8d345a26f94b5af3 --- README.rst | 13 ++++----- requirements.txt => requirements-py2.txt | 1 + requirements-py3.txt | 12 +++++++++ tox-tmpl.ini | 12 +++++---- tox.ini | 34 ++++++++++++++++++++---- 5 files changed, 56 insertions(+), 16 deletions(-) rename requirements.txt => requirements-py2.txt (89%) create mode 100644 requirements-py3.txt diff --git a/README.rst b/README.rst index d75518f3..24c6e5d9 100644 --- a/README.rst +++ b/README.rst @@ -21,12 +21,13 @@ Requirements Because TaskFlow has many optional (pluggable) parts like persistence backends and engines, we decided to split our requirements into two parts: - things that are absolutely required by TaskFlow (you can't use -TaskFlow without them) are put to ``requirements.txt``; - things that -are required by some optional part of TaskFlow (you can use TaskFlow -without them) are put to ``optional-requirements.txt``; if you want to -use the feature in question, you should add that requirements to your -project or environment; - as usual, things that required only for -running tests are put to ``test-requirements.txt``. +TaskFlow without them) are put into ``requirements-pyN.txt`` (``N`` being the +Python *major* version number used to install the package); - things that are +required by some optional part of TaskFlow (you can use TaskFlow without +them) are put into ``optional-requirements.txt``; if you want to use the +feature in question, you should add that requirements to your project or +environment; - as usual, things that required only for running tests are +put into ``test-requirements.txt``. 
Tox.ini ~~~~~~~ diff --git a/requirements.txt b/requirements-py2.txt similarity index 89% rename from requirements.txt rename to requirements-py2.txt index 764c2ad2..83523949 100644 --- a/requirements.txt +++ b/requirements-py2.txt @@ -10,4 +10,5 @@ Babel>=1.3 stevedore>=0.14 # Backport for concurrent.futures which exists in 3.2+ futures>=2.1.3 +# Used for structured input validation jsonschema>=2.0.0,<3.0.0 diff --git a/requirements-py3.txt b/requirements-py3.txt new file mode 100644 index 00000000..c6ca178c --- /dev/null +++ b/requirements-py3.txt @@ -0,0 +1,12 @@ +# Packages needed for using this library. +anyjson>=0.3.3 +iso8601>=0.1.9 +# Python 2->3 compatibility library. +six>=1.7.0 +# Very nice graph library +networkx>=1.8 +Babel>=1.3 +# Used for backend storage engine loading. +stevedore>=0.14 +# Used for structured input validation +jsonschema>=2.0.0,<3.0.0 diff --git a/tox-tmpl.ini b/tox-tmpl.ini index f0f3dd00..8ad9f332 100644 --- a/tox-tmpl.ini +++ b/tox-tmpl.ini @@ -10,8 +10,7 @@ skipsdist = True usedevelop = True install_command = pip install {opts} {packages} setenv = VIRTUAL_ENV={envdir} -deps = -r{toxinidir}/requirements.txt - -r{toxinidir}/test-requirements.txt +deps = -r{toxinidir}/test-requirements.txt alembic>=0.4.1 psycopg2 kazoo>=1.3.1 @@ -26,7 +25,7 @@ commands = flake8 {posargs} [testenv:pylint] setenv = VIRTUAL_ENV={envdir} -deps = -r{toxinidir}/requirements.txt +deps = -r{toxinidir}/requirements-py2.txt pylint==0.26.0 commands = pylint --rcfile=pylintrc taskflow @@ -50,7 +49,7 @@ exclude = .venv,.tox,dist,doc,./taskflow/openstack/common,*egg,.git,build,tools deps = {[testenv:py26-sa7-mysql-ev]deps} [testenv:py27] -deps = -r{toxinidir}/requirements.txt +deps = -r{toxinidir}/requirements-py2.txt -r{toxinidir}/optional-requirements.txt -r{toxinidir}/test-requirements.txt doc8>=0.3.4 @@ -61,11 +60,12 @@ commands = [testenv:py33] deps = {[testenv]deps} + -r{toxinidir}/requirements-py3.txt SQLAlchemy>=0.7.8,<=0.9.99 # NOTE(imelnikov): psycopg2 
is not supported on pypy [testenv:pypy] -deps = -r{toxinidir}/requirements.txt +deps = -r{toxinidir}/requirements-py2.txt -r{toxinidir}/test-requirements.txt SQLAlchemy>=0.7.8,<=0.9.99 alembic>=0.4.1 @@ -81,10 +81,12 @@ eventlet = ev,* [axis:python:py26] basepython = python2.6 deps = {[testenv]deps} + -r{toxinidir}/requirements-py2.txt [axis:python:py27] basepython = python2.7 deps = {[testenv]deps} + -r{toxinidir}/requirements-py2.txt [axis:eventlet:ev] deps = diff --git a/tox.ini b/tox.ini index 29b000e6..0283c14d 100644 --- a/tox.ini +++ b/tox.ini @@ -39,8 +39,7 @@ envlist = cover, usedevelop = True install_command = pip install {opts} {packages} setenv = VIRTUAL_ENV={envdir} -deps = -r{toxinidir}/requirements.txt - -r{toxinidir}/test-requirements.txt +deps = -r{toxinidir}/test-requirements.txt alembic>=0.4.1 psycopg2 kazoo>=1.3.1 @@ -55,7 +54,7 @@ commands = flake8 {posargs} [testenv:pylint] setenv = VIRTUAL_ENV={envdir} -deps = -r{toxinidir}/requirements.txt +deps = -r{toxinidir}/requirements-py2.txt pylint==0.26.0 commands = pylint --rcfile=pylintrc taskflow @@ -76,7 +75,7 @@ exclude = .venv,.tox,dist,doc,./taskflow/openstack/common,*egg,.git,build,tools deps = {[testenv:py26-sa7-mysql-ev]deps} [testenv:py27] -deps = -r{toxinidir}/requirements.txt +deps = -r{toxinidir}/requirements-py2.txt -r{toxinidir}/optional-requirements.txt -r{toxinidir}/test-requirements.txt doc8>=0.3.4 @@ -87,10 +86,11 @@ commands = [testenv:py33] deps = {[testenv]deps} + -r{toxinidir}/requirements-py3.txt SQLAlchemy>=0.7.8,<=0.9.99 [testenv:pypy] -deps = -r{toxinidir}/requirements.txt +deps = -r{toxinidir}/requirements-py2.txt -r{toxinidir}/test-requirements.txt SQLAlchemy>=0.7.8,<=0.9.99 alembic>=0.4.1 @@ -99,6 +99,7 @@ deps = -r{toxinidir}/requirements.txt [testenv:py26-sa7-mysql-ev] deps = {[testenv]deps} + -r{toxinidir}/requirements-py2.txt SQLAlchemy>=0.7.8,<=0.7.99 MySQL-python eventlet>=0.13.0 @@ -106,12 +107,14 @@ basepython = python2.6 [testenv:py26-sa7-mysql] deps = 
{[testenv]deps} + -r{toxinidir}/requirements-py2.txt SQLAlchemy>=0.7.8,<=0.7.99 MySQL-python basepython = python2.6 [testenv:py26-sa7-pymysql-ev] deps = {[testenv]deps} + -r{toxinidir}/requirements-py2.txt SQLAlchemy>=0.7.8,<=0.7.99 pyMySQL eventlet>=0.13.0 @@ -119,12 +122,14 @@ basepython = python2.6 [testenv:py26-sa7-pymysql] deps = {[testenv]deps} + -r{toxinidir}/requirements-py2.txt SQLAlchemy>=0.7.8,<=0.7.99 pyMySQL basepython = python2.6 [testenv:py26-sa8-mysql-ev] deps = {[testenv]deps} + -r{toxinidir}/requirements-py2.txt SQLAlchemy>=0.8,<=0.8.99 MySQL-python eventlet>=0.13.0 @@ -132,12 +137,14 @@ basepython = python2.6 [testenv:py26-sa8-mysql] deps = {[testenv]deps} + -r{toxinidir}/requirements-py2.txt SQLAlchemy>=0.8,<=0.8.99 MySQL-python basepython = python2.6 [testenv:py26-sa8-pymysql-ev] deps = {[testenv]deps} + -r{toxinidir}/requirements-py2.txt SQLAlchemy>=0.8,<=0.8.99 pyMySQL eventlet>=0.13.0 @@ -145,12 +152,14 @@ basepython = python2.6 [testenv:py26-sa8-pymysql] deps = {[testenv]deps} + -r{toxinidir}/requirements-py2.txt SQLAlchemy>=0.8,<=0.8.99 pyMySQL basepython = python2.6 [testenv:py26-sa9-mysql-ev] deps = {[testenv]deps} + -r{toxinidir}/requirements-py2.txt SQLAlchemy>=0.9,<=0.9.99 MySQL-python eventlet>=0.13.0 @@ -158,12 +167,14 @@ basepython = python2.6 [testenv:py26-sa9-mysql] deps = {[testenv]deps} + -r{toxinidir}/requirements-py2.txt SQLAlchemy>=0.9,<=0.9.99 MySQL-python basepython = python2.6 [testenv:py26-sa9-pymysql-ev] deps = {[testenv]deps} + -r{toxinidir}/requirements-py2.txt SQLAlchemy>=0.9,<=0.9.99 pyMySQL eventlet>=0.13.0 @@ -171,12 +182,14 @@ basepython = python2.6 [testenv:py26-sa9-pymysql] deps = {[testenv]deps} + -r{toxinidir}/requirements-py2.txt SQLAlchemy>=0.9,<=0.9.99 pyMySQL basepython = python2.6 [testenv:py27-sa7-mysql-ev] deps = {[testenv]deps} + -r{toxinidir}/requirements-py2.txt SQLAlchemy>=0.7.8,<=0.7.99 MySQL-python eventlet>=0.13.0 @@ -184,12 +197,14 @@ basepython = python2.7 [testenv:py27-sa7-mysql] deps = 
{[testenv]deps} + -r{toxinidir}/requirements-py2.txt SQLAlchemy>=0.7.8,<=0.7.99 MySQL-python basepython = python2.7 [testenv:py27-sa7-pymysql-ev] deps = {[testenv]deps} + -r{toxinidir}/requirements-py2.txt SQLAlchemy>=0.7.8,<=0.7.99 pyMySQL eventlet>=0.13.0 @@ -197,12 +212,14 @@ basepython = python2.7 [testenv:py27-sa7-pymysql] deps = {[testenv]deps} + -r{toxinidir}/requirements-py2.txt SQLAlchemy>=0.7.8,<=0.7.99 pyMySQL basepython = python2.7 [testenv:py27-sa8-mysql-ev] deps = {[testenv]deps} + -r{toxinidir}/requirements-py2.txt SQLAlchemy>=0.8,<=0.8.99 MySQL-python eventlet>=0.13.0 @@ -210,12 +227,14 @@ basepython = python2.7 [testenv:py27-sa8-mysql] deps = {[testenv]deps} + -r{toxinidir}/requirements-py2.txt SQLAlchemy>=0.8,<=0.8.99 MySQL-python basepython = python2.7 [testenv:py27-sa8-pymysql-ev] deps = {[testenv]deps} + -r{toxinidir}/requirements-py2.txt SQLAlchemy>=0.8,<=0.8.99 pyMySQL eventlet>=0.13.0 @@ -223,12 +242,14 @@ basepython = python2.7 [testenv:py27-sa8-pymysql] deps = {[testenv]deps} + -r{toxinidir}/requirements-py2.txt SQLAlchemy>=0.8,<=0.8.99 pyMySQL basepython = python2.7 [testenv:py27-sa9-mysql-ev] deps = {[testenv]deps} + -r{toxinidir}/requirements-py2.txt SQLAlchemy>=0.9,<=0.9.99 MySQL-python eventlet>=0.13.0 @@ -236,12 +257,14 @@ basepython = python2.7 [testenv:py27-sa9-mysql] deps = {[testenv]deps} + -r{toxinidir}/requirements-py2.txt SQLAlchemy>=0.9,<=0.9.99 MySQL-python basepython = python2.7 [testenv:py27-sa9-pymysql-ev] deps = {[testenv]deps} + -r{toxinidir}/requirements-py2.txt SQLAlchemy>=0.9,<=0.9.99 pyMySQL eventlet>=0.13.0 @@ -249,6 +272,7 @@ basepython = python2.7 [testenv:py27-sa9-pymysql] deps = {[testenv]deps} + -r{toxinidir}/requirements-py2.txt SQLAlchemy>=0.9,<=0.9.99 pyMySQL basepython = python2.7 From b28fe65819fb7dbd74f55855787ebc3dcbef5cca Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Sat, 16 Aug 2014 10:59:41 -0700 Subject: [PATCH 173/188] Remove sphinx examples emphasize-lines The emphasize-lines was becoming 
inconsistent and is hard to keep up to date in the correct manner so removing it since we already provide introduction comments in the examples and emphasizing/highlighting them does not add much meaningful benefit. Change-Id: I406dd606df75e7523703812803255418ec2f47d8 --- doc/source/examples.rst | 13 ------------- 1 file changed, 13 deletions(-) diff --git a/doc/source/examples.rst b/doc/source/examples.rst index 40c0a2d2..86766641 100644 --- a/doc/source/examples.rst +++ b/doc/source/examples.rst @@ -9,7 +9,6 @@ Making phone calls :language: python :linenos: :lines: 16- - :emphasize-lines: 16-29 Making phone calls (automatically reverting) ============================================ @@ -22,7 +21,6 @@ Making phone calls (automatically reverting) :language: python :linenos: :lines: 16- - :emphasize-lines: 17-26 Building a car ============== @@ -35,7 +33,6 @@ Building a car :language: python :linenos: :lines: 16- - :emphasize-lines: 22-28 Linear equation solver (explicit dependencies) ============================================== @@ -48,7 +45,6 @@ Linear equation solver (explicit dependencies) :language: python :linenos: :lines: 16- - :emphasize-lines: 17-27 Linear equation solver (inferred dependencies) ============================================== @@ -59,7 +55,6 @@ Linear equation solver (inferred dependencies) :language: python :linenos: :lines: 16- - :emphasize-lines: 18-31 Linear equation solver (in parallel) ==================================== @@ -72,7 +67,6 @@ Linear equation solver (in parallel) :language: python :linenos: :lines: 16- - :emphasize-lines: 18-21 Creating a volume (in parallel) =============================== @@ -85,7 +79,6 @@ Creating a volume (in parallel) :language: python :linenos: :lines: 16- - :emphasize-lines: 21-23 Storing & emitting a bill ========================= @@ -98,7 +91,6 @@ Storing & emitting a bill :language: python :linenos: :lines: 16- - :emphasize-lines: 24-32 Suspending a workflow & resuming 
================================ @@ -111,7 +103,6 @@ Suspending a workflow & resuming :language: python :linenos: :lines: 16- - :emphasize-lines: 22-39 Creating a virtual machine (resumable) ====================================== @@ -124,7 +115,6 @@ Creating a virtual machine (resumable) :language: python :linenos: :lines: 16- - :emphasize-lines: 32-34 Creating a volume (resumable) ============================= @@ -137,7 +127,6 @@ Creating a volume (resumable) :language: python :linenos: :lines: 16- - :emphasize-lines: 28-30 Running engines via iteration ============================= @@ -150,7 +139,6 @@ Running engines via iteration :language: python :linenos: :lines: 16- - :emphasize-lines: 24-27 Controlling retries using a retry controller ============================================ @@ -163,4 +151,3 @@ Controlling retries using a retry controller :language: python :linenos: :lines: 16- - :emphasize-lines: 17-25 From 16d80b5236a42c255257fc93d6c5ed9b4943f769 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Sun, 17 Aug 2014 15:07:07 -0700 Subject: [PATCH 174/188] Allow worker count to be specified when no executor provided When a multi-threaded engine is used it is nice to be able to inform the engine how many workers should be created when it creates its own executor. To allow this to be possible accept a new keywork argument that can be used to set this value. 
Change-Id: I0095d548249372440abbcf9b5c3b8fa841ca0ea9 --- taskflow/engines/action_engine/engine.py | 9 ++++++--- taskflow/engines/action_engine/executor.py | 18 +++++++++++------- 2 files changed, 17 insertions(+), 10 deletions(-) diff --git a/taskflow/engines/action_engine/engine.py b/taskflow/engines/action_engine/engine.py index 054c2ccf..ca8a80a6 100644 --- a/taskflow/engines/action_engine/engine.py +++ b/taskflow/engines/action_engine/engine.py @@ -216,9 +216,12 @@ class MultiThreadedActionEngine(ActionEngine): _storage_factory = atom_storage.MultiThreadedStorage def _task_executor_factory(self): - return executor.ParallelTaskExecutor(self._executor) + return executor.ParallelTaskExecutor(executor=self._executor, + max_workers=self._max_workers) - def __init__(self, flow, flow_detail, backend, conf, **kwargs): + def __init__(self, flow, flow_detail, backend, conf, + executor=None, max_workers=None): super(MultiThreadedActionEngine, self).__init__( flow, flow_detail, backend, conf) - self._executor = kwargs.get('executor') + self._executor = executor + self._max_workers = max_workers diff --git a/taskflow/engines/action_engine/executor.py b/taskflow/engines/action_engine/executor.py index e28e863b..b2bdbdae 100644 --- a/taskflow/engines/action_engine/executor.py +++ b/taskflow/engines/action_engine/executor.py @@ -111,13 +111,14 @@ class SerialTaskExecutor(TaskExecutorBase): class ParallelTaskExecutor(TaskExecutorBase): """Executes tasks in parallel. - Submits tasks to executor which should provide interface similar + Submits tasks to an executor which should provide an interface similar to concurrent.Futures.Executor. 
""" - def __init__(self, executor=None): + def __init__(self, executor=None, max_workers=None): self._executor = executor - self._own_executor = executor is None + self._max_workers = max_workers + self._create_executor = executor is None def execute_task(self, task, task_uuid, arguments, progress_callback=None): return self._executor.submit( @@ -133,11 +134,14 @@ class ParallelTaskExecutor(TaskExecutorBase): return async_utils.wait_for_any(fs, timeout) def start(self): - if self._own_executor: - thread_count = threading_utils.get_optimal_thread_count() - self._executor = futures.ThreadPoolExecutor(thread_count) + if self._create_executor: + if self._max_workers is not None: + max_workers = self._max_workers + else: + max_workers = threading_utils.get_optimal_thread_count() + self._executor = futures.ThreadPoolExecutor(max_workers) def stop(self): - if self._own_executor: + if self._create_executor: self._executor.shutdown(wait=True) self._executor = None From 408a8442aacb1f6bbfa9ba1afeda783d33a74b90 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Sat, 26 Jul 2014 22:51:42 -0700 Subject: [PATCH 175/188] Make the WBE worker banner information more meaningful Add in more details that are displayed in the LOG when a WBE worker is started up that is useful to show to be able to help in debugging, or other informational and operational purposes. Example of the new output is the following: TaskFlow v0.3.21.62 WBE worker. Connection details: Driver = py-amqp v1.4.5 Exchange = test Topic = test Transport = amqp Uri = amqp://guest@localhost:5672// Powered by: Executor = concurrent.futures.thread.ThreadPoolExecutor Thread count = 3 Supported endpoints: - taskflow.tests.utils.NastyTask ... 
- taskflow.tests.utils.TaskMultiArgOneReturn System details: Hostname = lappy.gateway.net Pid = 28364 Platform = Linux-3.13.0-30-generic-x86_64-with-Ubuntu-14.04-trusty Python = 2.7.6 (default, Mar 22 2014, 22:59:56) Thread id = 139875992315712 Change-Id: I6d7dba3406007ddc80cce96cfdbbfd25935a12ae --- taskflow/engines/worker_based/proxy.py | 34 +++++++--- taskflow/engines/worker_based/server.py | 4 ++ taskflow/engines/worker_based/worker.py | 88 ++++++++++++++++++++++--- 3 files changed, 107 insertions(+), 19 deletions(-) diff --git a/taskflow/engines/worker_based/proxy.py b/taskflow/engines/worker_based/proxy.py index aaa75c86..d2991ca3 100644 --- a/taskflow/engines/worker_based/proxy.py +++ b/taskflow/engines/worker_based/proxy.py @@ -22,7 +22,7 @@ import kombu import six from taskflow.engines.worker_based import dispatcher - +from taskflow.utils import misc LOG = logging.getLogger(__name__) @@ -40,29 +40,45 @@ class Proxy(object): self._exchange_name = exchange_name self._on_wait = on_wait self._running = threading.Event() - self._url = kwargs.get('url') - self._transport = kwargs.get('transport') - self._transport_opts = kwargs.get('transport_options') self._dispatcher = dispatcher.TypeDispatcher(type_handlers) self._dispatcher.add_requeue_filter( # NOTE(skudriashev): Process all incoming messages only if proxy is # running, otherwise requeue them. 
lambda data, message: not self.is_running) + + url = kwargs.get('url') + transport = kwargs.get('transport') + transport_opts = kwargs.get('transport_options') + self._drain_events_timeout = DRAIN_EVENTS_PERIOD - if self._transport == 'memory' and self._transport_opts: - polling_interval = self._transport_opts.get('polling_interval') - if polling_interval: + if transport == 'memory' and transport_opts: + polling_interval = transport_opts.get('polling_interval') + if polling_interval is not None: self._drain_events_timeout = polling_interval # create connection - self._conn = kombu.Connection(self._url, transport=self._transport, - transport_options=self._transport_opts) + self._conn = kombu.Connection(url, transport=transport, + transport_options=transport_opts) # create exchange self._exchange = kombu.Exchange(name=self._exchange_name, durable=False, auto_delete=True) + @property + def connection_details(self): + # The kombu drivers seem to use 'N/A' when they don't have a version... + driver_version = self._conn.transport.driver_version() + if driver_version and driver_version.lower() == 'n/a': + driver_version = None + return misc.AttrDict( + uri=self._conn.as_uri(include_password=False), + transport=misc.AttrDict( + options=dict(self._conn.transport_options), + driver_type=self._conn.transport.driver_type, + driver_name=self._conn.transport.driver_name, + driver_version=driver_version)) + @property def is_running(self): """Return whether the proxy is running.""" diff --git a/taskflow/engines/worker_based/server.py b/taskflow/engines/worker_based/server.py index 8b175783..73625865 100644 --- a/taskflow/engines/worker_based/server.py +++ b/taskflow/engines/worker_based/server.py @@ -61,6 +61,10 @@ class Server(object): self._endpoints = dict([(endpoint.name, endpoint) for endpoint in endpoints]) + @property + def connection_details(self): + return self._proxy.connection_details + @staticmethod def _parse_request(task_cls, task_name, action, arguments, 
result=None, failures=None, **kwargs): diff --git a/taskflow/engines/worker_based/worker.py b/taskflow/engines/worker_based/worker.py index 49011788..49816eab 100644 --- a/taskflow/engines/worker_based/worker.py +++ b/taskflow/engines/worker_based/worker.py @@ -15,6 +15,11 @@ # under the License. import logging +import os +import platform +import socket +import string +import sys from concurrent import futures @@ -23,6 +28,37 @@ from taskflow.engines.worker_based import server from taskflow import task as t_task from taskflow.utils import reflection from taskflow.utils import threading_utils as tu +from taskflow import version + +BANNER_TEMPLATE = string.Template(""" +TaskFlow v${version} WBE worker. +Connection details: + Driver = $transport_driver + Exchange = $exchange + Topic = $topic + Transport = $transport_type + Uri = $connection_uri +Powered by: + Executor = $executor_type + Thread count = $executor_thread_count +Supported endpoints:$endpoints +System details: + Hostname = $hostname + Pid = $pid + Platform = $platform + Python = $python + Thread id = $thread_id +""".strip()) +BANNER_TEMPLATE.defaults = { + # These values may not be possible to fetch/known, default to unknown... + 'pid': '???', + 'hostname': '???', + 'executor_thread_count': '???', + 'endpoints': ' %s' % ([]), + # These are static (avoid refetching...) 
+ 'version': version.version_string(), + 'python': sys.version.split("\n", 1)[0].strip(), +} LOG = logging.getLogger(__name__) @@ -78,6 +114,7 @@ class Worker(object): self._executor = futures.ThreadPoolExecutor(self._threads_count) self._owns_executor = True self._endpoints = self._derive_endpoints(tasks) + self._exchange = exchange self._server = server.Server(topic, exchange, self._executor, self._endpoints, **kwargs) @@ -87,17 +124,48 @@ class Worker(object): derived_tasks = reflection.find_subclasses(tasks, t_task.BaseTask) return [endpoint.Endpoint(task) for task in derived_tasks] - def run(self): - """Run worker.""" - if self._threads_count != -1: - LOG.info("Starting the '%s' topic worker in %s threads.", - self._topic, self._threads_count) + def _generate_banner(self): + """Generates a banner that can be useful to display before running.""" + tpl_params = {} + connection_details = self._server.connection_details + transport = connection_details.transport + if transport.driver_version: + transport_driver = "%s v%s" % (transport.driver_name, + transport.driver_version) else: - LOG.info("Starting the '%s' topic worker using a %s.", self._topic, - self._executor) - LOG.info("Tasks list:") - for e in self._endpoints: - LOG.info("|-- %s", e) + transport_driver = transport.driver_name + tpl_params['transport_driver'] = transport_driver + tpl_params['exchange'] = self._exchange + tpl_params['topic'] = self._topic + tpl_params['transport_type'] = transport.driver_type + tpl_params['connection_uri'] = connection_details.uri + tpl_params['executor_type'] = reflection.get_class_name(self._executor) + if self._threads_count != -1: + tpl_params['executor_thread_count'] = self._threads_count + if self._endpoints: + pretty_endpoints = [] + for ep in self._endpoints: + pretty_endpoints.append(" - %s" % ep) + # This ensures there is a newline before the list... 
+ tpl_params['endpoints'] = "\n" + "\n".join(pretty_endpoints) + try: + tpl_params['hostname'] = socket.getfqdn() + except socket.error: + pass + try: + tpl_params['pid'] = os.getpid() + except OSError: + pass + tpl_params['platform'] = platform.platform() + tpl_params['thread_id'] = tu.get_ident() + return BANNER_TEMPLATE.substitute(BANNER_TEMPLATE.defaults, + **tpl_params) + + def run(self, display_banner=True): + """Runs the worker.""" + if display_banner: + for line in self._generate_banner().splitlines(): + LOG.info(line) self._server.start() def wait(self): From f34307590a5803ef469d5d0e458ac4503ac8915a Mon Sep 17 00:00:00 2001 From: OpenStack Proposal Bot Date: Fri, 22 Aug 2014 12:34:49 +0000 Subject: [PATCH 176/188] Updated from global requirements Change-Id: I4758cb3c316356299595ca895e13cfcfc0b88123 --- test-requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test-requirements.txt b/test-requirements.txt index 287d686a..2f5e670e 100644 --- a/test-requirements.txt +++ b/test-requirements.txt @@ -8,4 +8,4 @@ testtools>=0.9.34 zake>=0.0.26 # Apache-2.0 # docs build jobs sphinx>=1.1.2,!=1.2.0,<1.3 -oslosphinx +oslosphinx>=2.2.0.0a2 From 00f60097f15f4f8eb394d7d11c3040f2dbff337e Mon Sep 17 00:00:00 2001 From: OpenStack Proposal Bot Date: Tue, 26 Aug 2014 04:11:34 +0000 Subject: [PATCH 177/188] Updated from global requirements Change-Id: Ifeed3788115e8fca7f9a23c293f7d91118743824 --- test-requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test-requirements.txt b/test-requirements.txt index 2f5e670e..a2efaa3f 100644 --- a/test-requirements.txt +++ b/test-requirements.txt @@ -5,7 +5,7 @@ mock>=1.0 python-subunit>=0.0.18 testrepository>=0.0.18 testtools>=0.9.34 -zake>=0.0.26 # Apache-2.0 +zake>=0.1 # Apache-2.0 # docs build jobs sphinx>=1.1.2,!=1.2.0,<1.3 oslosphinx>=2.2.0.0a2 From 5c84ddad84a70cd658686e4f42604ebdd652f843 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Sun, 13 Jul 2014 21:35:21 -0700 
Subject: [PATCH 178/188] Use __qualname__ where appropriate The __qualname__ attribute simplifies the determination of an objects class name and callable name and is useful in python 3.x to be able to use since it can correctly identify names better than the python 2.x __name__ attribute can. Adds a few tests in to ensure that the usage of __qualname__ where available functions as expected. See: http://legacy.python.org/dev/peps/pep-3155/ Fixes bug 1341441 Change-Id: Ic6942cbbc8e35d65fb3ac603ff1dfc8e20c194a3 --- taskflow/tests/unit/test_utils.py | 56 ++++++++++++++++++++++++-- taskflow/utils/reflection.py | 66 +++++++++++++++++++++++++------ 2 files changed, 105 insertions(+), 17 deletions(-) diff --git a/taskflow/tests/unit/test_utils.py b/taskflow/tests/unit/test_utils.py index 22da1b8b..4518f961 100644 --- a/taskflow/tests/unit/test_utils.py +++ b/taskflow/tests/unit/test_utils.py @@ -19,6 +19,9 @@ import functools import inspect import sys +import six +import testtools + from taskflow import states from taskflow import test from taskflow.tests import utils as test_utils @@ -111,17 +114,22 @@ class GetCallableNameTest(test.TestCase): def test_method(self): name = reflection.get_callable_name(Class.method) - self.assertEqual(name, '.'.join((__name__, 'method'))) + self.assertEqual(name, '.'.join((__name__, 'Class', 'method'))) def test_instance_method(self): name = reflection.get_callable_name(Class().method) self.assertEqual(name, '.'.join((__name__, 'Class', 'method'))) def test_static_method(self): - # NOTE(imelnikov): static method are just functions, class name - # is not recorded anywhere in them. name = reflection.get_callable_name(Class.static_method) - self.assertEqual(name, '.'.join((__name__, 'static_method'))) + if six.PY3: + self.assertEqual(name, + '.'.join((__name__, 'Class', 'static_method'))) + else: + # NOTE(imelnikov): static method are just functions, class name + # is not recorded anywhere in them. 
+ self.assertEqual(name, + '.'.join((__name__, 'static_method'))) def test_class_method(self): name = reflection.get_callable_name(Class.class_method) @@ -141,6 +149,46 @@ class GetCallableNameTest(test.TestCase): '__call__'))) +# These extended/special case tests only work on python 3, due to python 2 +# being broken/incorrect with regard to these special cases... +@testtools.skipIf(not six.PY3, 'python 3.x is not currently available') +class GetCallableNameTestExtended(test.TestCase): + # Tests items in http://legacy.python.org/dev/peps/pep-3155/ + + class InnerCallableClass(object): + def __call__(self): + pass + + def test_inner_callable_class(self): + obj = self.InnerCallableClass() + name = reflection.get_callable_name(obj.__call__) + expected_name = '.'.join((__name__, 'GetCallableNameTestExtended', + 'InnerCallableClass', '__call__')) + self.assertEqual(expected_name, name) + + def test_inner_callable_function(self): + def a(): + + def b(): + pass + + return b + + name = reflection.get_callable_name(a()) + expected_name = '.'.join((__name__, 'GetCallableNameTestExtended', + 'test_inner_callable_function', '', + 'a', '', 'b')) + self.assertEqual(expected_name, name) + + def test_inner_class(self): + obj = self.InnerCallableClass() + name = reflection.get_callable_name(obj) + expected_name = '.'.join((__name__, + 'GetCallableNameTestExtended', + 'InnerCallableClass')) + self.assertEqual(expected_name, name) + + class NotifierTest(test.TestCase): def test_notify_called(self): diff --git a/taskflow/utils/reflection.py b/taskflow/utils/reflection.py index b386dfa2..bc5a3223 100644 --- a/taskflow/utils/reflection.py +++ b/taskflow/utils/reflection.py @@ -21,6 +21,16 @@ import six from taskflow.openstack.common import importutils +try: + _TYPE_TYPE = types.TypeType +except AttributeError: + _TYPE_TYPE = type + +# See: https://docs.python.org/2/library/__builtin__.html#module-__builtin__ +# and see https://docs.python.org/2/reference/executionmodel.html (and likely 
+# others)... +_BUILTIN_MODULES = ('builtins', '__builtin__', 'exceptions') + def _get_members(obj, exclude_hidden): """Yields the members of an object, filtering by hidden/not hidden.""" @@ -86,12 +96,27 @@ def get_class_name(obj, fully_qualified=True): """ if not isinstance(obj, six.class_types): obj = type(obj) - if obj.__module__ in ('builtins', '__builtin__', 'exceptions'): - return obj.__name__ - if fully_qualified: - return '.'.join((obj.__module__, obj.__name__)) + try: + built_in = obj.__module__ in _BUILTIN_MODULES + except AttributeError: + pass else: - return obj.__name__ + if built_in: + try: + return obj.__qualname__ + except AttributeError: + return obj.__name__ + pieces = [] + try: + pieces.append(obj.__qualname__) + except AttributeError: + pieces.append(obj.__name__) + if fully_qualified: + try: + pieces.insert(0, obj.__module__) + except AttributeError: + pass + return '.'.join(pieces) def get_all_class_names(obj, up_to=object): @@ -115,21 +140,36 @@ def get_callable_name(function): """ method_self = get_method_self(function) if method_self is not None: - # this is bound method + # This is a bound method. if isinstance(method_self, six.class_types): - # this is bound class method + # This is a bound class method. im_class = method_self else: im_class = type(method_self) - parts = (im_class.__module__, im_class.__name__, - function.__name__) - elif inspect.isfunction(function) or inspect.ismethod(function): - parts = (function.__module__, function.__name__) + try: + parts = (im_class.__module__, function.__qualname__) + except AttributeError: + parts = (im_class.__module__, im_class.__name__, function.__name__) + elif inspect.ismethod(function) or inspect.isfunction(function): + # This could be a function, a static method, a unbound method... 
+ try: + parts = (function.__module__, function.__qualname__) + except AttributeError: + if hasattr(function, 'im_class'): + # This is a unbound method, which exists only in python 2.x + im_class = function.im_class + parts = (im_class.__module__, + im_class.__name__, function.__name__) + else: + parts = (function.__module__, function.__name__) else: im_class = type(function) - if im_class is type: + if im_class is _TYPE_TYPE: im_class = function - parts = (im_class.__module__, im_class.__name__) + try: + parts = (im_class.__module__, im_class.__qualname__) + except AttributeError: + parts = (im_class.__module__, im_class.__name__) return '.'.join(parts) From 494003283f864efe74df61cefae64fc87e35864d Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Wed, 9 Jul 2014 21:06:22 -0700 Subject: [PATCH 179/188] Cleanup WBE example to be simpler to understand This makes the example simpler to follow and understand when it is using threads, is in one file and just uses the simpler in-memory kombu backend by default (it can still use the file transport if configured to use it). 
Change-Id: I7fe9758e3285e7f0a610482bf26322841fb22f39 --- doc/source/examples.rst | 12 ++ taskflow/examples/wbe_simple_linear.out.txt | 5 + taskflow/examples/wbe_simple_linear.py | 146 ++++++++++++++++++++ taskflow/examples/worker_based/flow.py | 61 -------- taskflow/examples/worker_based/worker.py | 58 -------- taskflow/examples/worker_based_flow.out.txt | 6 - taskflow/examples/worker_based_flow.py | 73 ---------- 7 files changed, 163 insertions(+), 198 deletions(-) create mode 100644 taskflow/examples/wbe_simple_linear.out.txt create mode 100644 taskflow/examples/wbe_simple_linear.py delete mode 100644 taskflow/examples/worker_based/flow.py delete mode 100644 taskflow/examples/worker_based/worker.py delete mode 100644 taskflow/examples/worker_based_flow.out.txt delete mode 100644 taskflow/examples/worker_based_flow.py diff --git a/doc/source/examples.rst b/doc/source/examples.rst index 86766641..9199bc11 100644 --- a/doc/source/examples.rst +++ b/doc/source/examples.rst @@ -151,3 +151,15 @@ Controlling retries using a retry controller :language: python :linenos: :lines: 16- + +Distributed execution (simple) +============================== + +.. note:: + + Full source located at :example:`wbe_simple_linear` + +.. literalinclude:: ../../taskflow/examples/wbe_simple_linear.py + :language: python + :linenos: + :lines: 16- diff --git a/taskflow/examples/wbe_simple_linear.out.txt b/taskflow/examples/wbe_simple_linear.out.txt new file mode 100644 index 00000000..1585fb96 --- /dev/null +++ b/taskflow/examples/wbe_simple_linear.out.txt @@ -0,0 +1,5 @@ +Running 2 workers. +Executing some work. +Execution finished. +Result = {"result1": 1, "result2": 666, "x": 111, "y": 222, "z": 333} +Stopping workers. diff --git a/taskflow/examples/wbe_simple_linear.py b/taskflow/examples/wbe_simple_linear.py new file mode 100644 index 00000000..e28579f8 --- /dev/null +++ b/taskflow/examples/wbe_simple_linear.py @@ -0,0 +1,146 @@ +# -*- coding: utf-8 -*- + +# Copyright (C) 2014 Yahoo! Inc. 
All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import json +import logging +import os +import sys +import tempfile +import threading + +top_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), + os.pardir, + os.pardir)) +sys.path.insert(0, top_dir) + +from taskflow import engines +from taskflow.engines.worker_based import worker +from taskflow.patterns import linear_flow as lf +from taskflow.tests import utils + +import example_utils # noqa + +# INTRO: This example walks through a miniature workflow which shows how to +# start up a number of workers (these workers will process task execution and +# reversion requests using any provided input data) and then use an engine +# that creates a set of *capable* tasks and flows (the engine can not create +# tasks that the workers are not able to run, this will end in failure) that +# those workers will run and then executes that workflow seamlessly using the +# workers to perform the actual execution. +# +# NOTE(harlowja): this example simulates the expected larger number of workers +# by using a set of threads (which in this example simulate the remote workers +# that would typically be running on other external machines). + +# A filesystem can also be used as the queue transport (useful as simple +# transport type that does not involve setting up a larger mq system). If this +# is false then the memory transport is used instead, both work in standalone +# setups. 
+USE_FILESYSTEM = False +BASE_SHARED_CONF = { + 'exchange': 'taskflow', +} +WORKERS = 2 +WORKER_CONF = { + # These are the tasks the worker can execute, they *must* be importable, + # typically this list is used to restrict what workers may execute to + # a smaller set of *allowed* tasks that are known to be safe (one would + # not want to allow all python code to be executed). + 'tasks': [ + 'taskflow.tests.utils:TaskOneArgOneReturn', + 'taskflow.tests.utils:TaskMultiArgOneReturn' + ], +} +ENGINE_CONF = { + 'engine': 'worker-based', +} + + +def run(engine_conf): + flow = lf.Flow('simple-linear').add( + utils.TaskOneArgOneReturn(provides='result1'), + utils.TaskMultiArgOneReturn(provides='result2') + ) + eng = engines.load(flow, + store=dict(x=111, y=222, z=333), + engine_conf=engine_conf) + eng.run() + return eng.storage.fetch_all() + + +if __name__ == "__main__": + logging.basicConfig(level=logging.ERROR) + + # Setup our transport configuration and merge it into the worker and + # engine configuration so that both of those use it correctly. + shared_conf = dict(BASE_SHARED_CONF) + + tmp_path = None + if USE_FILESYSTEM: + tmp_path = tempfile.mkdtemp(prefix='wbe-example-') + shared_conf.update({ + 'transport': 'filesystem', + 'transport_options': { + 'data_folder_in': tmp_path, + 'data_folder_out': tmp_path, + 'polling_interval': 0.1, + }, + }) + else: + shared_conf.update({ + 'transport': 'memory', + 'transport_options': { + 'polling_interval': 0.1, + }, + }) + worker_conf = dict(WORKER_CONF) + worker_conf.update(shared_conf) + engine_conf = dict(ENGINE_CONF) + engine_conf.update(shared_conf) + workers = [] + worker_topics = [] + + try: + # Create a set of workers to simulate actual remote workers. + print('Running %s workers.' 
% (WORKERS)) + for i in range(0, WORKERS): + worker_conf['topic'] = 'worker-%s' % (i + 1) + worker_topics.append(worker_conf['topic']) + w = worker.Worker(**worker_conf) + runner = threading.Thread(target=w.run) + runner.daemon = True + runner.start() + w.wait() + workers.append((runner, w.stop)) + + # Now use those workers to do something. + print('Executing some work.') + engine_conf['topics'] = worker_topics + result = run(engine_conf) + print('Execution finished.') + # This is done so that the test examples can work correctly + # even when the keys change order (which will happen in various + # python versions). + print("Result = %s" % json.dumps(result, sort_keys=True)) + finally: + # And cleanup. + print('Stopping workers.') + while workers: + r, stopper = workers.pop() + stopper() + r.join() + if tmp_path: + example_utils.rm_path(tmp_path) diff --git a/taskflow/examples/worker_based/flow.py b/taskflow/examples/worker_based/flow.py deleted file mode 100644 index 50529a81..00000000 --- a/taskflow/examples/worker_based/flow.py +++ /dev/null @@ -1,61 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright (C) 2014 Yahoo! Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import json -import logging -import sys - -import taskflow.engines -from taskflow.patterns import linear_flow as lf -from taskflow.tests import utils - -LOG = logging.getLogger(__name__) - - -if __name__ == "__main__": - logging.basicConfig(level=logging.ERROR) - engine_conf = { - 'engine': 'worker-based', - 'exchange': 'taskflow', - 'topics': ['test-topic'], - } - - # parse command line - try: - arg = sys.argv[1] - except IndexError: - pass - else: - try: - cfg = json.loads(arg) - except ValueError: - engine_conf.update(url=arg) - else: - engine_conf.update(cfg) - finally: - LOG.debug("Worker configuration: %s\n" % - json.dumps(engine_conf, sort_keys=True, indent=4)) - - # create and run flow - flow = lf.Flow('simple-linear').add( - utils.TaskOneArgOneReturn(provides='result1'), - utils.TaskMultiArgOneReturn(provides='result2') - ) - eng = taskflow.engines.load(flow, - store=dict(x=111, y=222, z=333), - engine_conf=engine_conf) - eng.run() - print(json.dumps(eng.storage.fetch_all(), sort_keys=True)) diff --git a/taskflow/examples/worker_based/worker.py b/taskflow/examples/worker_based/worker.py deleted file mode 100644 index 405813c7..00000000 --- a/taskflow/examples/worker_based/worker.py +++ /dev/null @@ -1,58 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright (C) 2014 Yahoo! Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import json -import logging -import sys - -from taskflow.engines.worker_based import worker as w - -LOG = logging.getLogger(__name__) - - -if __name__ == "__main__": - logging.basicConfig(level=logging.ERROR) - worker_conf = { - 'exchange': 'taskflow', - 'topic': 'test-topic', - 'tasks': [ - 'taskflow.tests.utils:TaskOneArgOneReturn', - 'taskflow.tests.utils:TaskMultiArgOneReturn' - ] - } - - # parse command line - try: - arg = sys.argv[1] - except IndexError: - pass - else: - try: - cfg = json.loads(arg) - except ValueError: - worker_conf.update(url=arg) - else: - worker_conf.update(cfg) - finally: - LOG.debug("Worker configuration: %s\n" % - json.dumps(worker_conf, sort_keys=True, indent=4)) - - # run worker - worker = w.Worker(**worker_conf) - try: - worker.run() - except KeyboardInterrupt: - pass diff --git a/taskflow/examples/worker_based_flow.out.txt b/taskflow/examples/worker_based_flow.out.txt deleted file mode 100644 index 7b97ff93..00000000 --- a/taskflow/examples/worker_based_flow.out.txt +++ /dev/null @@ -1,6 +0,0 @@ -Run worker. -Run flow. -{"result1": 1, "result2": 666, "x": 111, "y": 222, "z": 333} - -Flow finished. -Stop worker. diff --git a/taskflow/examples/worker_based_flow.py b/taskflow/examples/worker_based_flow.py deleted file mode 100644 index ef984ee9..00000000 --- a/taskflow/examples/worker_based_flow.py +++ /dev/null @@ -1,73 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright (C) 2014 Yahoo! Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. - -import json -import os -import subprocess -import sys -import tempfile - -self_dir = os.path.abspath(os.path.dirname(__file__)) -sys.path.insert(0, self_dir) - -import example_utils # noqa - - -def _path_to(name): - return os.path.abspath(os.path.join(os.path.dirname(__file__), - 'worker_based', name)) - - -def run_test(name, config): - cmd = [sys.executable, _path_to(name), config] - process = subprocess.Popen(cmd, stdin=None, stdout=subprocess.PIPE, - stderr=sys.stderr) - return process, cmd - - -def main(): - tmp_path = None - try: - tmp_path = tempfile.mkdtemp(prefix='worker-based-example-') - config = json.dumps({ - 'transport': 'filesystem', - 'transport_options': { - 'data_folder_in': tmp_path, - 'data_folder_out': tmp_path - } - }) - - print('Run worker.') - worker_process, _ = run_test('worker.py', config) - - print('Run flow.') - flow_process, flow_cmd = run_test('flow.py', config) - stdout, _ = flow_process.communicate() - rc = flow_process.returncode - if rc != 0: - raise RuntimeError("Could not run %s [%s]" % (flow_cmd, rc)) - print(stdout.decode()) - print('Flow finished.') - - print('Stop worker.') - worker_process.terminate() - - finally: - if tmp_path is not None: - example_utils.rm_path(tmp_path) - -if __name__ == '__main__': - main() From 1eabee4c8b2faee518b726e08567b1ff9444505b Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Mon, 1 Sep 2014 09:48:21 -0700 Subject: [PATCH 180/188] Make version.py handle pbr not being installed Since pbr is now only a dev/build-time requirement of taskflow and not a run-time or test-time requirement we should not have a version file that explicitly requires pbr to exist to function correctly. To accommodate when pbr is not found use the pkg_resources function that can provide the installed version instead so that we still provide back a valid version. 
Change-Id: Id191d2b38def54b95a3467b4023a9540c284660d --- taskflow/version.py | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/taskflow/version.py b/taskflow/version.py index 1777ba62..7f7fcd9a 100644 --- a/taskflow/version.py +++ b/taskflow/version.py @@ -14,17 +14,19 @@ # License for the specific language governing permissions and limitations # under the License. -from pbr import version as pbr_version +import pkg_resources TASK_VENDOR = "OpenStack Foundation" TASK_PRODUCT = "OpenStack TaskFlow" TASK_PACKAGE = None # OS distro package version suffix -version_info = pbr_version.VersionInfo('taskflow') - - -def version_string(): - return version_info.version_string() +try: + from pbr import version as pbr_version + _version_info = pbr_version.VersionInfo('taskflow') + version_string = _version_info.version_string +except ImportError: + _version_info = pkg_resources.get_distribution('taskflow') + version_string = lambda: _version_info.version def version_string_with_package(): From e720bdb8afe993deaa5fd0426c002a04b4858ebf Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Wed, 13 Aug 2014 16:37:56 -0700 Subject: [PATCH 181/188] Use explicit WBE request state transitions Instead of having an implicit state machine for a requests lifecycle move toward an explicit state model and transition set that is validated and transitioned in a more easy to understand/reason about manner. This also fixes a bug that was found due to a response validation not taking into account a transition that was found due to this stricter transition checking. Includes a few tiny related/affected commits: * Remove testing of request repr() and str() as these types of tests are not useful and we removed the repr() version of the request message as the base classes is good enough. * Raise and capture a better exception and save its associated failure object when a request has expired (this gives expired requests better failure objects and associated details). 
Fixes bug 1356658 Partially fixes bug 1357117 Change-Id: Ie1386cca13a2da7265e22447b4c111a0a0074201 --- taskflow/engines/worker_based/executor.py | 52 ++++++---- taskflow/engines/worker_based/protocol.py | 98 ++++++++++++++++--- .../tests/unit/worker_based/test_executor.py | 44 ++++++--- .../tests/unit/worker_based/test_protocol.py | 30 +++--- 4 files changed, 168 insertions(+), 56 deletions(-) diff --git a/taskflow/engines/worker_based/executor.py b/taskflow/engines/worker_based/executor.py index f5afc72e..3b5a0355 100644 --- a/taskflow/engines/worker_based/executor.py +++ b/taskflow/engines/worker_based/executor.py @@ -23,6 +23,7 @@ from taskflow.engines.worker_based import cache from taskflow.engines.worker_based import protocol as pr from taskflow.engines.worker_based import proxy from taskflow import exceptions as exc +from taskflow.openstack.common import timeutils from taskflow.types import timing as tt from taskflow.utils import async_utils from taskflow.utils import misc @@ -109,8 +110,8 @@ class WorkerTaskExecutor(executor.TaskExecutorBase): # publish waiting requests for request in self._requests_cache.get_waiting_requests(tasks): - request.set_pending() - self._publish_request(request, topic) + if request.transition_log_error(pr.PENDING, logger=LOG): + self._publish_request(request, topic) def _process_response(self, response, message): """Process response from remote side.""" @@ -120,20 +121,23 @@ class WorkerTaskExecutor(executor.TaskExecutorBase): except KeyError: LOG.warning("The 'correlation_id' message property is missing.") else: - LOG.debug("Task uuid: '%s'", task_uuid) request = self._requests_cache.get(task_uuid) if request is not None: response = pr.Response.from_dict(response) if response.state == pr.RUNNING: - request.set_running() + request.transition_log_error(pr.RUNNING, logger=LOG) elif response.state == pr.PROGRESS: request.on_progress(**response.data) elif response.state in (pr.FAILURE, pr.SUCCESS): - # NOTE(imelnikov): request should 
not be in cache when - # another thread can see its result and schedule another - # request with same uuid; so we remove it, then set result - del self._requests_cache[request.uuid] - request.set_result(**response.data) + moved = request.transition_log_error(response.state, + logger=LOG) + if moved: + # NOTE(imelnikov): request should not be in the + # cache when another thread can see its result and + # schedule another request with the same uuid; so + # we remove it, then set the result... + del self._requests_cache[request.uuid] + request.set_result(**response.data) else: LOG.warning("Unexpected response status: '%s'", response.state) @@ -147,10 +151,21 @@ class WorkerTaskExecutor(executor.TaskExecutorBase): When request has expired it is removed from the requests cache and the `RequestTimeout` exception is set as a request result. """ - LOG.debug("Request '%r' has expired.", request) - LOG.debug("The '%r' request has expired.", request) - request.set_result(misc.Failure.from_exception( - exc.RequestTimeout("The '%r' request has expired" % request))) + if request.transition_log_error(pr.FAILURE, logger=LOG): + # Raise an exception (and then catch it) so we get a nice + # traceback that the request will get instead of it getting + # just an exception with no traceback... 
+ try: + request_age = timeutils.delta_seconds(request.created_on, + timeutils.utcnow()) + raise exc.RequestTimeout( + "Request '%s' has expired after waiting for %0.2f" + " seconds for it to transition out of (%s) states" + % (request, request_age, ", ".join(pr.WAITING_STATES))) + except exc.RequestTimeout: + with misc.capture_failure() as fail: + LOG.debug(fail.exception_str) + request.set_result(fail) def _on_wait(self): """This function is called cyclically between draining events.""" @@ -169,9 +184,9 @@ class WorkerTaskExecutor(executor.TaskExecutorBase): # before putting it into the requests cache to prevent the notify # processing thread get list of waiting requests and publish it # before it is published here, so it wouldn't be published twice. - request.set_pending() - self._requests_cache[request.uuid] = request - self._publish_request(request, topic) + if request.transition_log_error(pr.PENDING, logger=LOG): + self._requests_cache[request.uuid] = request + self._publish_request(request, topic) else: self._requests_cache[request.uuid] = request @@ -187,8 +202,9 @@ class WorkerTaskExecutor(executor.TaskExecutorBase): except Exception: with misc.capture_failure() as failure: LOG.exception("Failed to submit the '%s' request.", request) - del self._requests_cache[request.uuid] - request.set_result(failure) + if request.transition_log_error(pr.FAILURE, logger=LOG): + del self._requests_cache[request.uuid] + request.set_result(failure) def _notify_topics(self): """Cyclically called to publish notify message to each topic.""" diff --git a/taskflow/engines/worker_based/protocol.py b/taskflow/engines/worker_based/protocol.py index ea994272..334c1d93 100644 --- a/taskflow/engines/worker_based/protocol.py +++ b/taskflow/engines/worker_based/protocol.py @@ -15,6 +15,8 @@ # under the License. 
import abc +import logging +import threading from concurrent import futures import jsonschema @@ -23,7 +25,9 @@ import six from taskflow.engines.action_engine import executor from taskflow import exceptions as excp +from taskflow.openstack.common import timeutils from taskflow.types import timing as tt +from taskflow.utils import lock_utils from taskflow.utils import misc from taskflow.utils import reflection @@ -36,7 +40,34 @@ SUCCESS = 'SUCCESS' FAILURE = 'FAILURE' PROGRESS = 'PROGRESS' +# During these states the expiry is active (once out of these states the expiry +# no longer matters, since we have no way of knowing how long a task will run +# for). +WAITING_STATES = (WAITING, PENDING) + _ALL_STATES = (WAITING, PENDING, RUNNING, SUCCESS, FAILURE, PROGRESS) +_STOP_TIMER_STATES = (RUNNING, SUCCESS, FAILURE) + +# Transitions that a request state can go through. +_ALLOWED_TRANSITIONS = ( + # Used when a executor starts to publish a request to a selected worker. + (WAITING, PENDING), + # When a request expires (isn't able to be processed by any worker). + (WAITING, FAILURE), + # Worker has started executing a request. + (PENDING, RUNNING), + # Worker failed to construct/process a request to run (either the worker + # did not transition to RUNNING in the given timeout or the worker itself + # had some type of failure before RUNNING started). + # + # Also used by the executor if the request was attempted to be published + # but that did publishing process did not work out. + (PENDING, FAILURE), + # Execution failed due to some type of remote failure. + (RUNNING, FAILURE), + # Execution succeeded & has completed. + (RUNNING, SUCCESS), +) # Remote task actions. EXECUTE = 'execute' @@ -73,6 +104,8 @@ _SCHEMA_TYPES = { 'array': (list, tuple), } +LOG = logging.getLogger(__name__) + @six.add_metaclass(abc.ABCMeta) class Message(object): @@ -143,8 +176,10 @@ class Request(Message): """Represents request with execution results. 
Every request is created in the WAITING state and is expired within the - given timeout. + given timeout if it does not transition out of the (WAITING, PENDING) + states. """ + TYPE = REQUEST _SCHEMA = { "type": "object", @@ -196,11 +231,10 @@ class Request(Message): self._kwargs = kwargs self._watch = tt.StopWatch(duration=timeout).start() self._state = WAITING + self._lock = threading.Lock() + self._created_on = timeutils.utcnow() self.result = futures.Future() - def __repr__(self): - return "%s:%s" % (self._task_cls, self._action) - @property def uuid(self): return self._uuid @@ -213,6 +247,10 @@ class Request(Message): def state(self): return self._state + @property + def created_on(self): + return self._created_on + @property def expired(self): """Check if request has expired. @@ -224,7 +262,7 @@ class Request(Message): state for more then the given timeout (it is not considered to be expired in any other state). """ - if self._state in (WAITING, PENDING): + if self._state in WAITING_STATES: return self._watch.expired() return False @@ -254,16 +292,43 @@ class Request(Message): def set_result(self, result): self.result.set_result((self._task, self._event, result)) - def set_pending(self): - self._state = PENDING - - def set_running(self): - self._state = RUNNING - self._watch.stop() - def on_progress(self, event_data, progress): self._progress_callback(self._task, event_data, progress) + def transition_log_error(self, new_state, logger=None): + if logger is None: + logger = LOG + moved = False + try: + moved = self.transition(new_state) + except excp.InvalidState: + logger.warn("Failed to transition '%s' to %s state.", self, + new_state, exc_info=True) + return moved + + @lock_utils.locked + def transition(self, new_state): + """Transitions the request to a new state. + + If transition was performed, it returns True. If transition + should was ignored, it returns False. 
If transition is not + valid (and will not be performed), it raises an InvalidState + exception. + """ + old_state = self._state + if old_state == new_state: + return False + pair = (old_state, new_state) + if pair not in _ALLOWED_TRANSITIONS: + raise excp.InvalidState("Request transition from %s to %s is" + " not allowed" % pair) + if new_state in _STOP_TIMER_STATES: + self._watch.stop() + self._state = new_state + LOG.debug("Transitioned '%s' from %s state to %s state", self, + old_state, new_state) + return True + @classmethod def validate(cls, data): try: @@ -292,6 +357,9 @@ class Response(Message): { "$ref": "#/definitions/completion", }, + { + "$ref": "#/definitions/empty", + }, ], }, }, @@ -311,6 +379,12 @@ class Response(Message): "required": ["progress", 'event_data'], "additionalProperties": False, }, + # Used when sending *only* request state changes (and no data is + # expected). + "empty": { + "type": "object", + "additionalProperties": False, + }, "completion": { "type": "object", "properties": { diff --git a/taskflow/tests/unit/worker_based/test_executor.py b/taskflow/tests/unit/worker_based/test_executor.py index f837394c..aa236fcf 100644 --- a/taskflow/tests/unit/worker_based/test_executor.py +++ b/taskflow/tests/unit/worker_based/test_executor.py @@ -22,6 +22,7 @@ import mock from taskflow.engines.worker_based import executor from taskflow.engines.worker_based import protocol as pr +from taskflow.openstack.common import timeutils from taskflow import test from taskflow.tests import utils from taskflow.utils import misc @@ -95,8 +96,10 @@ class TestWorkerTaskExecutor(test.MockTestCase): ex._requests_cache[self.task_uuid] = self.request_inst_mock ex._process_response(response.to_dict(), self.message_mock) - self.assertEqual(self.request_inst_mock.mock_calls, - [mock.call.set_running()]) + expected_calls = [ + mock.call.transition_log_error(pr.RUNNING, logger=mock.ANY), + ] + self.assertEqual(expected_calls, self.request_inst_mock.mock_calls) def 
test_on_message_response_state_progress(self): response = pr.Response(pr.PROGRESS, progress=1.0) @@ -116,9 +119,11 @@ class TestWorkerTaskExecutor(test.MockTestCase): ex._process_response(response.to_dict(), self.message_mock) self.assertEqual(len(ex._requests_cache), 0) - self.assertEqual(self.request_inst_mock.mock_calls, [ + expected_calls = [ + mock.call.transition_log_error(pr.FAILURE, logger=mock.ANY), mock.call.set_result(result=utils.FailureMatcher(failure)) - ]) + ] + self.assertEqual(expected_calls, self.request_inst_mock.mock_calls) def test_on_message_response_state_success(self): response = pr.Response(pr.SUCCESS, result=self.task_result, @@ -127,9 +132,11 @@ class TestWorkerTaskExecutor(test.MockTestCase): ex._requests_cache[self.task_uuid] = self.request_inst_mock ex._process_response(response.to_dict(), self.message_mock) - self.assertEqual(self.request_inst_mock.mock_calls, - [mock.call.set_result(result=self.task_result, - event='executed')]) + expected_calls = [ + mock.call.transition_log_error(pr.SUCCESS, logger=mock.ANY), + mock.call.set_result(result=self.task_result, event='executed') + ] + self.assertEqual(expected_calls, self.request_inst_mock.mock_calls) def test_on_message_response_unknown_state(self): response = pr.Response(state='') @@ -166,7 +173,13 @@ class TestWorkerTaskExecutor(test.MockTestCase): self.assertEqual(len(ex._requests_cache), 1) def test_on_wait_task_expired(self): + now = timeutils.utcnow() self.request_inst_mock.expired = True + self.request_inst_mock.created_on = now + timeutils.set_time_override(now) + self.addCleanup(timeutils.clear_time_override) + timeutils.advance_time_seconds(120) + ex = self.executor() ex._requests_cache[self.task_uuid] = self.request_inst_mock @@ -199,13 +212,14 @@ class TestWorkerTaskExecutor(test.MockTestCase): expected_calls = [ mock.call.Request(self.task, self.task_uuid, 'execute', self.task_args, None, self.timeout), - mock.call.request.set_pending(), + 
mock.call.request.transition_log_error(pr.PENDING, + logger=mock.ANY), mock.call.proxy.publish(msg=self.request_inst_mock, routing_key=self.executor_topic, reply_to=self.executor_uuid, correlation_id=self.task_uuid) ] - self.assertEqual(self.master_mock.mock_calls, expected_calls) + self.assertEqual(expected_calls, self.master_mock.mock_calls) def test_revert_task(self): self.message_mock.properties['type'] = pr.NOTIFY @@ -220,13 +234,14 @@ class TestWorkerTaskExecutor(test.MockTestCase): self.task_args, None, self.timeout, failures=self.task_failures, result=self.task_result), - mock.call.request.set_pending(), + mock.call.request.transition_log_error(pr.PENDING, + logger=mock.ANY), mock.call.proxy.publish(msg=self.request_inst_mock, routing_key=self.executor_topic, reply_to=self.executor_uuid, correlation_id=self.task_uuid) ] - self.assertEqual(self.master_mock.mock_calls, expected_calls) + self.assertEqual(expected_calls, self.master_mock.mock_calls) def test_execute_task_topic_not_found(self): workers_info = {self.executor_topic: ['']} @@ -250,14 +265,17 @@ class TestWorkerTaskExecutor(test.MockTestCase): expected_calls = [ mock.call.Request(self.task, self.task_uuid, 'execute', self.task_args, None, self.timeout), - mock.call.request.set_pending(), + mock.call.request.transition_log_error(pr.PENDING, + logger=mock.ANY), mock.call.proxy.publish(msg=self.request_inst_mock, routing_key=self.executor_topic, reply_to=self.executor_uuid, correlation_id=self.task_uuid), + mock.call.request.transition_log_error(pr.FAILURE, + logger=mock.ANY), mock.call.request.set_result(mock.ANY) ] - self.assertEqual(self.master_mock.mock_calls, expected_calls) + self.assertEqual(expected_calls, self.master_mock.mock_calls) def test_wait_for_any(self): fs = [futures.Future(), futures.Future()] diff --git a/taskflow/tests/unit/worker_based/test_protocol.py b/taskflow/tests/unit/worker_based/test_protocol.py index f94d03d4..7d51da31 100644 --- 
a/taskflow/tests/unit/worker_based/test_protocol.py +++ b/taskflow/tests/unit/worker_based/test_protocol.py @@ -115,6 +115,18 @@ class TestProtocol(test.TestCase): to_dict.update(kwargs) return to_dict + def test_request_transitions(self): + request = self.request() + self.assertEqual(pr.WAITING, request.state) + self.assertIn(request.state, pr.WAITING_STATES) + self.assertRaises(excp.InvalidState, request.transition, pr.SUCCESS) + self.assertFalse(request.transition(pr.WAITING)) + self.assertTrue(request.transition(pr.PENDING)) + self.assertTrue(request.transition(pr.RUNNING)) + self.assertTrue(request.transition(pr.SUCCESS)) + for s in (pr.PENDING, pr.WAITING): + self.assertRaises(excp.InvalidState, request.transition, s) + def test_creation(self): request = self.request() self.assertEqual(request.uuid, self.task_uuid) @@ -122,15 +134,6 @@ class TestProtocol(test.TestCase): self.assertIsInstance(request.result, futures.Future) self.assertFalse(request.result.done()) - def test_str(self): - request = self.request() - self.assertEqual(str(request), - " %s" % self.request_to_dict()) - - def test_repr(self): - expected = '%s:%s' % (self.task.name, self.task_action) - self.assertEqual(repr(self.request()), expected) - def test_to_dict_default(self): self.assertEqual(self.request().to_dict(), self.request_to_dict()) @@ -156,19 +159,20 @@ class TestProtocol(test.TestCase): @mock.patch('taskflow.engines.worker_based.protocol.misc.wallclock') def test_pending_not_expired(self, mocked_wallclock): - mocked_wallclock.side_effect = [1, self.timeout] + mocked_wallclock.side_effect = [0, self.timeout - 1] self.assertFalse(self.request().expired) @mock.patch('taskflow.engines.worker_based.protocol.misc.wallclock') def test_pending_expired(self, mocked_wallclock): - mocked_wallclock.side_effect = [1, self.timeout + 2] + mocked_wallclock.side_effect = [0, self.timeout + 2] self.assertTrue(self.request().expired) @mock.patch('taskflow.engines.worker_based.protocol.misc.wallclock') 
def test_running_not_expired(self, mocked_wallclock): - mocked_wallclock.side_effect = [1, self.timeout + 2] + mocked_wallclock.side_effect = [0, self.timeout + 2] request = self.request() - request.set_running() + request.transition(pr.PENDING) + request.transition(pr.RUNNING) self.assertFalse(request.expired) def test_set_result(self): From bfaa109821b3f849ce4f5290d7a8248531aa1042 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Sun, 24 Aug 2014 21:11:42 -0700 Subject: [PATCH 182/188] Tweak engine iteration 'close-up shop' runtime path 1. Have the runner yield the final set of failures instead of raising them, this allows the same yield syntax to be used for all exit points that the runner run_iter() produces and now raise failures from the main engine run loop to match this change. 2. Use a context manager instead of try/finally to start and stop the action engines task executor (teenie niceness...) 3. When the engine run_iter() is used and the generator that is returned is closed, instead of breaking from the run loop, which can leave running tasks incomplete instead continue running and signal to the runner that the engine has suspended itself. This ensures that the running atoms are not lost when the generator from run_iter() is closed (for whatever reason) before finishing. Also adds a bunch of useful tests that directly test the runner instead of the indirect testing that we were doing before. 
Fixes bug 1361013 Change-Id: I1b598e26f0b3877c8f7004f87bacdb7f5e9c9897 --- taskflow/engines/action_engine/engine.py | 62 ++++--- taskflow/engines/action_engine/runner.py | 20 +-- .../tests/unit/test_action_engine_compile.py | 68 +++---- .../tests/unit/test_action_engine_runner.py | 169 ++++++++++++++++++ taskflow/tests/utils.py | 14 ++ 5 files changed, 262 insertions(+), 71 deletions(-) create mode 100644 taskflow/tests/unit/test_action_engine_runner.py diff --git a/taskflow/engines/action_engine/engine.py b/taskflow/engines/action_engine/engine.py index ca8a80a6..a5f587fd 100644 --- a/taskflow/engines/action_engine/engine.py +++ b/taskflow/engines/action_engine/engine.py @@ -14,6 +14,7 @@ # License for the specific language governing permissions and limitations # under the License. +import contextlib import threading from taskflow.engines.action_engine import compiler @@ -30,6 +31,16 @@ from taskflow.utils import misc from taskflow.utils import reflection +@contextlib.contextmanager +def _start_stop(executor): + # A teenie helper context manager to safely start/stop a executor... + executor.start() + try: + yield executor + finally: + executor.stop() + + class ActionEngine(base.EngineBase): """Generic action-based engine. 
@@ -110,31 +121,38 @@ class ActionEngine(base.EngineBase): """ self.compile() self.prepare() - self._task_executor.start() - state = None runner = self._runtime.runner - try: + last_state = None + with _start_stop(self._task_executor): self._change_state(states.RUNNING) - for state in runner.run_iter(timeout=timeout): - try: - try_suspend = yield state - except GeneratorExit: - break - else: - if try_suspend: + try: + closed = False + for (last_state, failures) in runner.run_iter(timeout=timeout): + if failures: + misc.Failure.reraise_if_any(failures) + if closed: + continue + try: + try_suspend = yield last_state + except GeneratorExit: + # The generator was closed, attempt to suspend and + # continue looping until we have cleanly closed up + # shop... + closed = True self.suspend() - except Exception: - with excutils.save_and_reraise_exception(): - self._change_state(states.FAILURE) - else: - ignorable_states = getattr(runner, 'ignorable_states', []) - if state and state not in ignorable_states: - self._change_state(state) - if state != states.SUSPENDED and state != states.SUCCESS: - failures = self.storage.get_failures() - misc.Failure.reraise_if_any(failures.values()) - finally: - self._task_executor.stop() + else: + if try_suspend: + self.suspend() + except Exception: + with excutils.save_and_reraise_exception(): + self._change_state(states.FAILURE) + else: + ignorable_states = getattr(runner, 'ignorable_states', []) + if last_state and last_state not in ignorable_states: + self._change_state(last_state) + if last_state not in [states.SUSPENDED, states.SUCCESS]: + failures = self.storage.get_failures() + misc.Failure.reraise_if_any(failures.values()) def _change_state(self, state): with self._state_lock: diff --git a/taskflow/engines/action_engine/runner.py b/taskflow/engines/action_engine/runner.py index 0120bd69..b48f66a7 100644 --- a/taskflow/engines/action_engine/runner.py +++ b/taskflow/engines/action_engine/runner.py @@ -72,12 +72,12 @@ class 
Runner(object): timeout = _WAITING_TIMEOUT # Prepare flow to be resumed - yield st.RESUMING + yield (st.RESUMING, []) next_nodes = self._completer.resume() next_nodes.update(self._analyzer.get_next_nodes()) # Schedule nodes to be worked on - yield st.SCHEDULING + yield (st.SCHEDULING, []) if self.is_running(): not_done, failures = self._scheduler.schedule(next_nodes) else: @@ -90,7 +90,7 @@ class Runner(object): # preempt those tasks (maybe in the future we will be better able to do # this). while not_done: - yield st.WAITING + yield (st.WAITING, []) # TODO(harlowja): maybe we should start doing 'yield from' this # call sometime in the future, or equivalent that will work in @@ -101,7 +101,7 @@ class Runner(object): # failures). If failures occurred just continue processing what # is running (so that we don't leave it abandoned) but do not # schedule anything new. - yield st.ANALYZING + yield (st.ANALYZING, []) next_nodes = set() for future in done: try: @@ -119,7 +119,7 @@ class Runner(object): else: next_nodes.update(more_nodes) if next_nodes and not failures and self.is_running(): - yield st.SCHEDULING + yield (st.SCHEDULING, []) # Recheck incase someone suspended it. 
if self.is_running(): more_not_done, failures = self._scheduler.schedule( @@ -127,10 +127,10 @@ class Runner(object): not_done.update(more_not_done) if failures: - misc.Failure.reraise_if_any(failures) - if self._analyzer.get_next_nodes(): - yield st.SUSPENDED + yield (st.FAILURE, failures) + elif self._analyzer.get_next_nodes(): + yield (st.SUSPENDED, []) elif self._analyzer.is_success(): - yield st.SUCCESS + yield (st.SUCCESS, []) else: - yield st.REVERTED + yield (st.REVERTED, []) diff --git a/taskflow/tests/unit/test_action_engine_compile.py b/taskflow/tests/unit/test_action_engine_compile.py index ef268d9d..7207468e 100644 --- a/taskflow/tests/unit/test_action_engine_compile.py +++ b/taskflow/tests/unit/test_action_engine_compile.py @@ -14,8 +14,6 @@ # License for the specific language governing permissions and limitations # under the License. -import string - from taskflow.engines.action_engine import compiler from taskflow import exceptions as exc from taskflow.patterns import graph_flow as gf @@ -23,20 +21,12 @@ from taskflow.patterns import linear_flow as lf from taskflow.patterns import unordered_flow as uf from taskflow import retry from taskflow import test -from taskflow.tests import utils as t_utils - - -def _make_many(amount): - assert amount <= len(string.ascii_lowercase), 'Not enough letters' - tasks = [] - for i in range(0, amount): - tasks.append(t_utils.DummyTask(name=string.ascii_lowercase[i])) - return tasks +from taskflow.tests import utils as test_utils class PatternCompileTest(test.TestCase): def test_task(self): - task = t_utils.DummyTask(name='a') + task = test_utils.DummyTask(name='a') compilation = compiler.PatternCompiler().compile(task) g = compilation.execution_graph self.assertEqual(list(g.nodes()), [task]) @@ -54,7 +44,7 @@ class PatternCompileTest(test.TestCase): compiler.PatternCompiler().compile, 42) def test_linear(self): - a, b, c, d = _make_many(4) + a, b, c, d = test_utils.make_many(4) flo = lf.Flow("test") flo.add(a, b, c) 
sflo = lf.Flow("sub-test") @@ -74,7 +64,7 @@ class PatternCompileTest(test.TestCase): self.assertEqual([a], list(g.no_predecessors_iter())) def test_invalid(self): - a, b, c = _make_many(3) + a, b, c = test_utils.make_many(3) flo = lf.Flow("test") flo.add(a, b, c) flo.add(flo) @@ -82,7 +72,7 @@ class PatternCompileTest(test.TestCase): compiler.PatternCompiler().compile, flo) def test_unordered(self): - a, b, c, d = _make_many(4) + a, b, c, d = test_utils.make_many(4) flo = uf.Flow("test") flo.add(a, b, c, d) compilation = compiler.PatternCompiler().compile(flo) @@ -95,7 +85,7 @@ class PatternCompileTest(test.TestCase): set(g.no_predecessors_iter())) def test_linear_nested(self): - a, b, c, d = _make_many(4) + a, b, c, d = test_utils.make_many(4) flo = lf.Flow("test") flo.add(a, b) flo2 = uf.Flow("test2") @@ -119,7 +109,7 @@ class PatternCompileTest(test.TestCase): self.assertTrue(g.has_edge(b, d)) def test_unordered_nested(self): - a, b, c, d = _make_many(4) + a, b, c, d = test_utils.make_many(4) flo = uf.Flow("test") flo.add(a, b) flo2 = lf.Flow("test2") @@ -142,7 +132,7 @@ class PatternCompileTest(test.TestCase): self.assertEqual(1, lb.number_of_edges()) def test_unordered_nested_in_linear(self): - a, b, c, d = _make_many(4) + a, b, c, d = test_utils.make_many(4) flo = lf.Flow('lt').add( a, uf.Flow('ut').add(b, c), @@ -159,7 +149,7 @@ class PatternCompileTest(test.TestCase): ]) def test_graph(self): - a, b, c, d = _make_many(4) + a, b, c, d = test_utils.make_many(4) flo = gf.Flow("test") flo.add(a, b, c, d) @@ -169,7 +159,7 @@ class PatternCompileTest(test.TestCase): self.assertEqual(0, g.number_of_edges()) def test_graph_nested(self): - a, b, c, d, e, f, g = _make_many(7) + a, b, c, d, e, f, g = test_utils.make_many(7) flo = gf.Flow("test") flo.add(a, b, c, d) @@ -186,7 +176,7 @@ class PatternCompileTest(test.TestCase): ]) def test_graph_nested_graph(self): - a, b, c, d, e, f, g = _make_many(7) + a, b, c, d, e, f, g = test_utils.make_many(7) flo = 
gf.Flow("test") flo.add(a, b, c, d) @@ -200,7 +190,7 @@ class PatternCompileTest(test.TestCase): self.assertEqual(0, g.number_of_edges()) def test_graph_links(self): - a, b, c, d = _make_many(4) + a, b, c, d = test_utils.make_many(4) flo = gf.Flow("test") flo.add(a, b, c, d) flo.link(a, b) @@ -219,8 +209,8 @@ class PatternCompileTest(test.TestCase): self.assertItemsEqual([d], g.no_successors_iter()) def test_graph_dependencies(self): - a = t_utils.ProvidesRequiresTask('a', provides=['x'], requires=[]) - b = t_utils.ProvidesRequiresTask('b', provides=[], requires=['x']) + a = test_utils.ProvidesRequiresTask('a', provides=['x'], requires=[]) + b = test_utils.ProvidesRequiresTask('b', provides=[], requires=['x']) flo = gf.Flow("test").add(a, b) compilation = compiler.PatternCompiler().compile(flo) @@ -233,9 +223,9 @@ class PatternCompileTest(test.TestCase): self.assertItemsEqual([b], g.no_successors_iter()) def test_graph_nested_requires(self): - a = t_utils.ProvidesRequiresTask('a', provides=['x'], requires=[]) - b = t_utils.ProvidesRequiresTask('b', provides=[], requires=[]) - c = t_utils.ProvidesRequiresTask('c', provides=[], requires=['x']) + a = test_utils.ProvidesRequiresTask('a', provides=['x'], requires=[]) + b = test_utils.ProvidesRequiresTask('b', provides=[], requires=[]) + c = test_utils.ProvidesRequiresTask('c', provides=[], requires=['x']) flo = gf.Flow("test").add( a, lf.Flow("test2").add(b, c) @@ -252,9 +242,9 @@ class PatternCompileTest(test.TestCase): self.assertItemsEqual([c], g.no_successors_iter()) def test_graph_nested_provides(self): - a = t_utils.ProvidesRequiresTask('a', provides=[], requires=['x']) - b = t_utils.ProvidesRequiresTask('b', provides=['x'], requires=[]) - c = t_utils.ProvidesRequiresTask('c', provides=[], requires=[]) + a = test_utils.ProvidesRequiresTask('a', provides=[], requires=['x']) + b = test_utils.ProvidesRequiresTask('b', provides=['x'], requires=[]) + c = test_utils.ProvidesRequiresTask('c', provides=[], requires=[]) 
flo = gf.Flow("test").add( a, lf.Flow("test2").add(b, c) @@ -272,8 +262,8 @@ class PatternCompileTest(test.TestCase): def test_checks_for_dups(self): flo = gf.Flow("test").add( - t_utils.DummyTask(name="a"), - t_utils.DummyTask(name="a") + test_utils.DummyTask(name="a"), + test_utils.DummyTask(name="a") ) self.assertRaisesRegexp(exc.Duplicate, '^Atoms with duplicate names', @@ -281,8 +271,8 @@ class PatternCompileTest(test.TestCase): def test_checks_for_dups_globally(self): flo = gf.Flow("test").add( - gf.Flow("int1").add(t_utils.DummyTask(name="a")), - gf.Flow("int2").add(t_utils.DummyTask(name="a"))) + gf.Flow("int1").add(test_utils.DummyTask(name="a")), + gf.Flow("int2").add(test_utils.DummyTask(name="a"))) self.assertRaisesRegexp(exc.Duplicate, '^Atoms with duplicate names', compiler.PatternCompiler().compile, flo) @@ -325,7 +315,7 @@ class PatternCompileTest(test.TestCase): def test_retry_in_linear_flow_with_tasks(self): c = retry.AlwaysRevert("c") - a, b = _make_many(2) + a, b = test_utils.make_many(2) flo = lf.Flow("test", c).add(a, b) compilation = compiler.PatternCompiler().compile(flo) g = compilation.execution_graph @@ -343,7 +333,7 @@ class PatternCompileTest(test.TestCase): def test_retry_in_unordered_flow_with_tasks(self): c = retry.AlwaysRevert("c") - a, b = _make_many(2) + a, b = test_utils.make_many(2) flo = uf.Flow("test", c).add(a, b) compilation = compiler.PatternCompiler().compile(flo) g = compilation.execution_graph @@ -361,7 +351,7 @@ class PatternCompileTest(test.TestCase): def test_retry_in_graph_flow_with_tasks(self): r = retry.AlwaysRevert("cp") - a, b, c = _make_many(3) + a, b, c = test_utils.make_many(3) flo = gf.Flow("test", r).add(a, b, c).link(b, c) compilation = compiler.PatternCompiler().compile(flo) g = compilation.execution_graph @@ -382,7 +372,7 @@ class PatternCompileTest(test.TestCase): def test_retries_hierarchy(self): c1 = retry.AlwaysRevert("cp1") c2 = retry.AlwaysRevert("cp2") - a, b, c, d = _make_many(4) + a, b, c, d = 
test_utils.make_many(4) flo = lf.Flow("test", c1).add( a, lf.Flow("test", c2).add(b, c), @@ -407,7 +397,7 @@ class PatternCompileTest(test.TestCase): def test_retry_subflows_hierarchy(self): c1 = retry.AlwaysRevert("cp1") - a, b, c, d = _make_many(4) + a, b, c, d = test_utils.make_many(4) flo = lf.Flow("test", c1).add( a, lf.Flow("test").add(b, c), diff --git a/taskflow/tests/unit/test_action_engine_runner.py b/taskflow/tests/unit/test_action_engine_runner.py new file mode 100644 index 00000000..d7927f8e --- /dev/null +++ b/taskflow/tests/unit/test_action_engine_runner.py @@ -0,0 +1,169 @@ +# -*- coding: utf-8 -*- + +# Copyright (C) 2014 Yahoo! Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import six + +from taskflow.engines.action_engine import compiler +from taskflow.engines.action_engine import executor +from taskflow.engines.action_engine import runtime +from taskflow.patterns import linear_flow as lf +from taskflow import states as st +from taskflow import storage +from taskflow import test +from taskflow.tests import utils as test_utils +from taskflow.utils import misc +from taskflow.utils import persistence_utils as pu + + +class RunnerTest(test.TestCase): + def _make_runtime(self, flow, initial_state=None): + compilation = compiler.PatternCompiler().compile(flow) + flow_detail = pu.create_flow_detail(flow) + store = storage.SingleThreadedStorage(flow_detail) + # This ensures the tasks exist in storage... 
+ for task in compilation.execution_graph: + store.ensure_task(task.name) + if initial_state: + store.set_flow_state(initial_state) + task_notifier = misc.Notifier() + task_executor = executor.SerialTaskExecutor() + task_executor.start() + self.addCleanup(task_executor.stop) + return runtime.Runtime(compiler.PatternCompiler().compile(flow), + store, task_notifier, task_executor) + + def test_running(self): + flow = lf.Flow("root") + flow.add(*test_utils.make_many(1)) + + rt = self._make_runtime(flow, initial_state=st.RUNNING) + self.assertTrue(rt.runner.is_running()) + + rt = self._make_runtime(flow, initial_state=st.SUSPENDED) + self.assertFalse(rt.runner.is_running()) + + def test_run_iterations(self): + flow = lf.Flow("root") + tasks = test_utils.make_many( + 1, task_cls=test_utils.TaskNoRequiresNoReturns) + flow.add(*tasks) + + rt = self._make_runtime(flow, initial_state=st.RUNNING) + self.assertTrue(rt.runner.is_running()) + + it = rt.runner.run_iter() + state, failures = six.next(it) + self.assertEqual(st.RESUMING, state) + self.assertEqual(0, len(failures)) + + state, failures = six.next(it) + self.assertEqual(st.SCHEDULING, state) + self.assertEqual(0, len(failures)) + + state, failures = six.next(it) + self.assertEqual(st.WAITING, state) + self.assertEqual(0, len(failures)) + + state, failures = six.next(it) + self.assertEqual(st.ANALYZING, state) + self.assertEqual(0, len(failures)) + + state, failures = six.next(it) + self.assertEqual(st.SUCCESS, state) + self.assertEqual(0, len(failures)) + + self.assertRaises(StopIteration, six.next, it) + + def test_run_iterations_reverted(self): + flow = lf.Flow("root") + tasks = test_utils.make_many( + 1, task_cls=test_utils.TaskWithFailure) + flow.add(*tasks) + + rt = self._make_runtime(flow, initial_state=st.RUNNING) + self.assertTrue(rt.runner.is_running()) + + transitions = list(rt.runner.run_iter()) + state, failures = transitions[-1] + self.assertEqual(st.REVERTED, state) + self.assertEqual([], failures) + + 
self.assertEqual(st.REVERTED, rt.storage.get_atom_state(tasks[0].name)) + + def test_run_iterations_failure(self): + flow = lf.Flow("root") + tasks = test_utils.make_many( + 1, task_cls=test_utils.NastyFailingTask) + flow.add(*tasks) + + rt = self._make_runtime(flow, initial_state=st.RUNNING) + self.assertTrue(rt.runner.is_running()) + + transitions = list(rt.runner.run_iter()) + state, failures = transitions[-1] + self.assertEqual(st.FAILURE, state) + self.assertEqual(1, len(failures)) + failure = failures[0] + self.assertTrue(failure.check(RuntimeError)) + + self.assertEqual(st.FAILURE, rt.storage.get_atom_state(tasks[0].name)) + + def test_run_iterations_suspended(self): + flow = lf.Flow("root") + tasks = test_utils.make_many( + 2, task_cls=test_utils.TaskNoRequiresNoReturns) + flow.add(*tasks) + + rt = self._make_runtime(flow, initial_state=st.RUNNING) + self.assertTrue(rt.runner.is_running()) + + transitions = [] + for state, failures in rt.runner.run_iter(): + transitions.append((state, failures)) + if state == st.ANALYZING: + rt.storage.set_flow_state(st.SUSPENDED) + state, failures = transitions[-1] + self.assertEqual(st.SUSPENDED, state) + self.assertEqual([], failures) + + self.assertEqual(st.SUCCESS, rt.storage.get_atom_state(tasks[0].name)) + self.assertEqual(st.PENDING, rt.storage.get_atom_state(tasks[1].name)) + + def test_run_iterations_suspended_failure(self): + flow = lf.Flow("root") + sad_tasks = test_utils.make_many( + 1, task_cls=test_utils.NastyFailingTask) + flow.add(*sad_tasks) + happy_tasks = test_utils.make_many( + 1, task_cls=test_utils.TaskNoRequiresNoReturns, offset=1) + flow.add(*happy_tasks) + + rt = self._make_runtime(flow, initial_state=st.RUNNING) + self.assertTrue(rt.runner.is_running()) + + transitions = [] + for state, failures in rt.runner.run_iter(): + transitions.append((state, failures)) + if state == st.ANALYZING: + rt.storage.set_flow_state(st.SUSPENDED) + state, failures = transitions[-1] + self.assertEqual(st.SUSPENDED, 
state) + self.assertEqual([], failures) + + self.assertEqual(st.PENDING, + rt.storage.get_atom_state(happy_tasks[0].name)) + self.assertEqual(st.FAILURE, + rt.storage.get_atom_state(sad_tasks[0].name)) diff --git a/taskflow/tests/utils.py b/taskflow/tests/utils.py index ce3289ec..d7c85b95 100644 --- a/taskflow/tests/utils.py +++ b/taskflow/tests/utils.py @@ -15,6 +15,7 @@ # under the License. import contextlib +import string import threading import six @@ -346,3 +347,16 @@ class WaitForOneFromTask(SaveOrderTask): if name not in self.wait_for or state not in self.wait_states: return self.event.set() + + +def make_many(amount, task_cls=DummyTask, offset=0): + name_pool = string.ascii_lowercase + string.ascii_uppercase + tasks = [] + while amount > 0: + if offset >= len(name_pool): + raise AssertionError('Name pool size to small (%s < %s)' + % (len(name_pool), offset + 1)) + tasks.append(task_cls(name=name_pool[offset])) + offset += 1 + amount -= 1 + return tasks From 055dd829c1a7bc8e2f9add82c9d22551081e7f24 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Wed, 27 Aug 2014 21:42:42 -0700 Subject: [PATCH 183/188] Move parts of action engine tests to a subdirectory To match how we have a worker_based subdirectory for its specific tests lets start moving pieces of the action engine specific tests to its own directory as well and move more in the future as well. 
Change-Id: I003b07a95259ba18b961834515121243e27d7456 --- taskflow/tests/unit/action_engine/__init__.py | 0 .../test_compile.py} | 0 .../test_runner.py} | 0 taskflow/tests/unit/{test_action_engine.py => test_engines.py} | 0 4 files changed, 0 insertions(+), 0 deletions(-) create mode 100644 taskflow/tests/unit/action_engine/__init__.py rename taskflow/tests/unit/{test_action_engine_compile.py => action_engine/test_compile.py} (100%) rename taskflow/tests/unit/{test_action_engine_runner.py => action_engine/test_runner.py} (100%) rename taskflow/tests/unit/{test_action_engine.py => test_engines.py} (100%) diff --git a/taskflow/tests/unit/action_engine/__init__.py b/taskflow/tests/unit/action_engine/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/taskflow/tests/unit/test_action_engine_compile.py b/taskflow/tests/unit/action_engine/test_compile.py similarity index 100% rename from taskflow/tests/unit/test_action_engine_compile.py rename to taskflow/tests/unit/action_engine/test_compile.py diff --git a/taskflow/tests/unit/test_action_engine_runner.py b/taskflow/tests/unit/action_engine/test_runner.py similarity index 100% rename from taskflow/tests/unit/test_action_engine_runner.py rename to taskflow/tests/unit/action_engine/test_runner.py diff --git a/taskflow/tests/unit/test_action_engine.py b/taskflow/tests/unit/test_engines.py similarity index 100% rename from taskflow/tests/unit/test_action_engine.py rename to taskflow/tests/unit/test_engines.py From a8462dce96b4a7ee3540f99e1eabfd1573050404 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Wed, 27 Aug 2014 17:43:51 -0700 Subject: [PATCH 184/188] Update transitioning function name to be more understandable To make it more obvious what this function does adjust the naming to be more clear and add a docstring that explains this in more depth. 
Change-Id: Ib39afd28e0bdeb94d50e85f2a39e210d3cbd7b01 --- taskflow/engines/worker_based/executor.py | 14 ++++++------ taskflow/engines/worker_based/protocol.py | 13 +++++++++-- .../tests/unit/worker_based/test_executor.py | 22 +++++++++---------- 3 files changed, 29 insertions(+), 20 deletions(-) diff --git a/taskflow/engines/worker_based/executor.py b/taskflow/engines/worker_based/executor.py index 3b5a0355..9ff7078b 100644 --- a/taskflow/engines/worker_based/executor.py +++ b/taskflow/engines/worker_based/executor.py @@ -110,7 +110,7 @@ class WorkerTaskExecutor(executor.TaskExecutorBase): # publish waiting requests for request in self._requests_cache.get_waiting_requests(tasks): - if request.transition_log_error(pr.PENDING, logger=LOG): + if request.transition_and_log_error(pr.PENDING, logger=LOG): self._publish_request(request, topic) def _process_response(self, response, message): @@ -125,12 +125,12 @@ class WorkerTaskExecutor(executor.TaskExecutorBase): if request is not None: response = pr.Response.from_dict(response) if response.state == pr.RUNNING: - request.transition_log_error(pr.RUNNING, logger=LOG) + request.transition_and_log_error(pr.RUNNING, logger=LOG) elif response.state == pr.PROGRESS: request.on_progress(**response.data) elif response.state in (pr.FAILURE, pr.SUCCESS): - moved = request.transition_log_error(response.state, - logger=LOG) + moved = request.transition_and_log_error(response.state, + logger=LOG) if moved: # NOTE(imelnikov): request should not be in the # cache when another thread can see its result and @@ -151,7 +151,7 @@ class WorkerTaskExecutor(executor.TaskExecutorBase): When request has expired it is removed from the requests cache and the `RequestTimeout` exception is set as a request result. 
""" - if request.transition_log_error(pr.FAILURE, logger=LOG): + if request.transition_and_log_error(pr.FAILURE, logger=LOG): # Raise an exception (and then catch it) so we get a nice # traceback that the request will get instead of it getting # just an exception with no traceback... @@ -184,7 +184,7 @@ class WorkerTaskExecutor(executor.TaskExecutorBase): # before putting it into the requests cache to prevent the notify # processing thread get list of waiting requests and publish it # before it is published here, so it wouldn't be published twice. - if request.transition_log_error(pr.PENDING, logger=LOG): + if request.transition_and_log_error(pr.PENDING, logger=LOG): self._requests_cache[request.uuid] = request self._publish_request(request, topic) else: @@ -202,7 +202,7 @@ class WorkerTaskExecutor(executor.TaskExecutorBase): except Exception: with misc.capture_failure() as failure: LOG.exception("Failed to submit the '%s' request.", request) - if request.transition_log_error(pr.FAILURE, logger=LOG): + if request.transition_and_log_error(pr.FAILURE, logger=LOG): del self._requests_cache[request.uuid] request.set_result(failure) diff --git a/taskflow/engines/worker_based/protocol.py b/taskflow/engines/worker_based/protocol.py index 334c1d93..6e54f9fb 100644 --- a/taskflow/engines/worker_based/protocol.py +++ b/taskflow/engines/worker_based/protocol.py @@ -295,7 +295,16 @@ class Request(Message): def on_progress(self, event_data, progress): self._progress_callback(self._task, event_data, progress) - def transition_log_error(self, new_state, logger=None): + def transition_and_log_error(self, new_state, logger=None): + """Transitions *and* logs an error if that transitioning raises. 
+ + This overlays the transition function and performs nearly the same + functionality but instead of raising if the transition was not valid + it logs a warning to the provided logger and returns False to + indicate that the transition was not performed (note that this + is *different* from the transition function where False means + ignored). + """ if logger is None: logger = LOG moved = False @@ -311,7 +320,7 @@ class Request(Message): """Transitions the request to a new state. If transition was performed, it returns True. If transition - should was ignored, it returns False. If transition is not + should was ignored, it returns False. If transition was not valid (and will not be performed), it raises an InvalidState exception. """ diff --git a/taskflow/tests/unit/worker_based/test_executor.py b/taskflow/tests/unit/worker_based/test_executor.py index aa236fcf..e6c97e17 100644 --- a/taskflow/tests/unit/worker_based/test_executor.py +++ b/taskflow/tests/unit/worker_based/test_executor.py @@ -97,7 +97,7 @@ class TestWorkerTaskExecutor(test.MockTestCase): ex._process_response(response.to_dict(), self.message_mock) expected_calls = [ - mock.call.transition_log_error(pr.RUNNING, logger=mock.ANY), + mock.call.transition_and_log_error(pr.RUNNING, logger=mock.ANY), ] self.assertEqual(expected_calls, self.request_inst_mock.mock_calls) @@ -120,7 +120,7 @@ class TestWorkerTaskExecutor(test.MockTestCase): self.assertEqual(len(ex._requests_cache), 0) expected_calls = [ - mock.call.transition_log_error(pr.FAILURE, logger=mock.ANY), + mock.call.transition_and_log_error(pr.FAILURE, logger=mock.ANY), mock.call.set_result(result=utils.FailureMatcher(failure)) ] self.assertEqual(expected_calls, self.request_inst_mock.mock_calls) @@ -133,7 +133,7 @@ class TestWorkerTaskExecutor(test.MockTestCase): ex._process_response(response.to_dict(), self.message_mock) expected_calls = [ - mock.call.transition_log_error(pr.SUCCESS, logger=mock.ANY), + 
mock.call.transition_and_log_error(pr.SUCCESS, logger=mock.ANY), mock.call.set_result(result=self.task_result, event='executed') ] self.assertEqual(expected_calls, self.request_inst_mock.mock_calls) @@ -212,8 +212,8 @@ class TestWorkerTaskExecutor(test.MockTestCase): expected_calls = [ mock.call.Request(self.task, self.task_uuid, 'execute', self.task_args, None, self.timeout), - mock.call.request.transition_log_error(pr.PENDING, - logger=mock.ANY), + mock.call.request.transition_and_log_error(pr.PENDING, + logger=mock.ANY), mock.call.proxy.publish(msg=self.request_inst_mock, routing_key=self.executor_topic, reply_to=self.executor_uuid, @@ -234,8 +234,8 @@ class TestWorkerTaskExecutor(test.MockTestCase): self.task_args, None, self.timeout, failures=self.task_failures, result=self.task_result), - mock.call.request.transition_log_error(pr.PENDING, - logger=mock.ANY), + mock.call.request.transition_and_log_error(pr.PENDING, + logger=mock.ANY), mock.call.proxy.publish(msg=self.request_inst_mock, routing_key=self.executor_topic, reply_to=self.executor_uuid, @@ -265,14 +265,14 @@ class TestWorkerTaskExecutor(test.MockTestCase): expected_calls = [ mock.call.Request(self.task, self.task_uuid, 'execute', self.task_args, None, self.timeout), - mock.call.request.transition_log_error(pr.PENDING, - logger=mock.ANY), + mock.call.request.transition_and_log_error(pr.PENDING, + logger=mock.ANY), mock.call.proxy.publish(msg=self.request_inst_mock, routing_key=self.executor_topic, reply_to=self.executor_uuid, correlation_id=self.task_uuid), - mock.call.request.transition_log_error(pr.FAILURE, - logger=mock.ANY), + mock.call.request.transition_and_log_error(pr.FAILURE, + logger=mock.ANY), mock.call.request.set_result(mock.ANY) ] self.assertEqual(expected_calls, self.master_mock.mock_calls) From 7cbc1ec339b10a6d8c2852bc2466726ebc29314f Mon Sep 17 00:00:00 2001 From: OpenStack Proposal Bot Date: Thu, 4 Sep 2014 17:55:42 +0000 Subject: [PATCH 185/188] Updated from global requirements 
Change-Id: Idf777721698dd6eebd62c694423e746df2e2fb9b --- requirements-py2.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements-py2.txt b/requirements-py2.txt index 83523949..5c0d5265 100644 --- a/requirements-py2.txt +++ b/requirements-py2.txt @@ -9,6 +9,6 @@ Babel>=1.3 # Used for backend storage engine loading. stevedore>=0.14 # Backport for concurrent.futures which exists in 3.2+ -futures>=2.1.3 +futures>=2.1.6 # Used for structured input validation jsonschema>=2.0.0,<3.0.0 From 7316fd1bf3de1507f521403fd2722ea3c54448d0 Mon Sep 17 00:00:00 2001 From: Doug Hellmann Date: Wed, 3 Sep 2014 14:27:13 -0400 Subject: [PATCH 186/188] warn against sorting requirements Co-authored-by: Joshua Harlow Addresses-Bug: #1365061 Change-Id: I64ae9191863564e278a35d42ec9cd743a233028e --- optional-requirements.txt | 13 ++++++++----- requirements-py2.txt | 4 ++++ requirements-py3.txt | 4 ++++ test-requirements.txt | 4 ++++ 4 files changed, 20 insertions(+), 5 deletions(-) diff --git a/optional-requirements.txt b/optional-requirements.txt index d423210a..e010cf60 100644 --- a/optional-requirements.txt +++ b/optional-requirements.txt @@ -1,8 +1,11 @@ -# This file lists dependencies that are used by different -# pluggable (optional) parts of TaskFlow, like engines -# or persistence backends. They are not strictly required -# by TaskFlow (you can use TaskFlow without them), but -# so they don't go to requirements.txt. +# This file lists dependencies that are used by different pluggable (optional) +# parts of TaskFlow, like engines or persistence backends. They are not +# strictly required by TaskFlow (aka you can use TaskFlow without them), so +# they don't go into one of the requirements.txt files. + +# The order of packages is significant, because pip processes them in the order +# of appearance. Changing the order has an impact on the overall integration +# process, which may cause wedges in the gate later. 
# Database (sqlalchemy) persistence: SQLAlchemy>=0.7.8,<=0.9.99 diff --git a/requirements-py2.txt b/requirements-py2.txt index 83523949..a8e3f464 100644 --- a/requirements-py2.txt +++ b/requirements-py2.txt @@ -1,3 +1,7 @@ +# The order of packages is significant, because pip processes them in the order +# of appearance. Changing the order has an impact on the overall integration +# process, which may cause wedges in the gate later. + # Packages needed for using this library. anyjson>=0.3.3 iso8601>=0.1.9 diff --git a/requirements-py3.txt b/requirements-py3.txt index c6ca178c..1e1052e5 100644 --- a/requirements-py3.txt +++ b/requirements-py3.txt @@ -1,3 +1,7 @@ +# The order of packages is significant, because pip processes them in the order +# of appearance. Changing the order has an impact on the overall integration +# process, which may cause wedges in the gate later. + # Packages needed for using this library. anyjson>=0.3.3 iso8601>=0.1.9 diff --git a/test-requirements.txt b/test-requirements.txt index a2efaa3f..4068d786 100644 --- a/test-requirements.txt +++ b/test-requirements.txt @@ -1,3 +1,7 @@ +# The order of packages is significant, because pip processes them in the order +# of appearance. Changing the order has an impact on the overall integration +# process, which may cause wedges in the gate later. + hacking>=0.9.2,<0.10 discover coverage>=3.6 From e1ef04492ebddab02de2f277079ae0c7448c3b40 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Wed, 9 Jul 2014 17:54:21 -0700 Subject: [PATCH 187/188] Translate the engine runner into a well defined state-machine Instead of having a ad-hoc state-machine being used to perform the various runtime actions (performed when a engine is ran) we can gain a much more explict execution model by translating that ad-hoc state machine to an explicit one instead... 
This commit does just that, it adds a new fsm type that can be used to create, define and run state-machines that respond to various events (internal or external) and uses it in the runner action engine module to run the previously ad-hc/implicit state-machine. Implements blueprint runner-state-machine Change-Id: Id35633a9de707f3ffb1a4b7e9619af1be009317f --- requirements-py2.txt | 4 + requirements-py3.txt | 2 + taskflow/engines/action_engine/runner.py | 295 ++++++++++++------ .../tests/unit/action_engine/test_runner.py | 158 +++++++++- taskflow/tests/unit/test_types.py | 150 ++++++++- taskflow/types/fsm.py | 290 +++++++++++++++++ 6 files changed, 792 insertions(+), 107 deletions(-) create mode 100644 taskflow/types/fsm.py diff --git a/requirements-py2.txt b/requirements-py2.txt index 93f8e139..9b204ea6 100644 --- a/requirements-py2.txt +++ b/requirements-py2.txt @@ -5,6 +5,8 @@ # Packages needed for using this library. anyjson>=0.3.3 iso8601>=0.1.9 +# Only needed on python 2.6 +ordereddict # Python 2->3 compatibility library. six>=1.7.0 # Very nice graph library @@ -16,3 +18,5 @@ stevedore>=0.14 futures>=2.1.6 # Used for structured input validation jsonschema>=2.0.0,<3.0.0 +# For pretty printing state-machine tables +PrettyTable>=0.7,<0.8 diff --git a/requirements-py3.txt b/requirements-py3.txt index 1e1052e5..63880b31 100644 --- a/requirements-py3.txt +++ b/requirements-py3.txt @@ -14,3 +14,5 @@ Babel>=1.3 stevedore>=0.14 # Used for structured input validation jsonschema>=2.0.0,<3.0.0 +# For pretty printing state-machine tables +PrettyTable>=0.7,<0.8 diff --git a/taskflow/engines/action_engine/runner.py b/taskflow/engines/action_engine/runner.py index b48f66a7..7a0b9c87 100644 --- a/taskflow/engines/action_engine/runner.py +++ b/taskflow/engines/action_engine/runner.py @@ -14,24 +14,199 @@ # License for the specific language governing permissions and limitations # under the License. 
+import logging + from taskflow import states as st +from taskflow.types import fsm from taskflow.utils import misc +# Waiting state timeout (in seconds). +_WAITING_TIMEOUT = 60 -_WAITING_TIMEOUT = 60 # in seconds +# Meta states the state machine uses. +_UNDEFINED = 'UNDEFINED' +_GAME_OVER = 'GAME_OVER' +_META_STATES = (_GAME_OVER, _UNDEFINED) + +LOG = logging.getLogger(__name__) + + +class _MachineMemory(object): + """State machine memory.""" + + def __init__(self): + self.next_nodes = set() + self.not_done = set() + self.failures = [] + self.done = set() + + +class _MachineBuilder(object): + """State machine *builder* that the runner uses. + + NOTE(harlowja): the machine states that this build will for are:: + + +--------------+-----------+------------+----------+---------+ + | Start | Event | End | On Enter | On Exit | + +--------------+-----------+------------+----------+---------+ + | ANALYZING | finished | GAME_OVER | on_enter | on_exit | + | ANALYZING | schedule | SCHEDULING | on_enter | on_exit | + | ANALYZING | wait | WAITING | on_enter | on_exit | + | FAILURE[$] | | | | | + | GAME_OVER | failed | FAILURE | on_enter | on_exit | + | GAME_OVER | reverted | REVERTED | on_enter | on_exit | + | GAME_OVER | success | SUCCESS | on_enter | on_exit | + | GAME_OVER | suspended | SUSPENDED | on_enter | on_exit | + | RESUMING | schedule | SCHEDULING | on_enter | on_exit | + | REVERTED[$] | | | | | + | SCHEDULING | wait | WAITING | on_enter | on_exit | + | SUCCESS[$] | | | | | + | SUSPENDED[$] | | | | | + | UNDEFINED[^] | start | RESUMING | on_enter | on_exit | + | WAITING | analyze | ANALYZING | on_enter | on_exit | + +--------------+-----------+------------+----------+---------+ + + Between any of these yielded states (minus ``GAME_OVER`` and ``UNDEFINED``) + if the engine has been suspended or the engine has failed (due to a + non-resolveable task failure or scheduling failure) the machine will stop + executing new tasks (currently running tasks will be allowed to 
complete) + and this machines run loop will be broken. + """ + + def __init__(self, runtime, waiter): + self._analyzer = runtime.analyzer + self._completer = runtime.completer + self._scheduler = runtime.scheduler + self._storage = runtime.storage + self._waiter = waiter + + def runnable(self): + return self._storage.get_flow_state() == st.RUNNING + + def build(self, timeout=None): + memory = _MachineMemory() + if timeout is None: + timeout = _WAITING_TIMEOUT + + def resume(old_state, new_state, event): + memory.next_nodes.update(self._completer.resume()) + memory.next_nodes.update(self._analyzer.get_next_nodes()) + return 'schedule' + + def game_over(old_state, new_state, event): + if memory.failures: + return 'failed' + if self._analyzer.get_next_nodes(): + return 'suspended' + elif self._analyzer.is_success(): + return 'success' + else: + return 'reverted' + + def schedule(old_state, new_state, event): + if self.runnable() and memory.next_nodes: + not_done, failures = self._scheduler.schedule( + memory.next_nodes) + if not_done: + memory.not_done.update(not_done) + if failures: + memory.failures.extend(failures) + memory.next_nodes.clear() + return 'wait' + + def wait(old_state, new_state, event): + # TODO(harlowja): maybe we should start doing 'yield from' this + # call sometime in the future, or equivalent that will work in + # py2 and py3. 
+ if memory.not_done: + done, not_done = self._waiter.wait_for_any(memory.not_done, + timeout) + memory.done.update(done) + memory.not_done = not_done + return 'analyze' + + def analyze(old_state, new_state, event): + next_nodes = set() + while memory.done: + fut = memory.done.pop() + try: + node, event, result = fut.result() + retain = self._completer.complete(node, event, result) + if retain and isinstance(result, misc.Failure): + memory.failures.append(result) + except Exception: + memory.failures.append(misc.Failure()) + else: + try: + more_nodes = self._analyzer.get_next_nodes(node) + except Exception: + memory.failures.append(misc.Failure()) + else: + next_nodes.update(more_nodes) + if self.runnable() and next_nodes and not memory.failures: + memory.next_nodes.update(next_nodes) + return 'schedule' + elif memory.not_done: + return 'wait' + else: + return 'finished' + + def on_exit(old_state, event): + LOG.debug("Exiting old state '%s' in response to event '%s'", + old_state, event) + + def on_enter(new_state, event): + LOG.debug("Entering new state '%s' in response to event '%s'", + new_state, event) + + # NOTE(harlowja): when ran in debugging mode it is quite useful + # to track the various state transitions as they happen... 
+ watchers = {} + if LOG.isEnabledFor(logging.DEBUG): + watchers['on_exit'] = on_exit + watchers['on_enter'] = on_enter + + m = fsm.FSM(_UNDEFINED) + m.add_state(_GAME_OVER, **watchers) + m.add_state(_UNDEFINED, **watchers) + m.add_state(st.ANALYZING, **watchers) + m.add_state(st.RESUMING, **watchers) + m.add_state(st.REVERTED, terminal=True, **watchers) + m.add_state(st.SCHEDULING, **watchers) + m.add_state(st.SUCCESS, terminal=True, **watchers) + m.add_state(st.SUSPENDED, terminal=True, **watchers) + m.add_state(st.WAITING, **watchers) + m.add_state(st.FAILURE, terminal=True, **watchers) + + m.add_transition(_GAME_OVER, st.REVERTED, 'reverted') + m.add_transition(_GAME_OVER, st.SUCCESS, 'success') + m.add_transition(_GAME_OVER, st.SUSPENDED, 'suspended') + m.add_transition(_GAME_OVER, st.FAILURE, 'failed') + m.add_transition(_UNDEFINED, st.RESUMING, 'start') + m.add_transition(st.ANALYZING, _GAME_OVER, 'finished') + m.add_transition(st.ANALYZING, st.SCHEDULING, 'schedule') + m.add_transition(st.ANALYZING, st.WAITING, 'wait') + m.add_transition(st.RESUMING, st.SCHEDULING, 'schedule') + m.add_transition(st.SCHEDULING, st.WAITING, 'wait') + m.add_transition(st.WAITING, st.ANALYZING, 'analyze') + + m.add_reaction(_GAME_OVER, 'finished', game_over) + m.add_reaction(st.ANALYZING, 'analyze', analyze) + m.add_reaction(st.RESUMING, 'start', resume) + m.add_reaction(st.SCHEDULING, 'schedule', schedule) + m.add_reaction(st.WAITING, 'wait', wait) + + return (m, memory) class Runner(object): """Runner that iterates while executing nodes using the given runtime. - This runner acts as the action engine run loop, it resumes the workflow, - schedules all task it can for execution using the runtimes scheduler and - analyzer components, and than waits on returned futures and then activates - the runtimes completion component to finish up those tasks. 
- - This process repeats until the analzyer runs out of next nodes, when the - scheduler can no longer schedule tasks or when the the engine has been - suspended or a task has failed and that failure could not be resolved. + This runner acts as the action engine run loop/state-machine, it resumes + the workflow, schedules all task it can for execution using the runtimes + scheduler and analyzer components, and than waits on returned futures and + then activates the runtimes completion component to finish up those tasks + and so on... NOTE(harlowja): If the runtimes scheduler component is able to schedule tasks in parallel, this enables parallel running and/or reversion. @@ -43,94 +218,22 @@ class Runner(object): ignorable_states = (st.SCHEDULING, st.WAITING, st.RESUMING, st.ANALYZING) def __init__(self, runtime, waiter): - self._scheduler = runtime.scheduler - self._completer = runtime.completer - self._storage = runtime.storage - self._analyzer = runtime.analyzer - self._waiter = waiter + self._builder = _MachineBuilder(runtime, waiter) - def is_running(self): - return self._storage.get_flow_state() == st.RUNNING + @property + def builder(self): + return self._builder + + def runnable(self): + return self._builder.runnable() def run_iter(self, timeout=None): - """Runs the nodes using the runtime components. - - NOTE(harlowja): the states that this generator will go through are: - - RESUMING -> SCHEDULING - SCHEDULING -> WAITING - WAITING -> ANALYZING - ANALYZING -> SCHEDULING - - Between any of these yielded states if the engine has been suspended - or the engine has failed (due to a non-resolveable task failure or - scheduling failure) the engine will stop executing new tasks (currently - running tasks will be allowed to complete) and this iteration loop - will be broken. 
- """ - if timeout is None: - timeout = _WAITING_TIMEOUT - - # Prepare flow to be resumed - yield (st.RESUMING, []) - next_nodes = self._completer.resume() - next_nodes.update(self._analyzer.get_next_nodes()) - - # Schedule nodes to be worked on - yield (st.SCHEDULING, []) - if self.is_running(): - not_done, failures = self._scheduler.schedule(next_nodes) - else: - not_done, failures = (set(), []) - - # Run! - # - # At this point we need to ensure we wait for all active nodes to - # finish running (even if we are asked to suspend) since we can not - # preempt those tasks (maybe in the future we will be better able to do - # this). - while not_done: - yield (st.WAITING, []) - - # TODO(harlowja): maybe we should start doing 'yield from' this - # call sometime in the future, or equivalent that will work in - # py2 and py3. - done, not_done = self._waiter.wait_for_any(not_done, timeout) - - # Analyze the results and schedule more nodes (unless we had - # failures). If failures occurred just continue processing what - # is running (so that we don't leave it abandoned) but do not - # schedule anything new. - yield (st.ANALYZING, []) - next_nodes = set() - for future in done: - try: - node, event, result = future.result() - retain = self._completer.complete(node, event, result) - if retain and isinstance(result, misc.Failure): - failures.append(result) - except Exception: - failures.append(misc.Failure()) + """Runs the nodes using a built state machine.""" + machine, memory = self.builder.build(timeout=timeout) + for (_prior_state, new_state) in machine.run_iter('start'): + # NOTE(harlowja): skip over meta-states. 
+ if new_state not in _META_STATES: + if new_state == st.FAILURE: + yield (new_state, memory.failures) else: - try: - more_nodes = self._analyzer.get_next_nodes(node) - except Exception: - failures.append(misc.Failure()) - else: - next_nodes.update(more_nodes) - if next_nodes and not failures and self.is_running(): - yield (st.SCHEDULING, []) - # Recheck incase someone suspended it. - if self.is_running(): - more_not_done, failures = self._scheduler.schedule( - next_nodes) - not_done.update(more_not_done) - - if failures: - yield (st.FAILURE, failures) - elif self._analyzer.get_next_nodes(): - yield (st.SUSPENDED, []) - elif self._analyzer.is_success(): - yield (st.SUCCESS, []) - else: - yield (st.REVERTED, []) + yield (new_state, []) diff --git a/taskflow/tests/unit/action_engine/test_runner.py b/taskflow/tests/unit/action_engine/test_runner.py index d7927f8e..2e18f6b6 100644 --- a/taskflow/tests/unit/action_engine/test_runner.py +++ b/taskflow/tests/unit/action_engine/test_runner.py @@ -18,17 +18,20 @@ import six from taskflow.engines.action_engine import compiler from taskflow.engines.action_engine import executor +from taskflow.engines.action_engine import runner from taskflow.engines.action_engine import runtime +from taskflow import exceptions as excp from taskflow.patterns import linear_flow as lf from taskflow import states as st from taskflow import storage from taskflow import test from taskflow.tests import utils as test_utils +from taskflow.types import fsm from taskflow.utils import misc from taskflow.utils import persistence_utils as pu -class RunnerTest(test.TestCase): +class _RunnerTestMixin(object): def _make_runtime(self, flow, initial_state=None): compilation = compiler.PatternCompiler().compile(flow) flow_detail = pu.create_flow_detail(flow) @@ -42,18 +45,20 @@ class RunnerTest(test.TestCase): task_executor = executor.SerialTaskExecutor() task_executor.start() self.addCleanup(task_executor.stop) - return 
runtime.Runtime(compiler.PatternCompiler().compile(flow), - store, task_notifier, task_executor) + return runtime.Runtime(compilation, store, + task_notifier, task_executor) + +class RunnerTest(test.TestCase, _RunnerTestMixin): def test_running(self): flow = lf.Flow("root") flow.add(*test_utils.make_many(1)) rt = self._make_runtime(flow, initial_state=st.RUNNING) - self.assertTrue(rt.runner.is_running()) + self.assertTrue(rt.runner.runnable()) rt = self._make_runtime(flow, initial_state=st.SUSPENDED) - self.assertFalse(rt.runner.is_running()) + self.assertFalse(rt.runner.runnable()) def test_run_iterations(self): flow = lf.Flow("root") @@ -62,7 +67,7 @@ class RunnerTest(test.TestCase): flow.add(*tasks) rt = self._make_runtime(flow, initial_state=st.RUNNING) - self.assertTrue(rt.runner.is_running()) + self.assertTrue(rt.runner.runnable()) it = rt.runner.run_iter() state, failures = six.next(it) @@ -94,7 +99,7 @@ class RunnerTest(test.TestCase): flow.add(*tasks) rt = self._make_runtime(flow, initial_state=st.RUNNING) - self.assertTrue(rt.runner.is_running()) + self.assertTrue(rt.runner.runnable()) transitions = list(rt.runner.run_iter()) state, failures = transitions[-1] @@ -110,7 +115,7 @@ class RunnerTest(test.TestCase): flow.add(*tasks) rt = self._make_runtime(flow, initial_state=st.RUNNING) - self.assertTrue(rt.runner.is_running()) + self.assertTrue(rt.runner.runnable()) transitions = list(rt.runner.run_iter()) state, failures = transitions[-1] @@ -128,7 +133,7 @@ class RunnerTest(test.TestCase): flow.add(*tasks) rt = self._make_runtime(flow, initial_state=st.RUNNING) - self.assertTrue(rt.runner.is_running()) + self.assertTrue(rt.runner.runnable()) transitions = [] for state, failures in rt.runner.run_iter(): @@ -152,7 +157,7 @@ class RunnerTest(test.TestCase): flow.add(*happy_tasks) rt = self._make_runtime(flow, initial_state=st.RUNNING) - self.assertTrue(rt.runner.is_running()) + self.assertTrue(rt.runner.runnable()) transitions = [] for state, failures in 
rt.runner.run_iter(): @@ -167,3 +172,136 @@ class RunnerTest(test.TestCase): rt.storage.get_atom_state(happy_tasks[0].name)) self.assertEqual(st.FAILURE, rt.storage.get_atom_state(sad_tasks[0].name)) + + +class RunnerBuilderTest(test.TestCase, _RunnerTestMixin): + def test_builder_manual_process(self): + flow = lf.Flow("root") + tasks = test_utils.make_many( + 1, task_cls=test_utils.TaskNoRequiresNoReturns) + flow.add(*tasks) + + rt = self._make_runtime(flow, initial_state=st.RUNNING) + machine, memory = rt.runner.builder.build() + self.assertTrue(rt.runner.builder.runnable()) + self.assertRaises(fsm.NotInitialized, machine.process_event, 'poke') + + # Should now be pending... + self.assertEqual(st.PENDING, rt.storage.get_atom_state(tasks[0].name)) + + machine.initialize() + self.assertEqual(runner._UNDEFINED, machine.current_state) + self.assertFalse(machine.terminated) + self.assertRaises(excp.NotFound, machine.process_event, 'poke') + last_state = machine.current_state + + reaction, terminal = machine.process_event('start') + self.assertFalse(terminal) + self.assertIsNotNone(reaction) + self.assertEqual(st.RESUMING, machine.current_state) + self.assertRaises(excp.NotFound, machine.process_event, 'poke') + + last_state = machine.current_state + cb, args, kwargs = reaction + next_event = cb(last_state, machine.current_state, + 'start', *args, **kwargs) + reaction, terminal = machine.process_event(next_event) + self.assertFalse(terminal) + self.assertIsNotNone(reaction) + self.assertEqual(st.SCHEDULING, machine.current_state) + self.assertRaises(excp.NotFound, machine.process_event, 'poke') + + last_state = machine.current_state + cb, args, kwargs = reaction + next_event = cb(last_state, machine.current_state, + next_event, *args, **kwargs) + reaction, terminal = machine.process_event(next_event) + self.assertFalse(terminal) + self.assertEqual(st.WAITING, machine.current_state) + self.assertRaises(excp.NotFound, machine.process_event, 'poke') + + # Should now be 
running... + self.assertEqual(st.RUNNING, rt.storage.get_atom_state(tasks[0].name)) + + last_state = machine.current_state + cb, args, kwargs = reaction + next_event = cb(last_state, machine.current_state, + next_event, *args, **kwargs) + reaction, terminal = machine.process_event(next_event) + self.assertFalse(terminal) + self.assertIsNotNone(reaction) + self.assertEqual(st.ANALYZING, machine.current_state) + self.assertRaises(excp.NotFound, machine.process_event, 'poke') + + last_state = machine.current_state + cb, args, kwargs = reaction + next_event = cb(last_state, machine.current_state, + next_event, *args, **kwargs) + reaction, terminal = machine.process_event(next_event) + self.assertFalse(terminal) + self.assertEqual(runner._GAME_OVER, machine.current_state) + + # Should now be done... + self.assertEqual(st.SUCCESS, rt.storage.get_atom_state(tasks[0].name)) + + def test_builder_automatic_process(self): + flow = lf.Flow("root") + tasks = test_utils.make_many( + 1, task_cls=test_utils.TaskNoRequiresNoReturns) + flow.add(*tasks) + + rt = self._make_runtime(flow, initial_state=st.RUNNING) + machine, memory = rt.runner.builder.build() + self.assertTrue(rt.runner.builder.runnable()) + + transitions = list(machine.run_iter('start')) + self.assertEqual((runner._UNDEFINED, st.RESUMING), transitions[0]) + self.assertEqual((runner._GAME_OVER, st.SUCCESS), transitions[-1]) + self.assertEqual(st.SUCCESS, rt.storage.get_atom_state(tasks[0].name)) + + def test_builder_automatic_process_failure(self): + flow = lf.Flow("root") + tasks = test_utils.make_many(1, task_cls=test_utils.NastyFailingTask) + flow.add(*tasks) + + rt = self._make_runtime(flow, initial_state=st.RUNNING) + machine, memory = rt.runner.builder.build() + self.assertTrue(rt.runner.builder.runnable()) + + transitions = list(machine.run_iter('start')) + self.assertEqual((runner._GAME_OVER, st.FAILURE), transitions[-1]) + self.assertEqual(1, len(memory.failures)) + + def 
test_builder_automatic_process_reverted(self): + flow = lf.Flow("root") + tasks = test_utils.make_many(1, task_cls=test_utils.TaskWithFailure) + flow.add(*tasks) + + rt = self._make_runtime(flow, initial_state=st.RUNNING) + machine, memory = rt.runner.builder.build() + self.assertTrue(rt.runner.builder.runnable()) + + transitions = list(machine.run_iter('start')) + self.assertEqual((runner._GAME_OVER, st.REVERTED), transitions[-1]) + self.assertEqual(st.REVERTED, rt.storage.get_atom_state(tasks[0].name)) + + def test_builder_expected_transition_occurrences(self): + flow = lf.Flow("root") + tasks = test_utils.make_many( + 10, task_cls=test_utils.TaskNoRequiresNoReturns) + flow.add(*tasks) + + rt = self._make_runtime(flow, initial_state=st.RUNNING) + machine, memory = rt.runner.builder.build() + transitions = list(machine.run_iter('start')) + + occurrences = dict((t, transitions.count(t)) for t in transitions) + self.assertEqual(10, occurrences.get((st.SCHEDULING, st.WAITING))) + self.assertEqual(10, occurrences.get((st.WAITING, st.ANALYZING))) + self.assertEqual(9, occurrences.get((st.ANALYZING, st.SCHEDULING))) + self.assertEqual(1, occurrences.get((runner._GAME_OVER, st.SUCCESS))) + self.assertEqual(1, occurrences.get((runner._UNDEFINED, st.RESUMING))) + + self.assertEqual(0, len(memory.next_nodes)) + self.assertEqual(0, len(memory.not_done)) + self.assertEqual(0, len(memory.failures)) diff --git a/taskflow/tests/unit/test_types.py b/taskflow/tests/unit/test_types.py index e17a9fe2..141cdfc8 100644 --- a/taskflow/tests/unit/test_types.py +++ b/taskflow/tests/unit/test_types.py @@ -17,8 +17,11 @@ import time import networkx as nx +import six +from taskflow import exceptions as excp from taskflow import test +from taskflow.types import fsm from taskflow.types import graph from taskflow.types import timing as tt from taskflow.types import tree @@ -117,7 +120,7 @@ class TreeTest(test.TestCase): 'primate', 'monkey', 'human']), set(things)) -class 
StopWatchUtilsTest(test.TestCase): +class StopWatchTest(test.TestCase): def test_no_states(self): watch = tt.StopWatch() self.assertRaises(RuntimeError, watch.stop) @@ -156,3 +159,148 @@ class StopWatchUtilsTest(test.TestCase): with tt.StopWatch() as watch: time.sleep(0.05) self.assertGreater(0.01, watch.elapsed()) + + +class FSMTest(test.TestCase): + def setUp(self): + super(FSMTest, self).setUp() + # NOTE(harlowja): this state machine will never stop if run() is used. + self.jumper = fsm.FSM("down") + self.jumper.add_state('up') + self.jumper.add_state('down') + self.jumper.add_transition('down', 'up', 'jump') + self.jumper.add_transition('up', 'down', 'fall') + self.jumper.add_reaction('up', 'jump', lambda *args: 'fall') + self.jumper.add_reaction('down', 'fall', lambda *args: 'jump') + + def test_bad_start_state(self): + m = fsm.FSM('unknown') + self.assertRaises(excp.NotFound, m.run, 'unknown') + + def test_contains(self): + m = fsm.FSM('unknown') + self.assertNotIn('unknown', m) + m.add_state('unknown') + self.assertIn('unknown', m) + + def test_duplicate_state(self): + m = fsm.FSM('unknown') + m.add_state('unknown') + self.assertRaises(excp.Duplicate, m.add_state, 'unknown') + + def test_duplicate_reaction(self): + self.assertRaises( + # Currently duplicate reactions are not allowed... 
+ excp.Duplicate, + self.jumper.add_reaction, 'down', 'fall', lambda *args: 'skate') + + def test_bad_transition(self): + m = fsm.FSM('unknown') + m.add_state('unknown') + m.add_state('fire') + self.assertRaises(excp.NotFound, m.add_transition, + 'unknown', 'something', 'boom') + self.assertRaises(excp.NotFound, m.add_transition, + 'something', 'unknown', 'boom') + + def test_bad_reaction(self): + m = fsm.FSM('unknown') + m.add_state('unknown') + self.assertRaises(excp.NotFound, m.add_reaction, 'something', 'boom', + lambda *args: 'cough') + + def test_run(self): + m = fsm.FSM('down') + m.add_state('down') + m.add_state('up') + m.add_state('broken', terminal=True) + m.add_transition('down', 'up', 'jump') + m.add_transition('up', 'broken', 'hit-wall') + m.add_reaction('up', 'jump', lambda *args: 'hit-wall') + self.assertEqual(['broken', 'down', 'up'], sorted(m.states)) + self.assertEqual(2, m.events) + m.initialize() + self.assertEqual('down', m.current_state) + self.assertFalse(m.terminated) + m.run('jump') + self.assertTrue(m.terminated) + self.assertEqual('broken', m.current_state) + self.assertRaises(excp.InvalidState, m.run, 'jump', initialize=False) + + def test_on_enter_on_exit(self): + enter_transitions = [] + exit_transitions = [] + + def on_exit(state, event): + exit_transitions.append((state, event)) + + def on_enter(state, event): + enter_transitions.append((state, event)) + + m = fsm.FSM('start') + m.add_state('start', on_exit=on_exit) + m.add_state('down', on_enter=on_enter, on_exit=on_exit) + m.add_state('up', on_enter=on_enter, on_exit=on_exit) + m.add_transition('start', 'down', 'beat') + m.add_transition('down', 'up', 'jump') + m.add_transition('up', 'down', 'fall') + + m.initialize() + m.process_event('beat') + m.process_event('jump') + m.process_event('fall') + self.assertEqual([('down', 'beat'), + ('up', 'jump'), ('down', 'fall')], enter_transitions) + self.assertEqual([('down', 'jump'), ('up', 'fall')], exit_transitions) + + def 
test_run_iter(self): + up_downs = [] + for (old_state, new_state) in self.jumper.run_iter('jump'): + up_downs.append((old_state, new_state)) + if len(up_downs) >= 3: + break + self.assertEqual([('down', 'up'), ('up', 'down'), ('down', 'up')], + up_downs) + self.assertFalse(self.jumper.terminated) + self.assertEqual('up', self.jumper.current_state) + self.jumper.process_event('fall') + self.assertEqual('down', self.jumper.current_state) + + def test_run_send(self): + up_downs = [] + it = self.jumper.run_iter('jump') + while True: + up_downs.append(it.send(None)) + if len(up_downs) >= 3: + it.close() + break + self.assertEqual('up', self.jumper.current_state) + self.assertFalse(self.jumper.terminated) + self.assertEqual([('down', 'up'), ('up', 'down'), ('down', 'up')], + up_downs) + self.assertRaises(StopIteration, six.next, it) + + def test_run_send_fail(self): + up_downs = [] + it = self.jumper.run_iter('jump') + up_downs.append(six.next(it)) + self.assertRaises(excp.NotFound, it.send, 'fail') + it.close() + self.assertEqual([('down', 'up')], up_downs) + + def test_not_initialized(self): + self.assertRaises(fsm.NotInitialized, + self.jumper.process_event, 'jump') + + def test_iter(self): + transitions = list(self.jumper) + self.assertEqual(2, len(transitions)) + self.assertIn(('up', 'fall', 'down'), transitions) + self.assertIn(('down', 'jump', 'up'), transitions) + + def test_invalid_callbacks(self): + m = fsm.FSM('working') + m.add_state('working') + m.add_state('broken') + self.assertRaises(AssertionError, m.add_state, 'b', on_enter=2) + self.assertRaises(AssertionError, m.add_state, 'b', on_exit=2) diff --git a/taskflow/types/fsm.py b/taskflow/types/fsm.py new file mode 100644 index 00000000..cbe85b78 --- /dev/null +++ b/taskflow/types/fsm.py @@ -0,0 +1,290 @@ +# -*- coding: utf-8 -*- + +# Copyright (C) 2014 Yahoo! Inc. All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +try: + from collections import OrderedDict # noqa +except ImportError: + from ordereddict import OrderedDict # noqa + +import prettytable +import six + +from taskflow import exceptions as excp + + +class _Jump(object): + """A FSM transition tracks this data while jumping.""" + def __init__(self, name, on_enter, on_exit): + self.name = name + self.on_enter = on_enter + self.on_exit = on_exit + + +class NotInitialized(excp.TaskFlowException): + """Error raised when an action is attempted on a not inited machine.""" + + +class FSM(object): + """A finite state machine. + + This state machine can be used to automatically run a given set of + transitions and states in response to events (either from callbacks or from + generator/iterator send() values, see PEP 342). On each triggered event, a + on_enter and on_exit callback can also be provided which will be called to + perform some type of action on leaving a prior state and before entering a + new state. + + NOTE(harlowja): reactions will *only* be called when the generator/iterator + from run_iter() does *not* send back a new event (they will always be + called if the run() method is used). This allows for two unique ways (these + ways can also be intermixed) to use this state machine when using + run_iter(); one where *external* events trigger the next state transition + and one where *internal* reaction callbacks trigger the next state + transition. 
The other way to use this state machine is to skip using run() + or run_iter() completely and use the process_event() method explicitly and + trigger the events via some *external* functionality. + """ + def __init__(self, start_state): + self._transitions = {} + self._states = OrderedDict() + self._start_state = start_state + self._current = None + + @property + def start_state(self): + return self._start_state + + @property + def current_state(self): + if self._current is not None: + return self._current.name + return None + + @property + def terminated(self): + """Returns whether the state machine is in a terminal state.""" + if self._current is None: + return False + return self._states[self._current.name]['terminal'] + + def add_state(self, state, terminal=False, on_enter=None, on_exit=None): + """Adds a given state to the state machine. + + The on_enter and on_exit callbacks, if provided will be expected to + take two positional parameters, these being the state being exited (for + on_exit) or the state being entered (for on_enter) and a second + parameter which is the event that is being processed that caused the + state transition. + """ + if state in self._states: + raise excp.Duplicate("State '%s' already defined" % state) + if on_enter is not None: + assert six.callable(on_enter), "On enter callback must be callable" + if on_exit is not None: + assert six.callable(on_exit), "On exit callback must be callable" + self._states[state] = { + 'terminal': bool(terminal), + 'reactions': {}, + 'on_enter': on_enter, + 'on_exit': on_exit, + } + self._transitions[state] = OrderedDict() + + def add_reaction(self, state, event, reaction, *args, **kwargs): + """Adds a reaction that may get triggered by the given event & state. + + Reaction callbacks may (depending on how the state machine is ran) be + used after an event is processed (and a transition occurs) to cause the + machine to react to the newly arrived at stable state. 
+ + These callbacks are expected to accept three default positional + parameters (although more can be passed in via *args and **kwargs, + these will automatically get provided to the callback when it is + activated *ontop* of the three default). The three default parameters + are the last stable state, the new stable state and the event that + caused the transition to this new stable state to be arrived at. + + The expected result of a callback is expected to be a new event that + the callback wants the state machine to react to. This new event + may (depending on how the state machine is ran) get processed (and + this process typically repeats) until the state machine reaches a + terminal state. + """ + if state not in self._states: + raise excp.NotFound("Can not add a reaction to event '%s' for an" + " undefined state '%s'" % (event, state)) + assert six.callable(reaction), "Reaction callback must be callable" + if event not in self._states[state]['reactions']: + self._states[state]['reactions'][event] = (reaction, args, kwargs) + else: + raise excp.Duplicate("State '%s' reaction to event '%s'" + " already defined" % (state, event)) + + def add_transition(self, start, end, event): + """Adds an allowed transition from start -> end for the given event.""" + if start not in self._states: + raise excp.NotFound("Can not add a transition on event '%s' that" + " starts in a undefined state '%s'" % (event, + start)) + if end not in self._states: + raise excp.NotFound("Can not add a transition on event '%s' that" + " ends in a undefined state '%s'" % (event, + end)) + self._transitions[start][event] = _Jump(end, + self._states[end]['on_enter'], + self._states[start]['on_exit']) + + def process_event(self, event): + """Trigger a state change in response to the provided event.""" + current = self._current + if current is None: + raise NotInitialized("Can only process events after" + " being initialized (not before)") + if self._states[current.name]['terminal']: + raise 
excp.InvalidState("Can not transition from terminal" + " state '%s' on event '%s'" + % (current.name, event)) + if event not in self._transitions[current.name]: + raise excp.NotFound("Can not transition from state '%s' on" + " event '%s' (no defined transition)" + % (current.name, event)) + replacement = self._transitions[current.name][event] + if current.on_exit is not None: + current.on_exit(current.name, event) + if replacement.on_enter is not None: + replacement.on_enter(replacement.name, event) + self._current = replacement + return ( + self._states[replacement.name]['reactions'].get(event), + self._states[replacement.name]['terminal'], + ) + + def initialize(self): + """Sets up the state machine (sets current state to start state...).""" + if self._start_state not in self._states: + raise excp.NotFound("Can not start from a undefined" + " state '%s'" % (self._start_state)) + if self._states[self._start_state]['terminal']: + raise excp.InvalidState("Can not start from a terminal" + " state '%s'" % (self._start_state)) + self._current = _Jump(self._start_state, None, None) + + def run(self, event, initialize=True): + """Runs the state machine, using reactions only.""" + for transition in self.run_iter(event, initialize=initialize): + pass + + def run_iter(self, event, initialize=True): + """Returns a iterator/generator that will run the state machine. + + NOTE(harlowja): only one runner iterator/generator should be active for + a machine, if this is not observed then it is possible for + initialization and other local state to be corrupted and cause issues + when running... 
+ """ + if initialize: + self.initialize() + while True: + old_state = self.current_state + reaction, terminal = self.process_event(event) + new_state = self.current_state + try: + sent_event = yield (old_state, new_state) + except GeneratorExit: + break + if terminal: + break + if reaction is None and sent_event is None: + raise excp.NotFound("Unable to progress since no reaction (or" + " sent event) has been made available in" + " new state '%s' (moved to from state '%s'" + " in response to event '%s')" + % (new_state, old_state, event)) + elif sent_event is not None: + event = sent_event + else: + cb, args, kwargs = reaction + event = cb(old_state, new_state, event, *args, **kwargs) + + def __contains__(self, state): + return state in self._states + + @property + def states(self): + """Returns the state names.""" + return list(six.iterkeys(self._states)) + + @property + def events(self): + """Returns how many events exist.""" + c = 0 + for state in six.iterkeys(self._states): + c += len(self._transitions[state]) + return c + + def __iter__(self): + """Iterates over (start, event, end) transition tuples.""" + for state in six.iterkeys(self._states): + for event, target in six.iteritems(self._transitions[state]): + yield (state, event, target.name) + + def pformat(self, sort=True): + """Pretty formats the state + transition table into a string. + + NOTE(harlowja): the sort parameter can be provided to sort the states + and transitions by sort order; with it being provided as false the rows + will be iterated in addition order instead. 
+ """ + def orderedkeys(data): + if sort: + return sorted(six.iterkeys(data)) + return list(six.iterkeys(data)) + tbl = prettytable.PrettyTable( + ["Start", "Event", "End", "On Enter", "On Exit"]) + for state in orderedkeys(self._states): + prefix_markings = [] + if self.current_state == state: + prefix_markings.append("@") + postfix_markings = [] + if self.start_state == state: + postfix_markings.append("^") + if self._states[state]['terminal']: + postfix_markings.append("$") + pretty_state = "%s%s" % ("".join(prefix_markings), state) + if postfix_markings: + pretty_state += "[%s]" % "".join(postfix_markings) + if self._transitions[state]: + for event in orderedkeys(self._transitions[state]): + target = self._transitions[state][event] + row = [pretty_state, event, target.name] + if target.on_enter is not None: + try: + row.append(target.on_enter.__name__) + except AttributeError: + row.append(target.on_enter) + else: + row.append('') + if target.on_exit is not None: + try: + row.append(target.on_exit.__name__) + except AttributeError: + row.append(target.on_exit) + else: + row.append('') + tbl.add_row(row) + else: + tbl.add_row([pretty_state, "", "", "", ""]) + return tbl.get_string(print_empty=True) From 66839bdcafa448e48f4814e4ba0ebd008bc67c01 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Sun, 7 Sep 2014 09:53:22 -0700 Subject: [PATCH 188/188] Update oslo-incubator to 32e7f0b56f52742754 Pickup a couple of changes (mainly the first one listed below) that are useful to have before we release and will ensure we are compatible with python 2.6 and RHEL due to a bug in jsonutils. 
d9060f69d203 - Check for namedtuple_as_object support before using it 66142c3471fe - Make strutils.mask_password more secure Closes-Bug: #1361230 Change-Id: I2c3d86184830211b3163988a4054fc45e233c9ba --- taskflow/openstack/common/jsonutils.py | 8 ++++- taskflow/openstack/common/strutils.py | 44 ++++++++++++++++++-------- 2 files changed, 37 insertions(+), 15 deletions(-) diff --git a/taskflow/openstack/common/jsonutils.py b/taskflow/openstack/common/jsonutils.py index dec02a95..8231688c 100644 --- a/taskflow/openstack/common/jsonutils.py +++ b/taskflow/openstack/common/jsonutils.py @@ -44,7 +44,13 @@ if sys.version_info < (2, 7): # simplejson module if available try: import simplejson as json - is_simplejson = True + # NOTE(mriedem): Make sure we have a new enough version of simplejson + # to support the namedobject_as_tuple argument. This can be removed + # in the Kilo release when python 2.6 support is dropped. + if 'namedtuple_as_object' in inspect.getargspec(json.dumps).args: + is_simplejson = True + else: + import json except ImportError: import json else: diff --git a/taskflow/openstack/common/strutils.py b/taskflow/openstack/common/strutils.py index 660704e3..2f0fd659 100644 --- a/taskflow/openstack/common/strutils.py +++ b/taskflow/openstack/common/strutils.py @@ -50,26 +50,37 @@ SLUGIFY_STRIP_RE = re.compile(r"[^\w\s-]") SLUGIFY_HYPHENATE_RE = re.compile(r"[-\s]+") -# NOTE(flaper87): The following 3 globals are used by `mask_password` +# NOTE(flaper87): The following globals are used by `mask_password` _SANITIZE_KEYS = ['adminPass', 'admin_pass', 'password', 'admin_password'] # NOTE(ldbragst): Let's build a list of regex objects using the list of # _SANITIZE_KEYS we already have. This way, we only have to add the new key # to the list of _SANITIZE_KEYS and we can generate regular expressions # for XML and JSON automatically. 
-_SANITIZE_PATTERNS = [] -_FORMAT_PATTERNS = [r'(%(key)s\s*[=]\s*[\"\']).*?([\"\'])', - r'(<%(key)s>).*?()', - r'([\"\']%(key)s[\"\']\s*:\s*[\"\']).*?([\"\'])', - r'([\'"].*?%(key)s[\'"]\s*:\s*u?[\'"]).*?([\'"])', - r'([\'"].*?%(key)s[\'"]\s*,\s*\'--?[A-z]+\'\s*,\s*u?[\'"])' - '.*?([\'"])', - r'(%(key)s\s*--?[A-z]+\s*)\S+(\s*)'] +_SANITIZE_PATTERNS_2 = [] +_SANITIZE_PATTERNS_1 = [] + +# NOTE(amrith): Some regular expressions have only one parameter, some +# have two parameters. Use different lists of patterns here. +_FORMAT_PATTERNS_1 = [r'(%(key)s\s*[=]\s*)[^\s^\'^\"]+'] +_FORMAT_PATTERNS_2 = [r'(%(key)s\s*[=]\s*[\"\']).*?([\"\'])', + r'(%(key)s\s+[\"\']).*?([\"\'])', + r'([-]{2}%(key)s\s+)[^\'^\"^=^\s]+([\s]*)', + r'(<%(key)s>).*?()', + r'([\"\']%(key)s[\"\']\s*:\s*[\"\']).*?([\"\'])', + r'([\'"].*?%(key)s[\'"]\s*:\s*u?[\'"]).*?([\'"])', + r'([\'"].*?%(key)s[\'"]\s*,\s*\'--?[A-z]+\'\s*,\s*u?' + '[\'"]).*?([\'"])', + r'(%(key)s\s*--?[A-z]+\s*)\S+(\s*)'] for key in _SANITIZE_KEYS: - for pattern in _FORMAT_PATTERNS: + for pattern in _FORMAT_PATTERNS_2: reg_ex = re.compile(pattern % {'key': key}, re.DOTALL) - _SANITIZE_PATTERNS.append(reg_ex) + _SANITIZE_PATTERNS_2.append(reg_ex) + + for pattern in _FORMAT_PATTERNS_1: + reg_ex = re.compile(pattern % {'key': key}, re.DOTALL) + _SANITIZE_PATTERNS_1.append(reg_ex) def int_from_bool_as_string(subject): @@ -289,7 +300,12 @@ def mask_password(message, secret="***"): if not any(key in message for key in _SANITIZE_KEYS): return message - secret = r'\g<1>' + secret + r'\g<2>' - for pattern in _SANITIZE_PATTERNS: - message = re.sub(pattern, secret, message) + substitute = r'\g<1>' + secret + r'\g<2>' + for pattern in _SANITIZE_PATTERNS_2: + message = re.sub(pattern, substitute, message) + + substitute = r'\g<1>' + secret + for pattern in _SANITIZE_PATTERNS_1: + message = re.sub(pattern, substitute, message) + return message