Cleanup some of the example code & docs

This commit makes a set of small adjustments to the examples:

- Rework some of the comments to be clearer.
- Add links to the original source tree file.
- Rename some of the examples to make clear which concept each example is intended to show.
- Move some common example functionality to the example utility file.

Change-Id: I858e0dbf72fe8cb40a05bfdbb0857720ffb71c7f
parent e4810f0d31
commit 9a239a0a2e
@@ -11,6 +11,7 @@ sys.path.insert(0, os.path.abspath('../..'))
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.doctest',
    'sphinx.ext.extlinks',
    'sphinx.ext.inheritance_diagram',
    'sphinx.ext.intersphinx',
    'sphinx.ext.viewcode',
@@ -37,6 +38,7 @@ exclude_patterns = ['_build']

# General information about the project.
project = u'TaskFlow'
copyright = u'2013-2014, OpenStack Foundation'
source_tree = 'http://git.openstack.org/cgit/openstack/taskflow/tree'

# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = True
@@ -51,6 +53,10 @@ pygments_style = 'sphinx'
# Prefixes that are ignored for sorting the Python module index
modindex_common_prefix = ['taskflow.']

# Shortened external links.
extlinks = {
    'example': (source_tree + '/taskflow/examples/%s.py', ''),
}

# -- Options for HTML output --------------------------------------------------
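With the extlinks entry above, a role such as :example:`simple_linear` in the documentation below becomes a link into the source tree. A rough illustration of the substitution, written as plain Python rather than Sphinx's actual internals:

import posixpath

# The same pieces that conf.py wires together (values copied from above).
source_tree = 'http://git.openstack.org/cgit/openstack/taskflow/tree'
url_template = source_tree + '/taskflow/examples/%s.py'


def example_url(name):
    # What an :example:`<name>` reference ends up pointing at.
    return url_template % name


print(example_url('simple_linear'))
# http://git.openstack.org/cgit/openstack/taskflow/tree/taskflow/examples/simple_linear.py
print(posixpath.basename(example_url('simple_linear')))  # simple_linear.py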
@@ -1,32 +1,59 @@
Linear phone calls
Making phone calls
==================

.. note::

    Full source located at :example:`simple_linear`.

.. literalinclude:: ../../taskflow/examples/simple_linear.py
    :language: python
    :linenos:
    :lines: 16-
    :emphasize-lines: 16-28
    :emphasize-lines: 16-29

Linear phone calls (reverting)
==============================
Making phone calls (automatically reverting)
============================================

.. note::

    Full source located at :example:`reverting_linear`.

.. literalinclude:: ../../taskflow/examples/reverting_linear.py
    :language: python
    :linenos:
    :lines: 16-
    :emphasize-lines: 17-32
    :emphasize-lines: 17-26

Building a car
==============

.. note::

    Full source located at :example:`build_a_car`.

.. literalinclude:: ../../taskflow/examples/build_a_car.py
    :language: python
    :linenos:
    :lines: 16-
    :emphasize-lines: 20-26
    :emphasize-lines: 22-28

Task dependencies
=================
Linear equation solver (explicit dependencies)
==============================================

.. note::

    Full source located at :example:`calculate_linear`.

.. literalinclude:: ../../taskflow/examples/calculate_linear.py
    :language: python
    :linenos:
    :lines: 16-
    :emphasize-lines: 17-27

Linear equation solver (inferred dependencies)
==============================================

``Source:`` :example:`graph_flow.py`

.. literalinclude:: ../../taskflow/examples/graph_flow.py
    :language: python
@@ -34,8 +61,12 @@ Task dependencies
    :lines: 16-
    :emphasize-lines: 18-31

Parallel calculations
=====================
Linear equation solver (in parallel)
====================================

.. note::

    Full source located at :example:`calculate_in_parallel`

.. literalinclude:: ../../taskflow/examples/calculate_in_parallel.py
    :language: python
@@ -43,8 +74,12 @@ Parallel calculations
    :lines: 16-
    :emphasize-lines: 18-21

Parallel pseudo-volume-create
=============================
Creating a volume (in parallel)
===============================

.. note::

    Full source located at :example:`create_parallel_volume`

.. literalinclude:: ../../taskflow/examples/create_parallel_volume.py
    :language: python
@@ -52,8 +87,25 @@ Parallel pseudo-volume-create
    :lines: 16-
    :emphasize-lines: 21-23

Suspended workflow reloaded
===========================
Storing & emitting a bill
=========================

.. note::

    Full source located at :example:`fake_billing`

.. literalinclude:: ../../taskflow/examples/fake_billing.py
    :language: python
    :linenos:
    :lines: 16-
    :emphasize-lines: 24-32

Suspending a workflow & resuming
================================

.. note::

    Full source located at :example:`resume_from_backend`

.. literalinclude:: ../../taskflow/examples/resume_from_backend.py
    :language: python
@@ -61,8 +113,12 @@ Suspended workflow reloaded
    :lines: 16-
    :emphasize-lines: 22-39

Resumable vm-pseudo-boot
========================
Creating a virtual machine (resumable)
======================================

.. note::

    Full source located at :example:`resume_vm_boot`

.. literalinclude:: ../../taskflow/examples/resume_vm_boot.py
    :language: python
@@ -70,8 +126,12 @@ Resumable vm-pseudo-boot
    :lines: 16-
    :emphasize-lines: 32-34

Resumable volume-pseudo-create
==============================
Creating a volume (resumable)
=============================

.. note::

    Full source located at :example:`resume_volume_create`

.. literalinclude:: ../../taskflow/examples/resume_volume_create.py
    :language: python
@@ -79,8 +139,12 @@ Resumable volume-pseudo-create
    :lines: 16-
    :emphasize-lines: 28-30

Running engines by iteration
============================
Running engines via iteration
=============================

.. note::

    Full source located at :example:`run_by_iter`

.. literalinclude:: ../../taskflow/examples/run_by_iter.py
    :language: python
@@ -88,8 +152,12 @@ Running engines by iteration
    :lines: 16-
    :emphasize-lines: 24-27

Retry controlling
=================
Controlling retries using a retry controller
============================================

.. note::

    Full source located at :example:`retry_flow`

.. literalinclude:: ../../taskflow/examples/retry_flow.py
    :language: python
@@ -32,14 +32,16 @@ from taskflow.patterns import graph_flow as gf
from taskflow.patterns import linear_flow as lf
from taskflow import task

import example_utils as eu  # noqa

# INTRO: This examples shows how a graph_flow and linear_flow can be used
# together to execute non-dependent tasks by going through the steps required
# to build a simplistic car (an assembly line if you will). It also shows
# how raw functions can be wrapped into a task object instead of being forced
# to use the more heavy task base class. This is useful in scenarios where
# pre-existing code has functions that you easily want to plug-in to taskflow,
# without requiring a large amount of code changes.

# INTRO: This example shows how a graph flow and linear flow can be used
# together to execute dependent & non-dependent tasks by going through the
# steps required to build a simplistic car (an assembly line if you will). It
# also shows how raw functions can be wrapped into a task object instead of
# being forced to use the more *heavy* task base class. This is useful in
# scenarios where pre-existing code has functions that you easily want to
# plug-in to taskflow, without requiring a large amount of code changes.


def build_frame():
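The wrapping of plain functions that the new INTRO describes is done with task.FunctorTask, which appears later in this file's diff. A minimal sketch of the idea, independent of the car example (the function, flow and symbol names here are invented for illustration):

import taskflow.engines
from taskflow.patterns import linear_flow as lf
from taskflow import task


def add_numbers(x, y):
    # A pre-existing plain function; no taskflow base class required.
    return x + y


# FunctorTask wraps the callable; its argument names become requirements and
# 'provides' names the symbol its return value is stored under.
flow = lf.Flow('wrap-demo').add(
    task.FunctorTask(add_numbers, provides='total'),
)

# The engine pulls 'x' and 'y' from the provided store and records the
# wrapped function's return value under 'total'.
results = taskflow.engines.run(flow, store={'x': 1, 'y': 2})
print(results)  # expected to include {'total': 3, ...}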
@@ -58,6 +60,9 @@ def build_wheels():
    return '4'


# These just return true to indicate success, they would in the real world
# do more than just that.

def install_engine(frame, engine):
    return True

@@ -75,13 +80,7 @@ def install_wheels(frame, engine, engine_installed, wheels):


def trash(**kwargs):
    print_wrapped("Throwing away pieces of car!")


def print_wrapped(text):
    print("-" * (len(text)))
    print(text)
    print("-" * (len(text)))
    eu.print_wrapped("Throwing away pieces of car!")


def startup(**kwargs):
@@ -114,6 +113,9 @@ def task_watch(state, details):

flow = lf.Flow("make-auto").add(
    task.FunctorTask(startup, revert=trash, provides='ran'),
    # A graph flow allows automatic dependency based ordering, the ordering
    # is determined by analyzing the symbols required and provided and ordering
    # execution based on a functioning order (if one exists).
    gf.Flow("install-parts").add(
        task.FunctorTask(build_frame, provides='frame'),
        task.FunctorTask(build_engine, provides='engine'),
@@ -141,7 +143,7 @@ flow = lf.Flow("make-auto").add(
# the tasks should produce, in this example this specification will influence
# what those tasks do and what output they create. Different tasks depend on
# different information from this specification, all of which will be provided
# automatically by the engine.
# automatically by the engine to those tasks.
spec = {
    "frame": 'steel',
    "engine": 'honda',
@@ -164,7 +166,7 @@ engine = taskflow.engines.load(flow, store={'spec': spec.copy()})
engine.notifier.register('*', flow_watch)
engine.task_notifier.register('*', task_watch)

print_wrapped("Building a car")
eu.print_wrapped("Building a car")
engine.run()

# Alter the specification and ensure that the reverting logic gets triggered
@@ -177,8 +179,8 @@ engine = taskflow.engines.load(flow, store={'spec': spec.copy()})
engine.notifier.register('*', flow_watch)
engine.task_notifier.register('*', task_watch)

print_wrapped("Building a wrong car that doesn't match specification")
eu.print_wrapped("Building a wrong car that doesn't match specification")
try:
    engine.run()
except Exception as e:
    print_wrapped("Flow failed: %s" % e)
    eu.print_wrapped("Flow failed: %s" % e)
@@ -29,8 +29,11 @@ import taskflow.engines
from taskflow.patterns import graph_flow as gf
from taskflow import task

import example_utils as eu  # noqa

# In this example we demonstrate use of TargetedFlow to make oversimplified

# In this example we demonstrate use of a target flow (a flow that only
# executes up to a specified target) to make an *oversimplified* pseudo
# build system. It pretends to compile all sources to object files and
# link them into an executable. It also can build docs, but this can be
# "switched off" via targeted flow special power -- ability to ignore
@@ -75,7 +78,7 @@ class BuildDocsTask(task.Task):


def make_flow_and_store(source_files, executable_only=False):
    flow = gf.TargetedFlow('build flow')
    flow = gf.TargetedFlow('build-flow')
    object_targets = []
    store = {}
    for source in source_files:
@@ -97,12 +100,12 @@ def make_flow_and_store(source_files, executable_only=False):
    return flow, store


SOURCE_FILES = ['first.c', 'second.cpp', 'main.cpp']
if __name__ == "__main__":
    SOURCE_FILES = ['first.c', 'second.cpp', 'main.cpp']
    eu.print_wrapped('Running all tasks:')
    flow, store = make_flow_and_store(SOURCE_FILES)
    taskflow.engines.run(flow, store=store)

print('Running all tasks:')
flow, store = make_flow_and_store(SOURCE_FILES)
taskflow.engines.run(flow, store=store)

print('\nBuilding executable, no docs:')
flow, store = make_flow_and_store(SOURCE_FILES, executable_only=True)
taskflow.engines.run(flow, store=store)
    eu.print_wrapped('Building executable, no docs:')
    flow, store = make_flow_and_store(SOURCE_FILES, executable_only=True)
    taskflow.engines.run(flow, store=store)
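For readers unfamiliar with the targeted flow described in the rewritten comment: a TargetedFlow behaves like a graph flow but only runs the tasks needed to reach a chosen target. A minimal, hypothetical sketch (the task and flow names are invented; the real example builds objects, an executable and docs):

import taskflow.engines
from taskflow.patterns import graph_flow as gf
from taskflow import task


class Step(task.Task):
    def execute(self):
        print("running %s" % self.name)


compile_it = Step('compile')
link_it = Step('link')
docs_it = Step('docs')

flow = gf.TargetedFlow('demo-build')
flow.add(compile_it, link_it, docs_it)
# Explicit ordering: compiling happens before linking; docs is independent.
flow.link(compile_it, link_it)

# With the target set to the link step, the independent docs task is ignored.
flow.set_target(link_it)
taskflow.engines.run(flow)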
@@ -31,20 +31,20 @@ from taskflow.patterns import linear_flow as lf
from taskflow.patterns import unordered_flow as uf
from taskflow import task

# INTRO: This examples shows how linear_flow and unordered_flow can be used
# together to execute calculations in parallel and then use the
# result for the next task. Adder task is used for all calculations
# and arguments' bindings are used to set correct parameters to the task.
# INTRO: This example shows how a linear flow and an unordered flow can be
# used together to execute calculations in parallel and then use the
# result for the next task/s. The adder task is used for all calculations
# and argument bindings are used to set correct parameters for each task.


# This task provides some values from as a result of execution, this can be
# useful when you want to provide values from a static set to other tasks that
# depend on those values existing before those tasks can run.
#
# This method is *depreciated* in favor of a simpler mechanism that just
# provides those values on engine running by prepopulating the storage backend
# before your tasks are ran (which accomplishes a similar goal in a more
# uniform manner).
# NOTE(harlowja): this usage is *deprecated* in favor of a simpler mechanism
# that provides those values on engine running by prepopulating the storage
# backend before your tasks are ran (which accomplishes a similar goal in a
# more uniform manner).
class Provider(task.Task):
    def __init__(self, name, *args, **kwargs):
        super(Provider, self).__init__(name=name, **kwargs)
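A rough sketch of the structure this INTRO describes: an unordered flow (whose tasks have no ordering constraints between them) nested inside a linear flow, so the final task only runs once all of the unordered calculations are done. Names are illustrative, and the initial values are injected via the engine's store (the "prepopulating the storage backend" approach the NOTE above prefers over Provider tasks); the real example additionally switches to a parallel engine so the unordered tasks can actually run concurrently.

import taskflow.engines
from taskflow.patterns import linear_flow as lf
from taskflow.patterns import unordered_flow as uf
from taskflow import task


class Adder(task.Task):
    def execute(self, x, y):
        return x + y


flow = lf.Flow('math-pipeline').add(
    # The two adders are independent of each other, so they live in an
    # unordered flow; with a parallel engine they may run at the same time.
    uf.Flow('partial-sums').add(
        Adder('add-a-b', provides='s1', rebind=['a', 'b']),
        Adder('add-c-d', provides='s2', rebind=['c', 'd']),
    ),
    # This task needs both partial sums, so it sits after the unordered flow
    # inside the enclosing linear flow.
    Adder('combine', provides='total', rebind=['s1', 's2']),
)

# No Provider task: the constants are simply seeded into storage via store=.
results = taskflow.engines.run(flow, store={'a': 1, 'b': 2, 'c': 3, 'd': 4})
print(results)  # expected to contain {'total': 10, ...}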
@@ -30,11 +30,11 @@ from taskflow.patterns import linear_flow as lf
from taskflow import task


# INTRO: In this example linear_flow is used to group four tasks to calculate
# INTRO: In this example a linear flow is used to group four tasks to calculate
# a value. A single added task is used twice, showing how this can be done
# and the twice added task takes in different bound values. In the first case
# it uses default parameters ('x' and 'y') and in the second case arguments
# are bound with ('z', 'd') keys from the engines storage mechanism.
# are bound with ('z', 'd') keys from the engine's internal storage mechanism.
#
# A multiplier task uses a binding that another task also provides, but this
# example explicitly shows that 'z' parameter is bound with 'a' key
@@ -47,10 +47,10 @@ from taskflow import task
# useful when you want to provide values from a static set to other tasks that
# depend on those values existing before those tasks can run.
#
# This method is *depreciated* in favor of a simpler mechanism that just
# provides those values on engine running by prepopulating the storage backend
# before your tasks are ran (which accomplishes a similar goal in a more
# uniform manner).
# NOTE(harlowja): this usage is *deprecated* in favor of a simpler mechanism
# that just provides those values on engine running by prepopulating the
# storage backend before your tasks are ran (which accomplishes a similar goal
# in a more uniform manner).
class Provider(task.Task):

    def __init__(self, name, *args, **kwargs):
@@ -89,8 +89,8 @@ class Multiplier(task.Task):

# Note here that the ordering is established so that the correct sequences
# of operations occurs where the adding and multiplying is done according
# to the expected and typical mathematical model. A graph_flow could also be
# used here to automatically ensure the correct ordering.
# to the expected and typical mathematical model. A graph flow could also be
# used here to automatically infer & ensure the correct ordering.
flow = lf.Flow('root').add(
    # Provide the initial values for other tasks to depend on.
    #
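The argument bindings these comments describe are expressed with the rebind argument when a task is constructed. A small sketch, with invented symbol names, of default parameters versus rebound storage keys:

import taskflow.engines
from taskflow.patterns import linear_flow as lf
from taskflow import task


class Adder(task.Task):
    # Default requirements are 'x' and 'y', taken from the execute signature.
    def execute(self, x, y):
        return x + y


flow = lf.Flow('two-adds').add(
    # First use: takes 'x' and 'y' straight from storage, provides 'z'.
    Adder('add-xy', provides='z'),
    # Second use of the same task class: its positional arguments are rebound,
    # so x comes from the 'z' key and y from the 'd' key in storage.
    Adder('add-zd', provides='total', rebind=['z', 'd']),
)

results = taskflow.engines.run(flow, store={'x': 1, 'y': 2, 'd': 10})
print(results)  # expected to contain {'z': 3, 'total': 13, ...}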
@@ -35,6 +35,12 @@ except ImportError:
    SQLALCHEMY_AVAILABLE = False


def print_wrapped(text):
    print("-" * (len(text)))
    print(text)
    print("-" * (len(text)))


def rm_path(persist_path):
    if not os.path.exists(persist_path):
        return
@@ -70,7 +70,7 @@ class UrlCaller(object):

# Since engines save the output of tasks to a optional persistent storage
# backend resources have to be dealt with in a slightly different manner since
# resources are transient and can not be persisted (or serialized). For tasks
# resources are transient and can *not* be persisted (or serialized). For tasks
# that require access to a set of resources it is a common pattern to provide
# a object (in this case this object) on construction of those tasks via the
# task constructor.
@@ -149,9 +149,9 @@ class DeclareSuccess(task.Task):
        print("All data processed and sent to %s" % (sent_to))


# Resources (db handles and similar) of course can't be persisted so we need
# to make sure that we pass this resource fetcher to the tasks constructor so
# that the tasks have access to any needed resources (the resources are
# Resources (db handles and similar) of course can *not* be persisted so we
# need to make sure that we pass this resource fetcher to the tasks constructor
# so that the tasks have access to any needed resources (the resources are
# lazily loaded so that they are only created when they are used).
resources = ResourceFetcher()
flow = lf.Flow("initialize-me")
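The pattern these comments describe, handing a non-persistable resource object to tasks through their constructors rather than through (persisted) storage, can be sketched roughly as below. The ResourceFetcher and SendBill classes here are stand-ins for illustration, not the ones from the fake billing example:

from taskflow import task


class ResourceFetcher(object):
    """Stand-in holder of transient resources (db handles, url openers, ...)."""

    def __init__(self):
        self._connection = None

    @property
    def connection(self):
        # Lazily created so the resource only exists when actually used.
        if self._connection is None:
            self._connection = object()  # imagine a real db/url connection
        return self._connection


class SendBill(task.Task):
    def __init__(self, resources, **kwargs):
        super(SendBill, self).__init__(**kwargs)
        # The resource object rides on the task instance itself instead of
        # flowing through the engine's persistable storage.
        self._resources = resources

    def execute(self, bill):
        conn = self._resources.connection
        print("sending %s over %s" % (bill, conn))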
@@ -31,20 +31,20 @@ from taskflow.patterns import linear_flow as lf
from taskflow import task


# In this example there are complex dependencies between tasks that are used to
# perform a simple set of linear equations.
# In this example there are complex *inferred* dependencies between tasks that
# are used to perform a simple set of linear equations.
#
# As you will see below the tasks just define what they require as input
# and produce as output (named values). Then the user doesn't care about
# ordering the TASKS (in this case the tasks calculate pieces of the overall
# ordering the tasks (in this case the tasks calculate pieces of the overall
# equation).
#
# As you will notice graph_flow resolves dependencies automatically using the
# tasks requirements and provided values and no ordering dependency has to be
# manually created.
# As you will notice a graph flow resolves dependencies automatically using the
# tasks symbol requirements and provided symbol values and no ordering
# dependency has to be manually created.
#
# Also notice that flows of any types can be nested into a graph_flow; subflows
# dependencies will be resolved too!! Pretty cool right!
# Also notice that flows of any types can be nested into a graph flow; showing
# that subflow dependencies (and associated ordering) will be inferred too.


class Adder(task.Task):
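A compact sketch of the automatic ordering those comments describe. The task and symbol names are made up, but the mechanism (requires/provides driving execution order in a graph flow) is the one the graph_flow example relies on:

import taskflow.engines
from taskflow.patterns import graph_flow as gf
from taskflow import task


class Adder(task.Task):
    def execute(self, x, y):
        return x + y


class Multiplier(task.Task):
    def execute(self, total, factor):
        return total * factor


flow = gf.Flow('solve').add(
    # Added "out of order" on purpose; the graph flow works out that the
    # multiplier needs 'total', which only the adder provides, and runs the
    # adder first.
    Multiplier('scale', provides='answer', rebind=['total', 'factor']),
    Adder('sum-up', provides='total', rebind=['x', 'y']),
)

results = taskflow.engines.run(flow, store={'x': 1, 'y': 2, 'factor': 10})
print(results)  # expected to contain {'answer': 30, ...}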
@@ -35,7 +35,7 @@ from taskflow.persistence import logbook
from taskflow import task
from taskflow.utils import persistence_utils as p_utils

import example_utils  # noqa
import example_utils as eu  # noqa

# INTRO: In this example we create two tasks, one that will say hi and one
# that will say bye with optional capability to raise an error while
@@ -49,12 +49,6 @@ import example_utils  # noqa
# the database during both of these modes (failing or not failing).


def print_wrapped(text):
    print("-" * (len(text)))
    print(text)
    print("-" * (len(text)))


class HiTask(task.Task):
    def execute(self):
        print("Hi!")
@@ -84,7 +78,7 @@ def make_flow(blowup=False):
# Persist the flow and task state here, if the file/dir exists already blowup
# if not don't blowup, this allows a user to see both the modes and to see
# what is stored in each case.
if example_utils.SQLALCHEMY_AVAILABLE:
if eu.SQLALCHEMY_AVAILABLE:
    persist_path = os.path.join(tempfile.gettempdir(), "persisting.db")
    backend_uri = "sqlite:///%s" % (persist_path)
else:
@@ -96,7 +90,7 @@ if os.path.exists(persist_path):
else:
    blowup = True

with example_utils.get_backend(backend_uri) as backend:
with eu.get_backend(backend_uri) as backend:
    # Now we can run.
    engine_config = {
        'backend': backend,
@@ -108,17 +102,17 @@ with example_utils.get_backend(backend_uri) as backend:
    # did exist, assume we won't blowup (and therefore this shows the undo
    # and redo that a flow will go through).
    flow = make_flow(blowup=blowup)
    print_wrapped("Running")
    eu.print_wrapped("Running")
    try:
        eng = engines.load(flow, **engine_config)
        eng.run()
        if not blowup:
            example_utils.rm_path(persist_path)
            eu.rm_path(persist_path)
    except Exception:
        # NOTE(harlowja): don't exit with non-zero status code, so that we can
        # print the book contents, as well as avoiding exiting also makes the
        # unit tests (which also runs these examples) pass.
        traceback.print_exc(file=sys.stdout)

    print_wrapped("Book contents")
    eu.print_wrapped("Book contents")
    print(p_utils.pformat(engine_config['book']))
@@ -33,7 +33,7 @@ from taskflow.patterns import linear_flow as lf
from taskflow import task
from taskflow.utils import persistence_utils as p_utils

import example_utils  # noqa
import example_utils as eu  # noqa

# INTRO: In this example linear_flow is used to group three tasks, one which
# will suspend the future work the engine may do. This suspend engine is then
@@ -53,20 +53,13 @@ import example_utils  # noqa
#
# python taskflow/examples/resume_from_backend.py \
# zookeeper://127.0.0.1:2181/taskflow/resume_from_backend/
#


# UTILITY FUNCTIONS #########################################


def print_wrapped(text):
    print("-" * (len(text)))
    print(text)
    print("-" * (len(text)))


def print_task_states(flowdetail, msg):
    print_wrapped(msg)
    eu.print_wrapped(msg)
    print("Flow '%s' state: %s" % (flowdetail.name, flowdetail.state))
    # Sort by these so that our test validation doesn't get confused by the
    # order in which the items in the flow detail can be in.
@@ -106,7 +99,7 @@ def flow_factory():

# INITIALIZE PERSISTENCE ####################################

with example_utils.get_backend() as backend:
with eu.get_backend() as backend:
    logbook = p_utils.temporary_log_book(backend)

    # CREATE AND RUN THE FLOW: FIRST ATTEMPT ####################
@@ -117,13 +110,13 @@ with example_utils.get_backend() as backend:
        backend=backend)

    print_task_states(flowdetail, "At the beginning, there is no state")
    print_wrapped("Running")
    eu.print_wrapped("Running")
    engine.run()
    print_task_states(flowdetail, "After running")

    # RE-CREATE, RESUME, RUN ####################################

    print_wrapped("Resuming and running again")
    eu.print_wrapped("Resuming and running again")

    # NOTE(harlowja): reload the flow detail from backend, this will allow us
    # to resume the flow from its suspended state, but first we need to search
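The resume step that the trailing NOTE refers to works by rebuilding the same flow and handing the previously persisted flow detail back to engines.load. A rough, hypothetical sketch of that shape, using the example utility module and the persistence_utils helpers seen above; treat it as an outline of the mechanism rather than a drop-in replacement for the example:

import taskflow.engines
from taskflow.patterns import linear_flow as lf
from taskflow import task
from taskflow.utils import persistence_utils as p_utils

import example_utils as eu  # noqa


class Echo(task.Task):
    def execute(self):
        print(self.name)


def flow_factory():
    return lf.Flow('demo').add(Echo('one'), Echo('two'))


with eu.get_backend() as backend:
    book = p_utils.temporary_log_book(backend)
    flow = flow_factory()
    flowdetail = p_utils.create_flow_detail(flow, book, backend)

    # First run: task/flow state is persisted into the backend as the engine
    # makes progress.
    taskflow.engines.load(flow, flow_detail=flowdetail,
                          book=book, backend=backend).run()

    # Later (possibly in another process): rebuild the same flow, reload the
    # persisted flow detail, and load an engine against it so it carries on
    # from the recorded state instead of starting over.
    flow_again = flow_factory()
    taskflow.engines.load(flow_again, flow_detail=flowdetail,
                          book=book, backend=backend).run()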
@@ -43,7 +43,7 @@ from taskflow import task
from taskflow.utils import eventlet_utils as e_utils
from taskflow.utils import persistence_utils as p_utils

import example_utils  # noqa
import example_utils as eu  # noqa

# INTRO: This examples shows how a hierarchy of flows can be used to create a
# vm in a reliable & resumable manner using taskflow + a miniature version of
@@ -61,12 +61,6 @@ def slow_down(how_long=0.5):
    time.sleep(how_long)


def print_wrapped(text):
    print("-" * (len(text)))
    print(text)
    print("-" * (len(text)))


class PrintText(task.Task):
    """Just inserts some text print outs in a workflow."""
    def __init__(self, print_what, no_slow=False):
@@ -77,10 +71,10 @@ class PrintText(task.Task):

    def execute(self):
        if self._no_slow:
            print_wrapped(self._text)
            eu.print_wrapped(self._text)
        else:
            with slow_down():
                print_wrapped(self._text)
                eu.print_wrapped(self._text)


class DefineVMSpec(task.Task):
@@ -229,10 +223,10 @@ def create_flow():
        PrintText("Instance is running!", no_slow=True))
    return flow

print_wrapped("Initializing")
eu.print_wrapped("Initializing")

# Setup the persistence & resumption layer.
with example_utils.get_backend() as backend:
with eu.get_backend() as backend:
    try:
        book_id, flow_id = sys.argv[2].split("+", 1)
        if not uuidutils.is_uuid_like(book_id):
@@ -275,7 +269,7 @@ with example_utils.get_backend() as backend:
        engine_conf=engine_conf)

    # Make me my vm please!
    print_wrapped('Running')
    eu.print_wrapped('Running')
    engine.run()

# How to use.
@@ -31,21 +31,15 @@ from taskflow.patterns import linear_flow as lf
from taskflow import task

# INTRO: In this example we create three tasks, each of which ~calls~ a given
# number (provided as a function input), one of those tasks fails calling a
# number (provided as a function input), one of those tasks *fails* calling a
# given number (the suzzie calling); this causes the workflow to enter the
# reverting process, which activates the revert methods of the previous two
# phone ~calls~.
#
# This simulated calling makes it appear like all three calls occur or all
# three don't occur (transaction-like capabilities). No persistence layer is
# used here so reverting and executing will not handle process failure.
#
# This example shows a basic usage of the taskflow structures without involving
# the complexity of persistence. Using the structures that taskflow provides
# via tasks and flows makes it possible for you to easily at a later time
# hook in a persistence layer (and then gain the functionality that offers)
# when you decide the complexity of adding that layer in is 'worth it' for your
# applications usage pattern (which some applications may not need).
# used here so reverting and executing will *not* be tolerant of process
# failure.


class CallJim(task.Task):
@@ -94,6 +88,6 @@ except Exception as e:
    # how to deal with multiple tasks failing while running.
    #
    # You will also note that this is not a problem in this case since no
    # parallelism is involved; this is ensured by the usage of a linear flow,
    # which runs serially as well as the default engine type which is 'serial'.
    # parallelism is involved; this is ensured by the usage of a linear flow
    # and the default engine type which is 'serial' vs being 'parallel'.
    print("Flow failed: %s" % e)
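The reverting behaviour this INTRO describes comes from tasks defining a revert method alongside execute: when a task in the flow fails, the engine walks back through the finished tasks (in reverse order) and calls their reverts. A stripped-down sketch of that shape; the task names below are invented and are not the Jim/Joe/Suzzie tasks of the actual example:

import taskflow.engines
from taskflow.patterns import linear_flow as lf
from taskflow import task


class Call(task.Task):
    def execute(self, phone_book):
        print("calling %s" % phone_book[self.name])

    def revert(self, phone_book, **kwargs):
        # Called while the engine rolls the flow back after a failure.
        print("hanging up on %s" % phone_book[self.name])


class FailingCall(Call):
    def execute(self, phone_book):
        raise IOError("line is busy")


flow = lf.Flow('make-calls').add(
    Call('alice'),
    Call('bob'),
    FailingCall('carol'),
)

try:
    taskflow.engines.run(flow, store={'phone_book': {'alice': '555-0100',
                                                     'bob': '555-0101',
                                                     'carol': '555-0102'}})
except Exception as e:
    # By the time we get here the engine has already reverted the earlier
    # calls, giving the all-or-nothing (transaction-like) behaviour above.
    print("Flow failed: %s" % e)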
@@ -36,12 +36,13 @@ from taskflow import task
# sequence (the flow) and then passing the work off to an engine, with some
# initial data to be ran in a reliable manner.
#
# This example shows a basic usage of the taskflow structures without involving
# the complexity of persistence. Using the structures that taskflow provides
# via tasks and flows makes it possible for you to easily at a later time
# hook in a persistence layer (and then gain the functionality that offers)
# when you decide the complexity of adding that layer in is 'worth it' for your
# applications usage pattern (which some applications may not need).
# NOTE(harlowja): This example shows a basic usage of the taskflow structures
# without involving the complexity of persistence. Using the structures that
# taskflow provides via tasks and flows makes it possible for you to easily at
# a later time hook in a persistence layer (and then gain the functionality
# that offers) when you decide the complexity of adding that layer in
# is 'worth it' for your applications usage pattern (which certain applications
# may not need).


class CallJim(task.Task):
@@ -36,6 +36,8 @@ from taskflow import task
from taskflow.tests import utils
from taskflow.utils import misc

import example_utils as eu  # noqa

# INTRO: In this example we create two tasks which can trigger exceptions
# based on various inputs to show how to analyze the thrown exceptions for
# which types were thrown and handle the different types in different ways.
@@ -54,12 +56,6 @@
# that code to do further cleanups (if desired).


def print_wrapped(text):
    print("-" * (len(text)))
    print(text)
    print("-" * (len(text)))


class FirstException(Exception):
    """Exception that first task raises."""

@@ -112,18 +108,18 @@ def run(**store):
    misc.Failure.reraise_if_any(unknown_failures)


print_wrapped("Raise and catch first exception only")
eu.print_wrapped("Raise and catch first exception only")
run(sleep1=0.0, raise1=True,
    sleep2=0.0, raise2=False)

# NOTE(imelnikov): in general, sleeping does not guarantee that we'll have both
# task running before one of them fails, but with current implementation this
# works most of times, which is enough for our purposes here (as an example).
print_wrapped("Raise and catch both exceptions")
eu.print_wrapped("Raise and catch both exceptions")
run(sleep1=1.0, raise1=True,
    sleep2=1.0, raise2=True)

print_wrapped("Handle one exception, and re-raise another")
eu.print_wrapped("Handle one exception, and re-raise another")
try:
    run(sleep1=1.0, raise1=True,
        sleep2=1.0, raise2='boom')
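The per-type handling this example's INTRO describes (and the reraise_if_any call visible above) can be sketched roughly as follows. This assumes the WrappedFailure / Failure.check API of this era of taskflow and uses an invented helper, so treat it as an outline rather than the example's own code:

import taskflow.exceptions as exc
from taskflow.utils import misc


class FirstException(Exception):
    """Stand-in for the example's first exception type."""


def run_and_handle(engine):
    # Run the engine; if several tasks failed, their failures arrive wrapped
    # together and can be inspected per type.
    try:
        engine.run()
    except exc.WrappedFailure as wrapped:
        known, unknown = [], []
        for failure in wrapped:
            if failure.check(FirstException):
                known.append(failure)
            else:
                unknown.append(failure)
        for failure in known:
            print("handled (cleanup could go here): %s" % failure)
        # Anything we do not recognize gets re-raised for the caller to see.
        misc.Failure.reraise_if_any(unknown)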
@@ -24,7 +24,7 @@ extension; then it will be checked that output did not change.

When this module is used as main module, output for all examples are
generated. Please note that this will break tests as output for most
examples is indeterministic.
examples is indeterministic (due to hash randomization for example).
"""