Implement run iterations

Instead of blocking the caller when they call run(), add a new
API, run_iter(), that yields back the engine state transitions
while running. This allows an engine user to do alternate work
while an engine is running (and come back to the iterator on
their own time).

Implements blueprint iterable-execution

Change-Id: Ibb48c6c5618c97c59a6ab170dab5233ed47e5554
commit 53dcbd4d97
parent ee1e96d87d
Author: Joshua Harlow
Date:   2014-04-09 15:28:43 -07:00

6 changed files with 145 additions and 27 deletions
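
For illustration only (not part of this change): a minimal sketch of how a
caller might drive the new iterator, assuming the usual taskflow.engines.load()
helper and a trivial linear flow.

    from taskflow import engines
    from taskflow import task
    from taskflow.patterns import linear_flow as lf


    class PrintTask(task.Task):
        def execute(self):
            print("executing %s" % self.name)


    flow = lf.Flow('demo').add(PrintTask('a'), PrintTask('b'))
    engine = engines.load(flow)

    # Instead of blocking in run(), iterate; each yielded value is a state
    # transition and the caller may do other work between transitions.
    for state in engine.run_iter():
        print("engine state: %s" % state)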


@@ -86,29 +86,61 @@ class ActionEngine(base.EngineBase):
         g = self._analyzer.execution_graph
         return g
 
-    @lock_utils.locked
     def run(self):
         """Runs the flow in the engine to completion."""
+        with lock_utils.try_lock(self._lock) as was_locked:
+            if not was_locked:
+                raise exc.ExecutionFailure("Engine currently locked, please"
+                                           " try again later")
+            for _state in self.run_iter():
+                pass
+
+    def run_iter(self, timeout=None):
+        """Runs the engine using iteration (or die trying).
+
+        :param timeout: timeout to wait for any tasks to complete (this timeout
+            will be used during the waiting period that occurs after the
+            waiting state is yielded when unfinished tasks are being waited
+            for).
+
+        Instead of running to completion in a blocking manner, this will
+        return a generator which will yield back the various states that the
+        engine is going through (and can be used to run multiple engines at
+        once using a generator per engine). The iterator returned also
+        responds to the send() method from pep-0342 and will attempt to suspend
+        itself if a truthy value is sent in (the suspend may be delayed until
+        all active tasks have finished).
+
+        NOTE(harlowja): using the run_iter method will **not** retain the
+        engine lock while executing so the user should ensure that there is
+        only one entity using a returned engine iterator (one per engine) at a
+        given time.
+        """
         self.compile()
         self.prepare()
         self._task_executor.start()
+        state = None
         try:
-            self._run()
-        finally:
-            self._task_executor.stop()
-
-    def _run(self):
-        self._change_state(states.RUNNING)
-        try:
-            state = self._root.execute()
-        except Exception:
-            with excutils.save_and_reraise_exception():
-                self._change_state(states.FAILURE)
-        else:
-            self._change_state(state)
-            if state != states.SUSPENDED and state != states.SUCCESS:
-                failures = self.storage.get_failures()
-                misc.Failure.reraise_if_any(failures.values())
+            self._change_state(states.RUNNING)
+            try:
+                for state in self._root.execute_iter(timeout=timeout):
+                    try:
+                        try_suspend = yield state
+                    except GeneratorExit:
+                        break
+                    else:
+                        if try_suspend:
+                            self.suspend()
+            except Exception:
+                with excutils.save_and_reraise_exception():
+                    self._change_state(states.FAILURE)
+            else:
+                ignorable_states = getattr(self._root, 'ignorable_states', [])
+                if state and state not in ignorable_states:
+                    self._change_state(state)
+                    if state != states.SUSPENDED and state != states.SUCCESS:
+                        failures = self.storage.get_failures()
+                        misc.Failure.reraise_if_any(failures.values())
+        finally:
+            self._task_executor.stop()
 
     def _change_state(self, state):
         with self._state_lock:
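
One thing the run_iter() docstring above calls out is that a generator per
engine makes it possible to drive several engines at once from a single
thread. A rough sketch of that pattern (engines_to_run is a hypothetical list
of already-loaded engines, not something provided by this change):

    # engines_to_run: hypothetical list of engines, each loaded with a flow.
    running = [(engine, engine.run_iter()) for engine in engines_to_run]
    while running:
        still_going = []
        for engine, it in running:
            try:
                state = next(it)
            except StopIteration:
                # This engine is done (finished, reverted or suspended).
                continue
            print("%s -> %s" % (engine, state))
            still_going.append((engine, it))
        running = still_going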


@@ -33,6 +33,11 @@ class FutureGraphAction(object):
     in parallel, this enables parallel flow run and reversion.
     """
 
+    # Informational states this action yields while running, not useful to
+    # have the engine record but useful to provide to end-users when doing
+    # execution iterations.
+    ignorable_states = (st.SCHEDULING, st.WAITING, st.RESUMING, st.ANALYZING)
+
     def __init__(self, analyzer, storage, task_action, retry_action):
         self._analyzer = analyzer
         self._storage = storage
@@ -64,23 +69,41 @@ class FutureGraphAction(object):
             return (futures, [misc.Failure()])
         return (futures, [])
 
-    def execute(self):
+    def execute_iter(self, timeout=None):
+        if timeout is None:
+            timeout = _WAITING_TIMEOUT
         # Prepare flow to be resumed
+        yield st.RESUMING
         next_nodes = self._prepare_flow_for_resume()
         next_nodes.update(self._analyzer.get_next_nodes())
-        not_done, failures = self._schedule(next_nodes)
+        # Schedule nodes to be worked on
+        yield st.SCHEDULING
+        if self.is_running():
+            not_done, failures = self._schedule(next_nodes)
+        else:
+            not_done, failures = ([], [])
+        # Run!
+        #
+        # At this point we need to ensure we wait for all active nodes to
+        # finish running (even if we are asked to suspend) since we can not
+        # preempt those tasks (maybe in the future we will be better able to do
+        # this).
         while not_done:
-            # NOTE(imelnikov): if timeout occurs before any of futures
-            # completes, done list will be empty and we'll just go
-            # for next iteration.
-            done, not_done = self._task_action.wait_for_any(
-                not_done, _WAITING_TIMEOUT)
+            yield st.WAITING
+            # TODO(harlowja): maybe we should start doing 'yield from' this
+            # call sometime in the future, or equivalent that will work in
+            # py2 and py3.
+            done, not_done = self._task_action.wait_for_any(not_done, timeout)
+            # Analyze the results and schedule more nodes (unless we had
+            # failures). If failures occurred just continue processing what
+            # is running (so that we don't leave it abandoned) but do not
+            # schedule anything new.
+            yield st.ANALYZING
             next_nodes = set()
             for future in done:
                 try:
@@ -102,17 +125,20 @@ class FutureGraphAction(object):
                 else:
                     next_nodes.update(more_nodes)
             if next_nodes and not failures and self.is_running():
-                more_not_done, failures = self._schedule(next_nodes)
-                not_done.extend(more_not_done)
+                yield st.SCHEDULING
+                # Recheck in case someone suspended it.
+                if self.is_running():
+                    more_not_done, failures = self._schedule(next_nodes)
+                    not_done.extend(more_not_done)
         if failures:
             misc.Failure.reraise_if_any(failures)
         if self._analyzer.get_next_nodes():
-            return st.SUSPENDED
+            yield st.SUSPENDED
         elif self._analyzer.is_success():
-            return st.SUCCESS
+            yield st.SUCCESS
         else:
-            return st.REVERTED
+            yield st.REVERTED
 
     def _schedule_task(self, task):
         """Schedules the given task for revert or execute depending


@@ -48,6 +48,11 @@ REVERT = 'REVERT'
 RETRY = 'RETRY'
 INTENTIONS = [EXECUTE, IGNORE, REVERT, RETRY]
 
+# Additional engine states
+SCHEDULING = 'SCHEDULING'
+WAITING = 'WAITING'
+ANALYZING = 'ANALYZING'
+
 ## Flow state transitions
 # See: http://docs.openstack.org/developer/taskflow/states.html


@@ -140,6 +140,50 @@ class EngineLinearFlowTest(utils.EngineTestBase):
         self.assertEqual(self.values, ['task1', 'task2'])
         self.assertEqual(len(flow), 2)
 
+    def test_sequential_flow_two_tasks_iter(self):
+        flow = lf.Flow('flow-2').add(
+            utils.SaveOrderTask(name='task1'),
+            utils.SaveOrderTask(name='task2')
+        )
+        e = self._make_engine(flow)
+        gathered_states = list(e.run_iter())
+        self.assertTrue(len(gathered_states) > 0)
+        self.assertEqual(self.values, ['task1', 'task2'])
+        self.assertEqual(len(flow), 2)
+
+    def test_sequential_flow_iter_suspend_resume(self):
+        flow = lf.Flow('flow-2').add(
+            utils.SaveOrderTask(name='task1'),
+            utils.SaveOrderTask(name='task2')
+        )
+        _lb, fd = p_utils.temporary_flow_detail(self.backend)
+        e = self._make_engine(flow, flow_detail=fd)
+        it = e.run_iter()
+        gathered_states = []
+        suspend_it = None
+        while True:
+            try:
+                s = it.send(suspend_it)
+                gathered_states.append(s)
+                if s == states.WAITING:
+                    # Stop it before task2 runs/starts.
+                    suspend_it = True
+            except StopIteration:
+                break
+        self.assertTrue(len(gathered_states) > 0)
+        self.assertEqual(self.values, ['task1'])
+        self.assertEqual(states.SUSPENDED, e.storage.get_flow_state())
+
+        # Attempt to resume it and see what runs now...
+        #
+        # NOTE(harlowja): Clear all the values, but don't reset the reference.
+        while len(self.values):
+            self.values.pop()
+        gathered_states = list(e.run_iter())
+        self.assertTrue(len(gathered_states) > 0)
+        self.assertEqual(self.values, ['task2'])
+        self.assertEqual(states.SUCCESS, e.storage.get_flow_state())
+
     def test_revert_removes_data(self):
         flow = lf.Flow('revert-removes').add(
             utils.TaskOneReturn(provides='one'),

@@ -258,7 +258,7 @@ class EngineTestBase(object):
             conn.clear_all()
         super(EngineTestBase, self).tearDown()
 
-    def _make_engine(self, flow, flow_detail=None):
+    def _make_engine(self, flow, **kwargs):
         raise NotImplementedError()


@@ -36,6 +36,17 @@ from taskflow.utils import threading_utils as tu
 
 LOG = logging.getLogger(__name__)
 
 
+@contextlib.contextmanager
+def try_lock(lock):
+    """Attempts to acquire a lock, and autoreleases if acquisition occurred."""
+    was_locked = lock.acquire(blocking=False)
+    try:
+        yield was_locked
+    finally:
+        if was_locked:
+            lock.release()
+
+
 def locked(*args, **kwargs):
     """A decorator that looks for a given attribute (typically a lock or a list
     of locks) and before executing the decorated function uses the given lock