Merge tag '1.25.0' into debian/mitaka
taskflow 1.25.0 release
@@ -4,5 +4,5 @@ source = taskflow
 omit = taskflow/tests/*,taskflow/openstack/*,taskflow/test.py

 [report]
-ignore-errors = True
+ignore_errors = True
.gitignore (2 changes, vendored)
@@ -55,7 +55,7 @@ AUTHORS
 ChangeLog

 # doc
-doc/_build/
+doc/build/

 .idea
 env
@@ -9,7 +9,7 @@ Conductors
 Overview
 ========

-Conductors in TaskFlow provide a mechanism that unifies the various TaskFlow
+Conductors provide a mechanism that unifies the various
 concepts under a single easy to use (as plug-and-play as we can make it)
 construct.

@@ -66,6 +66,7 @@ Interfaces

 .. automodule:: taskflow.conductors.base
 .. automodule:: taskflow.conductors.backends
+.. automodule:: taskflow.conductors.backends.impl_executor

 Implementations
 ===============

@@ -75,12 +76,19 @@ Blocking

 .. automodule:: taskflow.conductors.backends.impl_blocking

+Non-blocking
+------------
+
+.. automodule:: taskflow.conductors.backends.impl_nonblocking
+
 Hierarchy
 =========

 .. inheritance-diagram::
     taskflow.conductors.base
     taskflow.conductors.backends.impl_blocking
+    taskflow.conductors.backends.impl_nonblocking
+    taskflow.conductors.backends.impl_executor
    :parts: 1

 .. _musical conductors: http://en.wikipedia.org/wiki/Conducting
@@ -104,8 +104,8 @@ projects, frameworks and libraries.

 shelf

-History
--------
+Release notes
+-------------

 .. toctree::
    :maxdepth: 2
@@ -48,10 +48,15 @@ Jobboards
 High level architecture
 =======================

-.. image:: img/jobboard.png
+.. figure:: img/jobboard.png
+   :height: 350px
+   :align: right
+
+   **Note:** This is a high-level diagram of the zookeeper implementation
+   (other implementations will typically have different architectures);
+   later parts of this documentation refer back to it.

 Features
 ========
@@ -180,6 +180,11 @@ Capturing listener

 .. autoclass:: taskflow.listeners.capturing.CaptureListener

+Formatters
+----------
+
+.. automodule:: taskflow.formatters
+
 Hierarchy
 =========

@@ -17,6 +17,11 @@ Cache

 .. automodule:: taskflow.types.cache

+Entity
+======
+
+.. automodule:: taskflow.types.entity
+
 Failure
 =======
@@ -1,4 +0,0 @@
-[DEFAULT]
-
-# The base module to hold the copy of openstack.common
-base=taskflow
@@ -3,7 +3,7 @@
 # process, which may cause wedges in the gate later.

 # See: https://bugs.launchpad.net/pbr/+bug/1384919 for why this is here...
-pbr<2.0,>=1.6
+pbr>=1.6

 # Packages needed for using this library.

@@ -41,8 +41,8 @@ jsonschema!=2.5.0,<3.0.0,>=2.0.0
 automaton>=0.5.0 # Apache-2.0

 # For common utilities
-oslo.utils>=2.0.0 # Apache-2.0
-oslo.serialization>=1.4.0 # Apache-2.0
+oslo.utils>=2.8.0 # Apache-2.0
+oslo.serialization>=1.10.0 # Apache-2.0

 # For lru caches and such
 cachetools>=1.0.0 # MIT License
@@ -37,6 +37,7 @@ taskflow.jobboards =

 taskflow.conductors =
     blocking = taskflow.conductors.backends.impl_blocking:BlockingConductor
+    nonblocking = taskflow.conductors.backends.impl_nonblocking:NonBlockingConductor

 taskflow.persistence =
     dir = taskflow.persistence.backends.impl_dir:DirBackend

@@ -58,6 +59,9 @@ taskflow.engines =
 cover-erase = true
 verbosity = 2

+[pbr]
+warnerrors = True
+
 [wheel]
 universal = 1
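The new 'nonblocking' entry point above can be looked up by name through
taskflow's conductor fetching helper; a minimal sketch (the jobboard
configuration shown is illustrative only, not part of this change):

    # Hypothetical usage; assumes a reachable zookeeper-backed jobboard.
    from taskflow.conductors import backends as conductor_backends
    from taskflow.jobs import backends as job_backends

    jb_conf = {'board': 'zookeeper', 'path': '/taskflow/demo-jobs'}
    with job_backends.backend('demo-jobs', jb_conf.copy()) as board:
        cond = conductor_backends.fetch('nonblocking', 'conductor-1', board)
        cond.run(max_dispatches=1)  # return after a single job is dispatched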
setup.py (2 changes)
@@ -25,5 +25,5 @@ except ImportError:
     pass

 setuptools.setup(
-    setup_requires=['pbr>=1.3'],
+    setup_requires=['pbr>=1.8'],
     pbr=True)
@@ -194,6 +194,33 @@ class Atom(object):
         this atom produces.
     """

+    priority = 0
+    """A numeric priority that instances of this class will have when running,
+    used when there are multiple *parallel* candidates to execute and/or
+    revert. During this situation the candidate list will be stably sorted
+    based on this priority attribute which will result in atoms with higher
+    priorities executing (or reverting) before atoms with lower
+    priorities (higher being defined as a number bigger, or greater than,
+    an atom with a lower priority number). By default all atoms have the same
+    priority (zero).
+
+    For example when the following is combined into a
+    graph (where each node in the denoted graph is some task)::
+
+        a -> b
+        b -> c
+        b -> e
+        b -> f
+
+    When ``b`` finishes there will then be three candidates that can run
+    ``(c, e, f)`` and they may run in any order. What this priority does is
+    sort those three by their priority before submitting them to be
+    worked on (so that instead of say a random run order they will now be
+    ran by their sorted order). This is also true when reverting (in that the
+    sort order of the potential nodes will be used to determine the
+    submission order).
+    """
+
     def __init__(self, name=None, provides=None, inject=None):
         self.name = name
         self.version = (1, 0)
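A minimal sketch of the stable, descending-priority ordering this docstring
describes (plain namedtuples stand in for real atoms; not part of the diff):

    import collections

    Task = collections.namedtuple('Task', ['name', 'priority'])

    # Candidates (c, e, f) become runnable together once 'b' finishes.
    candidates = [Task('c', 0), Task('e', 5), Task('f', 0)]
    ordered = sorted(candidates,
                     key=lambda t: getattr(t, 'priority', 0),
                     reverse=True)
    # 'e' (priority 5) is submitted first; 'c' and 'f' keep their relative
    # order because sorted() is stable.
    assert [t.name for t in ordered] == ['e', 'c', 'f']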
@@ -12,208 +12,30 @@
 # License for the specific language governing permissions and limitations
 # under the License.

-import threading
+import futurist

-try:
-    from contextlib import ExitStack  # noqa
-except ImportError:
-    from contextlib2 import ExitStack  # noqa
-
-from debtcollector import removals
-from oslo_utils import excutils
-import six
-
-from taskflow.conductors import base
-from taskflow import exceptions as excp
-from taskflow.listeners import logging as logging_listener
-from taskflow import logging
-from taskflow.types import timing as tt
-from taskflow.utils import async_utils
-
-LOG = logging.getLogger(__name__)
-WAIT_TIMEOUT = 0.5
-NO_CONSUME_EXCEPTIONS = tuple([
-    excp.ExecutionFailure,
-    excp.StorageFailure,
-])
+from taskflow.conductors.backends import impl_executor


-class BlockingConductor(base.Conductor):
-    """A conductor that runs jobs in its own dispatching loop.
+class BlockingConductor(impl_executor.ExecutorConductor):
+    """Blocking conductor that processes job(s) in a blocking manner."""

-    This conductor iterates over jobs in the provided jobboard (waiting for
-    the given timeout if no jobs exist) and attempts to claim them, work on
-    those jobs in its local thread (blocking further work from being claimed
-    and consumed) and then consume those work units after completetion. This
-    process will repeat until the conductor has been stopped or other critical
-    error occurs.
-
-    NOTE(harlowja): consumption occurs even if a engine fails to run due to
-    a task failure. This is only skipped when an execution failure or
-    a storage failure occurs which are *usually* correctable by re-running on
-    a different conductor (storage failures and execution failures may be
-    transient issues that can be worked around by later execution). If a job
-    after completing can not be consumed or abandoned the conductor relies
-    upon the jobboard capabilities to automatically abandon these jobs.
-    """
+    MAX_SIMULTANEOUS_JOBS = 1
+    """
+    Default maximum number of jobs that can be in progress at the same time.
+    """

-    START_FINISH_EVENTS_EMITTED = tuple([
-        'compilation', 'preparation',
-        'validation', 'running',
-    ])
-    """Events will be emitted for the start and finish of each engine
-    activity defined above, the actual event name that can be registered
-    to subscribe to will be ``${event}_start`` and ``${event}_end`` where
-    the ``${event}`` in this pseudo-variable will be one of these events.
-    """
+    @staticmethod
+    def _executor_factory():
+        return futurist.SynchronousExecutor()

     def __init__(self, name, jobboard,
                  persistence=None, engine=None,
-                 engine_options=None, wait_timeout=None):
+                 engine_options=None, wait_timeout=None,
+                 log=None, max_simultaneous_jobs=MAX_SIMULTANEOUS_JOBS):
         super(BlockingConductor, self).__init__(
-            name, jobboard, persistence=persistence,
-            engine=engine, engine_options=engine_options)
-        if wait_timeout is None:
-            wait_timeout = WAIT_TIMEOUT
-        if isinstance(wait_timeout, (int, float) + six.string_types):
-            self._wait_timeout = tt.Timeout(float(wait_timeout))
-        elif isinstance(wait_timeout, tt.Timeout):
-            self._wait_timeout = wait_timeout
-        else:
-            raise ValueError("Invalid timeout literal: %s" % (wait_timeout))
-        self._dead = threading.Event()
-
-    @removals.removed_kwarg('timeout', version="0.8", removal_version="2.0")
-    def stop(self, timeout=None):
-        """Requests the conductor to stop dispatching.
-
-        This method can be used to request that a conductor stop its
-        consumption & dispatching loop.
-
-        The method returns immediately regardless of whether the conductor has
-        been stopped.
-
-        .. deprecated:: 0.8
-
-            The ``timeout`` parameter is **deprecated** and is present for
-            backward compatibility **only**. In order to wait for the
-            conductor to gracefully shut down, :py:meth:`wait` should be used
-            instead.
-        """
-        self._wait_timeout.interrupt()
-
-    @property
-    def dispatching(self):
-        return not self._dead.is_set()
-
-    def _listeners_from_job(self, job, engine):
-        listeners = super(BlockingConductor, self)._listeners_from_job(job,
-                                                                       engine)
-        listeners.append(logging_listener.LoggingListener(engine, log=LOG))
-        return listeners
-
-    def _dispatch_job(self, job):
-        engine = self._engine_from_job(job)
-        listeners = self._listeners_from_job(job, engine)
-        with ExitStack() as stack:
-            for listener in listeners:
-                stack.enter_context(listener)
-            LOG.debug("Dispatching engine for job '%s'", job)
-            consume = True
-            try:
-                for stage_func, event_name in [(engine.compile, 'compilation'),
-                                               (engine.prepare, 'preparation'),
-                                               (engine.validate, 'validation'),
-                                               (engine.run, 'running')]:
-                    self._notifier.notify("%s_start" % event_name, {
-                        'job': job,
-                        'engine': engine,
-                        'conductor': self,
-                    })
-                    stage_func()
-                    self._notifier.notify("%s_end" % event_name, {
-                        'job': job,
-                        'engine': engine,
-                        'conductor': self,
-                    })
-            except excp.WrappedFailure as e:
-                if all((f.check(*NO_CONSUME_EXCEPTIONS) for f in e)):
-                    consume = False
-                if LOG.isEnabledFor(logging.WARNING):
-                    if consume:
-                        LOG.warn("Job execution failed (consumption being"
-                                 " skipped): %s [%s failures]", job, len(e))
-                    else:
-                        LOG.warn("Job execution failed (consumption"
-                                 " proceeding): %s [%s failures]", job, len(e))
-                    # Show the failure/s + traceback (if possible)...
-                    for i, f in enumerate(e):
-                        LOG.warn("%s. %s", i + 1, f.pformat(traceback=True))
-            except NO_CONSUME_EXCEPTIONS:
-                LOG.warn("Job execution failed (consumption being"
-                         " skipped): %s", job, exc_info=True)
-                consume = False
-            except Exception:
-                LOG.warn("Job execution failed (consumption proceeding): %s",
-                         job, exc_info=True)
-            else:
-                LOG.info("Job completed successfully: %s", job)
-            return async_utils.make_completed_future(consume)
-
-    def run(self):
-        self._dead.clear()
-        try:
-            while True:
-                if self._wait_timeout.is_stopped():
-                    break
-                dispatched = 0
-                for job in self._jobboard.iterjobs():
-                    if self._wait_timeout.is_stopped():
-                        break
-                    LOG.debug("Trying to claim job: %s", job)
-                    try:
-                        self._jobboard.claim(job, self._name)
-                    except (excp.UnclaimableJob, excp.NotFound):
-                        LOG.debug("Job already claimed or consumed: %s", job)
-                        continue
-                    consume = False
-                    try:
-                        f = self._dispatch_job(job)
-                    except KeyboardInterrupt:
-                        with excutils.save_and_reraise_exception():
-                            LOG.warn("Job dispatching interrupted: %s", job)
-                    except Exception:
-                        LOG.warn("Job dispatching failed: %s", job,
-                                 exc_info=True)
-                    else:
-                        dispatched += 1
-                        consume = f.result()
-                    try:
-                        if consume:
-                            self._jobboard.consume(job, self._name)
-                        else:
-                            self._jobboard.abandon(job, self._name)
-                    except (excp.JobFailure, excp.NotFound):
-                        if consume:
-                            LOG.warn("Failed job consumption: %s", job,
-                                     exc_info=True)
-                        else:
-                            LOG.warn("Failed job abandonment: %s", job,
-                                     exc_info=True)
-                if dispatched == 0 and not self._wait_timeout.is_stopped():
-                    self._wait_timeout.wait()
-        finally:
-            self._dead.set()
-
-    def wait(self, timeout=None):
-        """Waits for the conductor to gracefully exit.
-
-        This method waits for the conductor to gracefully exit. An optional
-        timeout can be provided, which will cause the method to return
-        within the specified timeout. If the timeout is reached, the returned
-        value will be False.
-
-        :param timeout: Maximum number of seconds that the :meth:`wait` method
-                        should block for.
-        """
-        return self._dead.wait(timeout)
+            name, jobboard,
+            persistence=persistence, engine=engine,
+            engine_options=engine_options,
+            wait_timeout=wait_timeout, log=log,
+            max_simultaneous_jobs=max_simultaneous_jobs)
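A sketch of how the rewritten BlockingConductor is typically driven (the
jobboard setup is assumed, not shown in this diff; run() blocks, so stop()
usually arrives from another thread):

    # Hypothetical usage; 'board' is an already-connected jobboard.
    import threading

    from taskflow.conductors.backends import impl_blocking

    cond = impl_blocking.BlockingConductor('conductor-1', board,
                                           wait_timeout=0.5)
    t = threading.Thread(target=cond.run)
    t.start()
    # ... later, from some controlling thread ...
    cond.stop()            # request the dispatching loop to end
    assert cond.wait(10)   # True once the loop has actually exited
    t.join()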
taskflow/conductors/backends/impl_executor.py (new file, 357 lines)
@@ -0,0 +1,357 @@
# -*- coding: utf-8 -*-

# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import abc
import functools
import itertools
import threading

try:
    from contextlib import ExitStack  # noqa
except ImportError:
    from contextlib2 import ExitStack  # noqa

from debtcollector import removals
from oslo_utils import excutils
import six

from taskflow.conductors import base
from taskflow import exceptions as excp
from taskflow.listeners import logging as logging_listener
from taskflow import logging
from taskflow.types import timing as tt
from taskflow.utils import iter_utils
from taskflow.utils import misc

LOG = logging.getLogger(__name__)


def _convert_to_timeout(value=None, default_value=None, event_factory=None):
    if value is None:
        value = default_value
    if isinstance(value, (int, float) + six.string_types):
        return tt.Timeout(float(value), event_factory=event_factory)
    elif isinstance(value, tt.Timeout):
        return value
    else:
        raise ValueError("Invalid timeout literal '%s'" % (value))


@six.add_metaclass(abc.ABCMeta)
class ExecutorConductor(base.Conductor):
    """Dispatches jobs from blocking :py:meth:`.run` method to some executor.

    This conductor iterates over jobs in the provided jobboard (waiting for
    the given timeout if no jobs exist) and attempts to claim them, work on
    those jobs using an executor (potentially blocking further work from being
    claimed and consumed) and then consume those work units after
    completion. This process will repeat until the conductor has been stopped
    or other critical error occurs.

    NOTE(harlowja): consumption occurs even if an engine fails to run due to
    an atom failure. This is only skipped when an execution failure or
    a storage failure occurs which are *usually* correctable by re-running on
    a different conductor (storage failures and execution failures may be
    transient issues that can be worked around by later execution). If a job
    after completing can not be consumed or abandoned the conductor relies
    upon the jobboard capabilities to automatically abandon these jobs.
    """

    LOG = None
    """
    Logger that will be used for listening to events (if none then the module
    level logger will be used instead).
    """

    #: Default timeout used to idle/wait when no jobs have been found.
    WAIT_TIMEOUT = 0.5

    MAX_SIMULTANEOUS_JOBS = -1
    """
    Default maximum number of jobs that can be in progress at the same time.

    Negative or zero values imply no limit (do note that if an executor is
    used that is built on a queue, as most are, that this will imply that the
    queue will contain a potentially large & unfinished backlog of
    submitted jobs). This *may* get better someday if
    https://bugs.python.org/issue22737 is ever implemented and released.
    """

    #: Exceptions that will **not** cause consumption to occur.
    NO_CONSUME_EXCEPTIONS = tuple([
        excp.ExecutionFailure,
        excp.StorageFailure,
    ])

    _event_factory = threading.Event
    """This attribute *can* be overridden by subclasses (for example if
    an eventlet *green* event works better for the conductor user)."""

    START_FINISH_EVENTS_EMITTED = tuple([
        'compilation', 'preparation',
        'validation', 'running',
    ])
    """Events will be emitted for the start and finish of each engine
    activity defined above, the actual event name that can be registered
    to subscribe to will be ``${event}_start`` and ``${event}_end`` where
    the ``${event}`` in this pseudo-variable will be one of these events.

    .. deprecated:: 1.23.0

        Use :py:attr:`~EVENTS_EMITTED`
    """

    EVENTS_EMITTED = tuple([
        'compilation_start', 'compilation_end',
        'preparation_start', 'preparation_end',
        'validation_start', 'validation_end',
        'running_start', 'running_end',
        'job_consumed', 'job_abandoned',
    ])
    """Events will be emitted for each of the events above. The event is
    emitted to listeners registered with the conductor.
    """

    def __init__(self, name, jobboard,
                 persistence=None, engine=None,
                 engine_options=None, wait_timeout=None,
                 log=None, max_simultaneous_jobs=MAX_SIMULTANEOUS_JOBS):
        super(ExecutorConductor, self).__init__(
            name, jobboard, persistence=persistence,
            engine=engine, engine_options=engine_options)
        self._wait_timeout = _convert_to_timeout(
            value=wait_timeout, default_value=self.WAIT_TIMEOUT,
            event_factory=self._event_factory)
        self._dead = self._event_factory()
        self._log = misc.pick_first_not_none(log, self.LOG, LOG)
        self._max_simultaneous_jobs = int(
            misc.pick_first_not_none(max_simultaneous_jobs,
                                     self.MAX_SIMULTANEOUS_JOBS))
        self._dispatched = set()

    def _executor_factory(self):
        """Creates an executor to be used during dispatching."""
        raise excp.NotImplementedError("This method must be implemented but"
                                       " it has not been")

    @removals.removed_kwarg('timeout', version="0.8", removal_version="2.0")
    def stop(self, timeout=None):
        """Requests the conductor to stop dispatching.

        This method can be used to request that a conductor stop its
        consumption & dispatching loop.

        The method returns immediately regardless of whether the conductor has
        been stopped.

        .. deprecated:: 0.8

            The ``timeout`` parameter is **deprecated** and is present for
            backward compatibility **only**. In order to wait for the
            conductor to gracefully shut down, :py:meth:`wait` should be used
            instead.
        """
        self._wait_timeout.interrupt()

    @property
    def dispatching(self):
        """Whether or not the dispatching loop is still dispatching."""
        return not self._dead.is_set()

    def _listeners_from_job(self, job, engine):
        listeners = super(ExecutorConductor, self)._listeners_from_job(
            job, engine)
        listeners.append(logging_listener.LoggingListener(engine,
                                                          log=self._log))
        return listeners

    def _dispatch_job(self, job):
        engine = self._engine_from_job(job)
        listeners = self._listeners_from_job(job, engine)
        with ExitStack() as stack:
            for listener in listeners:
                stack.enter_context(listener)
            self._log.debug("Dispatching engine for job '%s'", job)
            consume = True
            try:
                for stage_func, event_name in [(engine.compile, 'compilation'),
                                               (engine.prepare, 'preparation'),
                                               (engine.validate, 'validation'),
                                               (engine.run, 'running')]:
                    self._notifier.notify("%s_start" % event_name, {
                        'job': job,
                        'engine': engine,
                        'conductor': self,
                    })
                    stage_func()
                    self._notifier.notify("%s_end" % event_name, {
                        'job': job,
                        'engine': engine,
                        'conductor': self,
                    })
            except excp.WrappedFailure as e:
                if all((f.check(*self.NO_CONSUME_EXCEPTIONS) for f in e)):
                    consume = False
                if self._log.isEnabledFor(logging.WARNING):
                    if consume:
                        self._log.warn(
                            "Job execution failed (consumption being"
                            " skipped): %s [%s failures]", job, len(e))
                    else:
                        self._log.warn(
                            "Job execution failed (consumption"
                            " proceeding): %s [%s failures]", job, len(e))
                    # Show the failure/s + traceback (if possible)...
                    for i, f in enumerate(e):
                        self._log.warn("%s. %s", i + 1,
                                       f.pformat(traceback=True))
            except self.NO_CONSUME_EXCEPTIONS:
                self._log.warn("Job execution failed (consumption being"
                               " skipped): %s", job, exc_info=True)
                consume = False
            except Exception:
                self._log.warn(
                    "Job execution failed (consumption proceeding): %s",
                    job, exc_info=True)
            else:
                self._log.info("Job completed successfully: %s", job)
            return consume

    def _try_finish_job(self, job, consume):
        try:
            if consume:
                self._jobboard.consume(job, self._name)
                self._notifier.notify("job_consumed", {
                    'job': job,
                    'conductor': self,
                    'persistence': self._persistence,
                })
            else:
                self._jobboard.abandon(job, self._name)
                self._notifier.notify("job_abandoned", {
                    'job': job,
                    'conductor': self,
                    'persistence': self._persistence,
                })
        except (excp.JobFailure, excp.NotFound):
            if consume:
                self._log.warn("Failed job consumption: %s", job,
                               exc_info=True)
            else:
                self._log.warn("Failed job abandonment: %s", job,
                               exc_info=True)

    def _on_job_done(self, job, fut):
        consume = False
        try:
            consume = fut.result()
        except KeyboardInterrupt:
            with excutils.save_and_reraise_exception():
                self._log.warn("Job dispatching interrupted: %s", job)
        except Exception:
            self._log.warn("Job dispatching failed: %s", job, exc_info=True)
        try:
            self._try_finish_job(job, consume)
        finally:
            self._dispatched.discard(fut)

    def _can_claim_more_jobs(self, job):
        if self._wait_timeout.is_stopped():
            return False
        if self._max_simultaneous_jobs <= 0:
            return True
        if len(self._dispatched) >= self._max_simultaneous_jobs:
            return False
        else:
            return True

    def _run_until_dead(self, executor, max_dispatches=None):
        total_dispatched = 0
        if max_dispatches is None:
            # NOTE(TheSriram): if max_dispatches is not set,
            # then the conductor will run indefinitely, and not
            # stop after 'n' number of dispatches
            max_dispatches = -1
        dispatch_gen = iter_utils.iter_forever(max_dispatches)
        is_stopped = self._wait_timeout.is_stopped
        try:
            # Don't even do any work in the first place...
            if max_dispatches == 0:
                raise StopIteration
            while not is_stopped():
                any_dispatched = False
                for job in itertools.takewhile(self._can_claim_more_jobs,
                                               self._jobboard.iterjobs()):
                    self._log.debug("Trying to claim job: %s", job)
                    try:
                        self._jobboard.claim(job, self._name)
                    except (excp.UnclaimableJob, excp.NotFound):
                        self._log.debug("Job already claimed or"
                                        " consumed: %s", job)
                    else:
                        try:
                            fut = executor.submit(self._dispatch_job, job)
                        except RuntimeError:
                            with excutils.save_and_reraise_exception():
                                self._log.warn("Job dispatch submitting"
                                               " failed: %s", job)
                                self._try_finish_job(job, False)
                        else:
                            fut.job = job
                            self._dispatched.add(fut)
                            any_dispatched = True
                            fut.add_done_callback(
                                functools.partial(self._on_job_done, job))
                            total_dispatched = next(dispatch_gen)
                if not any_dispatched and not is_stopped():
                    self._wait_timeout.wait()
        except StopIteration:
            # This will be raised from 'dispatch_gen' if it reaches its
            # max dispatch number (which implies we should do no more work).
            with excutils.save_and_reraise_exception():
                if max_dispatches >= 0 and total_dispatched >= max_dispatches:
                    self._log.info("Maximum dispatch limit of %s reached",
                                   max_dispatches)

    def run(self, max_dispatches=None):
        self._dead.clear()
        self._dispatched.clear()
        try:
            self._jobboard.register_entity(self.conductor)
            with self._executor_factory() as executor:
                self._run_until_dead(executor,
                                     max_dispatches=max_dispatches)
        except StopIteration:
            pass
        except KeyboardInterrupt:
            with excutils.save_and_reraise_exception():
                self._log.warn("Job dispatching interrupted")
        finally:
            self._dead.set()

    # Inherit the docs, so we can reference them in our class docstring,
    # if we don't do this sphinx gets confused...
    run.__doc__ = base.Conductor.run.__doc__

    def wait(self, timeout=None):
        """Waits for the conductor to gracefully exit.

        This method waits for the conductor to gracefully exit. An optional
        timeout can be provided, which will cause the method to return
        within the specified timeout. If the timeout is reached, the returned
        value will be ``False``, otherwise it will be ``True``.

        :param timeout: Maximum number of seconds that the :meth:`wait` method
                        should block for.
        """
        return self._dead.wait(timeout)
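The EVENTS_EMITTED names above are published through the conductor's
notifier; a brief sketch of subscribing to one of them (the listener body is
illustrative only):

    # Hypothetical usage; 'cond' is any ExecutorConductor subclass instance.
    def on_running_start(event, details):
        # details carries the 'job', 'engine' and 'conductor' keys shown
        # in _dispatch_job() above.
        print("engine starting to run job:", details['job'])

    cond.notifier.register('running_start', on_running_start)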
taskflow/conductors/backends/impl_nonblocking.py (new file, 69 lines)
@@ -0,0 +1,69 @@
# -*- coding: utf-8 -*-

# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import futurist
import six

from taskflow.conductors.backends import impl_executor
from taskflow.utils import threading_utils as tu


class NonBlockingConductor(impl_executor.ExecutorConductor):
    """Non-blocking conductor that processes job(s) using a thread executor.

    NOTE(harlowja): A custom executor factory can be provided via keyword
                    argument ``executor_factory``, if provided it will be
                    invoked at
                    :py:meth:`~taskflow.conductors.base.Conductor.run` time
                    with one positional argument (this conductor) and it must
                    return a compatible `executor`_ which can be used
                    to submit jobs to. If ``None`` is provided, a thread pool
                    backed executor is selected by default (it will have
                    an equivalent number of workers as this conductors
                    simultaneous job count).

    .. _executor: https://docs.python.org/dev/library/\
                  concurrent.futures.html#executor-objects
    """

    MAX_SIMULTANEOUS_JOBS = tu.get_optimal_thread_count()
    """
    Default maximum number of jobs that can be in progress at the same time.
    """

    def _default_executor_factory(self):
        max_simultaneous_jobs = self._max_simultaneous_jobs
        if max_simultaneous_jobs <= 0:
            max_workers = tu.get_optimal_thread_count()
        else:
            max_workers = max_simultaneous_jobs
        return futurist.ThreadPoolExecutor(max_workers=max_workers)

    def __init__(self, name, jobboard,
                 persistence=None, engine=None,
                 engine_options=None, wait_timeout=None,
                 log=None, max_simultaneous_jobs=MAX_SIMULTANEOUS_JOBS,
                 executor_factory=None):
        super(NonBlockingConductor, self).__init__(
            name, jobboard,
            persistence=persistence, engine=engine,
            engine_options=engine_options, wait_timeout=wait_timeout,
            log=log, max_simultaneous_jobs=max_simultaneous_jobs)
        if executor_factory is None:
            self._executor_factory = self._default_executor_factory
        else:
            if not six.callable(executor_factory):
                raise ValueError("Provided keyword argument 'executor_factory'"
                                 " must be callable")
            self._executor_factory = executor_factory
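A small sketch of supplying a custom ``executor_factory`` (values and names
are assumptions for illustration; any object exposing the executor interface
works):

    # Hypothetical usage; 'board' is an already-connected jobboard.
    import futurist

    def make_executor(*conductor):
        # Accepts the conductor argument if one is passed; here we just
        # cap the pool size regardless of the conductor's job limit...
        return futurist.ThreadPoolExecutor(max_workers=2)

    cond = NonBlockingConductor('conductor-1', board,
                                executor_factory=make_executor)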
@@ -13,6 +13,7 @@
 # under the License.

 import abc
+import os
 import threading

 import fasteners
@@ -20,7 +21,9 @@ import six

 from taskflow import engines
 from taskflow import exceptions as excp
+from taskflow.types import entity
 from taskflow.types import notifier
+from taskflow.utils import misc


 @six.add_metaclass(abc.ABCMeta)
@@ -35,6 +38,9 @@ class Conductor(object):
     period of time will finish up the prior failed conductors work.
     """

+    #: Entity kind used when creating new entity objects
+    ENTITY_KIND = 'conductor'
+
     def __init__(self, name, jobboard,
                  persistence=None, engine=None, engine_options=None):
         self._name = name
@@ -48,6 +54,18 @@ class Conductor(object):
         self._lock = threading.RLock()
         self._notifier = notifier.Notifier()

+    @misc.cachedproperty
+    def conductor(self):
+        """Entity object that represents this conductor."""
+        hostname = misc.get_hostname()
+        pid = os.getpid()
+        name = '@'.join([self._name, hostname + ":" + str(pid)])
+        metadata = {
+            'hostname': hostname,
+            'pid': pid,
+        }
+        return entity.Entity(self.ENTITY_KIND, name, metadata)
+
     @property
     def notifier(self):
         """The conductor actions (or other state changes) notifier.

@@ -134,8 +152,17 @@ class Conductor(object):
         self._jobboard.close()

     @abc.abstractmethod
-    def run(self):
-        """Continuously claims, runs, and consumes jobs (and repeat)."""
+    def run(self, max_dispatches=None):
+        """Continuously claims, runs, and consumes jobs (and repeat).
+
+        :param max_dispatches: An upper bound on the number of jobs that will
+                               be dispatched, if none or negative this implies
+                               there is no limit to the number of jobs that
+                               will be dispatched, otherwise if positive this
+                               run method will return when that amount of jobs
+                               has been dispatched (instead of running
+                               forever and/or until stopped).
+        """

     @abc.abstractmethod
     def _dispatch_job(self, job):
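The new ``conductor`` property gives each conductor a registrable identity on
the jobboard (run() calls jobboard.register_entity with it); a sketch of the
Entity it builds, using example hostname/pid values:

    from taskflow.types import entity

    # Equivalent to what the cachedproperty above produces for a conductor
    # named 'conductor-1' running as pid 4242 on host 'worker-host':
    ent = entity.Entity('conductor', 'conductor-1@worker-host:4242',
                        {'hostname': 'worker-host', 'pid': 4242})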
@@ -18,10 +18,31 @@ import abc
 import itertools
 import weakref

-from networkx.algorithms import traversal
 import six

+from taskflow.engines.action_engine import compiler as co
 from taskflow import states as st
+from taskflow.utils import iter_utils
+
+
+def _depth_first_iterate(graph, connected_to_functors, initial_nodes_iter):
+    """Iterates connected nodes in execution graph (from starting set).
+
+    Jumps over nodes with ``noop`` attribute (does not yield them back).
+    """
+    stack = list(initial_nodes_iter)
+    while stack:
+        node = stack.pop()
+        node_attrs = graph.node[node]
+        if not node_attrs.get('noop'):
+            yield node
+        try:
+            node_kind = node_attrs['kind']
+            connected_to_functor = connected_to_functors[node_kind]
+        except KeyError:
+            pass
+        else:
+            stack.extend(connected_to_functor(node))


 @six.add_metaclass(abc.ABCMeta)
@@ -59,10 +80,14 @@ class IgnoreDecider(Decider):

     def check(self, runtime):
         """Returns bool of whether this decider should allow running."""
+        # Gather all atoms results so that those results can be used
+        # by the decider(s) that are making a decision as to pass or
+        # not pass...
         results = {}
-        for name in six.iterkeys(self._edge_deciders):
-            results[name] = runtime.storage.get(name)
-        for local_decider in six.itervalues(self._edge_deciders):
+        for node, node_kind, _local_decider in self._edge_deciders:
+            if node_kind in co.ATOMS:
+                results[node.name] = runtime.storage.get(node.name)
+        for _node, _node_kind, local_decider in self._edge_deciders:
             if not local_decider(history=results):
                 return False
         return True
@@ -74,8 +99,8 @@ class IgnoreDecider(Decider):
         state to ``IGNORE`` so that they are ignored in future runtime
         activities.
         """
-        successors_iter = runtime.analyzer.iterate_subgraph(self._atom)
-        runtime.reset_nodes(itertools.chain([self._atom], successors_iter),
+        successors_iter = runtime.analyzer.iterate_connected_atoms(self._atom)
+        runtime.reset_atoms(itertools.chain([self._atom], successors_iter),
                             state=st.IGNORE, intention=st.IGNORE)
@@ -105,149 +130,167 @@ class Analyzer(object):
         self._storage = runtime.storage
         self._execution_graph = runtime.compilation.execution_graph

-    def get_next_nodes(self, node=None):
-        """Get next nodes to run (originating from node or all nodes)."""
-        if node is None:
-            execute = self.browse_nodes_for_execute()
-            revert = self.browse_nodes_for_revert()
-            return execute + revert
-        state = self.get_state(node)
-        intention = self._storage.get_atom_intention(node.name)
+    def iter_next_atoms(self, atom=None):
+        """Iterate next atoms to run (originating from atom or all atoms)."""
+        if atom is None:
+            return iter_utils.unique_seen(self.browse_atoms_for_execute(),
+                                          self.browse_atoms_for_revert())
+        state = self._storage.get_atom_state(atom.name)
+        intention = self._storage.get_atom_intention(atom.name)
         if state == st.SUCCESS:
             if intention == st.REVERT:
-                return [
-                    (node, NoOpDecider()),
-                ]
+                return iter([
+                    (atom, NoOpDecider()),
+                ])
             elif intention == st.EXECUTE:
-                return self.browse_nodes_for_execute(node)
+                return self.browse_atoms_for_execute(atom=atom)
             else:
-                return []
+                return iter([])
         elif state == st.REVERTED:
-            return self.browse_nodes_for_revert(node)
+            return self.browse_atoms_for_revert(atom=atom)
         elif state == st.FAILURE:
-            return self.browse_nodes_for_revert()
+            return self.browse_atoms_for_revert()
         else:
-            return []
+            return iter([])

-    def browse_nodes_for_execute(self, node=None):
-        """Browse next nodes to execute.
+    def browse_atoms_for_execute(self, atom=None):
+        """Browse next atoms to execute.

-        This returns a collection of nodes that *may* be ready to be
-        executed, if given a specific node it will only examine the successors
-        of that node, otherwise it will examine the whole graph.
+        This returns an iterator of atoms that *may* be ready to be
+        executed, if given a specific atom, it will only examine the
+        successors of that atom, otherwise it will examine the whole graph.
         """
-        if node is not None:
-            nodes = self._execution_graph.successors(node)
+        if atom is None:
+            atom_it = self.iterate_nodes(co.ATOMS)
         else:
-            nodes = self._execution_graph.nodes_iter()
-        ready_nodes = []
-        for node in nodes:
-            is_ready, late_decider = self._get_maybe_ready_for_execute(node)
+            successors_iter = self._execution_graph.successors_iter
+            atom_it = _depth_first_iterate(self._execution_graph,
+                                           {co.FLOW: successors_iter},
+                                           successors_iter(atom))
+        for atom in atom_it:
+            is_ready, late_decider = self._get_maybe_ready_for_execute(atom)
             if is_ready:
-                ready_nodes.append((node, late_decider))
-        return ready_nodes
+                yield (atom, late_decider)

-    def browse_nodes_for_revert(self, node=None):
-        """Browse next nodes to revert.
+    def browse_atoms_for_revert(self, atom=None):
+        """Browse next atoms to revert.

-        This returns a collection of nodes that *may* be ready to be be
-        reverted, if given a specific node it will only examine the
-        predecessors of that node, otherwise it will examine the whole
+        This returns an iterator of atoms that *may* be ready to be
+        reverted, if given a specific atom it will only examine the
+        predecessors of that atom, otherwise it will examine the whole
         graph.
         """
-        if node is not None:
-            nodes = self._execution_graph.predecessors(node)
+        if atom is None:
+            atom_it = self.iterate_nodes(co.ATOMS)
         else:
-            nodes = self._execution_graph.nodes_iter()
-        ready_nodes = []
-        for node in nodes:
-            is_ready, late_decider = self._get_maybe_ready_for_revert(node)
+            predecessors_iter = self._execution_graph.predecessors_iter
+            atom_it = _depth_first_iterate(self._execution_graph,
+                                           {co.FLOW: predecessors_iter},
+                                           predecessors_iter(atom))
+        for atom in atom_it:
+            is_ready, late_decider = self._get_maybe_ready_for_revert(atom)
             if is_ready:
-                ready_nodes.append((node, late_decider))
-        return ready_nodes
+                yield (atom, late_decider)

+    def _get_maybe_ready(self, atom, transition_to, allowed_intentions,
+                         connected_fetcher, connected_checker,
+                         decider_fetcher):
+        state = self._storage.get_atom_state(atom.name)
+        ok_to_transition = self._runtime.check_atom_transition(atom, state,
+                                                               transition_to)
+        if not ok_to_transition:
+            return (False, None)
+        intention = self._storage.get_atom_intention(atom.name)
+        if intention not in allowed_intentions:
+            return (False, None)
+        connected_states = self._storage.get_atoms_states(
+            connected_atom.name for connected_atom in connected_fetcher(atom))
+        ok_to_run = connected_checker(six.itervalues(connected_states))
+        if not ok_to_run:
+            return (False, None)
+        else:
+            return (True, decider_fetcher(atom))
+
     def _get_maybe_ready_for_execute(self, atom):
         """Returns if an atom is *likely* ready to be executed."""
-        state = self.get_state(atom)
-        intention = self._storage.get_atom_intention(atom.name)
-        transition = self._runtime.check_atom_transition(atom, state,
-                                                         st.RUNNING)
-        if not transition or intention != st.EXECUTE:
-            return (False, None)
-
-        predecessor_names = []
-        for previous_atom in self._execution_graph.predecessors(atom):
-            predecessor_names.append(previous_atom.name)
-
-        predecessor_states = self._storage.get_atoms_states(predecessor_names)
-        predecessor_states_iter = six.itervalues(predecessor_states)
-        ok_to_run = all(state == st.SUCCESS and intention == st.EXECUTE
-                        for state, intention in predecessor_states_iter)
-
-        if not ok_to_run:
-            return (False, None)
-        else:
-            edge_deciders = self._runtime.fetch_edge_deciders(atom)
-            return (True, IgnoreDecider(atom, edge_deciders))
+        def decider_fetcher(atom):
+            edge_deciders = self._runtime.fetch_edge_deciders(atom)
+            if edge_deciders:
+                return IgnoreDecider(atom, edge_deciders)
+            else:
+                return NoOpDecider()
+        predecessors_iter = self._execution_graph.predecessors_iter
+        connected_fetcher = lambda atom: \
+            _depth_first_iterate(self._execution_graph,
+                                 {co.FLOW: predecessors_iter},
+                                 predecessors_iter(atom))
+        connected_checker = lambda connected_iter: \
+            all(state == st.SUCCESS and intention == st.EXECUTE
+                for state, intention in connected_iter)
+        return self._get_maybe_ready(atom, st.RUNNING, [st.EXECUTE],
+                                     connected_fetcher, connected_checker,
+                                     decider_fetcher)

     def _get_maybe_ready_for_revert(self, atom):
         """Returns if an atom is *likely* ready to be reverted."""
-        state = self.get_state(atom)
-        intention = self._storage.get_atom_intention(atom.name)
-        transition = self._runtime.check_atom_transition(atom, state,
-                                                         st.REVERTING)
-        if not transition or intention not in (st.REVERT, st.RETRY):
-            return (False, None)
-
-        predecessor_names = []
-        for previous_atom in self._execution_graph.successors(atom):
-            predecessor_names.append(previous_atom.name)
-
-        predecessor_states = self._storage.get_atoms_states(predecessor_names)
-        predecessor_states_iter = six.itervalues(predecessor_states)
-        ok_to_run = all(state in (st.PENDING, st.REVERTED)
-                        for state, intention in predecessor_states_iter)
-
-        if not ok_to_run:
-            return (False, None)
-        else:
-            return (True, NoOpDecider())
+        successors_iter = self._execution_graph.successors_iter
+        connected_fetcher = lambda atom: \
+            _depth_first_iterate(self._execution_graph,
+                                 {co.FLOW: successors_iter},
+                                 successors_iter(atom))
+        connected_checker = lambda connected_iter: \
+            all(state in (st.PENDING, st.REVERTED)
+                for state, _intention in connected_iter)
+        decider_fetcher = lambda atom: NoOpDecider()
+        return self._get_maybe_ready(atom, st.REVERTING, [st.REVERT, st.RETRY],
+                                     connected_fetcher, connected_checker,
+                                     decider_fetcher)

-    def iterate_subgraph(self, atom):
-        """Iterates a subgraph connected to given atom."""
-        for _src, dst in traversal.dfs_edges(self._execution_graph, atom):
-            yield dst
+    def iterate_connected_atoms(self, atom):
+        """Iterates **all** successor atoms connected to given atom."""
+        successors_iter = self._execution_graph.successors_iter
+        return _depth_first_iterate(
+            self._execution_graph, {
+                co.FLOW: successors_iter,
+                co.TASK: successors_iter,
+                co.RETRY: successors_iter,
+            }, successors_iter(atom))

     def iterate_retries(self, state=None):
         """Iterates retry atoms that match the provided state.

         If no state is provided it will yield back all retry atoms.
         """
-        for atom in self._runtime.fetch_atoms_by_kind('retry'):
-            if not state or self.get_state(atom) == state:
+        if state:
+            atoms = list(self.iterate_nodes((co.RETRY,)))
+            atom_states = self._storage.get_atoms_states(atom.name
+                                                         for atom in atoms)
+            for atom in atoms:
+                if atom_states[atom.name][0] == state:
+                    yield atom
+        else:
+            for atom in self.iterate_nodes((co.RETRY,)):
                 yield atom

-    def iterate_all_nodes(self):
-        """Yields back all nodes in the execution graph."""
-        for node in self._execution_graph.nodes_iter():
-            yield node
+    def iterate_nodes(self, allowed_kinds):
+        """Yields back all nodes of specified kinds in the execution graph."""
+        for node, node_data in self._execution_graph.nodes_iter(data=True):
+            if node_data['kind'] in allowed_kinds:
+                yield node

-    def find_atom_retry(self, atom):
-        """Returns the retry atom associated to the given atom (or none)."""
-        return self._execution_graph.node[atom].get('retry')
+    def find_retry(self, node):
+        """Returns the retry atom associated to the given node (or none)."""
+        return self._execution_graph.node[node].get(co.RETRY)

     def is_success(self):
-        """Checks if all nodes in the execution graph are in 'happy' state."""
-        for atom in self.iterate_all_nodes():
-            atom_state = self.get_state(atom)
+        """Checks if all atoms in the execution graph are in 'happy' state."""
+        atoms = list(self.iterate_nodes(co.ATOMS))
+        atom_states = self._storage.get_atoms_states(atom.name
+                                                     for atom in atoms)
+        for atom in atoms:
+            atom_state = atom_states[atom.name][0]
             if atom_state == st.IGNORE:
                 continue
             if atom_state != st.SUCCESS:
                 return False
         return True

     def get_state(self, atom):
         """Gets the state of a given atom (from the backend storage unit)."""
         return self._storage.get_atom_state(atom.name)
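To make the ``noop``-skipping traversal of ``_depth_first_iterate`` concrete,
here is a tiny self-contained approximation using plain dictionaries instead
of the networkx execution graph (illustrative only):

    # Each node maps to (attrs, successors); flow nodes are 'noop' and are
    # traversed through but never yielded, mirroring the helper above.
    graph = {
        'flow-1': ({'kind': 'flow', 'noop': True}, ['task-a', 'task-b']),
        'task-a': ({'kind': 'task'}, []),
        'task-b': ({'kind': 'task'}, []),
    }

    def depth_first(connected_kinds, initial):
        stack = list(initial)
        while stack:
            node = stack.pop()
            attrs, successors = graph[node]
            if not attrs.get('noop'):
                yield node
            if attrs['kind'] in connected_kinds:
                stack.extend(successors)

    # Starting from the flow node, only its (non-noop) atoms come back.
    print(sorted(depth_first({'flow'}, ['flow-1'])))  # ['task-a', 'task-b']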
@@ -21,6 +21,7 @@ from automaton import machines
 from taskflow import logging
 from taskflow import states as st
 from taskflow.types import failure
+from taskflow.utils import iter_utils

 # Default waiting state timeout (in seconds).
 WAITING_TIMEOUT = 60
@@ -48,7 +49,7 @@ class MachineMemory(object):
     """State machine memory."""

     def __init__(self):
-        self.next_nodes = set()
+        self.next_up = set()
         self.not_done = set()
         self.failures = []
         self.done = set()
@@ -107,28 +108,37 @@ class MachineBuilder(object):
             timeout = WAITING_TIMEOUT

         # Cache some local functions/methods...
-        do_schedule = self._scheduler.schedule
         do_complete = self._completer.complete

+        def do_schedule(next_nodes):
+            return self._scheduler.schedule(
+                sorted(next_nodes,
+                       key=lambda node: getattr(node, 'priority', 0),
+                       reverse=True))
+
         def is_runnable():
            # Checks if the storage says the flow is still runnable...
            return self._storage.get_flow_state() == st.RUNNING

-        def iter_next_nodes(target_node=None):
-            # Yields and filters and tweaks the next nodes to execute...
-            maybe_nodes = self._analyzer.get_next_nodes(node=target_node)
-            for node, late_decider in maybe_nodes:
-                proceed = late_decider.check_and_affect(self._runtime)
-                if proceed:
-                    yield node
+        def iter_next_atoms(atom=None, apply_deciders=True):
+            # Yields and filters and tweaks the next atoms to run...
+            maybe_atoms_it = self._analyzer.iter_next_atoms(atom=atom)
+            for atom, late_decider in maybe_atoms_it:
+                if apply_deciders:
+                    proceed = late_decider.check_and_affect(self._runtime)
+                    if proceed:
+                        yield atom
+                else:
+                    yield atom

         def resume(old_state, new_state, event):
             # This reaction function just updates the state machines memory
             # to include any nodes that need to be executed (from a previous
             # attempt, which may be empty if never ran before) and any nodes
             # that are now ready to be ran.
-            memory.next_nodes.update(self._completer.resume())
-            memory.next_nodes.update(iter_next_nodes())
+            memory.next_up.update(
+                iter_utils.unique_seen(self._completer.resume(),
+                                       iter_next_atoms()))
             return SCHEDULE

         def game_over(old_state, new_state, event):
@@ -138,7 +148,17 @@ class MachineBuilder(object):
             # it is *always* called before the final state is entered.
             if memory.failures:
                 return FAILED
-            if any(1 for node in iter_next_nodes()):
+            leftover_atoms = iter_utils.count(
+                # Avoid activating the deciders, since at this point
+                # the engine is finishing and there will be no more further
+                # work done anyway...
+                iter_next_atoms(apply_deciders=False))
+            if leftover_atoms:
+                # Ok we didn't finish (either reverting or executing...) so
+                # that means we must have been stopped at some point...
+                LOG.blather("Suspension determined to have been reacted to"
+                            " since (at least) %s atoms have been left in an"
+                            " unfinished state", leftover_atoms)
                 return SUSPENDED
             elif self._analyzer.is_success():
                 return SUCCESS
@@ -151,13 +171,13 @@ class MachineBuilder(object):
             # if the user of this engine has requested the engine/storage
             # that holds this information to stop or suspend); handles failures
             # that occur during this process safely...
-            if is_runnable() and memory.next_nodes:
-                not_done, failures = do_schedule(memory.next_nodes)
+            if is_runnable() and memory.next_up:
+                not_done, failures = do_schedule(memory.next_up)
                 if not_done:
                     memory.not_done.update(not_done)
                 if failures:
                     memory.failures.extend(failures)
-                memory.next_nodes.clear()
+                memory.next_up.intersection_update(not_done)
             return WAIT

         def wait(old_state, new_state, event):
@@ -176,13 +196,13 @@ class MachineBuilder(object):
             # out what nodes are now ready to be ran (and then triggering those
             # nodes to be scheduled in the future); handles failures that
             # occur during this process safely...
-            next_nodes = set()
+            next_up = set()
             while memory.done:
                 fut = memory.done.pop()
-                node = fut.atom
+                atom = fut.atom
                 try:
-                    event, result = fut.result()
-                    retain = do_complete(node, event, result)
+                    outcome, result = fut.result()
+                    retain = do_complete(atom, outcome, result)
                     if isinstance(result, failure.Failure):
                         if retain:
                             memory.failures.append(result)
@@ -194,24 +214,24 @@ class MachineBuilder(object):
                             # is not enabled, which would suck...)
                            if LOG.isEnabledFor(logging.DEBUG):
                                intention = self._storage.get_atom_intention(
-                                    node.name)
+                                    atom.name)
                                LOG.debug("Discarding failure '%s' (in"
-                                          " response to event '%s') under"
+                                          " response to outcome '%s') under"
                                           " completion units request during"
-                                          " completion of node '%s' (intention"
-                                          " is to %s)", result, event,
-                                          node, intention)
+                                          " completion of atom '%s' (intention"
+                                          " is to %s)", result, outcome,
+                                          atom, intention)
                 except Exception:
                     memory.failures.append(failure.Failure())
                 else:
                     try:
-                        more_nodes = set(iter_next_nodes(target_node=node))
+                        more_work = set(iter_next_atoms(atom=atom))
                     except Exception:
                         memory.failures.append(failure.Failure())
                     else:
-                        next_nodes.update(more_nodes)
-            if is_runnable() and next_nodes and not memory.failures:
-                memory.next_nodes.update(next_nodes)
+                        next_up.update(more_work)
+            if is_runnable() and next_up and not memory.failures:
+                memory.next_up.update(next_up)
                 return SCHEDULE
             elif memory.not_done:
                 return WAIT
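The two iter_utils helpers the builder now leans on can be approximated with
stdlib pieces (taskflow.utils.iter_utils provides the real implementations;
this is just a behavioral sketch):

    import itertools

    def unique_seen(it, *its):
        # Yield items in order, dropping anything already seen.
        seen = set()
        for item in itertools.chain(it, *its):
            if item not in seen:
                seen.add(item)
                yield item

    def count(it):
        # Consume an iterator and report how many items it produced.
        return sum(1 for _ in it)

    assert list(unique_seen([1, 2], [2, 3])) == [1, 2, 3]
    assert count(iter('abc')) == 3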
@@ -14,32 +14,48 @@
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import collections
|
||||
import threading
|
||||
|
||||
import fasteners
|
||||
from oslo_utils import excutils
|
||||
import six
|
||||
|
||||
from taskflow import exceptions as exc
|
||||
from taskflow import flow
|
||||
from taskflow import logging
|
||||
from taskflow import task
|
||||
from taskflow.types import graph as gr
|
||||
from taskflow.types import tree as tr
|
||||
from taskflow.utils import iter_utils
|
||||
from taskflow.utils import misc
|
||||
|
||||
from taskflow.flow import (LINK_INVARIANT, LINK_RETRY) # noqa
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
_RETRY_EDGE_DATA = {
|
||||
flow.LINK_RETRY: True,
|
||||
}
|
||||
_EDGE_INVARIANTS = (flow.LINK_INVARIANT, flow.LINK_MANUAL, flow.LINK_RETRY)
|
||||
_EDGE_REASONS = flow.LINK_REASONS
|
||||
# Constants attached to node attributes in the execution graph (and tree
|
||||
# node metadata), provided as constants here and constants in the compilation
|
||||
# class (so that users will not have to import this file to access them); but
|
||||
# provide them as module constants so that internal code can more
|
||||
# easily access them...
|
||||
TASK = 'task'
|
||||
RETRY = 'retry'
|
||||
FLOW = 'flow'
|
||||
|
||||
# Quite often used together, so make a tuple everyone can share...
|
||||
ATOMS = (TASK, RETRY)
|
||||
|
||||
|
||||
class Compilation(object):
|
||||
"""The result of a compilers compile() is this *immutable* object."""
|
||||
|
||||
#: Task nodes will have a ``kind`` attribute/metadata key with this value.
|
||||
TASK = TASK
|
||||
|
||||
#: Retry nodes will have a ``kind`` attribute/metadata key with this value.
|
||||
RETRY = RETRY
|
||||
|
||||
#: Flow nodes will have a ``kind`` attribute/metadata key with this value.
|
||||
FLOW = FLOW
|
||||
|
||||
def __init__(self, execution_graph, hierarchy):
|
||||
self._execution_graph = execution_graph
|
||||
self._hierarchy = hierarchy
|
||||
@@ -55,6 +71,12 @@ class Compilation(object):
|
||||
return self._hierarchy
|
||||
|
||||
|
||||
def _overlap_occurence_detector(to_graph, from_graph):
|
||||
"""Returns how many nodes in 'from' graph are in 'to' graph (if any)."""
|
||||
return iter_utils.count(node for node in from_graph.nodes_iter()
|
||||
if node in to_graph)


def _add_update_edges(graph, nodes_from, nodes_to, attr_dict=None):
    """Adds/updates edges from nodes to other nodes in the specified graph.

@@ -79,118 +101,7 @@ def _add_update_edges(graph, nodes_from, nodes_to, attr_dict=None):
            graph.add_edge(u, v, attr_dict=attr_dict.copy())


class Linker(object):
    """Compiler helper that adds pattern(s) constraints onto a graph."""

    @staticmethod
    def _is_not_empty(graph):
        # Returns true if the given graph is *not* empty...
        return graph.number_of_nodes() > 0

    @staticmethod
    def _find_first_decomposed(node, priors,
                               decomposed_members, decomposed_filter):
        # How this works: traverse backwards and find only the predecessor
        # items that are actually connected to this entity, and avoid any
        # linkage that is not directly connected. This is guaranteed to be
        # valid since we always iter_links() over predecessors before
        # successors in all currently known patterns; a queue is used here
        # since it is possible for a node to have 2+ different predecessors so
        # we must search back through all of them in a reverse BFS order...
        #
        # Returns the first decomposed graph of those nodes (including the
        # passed in node) that passes the provided filter
        # function (returns none if none match).
        frontier = collections.deque([node])
        # NOTE(harlowja): None is in this initial set since the first prior in
        # the priors list has None as its predecessor (which we don't want to
        # look for a decomposed member of).
        visited = set([None])
        while frontier:
            node = frontier.popleft()
            if node in visited:
                continue
            node_graph = decomposed_members[node]
            if decomposed_filter(node_graph):
                return node_graph
            visited.add(node)
            # TODO(harlowja): optimize this more to avoid searching through
            # things already searched...
            for (u, v) in reversed(priors):
                if node == v:
                    # Queue its predecessor to be searched in the future...
                    frontier.append(u)
        else:
            return None

    def apply_constraints(self, graph, flow, decomposed_members):
        # This list is used to track the links that have been previously
        # iterated over, so that when we are trying to find an entry to
        # connect to we can iterate backwards through this list, finding
        # connected nodes to the current target (let's call it v) and find
        # the first (u_n, or u_n - 1, u_n - 2...) that was decomposed into
        # a non-empty graph. We also retain all predecessors of v so that we
        # can correctly locate u_n - 1 if u_n turns out to have decomposed into
        # an empty graph (and so on).
        priors = []
        # NOTE(harlowja): u, v are flows/tasks (also graph terminology since
        # we are compiling things down into a flattened graph), the meaning
        # of this link iteration via iter_links() is that u -> v (with the
        # provided dictionary attributes, if any).
        for (u, v, attr_dict) in flow.iter_links():
            if not priors:
                priors.append((None, u))
            v_g = decomposed_members[v]
            if not v_g.number_of_nodes():
                priors.append((u, v))
                continue
            invariant = any(attr_dict.get(k) for k in _EDGE_INVARIANTS)
            if not invariant:
                # This is a symbol *only* dependency, connect
                # corresponding providers and consumers to allow the consumer
                # to be executed immediately after the provider finishes (this
                # is an optimization for these types of dependencies...)
                u_g = decomposed_members[u]
                if not u_g.number_of_nodes():
                    # This must always exist, but in case it somehow doesn't...
                    raise exc.CompilationFailure(
                        "Non-invariant link being created from '%s' ->"
                        " '%s' even though the target '%s' was found to be"
                        " decomposed into an empty graph" % (v, u, u))
                for u in u_g.nodes_iter():
                    for v in v_g.nodes_iter():
                        # This is using the intersection() method vs the &
                        # operator since the latter doesn't work with frozen
                        # sets (when used in combination with ordered sets).
                        #
                        # If this is not done the following happens...
                        #
                        # TypeError: unsupported operand type(s)
                        # for &: 'frozenset' and 'OrderedSet'
                        depends_on = u.provides.intersection(v.requires)
                        if depends_on:
                            edge_attrs = {
                                _EDGE_REASONS: frozenset(depends_on),
                            }
                            _add_update_edges(graph,
                                              [u], [v],
                                              attr_dict=edge_attrs)
            else:
                # Connect nodes with no predecessors in v to nodes with no
                # successors in the *first* non-empty predecessor of v (thus
                # maintaining the edge dependency).
                match = self._find_first_decomposed(u, priors,
                                                    decomposed_members,
                                                    self._is_not_empty)
                if match is not None:
                    _add_update_edges(graph,
                                      match.no_successors_iter(),
                                      list(v_g.no_predecessors_iter()),
                                      attr_dict=attr_dict)
            priors.append((u, v))


class _TaskCompiler(object):
class TaskCompiler(object):
    """Non-recursive compiler of tasks."""

    @staticmethod
@@ -199,71 +110,67 @@ class _TaskCompiler(object):

    def compile(self, task, parent=None):
        graph = gr.DiGraph(name=task.name)
        graph.add_node(task)
        node = tr.Node(task)
        graph.add_node(task, kind=TASK)
        node = tr.Node(task, kind=TASK)
        if parent is not None:
            parent.add(node)
        return graph, node


class _FlowCompiler(object):
class FlowCompiler(object):
    """Recursive compiler of flows."""

    @staticmethod
    def handles(obj):
        return isinstance(obj, flow.Flow)

    def __init__(self, deep_compiler_func, linker):
    def __init__(self, deep_compiler_func):
        self._deep_compiler_func = deep_compiler_func
        self._linker = linker

    def _connect_retry(self, retry, graph):
        graph.add_node(retry)

        # All nodes that have no predecessors should depend on this retry.
        nodes_to = [n for n in graph.no_predecessors_iter() if n is not retry]
        if nodes_to:
            _add_update_edges(graph, [retry], nodes_to,
                              attr_dict=_RETRY_EDGE_DATA)

        # Add association for each node of graph that has no existing retry.
        for n in graph.nodes_iter():
            if n is not retry and flow.LINK_RETRY not in graph.node[n]:
                graph.node[n][flow.LINK_RETRY] = retry

    @staticmethod
    def _occurence_detector(to_graph, from_graph):
        return iter_utils.count(node for node in from_graph.nodes_iter()
                                if node in to_graph)

    def _decompose_flow(self, flow, parent=None):
        """Decomposes a flow into a graph, tree node + decomposed subgraphs."""
        graph = gr.DiGraph(name=flow.name)
        node = tr.Node(flow)
        if parent is not None:
            parent.add(node)
        if flow.retry is not None:
            node.add(tr.Node(flow.retry))
        decomposed_members = {}
        for item in flow:
            subgraph, _subnode = self._deep_compiler_func(item, parent=node)
            decomposed_members[item] = subgraph
            if subgraph.number_of_nodes():
                graph = gr.merge_graphs(
                    graph, subgraph,
                    # We can specialize this to be simpler than the default
                    # algorithm which creates overhead that we don't
                    # need for our purposes...
                    overlap_detector=self._occurence_detector)
        return graph, node, decomposed_members

    def compile(self, flow, parent=None):
        graph, node, decomposed_members = self._decompose_flow(flow,
                                                               parent=parent)
        self._linker.apply_constraints(graph, flow, decomposed_members)
        """Decomposes a flow into a graph and scope tree hierarchy."""
        graph = gr.DiGraph(name=flow.name)
        graph.add_node(flow, kind=FLOW, noop=True)
        tree_node = tr.Node(flow, kind=FLOW, noop=True)
        if parent is not None:
            parent.add(tree_node)
        if flow.retry is not None:
            self._connect_retry(flow.retry, graph)
        return graph, node
            tree_node.add(tr.Node(flow.retry, kind=RETRY))
        decomposed = dict(
            (child, self._deep_compiler_func(child, parent=tree_node)[0])
            for child in flow)
        decomposed_graphs = list(six.itervalues(decomposed))
        graph = gr.merge_graphs(graph, *decomposed_graphs,
                                overlap_detector=_overlap_occurence_detector)
        for u, v, attr_dict in flow.iter_links():
            u_graph = decomposed[u]
            v_graph = decomposed[v]
            _add_update_edges(graph, u_graph.no_successors_iter(),
                              list(v_graph.no_predecessors_iter()),
                              attr_dict=attr_dict)
        if flow.retry is not None:
            graph.add_node(flow.retry, kind=RETRY)
            _add_update_edges(graph, [flow], [flow.retry],
                              attr_dict={LINK_INVARIANT: True})
            for node in graph.nodes_iter():
                if node is not flow.retry and node is not flow:
                    graph.node[node].setdefault(RETRY, flow.retry)
            from_nodes = [flow.retry]
            connected_attr_dict = {LINK_INVARIANT: True, LINK_RETRY: True}
        else:
            from_nodes = [flow]
            connected_attr_dict = {LINK_INVARIANT: True}
        connected_to = [
            node for node in graph.no_predecessors_iter() if node is not flow
        ]
        if connected_to:
            # Ensure that all nodes in this graph that have no
            # predecessors depend on this flow (or this flow's retry) so that
            # we can depend on the flow being traversed before its
            # children (even though at the current time it will be skipped).
            _add_update_edges(graph, from_nodes, connected_to,
                              attr_dict=connected_attr_dict)
        return graph, tree_node
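
A hedged usage sketch of the compiler above (the flow construction follows
taskflow's documented ``linear_flow`` pattern; the node count assumes this
version's behavior of adding the flow itself as a no-op FLOW node):

    from taskflow.patterns import linear_flow
    from taskflow import task

    class Noop(task.Task):
        def execute(self):
            pass

    flo = linear_flow.Flow('demo').add(Noop('a'), Noop('b'))
    compilation = PatternCompiler(flo).compile()
    # One graph node for the flow itself (kind=FLOW, noop=True) plus one
    # TASK node per task; the hierarchy tree mirrors the pattern nesting.
    assert compilation.execution_graph.number_of_nodes() == 3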


class PatternCompiler(object):
@@ -288,8 +195,8 @@ class PatternCompiler(object):
    the recursion (now with a decomposed mapping from contained patterns or
    atoms to their corresponding subgraph) we have to then connect the
    subgraphs (and the atom(s) there-in) that were decomposed for a pattern
    correctly into a new graph (using a :py:class:`.Linker` object to ensure
    the pattern mandated constraints are retained) and then return to the
    correctly into a new graph and then ensure the pattern mandated
    constraints are retained. Finally we then return to the
    caller (and they will do the same thing up until the root node, which by
    that point one graph is created with all contained atoms in the
    pattern/nested patterns mandated ordering).
@@ -364,14 +271,10 @@ class PatternCompiler(object):
    def __init__(self, root, freeze=True):
        self._root = root
        self._history = set()
        self._linker = Linker()
        self._freeze = freeze
        self._lock = threading.Lock()
        self._compilation = None
        self._matchers = [
            _FlowCompiler(self._compile, self._linker),
            _TaskCompiler(),
        ]
        self._matchers = (FlowCompiler(self._compile), TaskCompiler())
        self._level = 0

    def _compile(self, item, parent=None):
@@ -418,25 +321,26 @@ class PatternCompiler(object):

    def _post_compile(self, graph, node):
        """Called after the compilation of the root finishes successfully."""
        dup_names = misc.get_duplicate_keys(graph.nodes_iter(),
                                            key=lambda node: node.name)
        if dup_names:
            raise exc.Duplicate(
                "Atoms with duplicate names found: %s" % (sorted(dup_names)))
        if graph.number_of_nodes() == 0:
            raise exc.Empty("Root container '%s' (%s) is empty"
                            % (self._root, type(self._root)))
        self._history.clear()
        self._level = 0

    @fasteners.locked
    def compile(self):
        """Compiles the contained item into a compiled equivalent."""
        if self._compilation is None:
            self._pre_compile()
            graph, node = self._compile(self._root, parent=None)
            self._post_compile(graph, node)
            if self._freeze:
                graph.freeze()
                node.freeze()
            self._compilation = Compilation(graph, node)
            try:
                graph, node = self._compile(self._root, parent=None)
            except Exception:
                with excutils.save_and_reraise_exception():
                    # Always clear the history, so that junk that is no
                    # longer needed does not linger in memory if
                    # compilation fails...
                    self._history.clear()
            else:
                self._post_compile(graph, node)
                if self._freeze:
                    graph.freeze()
                    node.freeze()
                self._compilation = Compilation(graph, node)
        return self._compilation
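
A small sketch of the caching contract above (``flo`` stands for any flow,
elided here): repeated ``compile()`` calls return the same immutable
compilation, and a failed compile leaves no stale traversal history behind.

    # compiler = PatternCompiler(flo)
    # first = compiler.compile()
    # second = compiler.compile()
    # assert first is second  # cached after the first successful compile()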

@@ -18,8 +18,10 @@ import abc
import weakref

from oslo_utils import reflection
from oslo_utils import strutils
import six

from taskflow.engines.action_engine import compiler as co
from taskflow.engines.action_engine import executor as ex
from taskflow import logging
from taskflow import retry as retry_atom
@@ -62,7 +64,7 @@ class RevertAndRetry(Strategy):
        self._retry = retry

    def apply(self):
        tweaked = self._runtime.reset_nodes([self._retry], state=None,
        tweaked = self._runtime.reset_atoms([self._retry], state=None,
                                            intention=st.RETRY)
        tweaked.extend(self._runtime.reset_subgraph(self._retry, state=None,
                                                    intention=st.REVERT))
@@ -79,8 +81,9 @@ class RevertAll(Strategy):
        self._analyzer = runtime.analyzer

    def apply(self):
        return self._runtime.reset_nodes(self._analyzer.iterate_all_nodes(),
                                         state=None, intention=st.REVERT)
        return self._runtime.reset_atoms(
            self._analyzer.iterate_nodes(co.ATOMS),
            state=None, intention=st.REVERT)


class Revert(Strategy):
@@ -93,7 +96,7 @@ class Revert(Strategy):
        self._atom = atom

    def apply(self):
        tweaked = self._runtime.reset_nodes([self._atom], state=None,
        tweaked = self._runtime.reset_atoms([self._atom], state=None,
                                            intention=st.REVERT)
        tweaked.extend(self._runtime.reset_subgraph(self._atom, state=None,
                                                    intention=st.REVERT))
@@ -111,54 +114,61 @@ class Completer(object):
        self._retry_action = runtime.retry_action
        self._undefined_resolver = RevertAll(self._runtime)

    def _complete_task(self, task, event, result):
    def _complete_task(self, task, outcome, result):
        """Completes the given task, processes task failure."""
        if event == ex.EXECUTED:
        if outcome == ex.EXECUTED:
            self._task_action.complete_execution(task, result)
        else:
            self._task_action.complete_reversion(task, result)

    def _complete_retry(self, retry, event, result):
    def _complete_retry(self, retry, outcome, result):
        """Completes the given retry, processes retry failure."""
        if event == ex.EXECUTED:
        if outcome == ex.EXECUTED:
            self._retry_action.complete_execution(retry, result)
        else:
            self._retry_action.complete_reversion(retry, result)

    def resume(self):
        """Resumes nodes in the contained graph.
        """Resumes atoms in the contained graph.

        This is done to allow any previously completed or failed nodes to
        be analyzed, their results processed and any potential nodes affected
        This is done to allow any previously completed or failed atoms to
        be analyzed, their results processed and any potential atoms affected
        to be adjusted as needed.

        This should return a set of nodes which should be the initial set of
        nodes that were previously not finished (due to a RUNNING or REVERTING
        This should return a set of atoms which should be the initial set of
        atoms that were previously not finished (due to a RUNNING or REVERTING
        attempt not previously finishing).
        """
        for node in self._analyzer.iterate_all_nodes():
            if self._analyzer.get_state(node) == st.FAILURE:
                self._process_atom_failure(node, self._storage.get(node.name))
        atoms = list(self._analyzer.iterate_nodes(co.ATOMS))
        atom_states = self._storage.get_atoms_states(atom.name
                                                     for atom in atoms)
        for atom in atoms:
            atom_state = atom_states[atom.name][0]
            if atom_state == st.FAILURE:
                self._process_atom_failure(atom, self._storage.get(atom.name))
        for retry in self._analyzer.iterate_retries(st.RETRYING):
            self._runtime.retry_subflow(retry)
        unfinished_nodes = set()
        for node in self._analyzer.iterate_all_nodes():
            if self._analyzer.get_state(node) in (st.RUNNING, st.REVERTING):
                unfinished_nodes.add(node)
        return unfinished_nodes
            for atom, state, intention in self._runtime.retry_subflow(retry):
                if state:
                    atom_states[atom.name] = (state, intention)
        unfinished_atoms = set()
        for atom in atoms:
            atom_state = atom_states[atom.name][0]
            if atom_state in (st.RUNNING, st.REVERTING):
                unfinished_atoms.add(atom)
        return unfinished_atoms

    def complete(self, node, event, result):
    def complete(self, node, outcome, result):
        """Performs post-execution completion of a node.

        Returns whether the result should be saved into an accumulator of
        failures or whether this should not be done.
        """
        if isinstance(node, task_atom.BaseTask):
            self._complete_task(node, event, result)
            self._complete_task(node, outcome, result)
        else:
            self._complete_retry(node, event, result)
            self._complete_retry(node, outcome, result)
        if isinstance(result, failure.Failure):
            if event == ex.EXECUTED:
            if outcome == ex.EXECUTED:
                self._process_atom_failure(node, result)
            else:
                # Reverting failed, always retain the failure...
@@ -167,7 +177,7 @@ class Completer(object):

    def _determine_resolution(self, atom, failure):
        """Determines which resolution strategy to activate/apply."""
        retry = self._analyzer.find_atom_retry(atom)
        retry = self._analyzer.find_retry(atom)
        if retry is not None:
            # Ask retry controller what to do in case of failure.
            strategy = self._retry_action.on_failure(retry, atom, failure)
@@ -176,6 +186,20 @@ class Completer(object):
            elif strategy == retry_atom.REVERT:
                # Ask parent retry and figure out what to do...
                parent_resolver = self._determine_resolution(retry, failure)

                # In the future, this will be the only behavior. REVERT
                # should defer to the parent retry if it exists, or use the
                # default REVERT_ALL if it doesn't. This lets you safely nest
                # flows with retries inside flows without retries and it still
                # behaves as a user would expect, i.e. if the retry gets
                # exhausted it reverts the outer flow unless the outer flow
                # has a separate retry behavior.
                defer_reverts = strutils.bool_from_string(
                    self._runtime.options.get('defer_reverts', False)
                )
                if defer_reverts:
                    return parent_resolver

                # Ok if the parent resolver says something not REVERT, and
                # it isn't just using the undefined resolver, assume the
                # parent knows best.

@@ -222,6 +222,24 @@ class ActionEngine(base.Engine):
            six.itervalues(self.storage.get_revert_failures()))
        failure.Failure.reraise_if_any(it)

    @staticmethod
    def _check_compilation(compilation):
        """Performs post compilation validation/checks."""
        seen = set()
        dups = set()
        execution_graph = compilation.execution_graph
        for node, node_attrs in execution_graph.nodes_iter(data=True):
            if node_attrs['kind'] in compiler.ATOMS:
                atom_name = node.name
                if atom_name in seen:
                    dups.add(atom_name)
                else:
                    seen.add(atom_name)
        if dups:
            raise exc.Duplicate(
                "Atoms with duplicate names found: %s" % (sorted(dups)))
        return compilation
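
For example (an illustrative sketch, reusing the hypothetical ``Noop`` task
from earlier), duplicate atom names now fail at compile/validation time
rather than surfacing later at runtime:

    # flo = linear_flow.Flow('demo').add(Noop('same'), Noop('same'))
    # engine = taskflow.engines.load(flo)
    # engine.compile()  # raises exc.Duplicate: Atoms with duplicate
    #                   # names found: ['same']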

    def _change_state(self, state):
        with self._state_lock:
            old_state = self.storage.get_flow_state()
@@ -241,11 +259,10 @@ class ActionEngine(base.Engine):
        transient = strutils.bool_from_string(
            self._options.get('inject_transient', True))
        self.storage.ensure_atoms(
            self._compilation.execution_graph.nodes_iter())
        for node in self._compilation.execution_graph.nodes_iter():
            if node.inject:
                self.storage.inject_atom_args(node.name,
                                              node.inject,
            self._runtime.analyzer.iterate_nodes(compiler.ATOMS))
        for atom in self._runtime.analyzer.iterate_nodes(compiler.ATOMS):
            if atom.inject:
                self.storage.inject_atom_args(atom.name, atom.inject,
                                              transient=transient)

    @fasteners.locked
@@ -255,8 +272,8 @@ class ActionEngine(base.Engine):
        # flow/task provided or storage provided, if there are still missing
        # dependencies then this flow will fail at runtime (which we can avoid
        # by failing at validation time).
        execution_graph = self._compilation.execution_graph
        if LOG.isEnabledFor(logging.BLATHER):
            execution_graph = self._compilation.execution_graph
            LOG.blather("Validating scoping and argument visibility for"
                        " execution graph with %s nodes and %s edges with"
                        " density %0.3f", execution_graph.number_of_nodes(),
@@ -269,18 +286,17 @@ class ActionEngine(base.Engine):
        last_cause = None
        last_node = None
        missing_nodes = 0
        fetch_func = self.storage.fetch_unsatisfied_args
        for node in execution_graph.nodes_iter():
            node_missing = fetch_func(node.name, node.rebind,
                                      optional_args=node.optional)
            if node_missing:
                cause = exc.MissingDependencies(node,
                                                sorted(node_missing),
        for atom in self._runtime.analyzer.iterate_nodes(compiler.ATOMS):
            atom_missing = self.storage.fetch_unsatisfied_args(
                atom.name, atom.rebind, optional_args=atom.optional)
            if atom_missing:
                cause = exc.MissingDependencies(atom,
                                                sorted(atom_missing),
                                                cause=last_cause)
                last_cause = cause
                last_node = node
                last_node = atom
                missing_nodes += 1
                missing.update(node_missing)
                missing.update(atom_missing)
        if missing:
            # For when a task is provided (instead of a flow) and that
            # task is the only item in the graph and it's missing deps, avoid
@@ -320,12 +336,13 @@ class ActionEngine(base.Engine):
    def compile(self):
        if self._compiled:
            return
        self._compilation = self._compiler.compile()
        self._compilation = self._check_compilation(self._compiler.compile())
        self._runtime = runtime.Runtime(self._compilation,
                                        self.storage,
                                        self.atom_notifier,
                                        self._task_executor,
                                        self._retry_executor)
                                        self._retry_executor,
                                        options=self._options)
        self._runtime.compile()
        self._compiled = True


@@ -35,7 +35,7 @@ from taskflow.types import failure
from taskflow.types import notifier
from taskflow.utils import threading_utils

# Execution and reversion events.
# Execution and reversion outcomes.
EXECUTED = 'executed'
REVERTED = 'reverted'


@@ -14,6 +14,7 @@
# License for the specific language governing permissions and limitations
# under the License.

import collections
import functools

from futurist import waiters
@@ -22,13 +23,13 @@ from taskflow.engines.action_engine.actions import retry as ra
from taskflow.engines.action_engine.actions import task as ta
from taskflow.engines.action_engine import analyzer as an
from taskflow.engines.action_engine import builder as bu
from taskflow.engines.action_engine import compiler as com
from taskflow.engines.action_engine import completer as co
from taskflow.engines.action_engine import scheduler as sched
from taskflow.engines.action_engine import scopes as sc
from taskflow import flow
from taskflow import exceptions as exc
from taskflow.flow import LINK_DECIDER
from taskflow import states as st
from taskflow import task
from taskflow.utils import async_utils
from taskflow.utils import misc


@@ -41,14 +42,42 @@ class Runtime(object):
    """

    def __init__(self, compilation, storage, atom_notifier,
                 task_executor, retry_executor):
                 task_executor, retry_executor, options=None):
        self._atom_notifier = atom_notifier
        self._task_executor = task_executor
        self._retry_executor = retry_executor
        self._storage = storage
        self._compilation = compilation
        self._atom_cache = {}
        self._atoms_by_kind = {}
        self._options = misc.ensure_dict(options)

    @staticmethod
    def _walk_edge_deciders(graph, atom):
        """Iterates over predecessor deciders that alter an atom's execution."""
        # This is basically a reverse breadth first exploration, with
        # special logic to further traverse down flow nodes...
        predecessors_iter = graph.predecessors_iter
        nodes = collections.deque((u_node, atom)
                                  for u_node in predecessors_iter(atom))
        visited = set()
        while nodes:
            u_node, v_node = nodes.popleft()
            u_node_kind = graph.node[u_node]['kind']
            try:
                yield (u_node, u_node_kind,
                       graph.adj[u_node][v_node][LINK_DECIDER])
            except KeyError:
                pass
            if u_node_kind == com.FLOW and u_node not in visited:
                # Avoid re-exploring the same flow if we get to this
                # same flow by a different *future* path...
                visited.add(u_node)
                # Since we *currently* jump over flow node(s), we need to make
                # sure that any prior decider that was directed at this flow
                # node also gets used during future decisions about this
                # atom node.
                nodes.extend((u_u_node, u_node)
                             for u_u_node in predecessors_iter(u_node))
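
A self-contained sketch of this reverse walk (plain dicts stand in for the
networkx graph; the ``node_kind``/``preds``/``adj`` mappings mirror
``graph.node``, ``graph.predecessors_iter`` and ``graph.adj``):

    import collections

    node_kind = {'f': 'flow', 'a': 'task', 'b': 'task'}
    preds = {'b': ['f'], 'f': ['a'], 'a': []}
    adj = {'a': {'f': {'decider': lambda history: True}}, 'f': {'b': {}}}

    def walk_edge_deciders(atom):
        nodes = collections.deque((u, atom) for u in preds[atom])
        visited = set()
        while nodes:
            u, v = nodes.popleft()
            kind = node_kind[u]
            if 'decider' in adj.get(u, {}).get(v, {}):
                yield (u, kind, adj[u][v]['decider'])
            if kind == 'flow' and u not in visited:
                visited.add(u)
                nodes.extend((uu, u) for uu in preds[u])

    # The decider attached to a -> f also guards 'b', because the walk
    # jumps over (and through) the flow node 'f'.
    print(list(walk_edge_deciders('b')))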

    def compile(self):
        """Compiles & caches frequently used execution helper objects.
@@ -60,47 +89,39 @@ class Runtime(object):
        specific scheduler and so-on).
        """
        change_state_handlers = {
            'task': functools.partial(self.task_action.change_state,
                                      progress=0.0),
            'retry': self.retry_action.change_state,
            com.TASK: functools.partial(self.task_action.change_state,
                                        progress=0.0),
            com.RETRY: self.retry_action.change_state,
        }
        schedulers = {
            'retry': self.retry_scheduler,
            'task': self.task_scheduler,
            com.RETRY: self.retry_scheduler,
            com.TASK: self.task_scheduler,
        }
        execution_graph = self._compilation.execution_graph
        all_retry_atoms = []
        all_task_atoms = []
        for atom in self.analyzer.iterate_all_nodes():
            metadata = {}
            walker = sc.ScopeWalker(self.compilation, atom, names_only=True)
            if isinstance(atom, task.BaseTask):
                check_transition_handler = st.check_task_transition
                change_state_handler = change_state_handlers['task']
                scheduler = schedulers['task']
                all_task_atoms.append(atom)
        check_transition_handlers = {
            com.TASK: st.check_task_transition,
            com.RETRY: st.check_retry_transition,
        }
        graph = self._compilation.execution_graph
        for node, node_data in graph.nodes_iter(data=True):
            node_kind = node_data['kind']
            if node_kind == com.FLOW:
                continue
            elif node_kind in com.ATOMS:
                check_transition_handler = check_transition_handlers[node_kind]
                change_state_handler = change_state_handlers[node_kind]
                scheduler = schedulers[node_kind]
            else:
                check_transition_handler = st.check_retry_transition
                change_state_handler = change_state_handlers['retry']
                scheduler = schedulers['retry']
                all_retry_atoms.append(atom)
            edge_deciders = {}
            for previous_atom in execution_graph.predecessors(atom):
                # If there is any link function that says if this connection
                # is able to run (or should not) ensure we retain it and use
                # it later as needed.
                u_v_data = execution_graph.adj[previous_atom][atom]
                u_v_decider = u_v_data.get(flow.LINK_DECIDER)
                if u_v_decider is not None:
                    edge_deciders[previous_atom.name] = u_v_decider
                raise exc.CompilationFailure("Unknown node kind '%s'"
                                             " encountered" % node_kind)
            metadata = {}
            deciders_it = self._walk_edge_deciders(graph, node)
            walker = sc.ScopeWalker(self.compilation, node, names_only=True)
            metadata['scope_walker'] = walker
            metadata['check_transition_handler'] = check_transition_handler
            metadata['change_state_handler'] = change_state_handler
            metadata['scheduler'] = scheduler
            metadata['edge_deciders'] = edge_deciders
            self._atom_cache[atom.name] = metadata
        self._atoms_by_kind['retry'] = all_retry_atoms
        self._atoms_by_kind['task'] = all_task_atoms
            metadata['edge_deciders'] = tuple(deciders_it)
            self._atom_cache[node.name] = metadata

    @property
    def compilation(self):
@@ -110,6 +131,10 @@ class Runtime(object):
    def storage(self):
        return self._storage

    @property
    def options(self):
        return self._options

    @misc.cachedproperty
    def analyzer(self):
        return an.Analyzer(self)
@@ -163,15 +188,6 @@ class Runtime(object):
        metadata = self._atom_cache[atom.name]
        return metadata['edge_deciders']

    def fetch_atoms_by_kind(self, kind):
        """Fetches all the atoms of a given kind.

        NOTE(harlowja): Currently only ``task`` or ``retry`` are valid
                        kinds of atoms (requesting other kinds will just
                        return empty lists).
        """
        return self._atoms_by_kind.get(kind, [])

    def fetch_scheduler(self, atom):
        """Fetches the cached specific scheduler for the given atom."""
        # This does not check if the name exists (since this is only used
@@ -198,7 +214,7 @@ class Runtime(object):
    # Various helper methods used by the runtime components; not for public
    # consumption...

    def reset_nodes(self, atoms, state=st.PENDING, intention=st.EXECUTE):
    def reset_atoms(self, atoms, state=st.PENDING, intention=st.EXECUTE):
        """Resets all the provided atoms to the given state and intention."""
        tweaked = []
        for atom in atoms:
@@ -214,7 +230,7 @@ class Runtime(object):

    def reset_all(self, state=st.PENDING, intention=st.EXECUTE):
        """Resets all atoms to the given state and intention."""
        return self.reset_nodes(self.analyzer.iterate_all_nodes(),
        return self.reset_atoms(self.analyzer.iterate_nodes(com.ATOMS),
                                state=state, intention=intention)

    def reset_subgraph(self, atom, state=st.PENDING, intention=st.EXECUTE):
@@ -222,8 +238,9 @@ class Runtime(object):

        The subgraph contains all of the given atom's successors.
        """
        return self.reset_nodes(self.analyzer.iterate_subgraph(atom),
                                state=state, intention=intention)
        return self.reset_atoms(
            self.analyzer.iterate_connected_atoms(atom),
            state=state, intention=intention)

    def retry_subflow(self, retry):
        """Prepares a retry + its subgraph for execution.
@@ -232,5 +249,6 @@ class Runtime(object):
        subgraph (its successors) to the ``PENDING`` state with an ``EXECUTE``
        intention.
        """
        self.storage.set_atom_intention(retry.name, st.EXECUTE)
        self.reset_subgraph(retry)
        tweaked = self.reset_atoms([retry], state=None, intention=st.EXECUTE)
        tweaked.extend(self.reset_subgraph(retry))
        return tweaked

@@ -14,14 +14,18 @@
# License for the specific language governing permissions and limitations
# under the License.

from taskflow import atom as atom_type
from taskflow import flow as flow_type
from taskflow.engines.action_engine import compiler as co
from taskflow import logging

LOG = logging.getLogger(__name__)


def _extract_atoms_iter(node, idx=-1):
def _depth_first_reverse_iterate(node, idx=-1):
    """Iterates connected (in reverse) nodes (from starting node).

    Jumps through nodes with a ``FLOW`` ``kind`` attribute (does not yield
    them back).
    """
    # Always go left to right, since right to left is the pattern order
    # and we want to go backwards and not forwards through that ordering...
    if idx == -1:
@@ -29,15 +33,13 @@ def _extract_atoms_iter(node, idx=-1):
    else:
        children_iter = reversed(node[0:idx])
    for child in children_iter:
        if isinstance(child.item, flow_type.Flow):
            for atom in _extract_atoms_iter(child):
                yield atom
        elif isinstance(child.item, atom_type.Atom):
            yield child.item
        if child.metadata['kind'] == co.FLOW:
            # Jump through these...
            for child_child in child.dfs_iter(right_to_left=False):
                if child_child.metadata['kind'] in co.ATOMS:
                    yield child_child.item
        else:
            raise TypeError(
                "Unknown extraction item '%s' (%s)" % (child.item,
                                                       type(child.item)))
            yield child.item


class ScopeWalker(object):
@@ -57,13 +59,10 @@ class ScopeWalker(object):
                             " hierarchy" % atom)
        self._level_cache = {}
        self._atom = atom
        self._graph = compilation.execution_graph
        self._execution_graph = compilation.execution_graph
        self._names_only = names_only
        self._predecessors = None

    #: Function that extracts the *associated* atoms of a given tree node.
    _extract_atoms_iter = staticmethod(_extract_atoms_iter)

    def __iter__(self):
        """Iterates over the visible scopes.

@@ -99,10 +98,14 @@ class ScopeWalker(object):
        nodes (aka we have reached the top of the tree) or we run out of
        predecessors.
        """
        graph = self._execution_graph
        if self._predecessors is None:
            pred_iter = self._graph.bfs_predecessors_iter(self._atom)
            self._predecessors = set(pred_iter)
        predecessors = self._predecessors.copy()
            predecessors = set(
                node for node in graph.bfs_predecessors_iter(self._atom)
                if graph.node[node]['kind'] in co.ATOMS)
            self._predecessors = predecessors.copy()
        else:
            predecessors = self._predecessors.copy()
        last = self._node
        for lvl, parent in enumerate(self._node.path_iter(include_self=False)):
            if not predecessors:
@@ -114,7 +117,7 @@ class ScopeWalker(object):
            except KeyError:
                visible = []
                removals = set()
                for atom in self._extract_atoms_iter(parent, idx=last_idx):
                for atom in _depth_first_reverse_iterate(parent, idx=last_idx):
                    if atom in predecessors:
                        predecessors.remove(atom)
                        removals.add(atom)

@@ -21,6 +21,7 @@ from debtcollector import moves
import six

from taskflow.types import notifier
from taskflow.utils import misc


@six.add_metaclass(abc.ABCMeta)
@@ -41,10 +42,7 @@ class Engine(object):
        self._flow = flow
        self._flow_detail = flow_detail
        self._backend = backend
        if not options:
            self._options = {}
        else:
            self._options = dict(options)
        self._options = misc.ensure_dict(options)
        self._notifier = notifier.Notifier()
        self._atom_notifier = notifier.Notifier()


@@ -95,8 +95,8 @@ class WorkerTaskExecutor(executor.TaskExecutor):
        request = self._requests_cache.get(task_uuid)
        if request is not None:
            response = pr.Response.from_dict(response)
            LOG.debug("Response with state '%s' received for '%s'",
                      response.state, request)
            LOG.debug("Extracted response '%s' and matched it to"
                      " request '%s'", response, request)
            if response.state == pr.RUNNING:
                request.transition_and_log_error(pr.RUNNING, logger=LOG)
            elif response.state == pr.EVENT:

@@ -104,9 +104,10 @@ LOG = logging.getLogger(__name__)
class Message(object):
    """Base class for all message types."""

    def __str__(self):
        cls_name = reflection.get_class_name(self, fully_qualified=False)
        return "<%s> %s" % (cls_name, self.to_dict())
    def __repr__(self):
        return ("<%s object at 0x%x with contents %s>"
                % (reflection.get_class_name(self, fully_qualified=False),
                   id(self), self.to_dict()))

    @abc.abstractmethod
    def to_dict(self):
@@ -150,6 +151,14 @@ class Notify(Message):
    def __init__(self, **data):
        self._data = data

    @property
    def topic(self):
        return self._data.get('topic')

    @property
    def tasks(self):
        return self._data.get('tasks')

    def to_dict(self):
        return self._data


@@ -206,18 +206,18 @@ class ProxyWorkerFinder(WorkerFinder):
            self._workers[topic] = worker
            return (worker, True)

    def _process_response(self, response, message):
        """Process notify message from remote side."""
        LOG.debug("Started processing notify message '%s'",
    def _process_response(self, data, message):
        """Process a notify message sent from the remote side."""
        LOG.debug("Started processing notify response message '%s'",
                  ku.DelayedPretty(message))
        topic = response['topic']
        tasks = response['tasks']
        response = pr.Notify(**data)
        LOG.debug("Extracted notify response '%s'", response)
        with self._cond:
            worker, new_or_updated = self._add(topic, tasks)
            worker, new_or_updated = self._add(response.topic,
                                               response.tasks)
            if new_or_updated:
                LOG.debug("Received notification about worker '%s' (%s"
                          " total workers are currently known)", worker,
                          self._total_workers())
                LOG.debug("Updated worker '%s' (%s total workers are"
                          " currently known)", worker, self._total_workers())
            self._cond.notify_all()
        if new_or_updated:
            self.notifier.notify(self.WORKER_ARRIVED, {'worker': worker})

@@ -20,6 +20,9 @@ import logging
import os
import sys
import time
import traceback

from kazoo import client

top_dir = os.path.abspath(os.path.join(os.path.dirname(__file__),
                                       os.pardir,
@@ -201,14 +204,30 @@ def main_local():
    run_conductor(only_run_once=True)


def check_for_zookeeper(timeout=1):
    sys.stderr.write("Testing for the existence of a zookeeper server...\n")
    sys.stderr.write("Please wait....\n")
    with contextlib.closing(client.KazooClient()) as test_client:
        try:
            test_client.start(timeout=timeout)
        except test_client.handler.timeout_exception:
            sys.stderr.write("Zookeeper is needed for running this example!\n")
            traceback.print_exc()
            return False
        else:
            test_client.stop()
            return True


def main():
    logging.basicConfig(level=logging.ERROR)
    if not check_for_zookeeper():
        return
    if len(sys.argv) == 1:
        main_local()
    elif sys.argv[1] in ('p', 'c'):
        if sys.argv[-1] == "v":
            logging.basicConfig(level=5)
        else:
            logging.basicConfig(level=logging.ERROR)
        if sys.argv[1] == 'p':
            run_poster()
        else:

@@ -98,6 +98,15 @@ class Flow(object):
        * ``meta`` is link metadata, a dictionary.
        """

    @abc.abstractmethod
    def iter_nodes(self):
        """Iterate over nodes of the flow.

        Iterates over 2-tuples ``(A, meta)``, where
        * ``A`` is a child (atom or subflow) of the current flow;
        * ``meta`` is link metadata, a dictionary.
        """

    def __str__(self):
        return "%s: %s(len=%d)" % (reflection.get_class_name(self),
                                   self.name, len(self))

174 taskflow/formatters.py Normal file
@@ -0,0 +1,174 @@
# -*- coding: utf-8 -*-

# Copyright (C) 2014 Yahoo! Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import functools

from taskflow.engines.action_engine import compiler
from taskflow import exceptions as exc
from taskflow import states
from taskflow.types import tree
from taskflow.utils import misc


def _cached_get(cache, cache_key, atom_name, fetch_func, *args, **kwargs):
    """Tries to get a previously saved value or fetches it and caches it."""
    value, value_found = None, False
    try:
        value, value_found = cache[cache_key][atom_name]
    except KeyError:
        try:
            value = fetch_func(*args, **kwargs)
            value_found = True
        except (exc.StorageFailure, exc.NotFound):
            pass
        cache[cache_key][atom_name] = value, value_found
    return value, value_found
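
Usage sketch of the two-level cache above (the fetch function below is
hypothetical; any ``StorageFailure``/``NotFound`` raised by a real fetcher
is recorded as a miss instead):

    cache = {'states': {}}

    def fetch_state(name):
        return 'SUCCESS'

    value, found = _cached_get(cache, 'states', 'a1', fetch_state, 'a1')
    # -> ('SUCCESS', True); cache['states']['a1'] now holds the same pair,
    # so a second call will not invoke fetch_state again.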


def _fetch_predecessor_tree(graph, atom):
    """Creates a tree of predecessors, rooted at given atom."""
    root = tree.Node(atom)
    stack = [(root, atom)]
    seen = set()
    while stack:
        parent, node = stack.pop()
        for pred_node in graph.predecessors_iter(node):
            child = tree.Node(pred_node,
                              **graph.node[pred_node])
            parent.add(child)
            stack.append((child, pred_node))
            seen.add(pred_node)
    return len(seen), root
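
Shape sketch (assuming a small graph where atoms ``a`` and ``b`` both feed
``c``): the failing atom becomes the root, with its transitive predecessors
as descendants.

    # Given edges a -> c and b -> c in the execution graph:
    # count, root = _fetch_predecessor_tree(graph, c)
    # count == 2, and root pformat()s roughly as:
    #   c
    #   |-- a
    #   |-- b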


class FailureFormatter(object):
    """Formats a failure and connects it to associated atoms & engine."""

    _BUILDERS = {
        states.EXECUTE: (_fetch_predecessor_tree, 'predecessors'),
    }

    def __init__(self, engine, hide_inputs_outputs_of=()):
        self._hide_inputs_outputs_of = hide_inputs_outputs_of
        self._engine = engine
        self._formatter_funcs = {
            compiler.FLOW: self._format_flow,
        }
        for kind in compiler.ATOMS:
            self._formatter_funcs[kind] = self._format_atom

    def _format_atom(self, storage, cache, node):
        """Formats a single tree node (atom) into a string version."""
        atom = node.item
        atom_name = atom.name
        atom_attrs = {}
        intention, intention_found = _cached_get(cache, 'intentions',
                                                 atom_name,
                                                 storage.get_atom_intention,
                                                 atom_name)
        if intention_found:
            atom_attrs['intention'] = intention
        state, state_found = _cached_get(cache, 'states', atom_name,
                                         storage.get_atom_state, atom_name)
        if state_found:
            atom_attrs['state'] = state
        if atom_name not in self._hide_inputs_outputs_of:
            # When the cache does not exist for this atom this
            # will be called with the rest of these arguments
            # used to populate the cache.
            fetch_mapped_args = functools.partial(
                storage.fetch_mapped_args, atom.rebind,
                atom_name=atom_name, optional_args=atom.optional)
            requires, requires_found = _cached_get(cache, 'requires',
                                                   atom_name,
                                                   fetch_mapped_args)
            if requires_found:
                atom_attrs['requires'] = requires
            provides, provides_found = _cached_get(cache, 'provides',
                                                   atom_name,
                                                   storage.get_execute_result,
                                                   atom_name)
            if provides_found:
                atom_attrs['provides'] = provides
        if atom_attrs:
            return "Atom '%s' %s" % (atom_name, atom_attrs)
        else:
            return "Atom '%s'" % (atom_name)

    def _format_flow(self, storage, cache, node):
        """Formats a single tree node (flow) into a string version."""
        flow = node.item
        return flow.name

    def _format_node(self, storage, cache, node):
        """Formats a single tree node into a string version."""
        formatter_func = self._formatter_funcs[node.metadata['kind']]
        return formatter_func(storage, cache, node)

    def format(self, fail, atom_matcher):
        """Returns a (exc_info, details) tuple about the failure.

        The ``exc_info`` tuple should be a standard three element
        (exctype, value, traceback) tuple that will be used for further
        logging. A non-empty string is typically returned for ``details``; it
        should contain any string info about the failure (with any specific
        details the ``exc_info`` may not have/contain).
        """
        buff = misc.StringIO()
        storage = self._engine.storage
        compilation = self._engine.compilation
        if fail.exc_info is None:
            # Remote failures will not have an 'exc_info' tuple, so just use
            # the traceback that was captured by the creator when it
            # failed...
            buff.write_nl(fail.pformat(traceback=True))
        if storage is None or compilation is None:
            # Somehow we got called before prepared and/or compiled; ok
            # that's weird, skip doing the rest...
            return (fail.exc_info, buff.getvalue())
        hierarchy = compilation.hierarchy
        graph = compilation.execution_graph
        atom_node = hierarchy.find_first_match(atom_matcher)
        atom = None
        priors = 0
        atom_intention = None
        if atom_node is not None:
            atom = atom_node.item
            atom_intention = storage.get_atom_intention(atom.name)
            priors = sum(c for (_n, c) in graph.in_degree_iter([atom]))
        if atom is not None and priors and atom_intention in self._BUILDERS:
            # Cache as much as we can, since the path of various atoms
            # may cause the same atom to be seen repeatedly depending on
            # the graph structure...
            cache = {
                'intentions': {},
                'provides': {},
                'requires': {},
                'states': {},
            }
            builder, kind = self._BUILDERS[atom_intention]
            count, rooted_tree = builder(graph, atom)
            buff.write_nl('%s %s (most recent atoms first):' % (count, kind))
            formatter = functools.partial(self._format_node, storage, cache)
            child_count = rooted_tree.child_count()
            for i, child in enumerate(rooted_tree, 1):
                if i == child_count:
                    buff.write(child.pformat(stringify_node=formatter,
                                             starting_prefix="  "))
                else:
                    buff.write_nl(child.pformat(stringify_node=formatter,
                                                starting_prefix="  "))
        return (fail.exc_info, buff.getvalue())
@@ -808,6 +808,10 @@ return cmsgpack.pack(result)
            ensure_fresh=ensure_fresh,
            board_fetch_func=lambda ensure_fresh: self._fetch_jobs())

    def register_entity(self, entity):
        # Will implement a redis jobboard conductor register later
        pass

    @base.check_who
    def consume(self, job, who):
        script = self._get_script('consume')

@@ -30,6 +30,7 @@ from oslo_utils import timeutils
from oslo_utils import uuidutils
import six

from taskflow.conductors import base as c_base
from taskflow import exceptions as excp
from taskflow.jobs import base
from taskflow import logging
@@ -236,6 +237,10 @@ class ZookeeperJobBoard(base.NotifyingJobBoard):
    #: Znode child path created under root path that contains trashed jobs.
    TRASH_FOLDER = ".trash"

    #: Znode child path created under root path that contains registered
    #: entities.
    ENTITY_FOLDER = ".entities"

    #: Znode **prefix** that job entries have.
    JOB_PREFIX = 'job'

@@ -259,6 +264,9 @@ class ZookeeperJobBoard(base.NotifyingJobBoard):
        self._path = path
        self._trash_path = self._path.replace(k_paths.basename(self._path),
                                              self.TRASH_FOLDER)
        self._entity_path = self._path.replace(
            k_paths.basename(self._path),
            self.ENTITY_FOLDER)
        # The backend to load the full logbooks from, since what is sent over
        # the data connection is only the logbook uuid and name, and not the
        # full logbook.
@@ -300,6 +308,11 @@ class ZookeeperJobBoard(base.NotifyingJobBoard):
        """Path where all trashed job znodes will be stored."""
        return self._trash_path

    @property
    def entity_path(self):
        """Path where all conductor info znodes will be stored."""
        return self._entity_path

    @property
    def job_count(self):
        return len(self._known_jobs)
@@ -552,6 +565,22 @@ class ZookeeperJobBoard(base.NotifyingJobBoard):
        return (misc.decode_json(lock_data), lock_stat,
                misc.decode_json(job_data), job_stat)

    def register_entity(self, entity):
        entity_type = entity.kind
        if entity_type == c_base.Conductor.ENTITY_KIND:
            entity_path = k_paths.join(self.entity_path, entity_type)
            self._client.ensure_path(entity_path)

            conductor_name = entity.name
            self._client.create(k_paths.join(entity_path,
                                             conductor_name),
                                value=misc.binary_encode(
                                    jsonutils.dumps(entity.to_dict())),
                                ephemeral=True)
        else:
            raise excp.NotImplementedError(
                "Not implemented for other entity type '%s'" % entity_type)

    @base.check_who
    def consume(self, job, who):
        with self._wrap(job.uuid, job.path,

@@ -386,6 +386,15 @@ class JobBoard(object):
        this must be the same name that was used for claiming this job.
        """

    @abc.abstractmethod
    def register_entity(self, entity):
        """Register an entity to the jobboard('s backend), e.g. a conductor.

        :param entity: entity to register as being associated with the
                       jobboard('s backend)
        :type entity: :py:class:`~taskflow.types.entity.Entity`
        """

    @abc.abstractproperty
    def connected(self):
        """Returns if this jobboard is connected."""

@@ -18,9 +18,11 @@ from __future__ import absolute_import

import os

from taskflow import formatters
from taskflow.listeners import base
from taskflow import logging
from taskflow import states
from taskflow import task
from taskflow.types import failure
from taskflow.utils import misc

@@ -56,6 +58,16 @@ class LoggingListener(base.DumpingListener):
        self._logger.log(self._level, message, *args, **kwargs)


def _make_matcher(task_name):
    """Returns a function that matches a node whose task item has the same name."""

    def _task_matcher(node):
        item = node.item
        return isinstance(item, task.BaseTask) and item.name == task_name

    return _task_matcher


class DynamicLoggingListener(base.Listener):
    """Listener that logs notifications it receives.

@@ -99,7 +111,7 @@ class DynamicLoggingListener(base.Listener):
                 flow_listen_for=base.DEFAULT_LISTEN_FOR,
                 retry_listen_for=base.DEFAULT_LISTEN_FOR,
                 log=None, failure_level=logging.WARNING,
                 level=logging.DEBUG):
                 level=logging.DEBUG, hide_inputs_outputs_of=()):
        super(DynamicLoggingListener, self).__init__(
            engine, task_listen_for=task_listen_for,
            flow_listen_for=flow_listen_for, retry_listen_for=retry_listen_for)
@@ -115,33 +127,10 @@ class DynamicLoggingListener(base.Listener):
            states.FAILURE: self._failure_level,
            states.REVERTED: self._failure_level,
        }
        self._hide_inputs_outputs_of = frozenset(hide_inputs_outputs_of)
        self._logger = misc.pick_first_not_none(log, self._LOGGER, LOG)

    @staticmethod
    def _format_failure(fail):
        """Returns a (exc_info, exc_details) tuple about the failure.

        The ``exc_info`` tuple should be a standard three element
        (exctype, value, traceback) tuple that will be used for further
        logging. If a non-empty string is returned for ``exc_details`` it
        should contain any string info about the failure (with any specific
        details the ``exc_info`` may not have/contain). If the ``exc_info``
        tuple is returned as ``None`` then it will cause the logging
        system to avoid outputting any traceback information (read
        the python documentation on the logger interaction with ``exc_info``
        to learn more).
        """
        if fail.exc_info:
            exc_info = fail.exc_info
            exc_details = ''
        else:
            # When a remote failure occurs (or somehow the failure
            # object lost its traceback), we will not have a valid
            # exc_info that can be used but we *should* have a string
            # version that we can use instead...
            exc_info = None
            exc_details = "%s%s" % (os.linesep, fail.pformat(traceback=True))
        return (exc_info, exc_details)
        self._fail_formatter = formatters.FailureFormatter(
            self._engine, hide_inputs_outputs_of=self._hide_inputs_outputs_of)

    def _flow_receiver(self, state, details):
        """Gets called on flow state changes."""
@@ -152,39 +141,49 @@ class DynamicLoggingListener(base.Listener):

    def _task_receiver(self, state, details):
        """Gets called on task state changes."""
        task_name = details['task_name']
        task_uuid = details['task_uuid']
        if 'result' in details and state in base.FINISH_STATES:
            # If the task failed, it's useful to show the exception traceback
            # and any other available exception information.
            result = details.get('result')
            if isinstance(result, failure.Failure):
                exc_info, exc_details = self._format_failure(result)
                self._logger.log(self._failure_level,
                                 "Task '%s' (%s) transitioned into state"
                                 " '%s' from state '%s'%s",
                                 details['task_name'], details['task_uuid'],
                                 state, details['old_state'], exc_details,
                                 exc_info=exc_info)
                exc_info, fail_details = self._fail_formatter.format(
                    result, _make_matcher(task_name))
                if fail_details:
                    self._logger.log(self._failure_level,
                                     "Task '%s' (%s) transitioned into state"
                                     " '%s' from state '%s'%s%s",
                                     task_name, task_uuid, state,
                                     details['old_state'], os.linesep,
                                     fail_details, exc_info=exc_info)
                else:
                    self._logger.log(self._failure_level,
                                     "Task '%s' (%s) transitioned into state"
                                     " '%s' from state '%s'", task_name,
                                     task_uuid, state, details['old_state'],
                                     exc_info=exc_info)
            else:
                # Otherwise, depending on the enabled logging level/state we
                # will show or hide results that the task may have produced
                # during execution.
                level = self._task_log_levels.get(state, self._level)
                if (self._logger.isEnabledFor(self._level)
                        or state in self._FAILURE_STATES):
                show_result = (self._logger.isEnabledFor(self._level)
                               or state == states.FAILURE)
                if show_result and \
                        task_name not in self._hide_inputs_outputs_of:
                    self._logger.log(level, "Task '%s' (%s) transitioned into"
                                     " state '%s' from state '%s' with"
                                     " result '%s'", details['task_name'],
                                     details['task_uuid'], state,
                                     details['old_state'], result)
                                     " result '%s'", task_name, task_uuid,
                                     state, details['old_state'], result)
                else:
                    self._logger.log(level, "Task '%s' (%s) transitioned into"
                                     " state '%s' from state '%s'",
                                     details['task_name'],
                                     details['task_uuid'], state,
                                     task_name, task_uuid, state,
                                     details['old_state'])
        else:
            # Just an intermediary state, carry on!
            level = self._task_log_levels.get(state, self._level)
            self._logger.log(level, "Task '%s' (%s) transitioned into state"
                             " '%s' from state '%s'", details['task_name'],
                             details['task_uuid'], state, details['old_state'])
                             " '%s' from state '%s'", task_name, task_uuid,
                             state, details['old_state'])

@@ -67,7 +67,7 @@ class Flow(flow.Flow):

    def __init__(self, name, retry=None):
        super(Flow, self).__init__(name, retry)
        self._graph = gr.DiGraph()
        self._graph = gr.DiGraph(name=name)
        self._graph.freeze()

    #: Extracts the unsatisfied symbol requirements of a single node.
@@ -266,12 +266,16 @@ class Flow(flow.Flow):
        return self._get_subgraph().number_of_nodes()

    def __iter__(self):
        for n in self._get_subgraph().topological_sort():
        for n, _n_data in self.iter_nodes():
            yield n

    def iter_links(self):
        for (u, v, e_data) in self._get_subgraph().edges_iter(data=True):
            yield (u, v, e_data)
        return self._get_subgraph().edges_iter(data=True)

    def iter_nodes(self):
        g = self._get_subgraph()
        for n in g.topological_sort():
            yield n, g.node[n]

    @property
    def requires(self):

@@ -15,9 +15,7 @@
# under the License.

from taskflow import flow


_LINK_METADATA = {flow.LINK_INVARIANT: True}
from taskflow.types import graph as gr


class Flow(flow.Flow):
@@ -28,22 +26,37 @@ class Flow(flow.Flow):
    the reverse order that the *tasks/flows* have been applied in.
    """

    _no_last_item = object()
    """Sentinel object used to denote that no last item has been assigned.

    This is used to track no last item being added, since at creation there
    is no last item, but since the :meth:`.add` routine can take any object
    including none, we have to use a different object to be able to
    distinguish the lack of any last item...
    """

    def __init__(self, name, retry=None):
        super(Flow, self).__init__(name, retry)
        self._children = []
        self._graph = gr.OrderedDiGraph(name=name)
        self._last_item = self._no_last_item

    def add(self, *items):
        """Adds a given task/tasks/flow/flows to this flow."""
        items = [i for i in items if i not in self._children]
        self._children.extend(items)
        for item in items:
            if not self._graph.has_node(item):
                self._graph.add_node(item)
                if self._last_item is not self._no_last_item:
                    self._graph.add_edge(self._last_item, item,
                                         attr_dict={flow.LINK_INVARIANT: True})
                self._last_item = item
        return self
|
||||
|
||||
def __len__(self):
|
||||
return len(self._children)
|
||||
return len(self._graph)
|
||||
|
||||
def __iter__(self):
|
||||
for child in self._children:
|
||||
yield child
|
||||
for item in self._graph.nodes_iter():
|
||||
yield item
|
||||
|
||||
@property
|
||||
def requires(self):
|
||||
@@ -57,6 +70,10 @@ class Flow(flow.Flow):
|
||||
prior_provides.update(item.provides)
|
||||
return frozenset(requires)
|
||||
|
||||
def iter_nodes(self):
|
||||
for (n, n_data) in self._graph.nodes_iter(data=True):
|
||||
yield (n, n_data)
|
||||
|
||||
def iter_links(self):
|
||||
for src, dst in zip(self._children[:-1], self._children[1:]):
|
||||
yield (src, dst, _LINK_METADATA.copy())
|
||||
for (u, v, e_data) in self._graph.edges_iter(data=True):
|
||||
yield (u, v, e_data)
|
||||
|
||||
@@ -15,6 +15,7 @@
|
||||
# under the License.
|
||||
|
||||
from taskflow import flow
|
||||
from taskflow.types import graph as gr
|
||||
|
||||
|
||||
class Flow(flow.Flow):
|
||||
@@ -26,27 +27,29 @@ class Flow(flow.Flow):
|
||||
|
||||
def __init__(self, name, retry=None):
|
||||
super(Flow, self).__init__(name, retry)
|
||||
# NOTE(imelnikov): A unordered flow is unordered, so we use
|
||||
# set instead of list to save children, children so that
|
||||
# people using it don't depend on the ordering.
|
||||
self._children = set()
|
||||
self._graph = gr.Graph(name=name)
|
||||
|
||||
def add(self, *items):
|
||||
"""Adds a given task/tasks/flow/flows to this flow."""
|
||||
self._children.update(items)
|
||||
for item in items:
|
||||
if not self._graph.has_node(item):
|
||||
self._graph.add_node(item)
|
||||
return self
|
||||
|
||||
def __len__(self):
|
||||
return len(self._children)
|
||||
return len(self._graph)
|
||||
|
||||
def __iter__(self):
|
||||
for child in self._children:
|
||||
yield child
|
||||
for item in self._graph:
|
||||
yield item
|
||||
|
||||
def iter_links(self):
|
||||
# NOTE(imelnikov): children in unordered flow have no dependencies
|
||||
# between each other due to invariants retained during construction.
|
||||
return iter(())
|
||||
for (u, v, e_data) in self._graph.edges_iter(data=True):
|
||||
yield (u, v, e_data)
|
||||
|
||||
def iter_nodes(self):
|
||||
for n, n_data in self._graph.nodes_iter(data=True):
|
||||
yield (n, n_data)
|
||||
|
||||
@property
|
||||
def requires(self):
|
||||
|
||||
@@ -136,9 +136,13 @@ class Connection(path_based.PathBasedConnection):
|
||||
shutil.rmtree(path)
|
||||
|
||||
def _get_children(self, path):
|
||||
if path == self.book_path:
|
||||
filter_func = os.path.isdir
|
||||
else:
|
||||
filter_func = os.path.islink
|
||||
with _storagefailure_wrapper():
|
||||
return [link for link in os.listdir(path)
|
||||
if os.path.islink(self._join_path(path, link))]
|
||||
return [child for child in os.listdir(path)
|
||||
if filter_func(self._join_path(path, child))]
|
||||
|
||||
def _ensure_path(self, path):
|
||||
with _storagefailure_wrapper():
|
||||
|
||||
@@ -34,13 +34,25 @@ class Decision(misc.StrEnum):
|
||||
|
||||
This strategy first consults the parent atom before reverting the
|
||||
associated subflow to determine if the parent retry object provides a
|
||||
different reconciliation strategy (if no parent retry object exists
|
||||
then reverting will proceed, if one does exist the parent retry may
|
||||
override this reconciliation strategy with its own).
|
||||
different reconciliation strategy. This allows for safe nesting of
|
||||
flows with different retry strategies.
|
||||
|
||||
If the parent flow has no retry strategy, the default behavior is
|
||||
to just revert the atoms in the associated subflow. This is
|
||||
generally not the desired behavior, but is left as the default in
|
||||
order to keep backwards-compatibility. The ``defer_reverts``
|
||||
engine option will let you change this behavior. If that is set
|
||||
to True, a REVERT will always defer to the parent, meaning that
|
||||
if the parent has no retry strategy, it will be reverted as well.
|
||||
"""
|
||||
|
||||
#: Completely reverts the whole flow.
|
||||
REVERT_ALL = "REVERT_ALL"
|
||||
"""Reverts the entire flow, regardless of parent strategy.
|
||||
|
||||
This strategy will revert every atom that has executed thus
|
||||
far, regardless of whether the parent flow has a separate
|
||||
retry strategy associated with it.
|
||||
"""
|
||||
|
||||
#: Retries the surrounding/associated subflow again.
|
||||
RETRY = "RETRY"
|
||||
|
||||
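To make the ``defer_reverts`` option described in the docstring above concrete, a minimal sketch (the flow/task names are hypothetical; this assumes the option is passed through ``engines.load`` like other engine options):

    import taskflow.engines
    from taskflow.patterns import linear_flow as lf
    from taskflow import retry
    from taskflow import task


    class BoomTask(task.Task):
        def execute(self):
            raise RuntimeError('boom')


    # The inner flow has no retry of its own; the outer flow does.
    inner = lf.Flow('inner').add(BoomTask('boom'))
    outer = lf.Flow('outer', retry.Times(attempts=2)).add(inner)

    # With defer_reverts=True a REVERT decision in the (retry-less) inner
    # flow is deferred upward, letting the outer retry reconcile it instead
    # of just reverting the inner atoms.
    engine = taskflow.engines.load(outer, engine='serial', defer_reverts=True)
    engine.run()
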
@@ -87,7 +87,7 @@ def check_job_transition(old_state, new_state):


# Flow state transitions
# See: http://docs.openstack.org/developer/taskflow/states.html
# See: http://docs.openstack.org/developer/taskflow/states.html#flow

_ALLOWED_FLOW_TRANSITIONS = frozenset((
    (PENDING, RUNNING),  # run it!

@@ -385,10 +385,12 @@ class Storage(object):

    @fasteners.read_locked
    def get_atoms_states(self, atom_names):
        """Gets all atoms states given a set of names."""
        return dict((name, (self.get_atom_state(name),
                            self.get_atom_intention(name)))
                    for name in atom_names)
        """Gets a dict of atom name => (state, intention) given atom names."""
        details = {}
        for name in set(atom_names):
            source, _clone = self._atomdetail_by_name(name)
            details[name] = (source.state, source.intention)
        return details

    @fasteners.write_locked
    def _update_atom_metadata(self, atom_name, update_with,

@@ -132,7 +132,7 @@ class ExamplesTestCase(test.TestCase):
        # replace them with some constant string
        output = UUID_RE.sub('<SOME UUID>', output)
        expected_output = UUID_RE.sub('<SOME UUID>', expected_output)
        self.assertEqual(output, expected_output)
        self.assertEqual(expected_output, output)


def make_output_files():

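A short usage sketch of the reworked accessor above (the flow and atom names are hypothetical; the storage handle comes from a loaded engine):

    import taskflow.engines
    from taskflow.patterns import linear_flow as lf
    from taskflow import task


    class Noop(task.Task):
        def execute(self):
            pass


    engine = taskflow.engines.load(lf.Flow('demo').add(Noop('a'), Noop('b')))
    engine.run()
    # Returns {atom_name: (state, intention)} per the new docstring.
    for name, (state, intention) in engine.storage.get_atoms_states(
            ['a', 'b']).items():
        print('%s: state=%s, intention=%s' % (name, state, intention))
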
@@ -37,18 +37,19 @@ class BuildersTest(test.TestCase):
        compilation = compiler.PatternCompiler(flow).compile()
        flow_detail = pu.create_flow_detail(flow)
        store = storage.Storage(flow_detail)
        # This ensures the tasks exist in storage...
        for task in compilation.execution_graph:
            store.ensure_atom(task)
        nodes_iter = compilation.execution_graph.nodes_iter(data=True)
        for node, node_attrs in nodes_iter:
            if node_attrs['kind'] in ('task', 'retry'):
                store.ensure_atom(node)
        if initial_state:
            store.set_flow_state(initial_state)
        task_notifier = notifier.Notifier()
        atom_notifier = notifier.Notifier()
        task_executor = executor.SerialTaskExecutor()
        retry_executor = executor.SerialRetryExecutor()
        task_executor.start()
        self.addCleanup(task_executor.stop)
        r = runtime.Runtime(compilation, store,
                            task_notifier, task_executor,
                            atom_notifier, task_executor,
                            retry_executor)
        r.compile()
        return r
@@ -305,6 +306,6 @@ class BuildersTest(test.TestCase):
        self.assertEqual(1, occurrences.get((builder.GAME_OVER, st.SUCCESS)))
        self.assertEqual(1, occurrences.get((builder.UNDEFINED, st.RESUMING)))

        self.assertEqual(0, len(memory.next_nodes))
        self.assertEqual(0, len(memory.next_up))
        self.assertEqual(0, len(memory.not_done))
        self.assertEqual(0, len(memory.failures))

@@ -14,6 +14,7 @@
# License for the specific language governing permissions and limitations
# under the License.

from taskflow import engines
from taskflow.engines.action_engine import compiler
from taskflow import exceptions as exc
from taskflow.patterns import graph_flow as gf
@@ -29,8 +30,8 @@ class PatternCompileTest(test.TestCase):
        task = test_utils.DummyTask(name='a')
        compilation = compiler.PatternCompiler(task).compile()
        g = compilation.execution_graph
        self.assertEqual(list(g.nodes()), [task])
        self.assertEqual(list(g.edges()), [])
        self.assertEqual([task], list(g.nodes()))
        self.assertEqual([], list(g.edges()))

    def test_retry(self):
        r = retry.AlwaysRevert('r1')
@@ -43,27 +44,29 @@ class PatternCompileTest(test.TestCase):

    def test_empty(self):
        flo = lf.Flow("test")
        self.assertRaises(exc.Empty, compiler.PatternCompiler(flo).compile)
        compiler.PatternCompiler(flo).compile()

    def test_linear(self):
        a, b, c, d = test_utils.make_many(4)
        flo = lf.Flow("test")
        flo.add(a, b, c)
        sflo = lf.Flow("sub-test")
        sflo.add(d)
        flo.add(sflo)
        inner_flo = lf.Flow("sub-test")
        inner_flo.add(d)
        flo.add(inner_flo)

        compilation = compiler.PatternCompiler(flo).compile()
        g = compilation.execution_graph
        self.assertEqual(4, len(g))
        self.assertEqual(6, len(g))

        order = g.topological_sort()
        self.assertEqual([a, b, c, d], order)
        self.assertTrue(g.has_edge(c, d))
        self.assertEqual(g.get_edge_data(c, d), {'invariant': True})
        self.assertEqual([flo, a, b, c, inner_flo, d], order)
        self.assertTrue(g.has_edge(c, inner_flo))
        self.assertTrue(g.has_edge(inner_flo, d))
        self.assertEqual({'invariant': True},
                         g.get_edge_data(inner_flo, d))

        self.assertEqual([d], list(g.no_successors_iter()))
        self.assertEqual([a], list(g.no_predecessors_iter()))
        self.assertEqual([flo], list(g.no_predecessors_iter()))

    def test_invalid(self):
        a, b, c = test_utils.make_many(3)
@@ -79,36 +82,42 @@ class PatternCompileTest(test.TestCase):
        flo.add(a, b, c, d)
        compilation = compiler.PatternCompiler(flo).compile()
        g = compilation.execution_graph
        self.assertEqual(4, len(g))
        self.assertEqual(0, g.number_of_edges())
        self.assertEqual(5, len(g))
        self.assertItemsEqual(g.edges(), [
            (flo, a),
            (flo, b),
            (flo, c),
            (flo, d),
        ])
        self.assertEqual(set([a, b, c, d]),
                         set(g.no_successors_iter()))
        self.assertEqual(set([a, b, c, d]),
        self.assertEqual(set([flo]),
                         set(g.no_predecessors_iter()))

    def test_linear_nested(self):
        a, b, c, d = test_utils.make_many(4)
        flo = lf.Flow("test")
        flo.add(a, b)
        flo2 = uf.Flow("test2")
        flo2.add(c, d)
        flo.add(flo2)
        inner_flo = uf.Flow("test2")
        inner_flo.add(c, d)
        flo.add(inner_flo)

        compilation = compiler.PatternCompiler(flo).compile()
        g = compilation.execution_graph
        self.assertEqual(4, len(g))
        graph = compilation.execution_graph
        self.assertEqual(6, len(graph))

        lb = g.subgraph([a, b])
        lb = graph.subgraph([a, b])
        self.assertFalse(lb.has_edge(b, a))
        self.assertTrue(lb.has_edge(a, b))
        self.assertEqual(g.get_edge_data(a, b), {'invariant': True})
        self.assertEqual({'invariant': True}, graph.get_edge_data(a, b))

        ub = g.subgraph([c, d])
        ub = graph.subgraph([c, d])
        self.assertEqual(0, ub.number_of_edges())

        # This ensures that c and d do not start executing until after b.
        self.assertTrue(g.has_edge(b, c))
        self.assertTrue(g.has_edge(b, d))
        self.assertTrue(graph.has_edge(b, inner_flo))
        self.assertTrue(graph.has_edge(inner_flo, c))
        self.assertTrue(graph.has_edge(inner_flo, d))

    def test_unordered_nested(self):
        a, b, c, d = test_utils.make_many(4)
@@ -120,34 +129,30 @@ class PatternCompileTest(test.TestCase):

        compilation = compiler.PatternCompiler(flo).compile()
        g = compilation.execution_graph
        self.assertEqual(4, len(g))
        for n in [a, b]:
            self.assertFalse(g.has_edge(n, c))
            self.assertFalse(g.has_edge(n, d))
        self.assertFalse(g.has_edge(d, c))
        self.assertTrue(g.has_edge(c, d))
        self.assertEqual(g.get_edge_data(c, d), {'invariant': True})

        ub = g.subgraph([a, b])
        self.assertEqual(0, ub.number_of_edges())
        lb = g.subgraph([c, d])
        self.assertEqual(1, lb.number_of_edges())
        self.assertEqual(6, len(g))
        self.assertItemsEqual(g.edges(), [
            (flo, a),
            (flo, b),
            (flo, flo2),
            (flo2, c),
            (c, d)
        ])

    def test_unordered_nested_in_linear(self):
        a, b, c, d = test_utils.make_many(4)
        flo = lf.Flow('lt').add(
            a,
            uf.Flow('ut').add(b, c),
            d)
        inner_flo = uf.Flow('ut').add(b, c)
        flo = lf.Flow('lt').add(a, inner_flo, d)

        compilation = compiler.PatternCompiler(flo).compile()
        g = compilation.execution_graph
        self.assertEqual(4, len(g))
        self.assertEqual(6, len(g))
        self.assertItemsEqual(g.edges(), [
            (a, b),
            (a, c),
            (flo, a),
            (a, inner_flo),
            (inner_flo, b),
            (inner_flo, c),
            (b, d),
            (c, d)
            (c, d),
        ])

    def test_graph(self):
@@ -157,8 +162,8 @@ class PatternCompileTest(test.TestCase):

        compilation = compiler.PatternCompiler(flo).compile()
        g = compilation.execution_graph
        self.assertEqual(4, len(g))
        self.assertEqual(0, g.number_of_edges())
        self.assertEqual(5, len(g))
        self.assertEqual(4, g.number_of_edges())

    def test_graph_nested(self):
        a, b, c, d, e, f, g = test_utils.make_many(7)
@@ -171,10 +176,17 @@ class PatternCompileTest(test.TestCase):

        compilation = compiler.PatternCompiler(flo).compile()
        graph = compilation.execution_graph
        self.assertEqual(7, len(graph))
        self.assertItemsEqual(graph.edges(data=True), [
            (e, f, {'invariant': True}),
            (f, g, {'invariant': True})
        self.assertEqual(9, len(graph))
        self.assertItemsEqual(graph.edges(), [
            (flo, a),
            (flo, b),
            (flo, c),
            (flo, d),
            (flo, flo2),

            (flo2, e),
            (e, f),
            (f, g),
        ])

    def test_graph_nested_graph(self):
@@ -187,9 +199,19 @@ class PatternCompileTest(test.TestCase):
        flo.add(flo2)

        compilation = compiler.PatternCompiler(flo).compile()
        g = compilation.execution_graph
        self.assertEqual(7, len(g))
        self.assertEqual(0, g.number_of_edges())
        graph = compilation.execution_graph
        self.assertEqual(9, len(graph))
        self.assertItemsEqual(graph.edges(), [
            (flo, a),
            (flo, b),
            (flo, c),
            (flo, d),
            (flo, flo2),

            (flo2, e),
            (flo2, f),
            (flo2, g),
        ])

    def test_graph_links(self):
        a, b, c, d = test_utils.make_many(4)
@@ -201,13 +223,15 @@ class PatternCompileTest(test.TestCase):

        compilation = compiler.PatternCompiler(flo).compile()
        g = compilation.execution_graph
        self.assertEqual(4, len(g))
        self.assertEqual(5, len(g))
        self.assertItemsEqual(g.edges(data=True), [
            (flo, a, {'invariant': True}),

            (a, b, {'manual': True}),
            (b, c, {'manual': True}),
            (c, d, {'manual': True}),
        ])
        self.assertItemsEqual([a], g.no_predecessors_iter())
        self.assertItemsEqual([flo], g.no_predecessors_iter())
        self.assertItemsEqual([d], g.no_successors_iter())

    def test_graph_dependencies(self):
@@ -217,96 +241,112 @@ class PatternCompileTest(test.TestCase):

        compilation = compiler.PatternCompiler(flo).compile()
        g = compilation.execution_graph
        self.assertEqual(2, len(g))
        self.assertEqual(3, len(g))
        self.assertItemsEqual(g.edges(data=True), [
            (flo, a, {'invariant': True}),
            (a, b, {'reasons': set(['x'])})
        ])
        self.assertItemsEqual([a], g.no_predecessors_iter())
        self.assertItemsEqual([flo], g.no_predecessors_iter())
        self.assertItemsEqual([b], g.no_successors_iter())

    def test_graph_nested_requires(self):
        a = test_utils.ProvidesRequiresTask('a', provides=['x'], requires=[])
        b = test_utils.ProvidesRequiresTask('b', provides=[], requires=[])
        c = test_utils.ProvidesRequiresTask('c', provides=[], requires=['x'])
        flo = gf.Flow("test").add(
            a,
            lf.Flow("test2").add(b, c)
        )
        inner_flo = lf.Flow("test2").add(b, c)
        flo = gf.Flow("test").add(a, inner_flo)

        compilation = compiler.PatternCompiler(flo).compile()
        g = compilation.execution_graph
        self.assertEqual(3, len(g))
        self.assertItemsEqual(g.edges(data=True), [
            (a, c, {'reasons': set(['x'])}),
            (b, c, {'invariant': True})
        graph = compilation.execution_graph
        self.assertEqual(5, len(graph))
        self.assertItemsEqual(graph.edges(data=True), [
            (flo, a, {'invariant': True}),
            (inner_flo, b, {'invariant': True}),
            (a, inner_flo, {'reasons': set(['x'])}),
            (b, c, {'invariant': True}),
        ])
        self.assertItemsEqual([a, b], g.no_predecessors_iter())
        self.assertItemsEqual([c], g.no_successors_iter())
        self.assertItemsEqual([flo], graph.no_predecessors_iter())
        self.assertItemsEqual([c], graph.no_successors_iter())

    def test_graph_nested_provides(self):
        a = test_utils.ProvidesRequiresTask('a', provides=[], requires=['x'])
        b = test_utils.ProvidesRequiresTask('b', provides=['x'], requires=[])
        c = test_utils.ProvidesRequiresTask('c', provides=[], requires=[])
        flo = gf.Flow("test").add(
            a,
            lf.Flow("test2").add(b, c)
        )
        inner_flo = lf.Flow("test2").add(b, c)
        flo = gf.Flow("test").add(a, inner_flo)

        compilation = compiler.PatternCompiler(flo).compile()
        g = compilation.execution_graph
        self.assertEqual(3, len(g))
        self.assertItemsEqual(g.edges(data=True), [
        graph = compilation.execution_graph
        self.assertEqual(5, len(graph))
        self.assertItemsEqual(graph.edges(data=True), [
            (flo, inner_flo, {'invariant': True}),

            (inner_flo, b, {'invariant': True}),
            (b, c, {'invariant': True}),
            (b, a, {'reasons': set(['x'])})
            (c, a, {'reasons': set(['x'])}),
        ])
        self.assertItemsEqual([b], g.no_predecessors_iter())
        self.assertItemsEqual([a, c], g.no_successors_iter())
        self.assertItemsEqual([flo], graph.no_predecessors_iter())
        self.assertItemsEqual([a], graph.no_successors_iter())

    def test_empty_flow_in_linear_flow(self):
        flow = lf.Flow('lf')
        flo = lf.Flow('lf')
        a = test_utils.ProvidesRequiresTask('a', provides=[], requires=[])
        b = test_utils.ProvidesRequiresTask('b', provides=[], requires=[])
        empty_flow = gf.Flow("empty")
        flow.add(a, empty_flow, b)
        empty_flo = gf.Flow("empty")
        flo.add(a, empty_flo, b)

        compilation = compiler.PatternCompiler(flow).compile()
        g = compilation.execution_graph
        self.assertItemsEqual(g.edges(data=True), [
            (a, b, {'invariant': True}),
        compilation = compiler.PatternCompiler(flo).compile()
        graph = compilation.execution_graph
        self.assertItemsEqual(graph.edges(), [
            (flo, a),
            (a, empty_flo),
            (empty_flo, b),
        ])

    def test_many_empty_in_graph_flow(self):
        flow = gf.Flow('root')
        flo = gf.Flow('root')

        a = test_utils.ProvidesRequiresTask('a', provides=[], requires=[])
        flow.add(a)
        flo.add(a)

        b = lf.Flow('b')
        b_0 = test_utils.ProvidesRequiresTask('b.0', provides=[], requires=[])
        b_1 = lf.Flow('b.1')
        b_2 = lf.Flow('b.2')
        b_3 = test_utils.ProvidesRequiresTask('b.3', provides=[], requires=[])
        b.add(
            b_0,
            lf.Flow('b.1'), lf.Flow('b.2'),
            b_3,
        )
        flow.add(b)
        b.add(b_0, b_1, b_2, b_3)
        flo.add(b)

        c = lf.Flow('c')
        c.add(lf.Flow('c.0'), lf.Flow('c.1'), lf.Flow('c.2'))
        flow.add(c)
        c_0 = lf.Flow('c.0')
        c_1 = lf.Flow('c.1')
        c_2 = lf.Flow('c.2')
        c.add(c_0, c_1, c_2)
        flo.add(c)

        d = test_utils.ProvidesRequiresTask('d', provides=[], requires=[])
        flow.add(d)
        flo.add(d)

        flow.link(b, d)
        flow.link(a, d)
        flow.link(c, d)
        flo.link(b, d)
        flo.link(a, d)
        flo.link(c, d)

        compilation = compiler.PatternCompiler(flow).compile()
        g = compilation.execution_graph
        self.assertTrue(g.has_edge(b_0, b_3))
        self.assertTrue(g.has_edge(b_3, d))
        self.assertEqual(4, len(g))
        compilation = compiler.PatternCompiler(flo).compile()
        graph = compilation.execution_graph

        self.assertTrue(graph.has_edge(flo, a))

        self.assertTrue(graph.has_edge(flo, b))
        self.assertTrue(graph.has_edge(b_0, b_1))
        self.assertTrue(graph.has_edge(b_1, b_2))
        self.assertTrue(graph.has_edge(b_2, b_3))

        self.assertTrue(graph.has_edge(flo, c))
        self.assertTrue(graph.has_edge(c_0, c_1))
        self.assertTrue(graph.has_edge(c_1, c_2))

        self.assertTrue(graph.has_edge(b_3, d))
        self.assertEqual(12, len(graph))

    def test_empty_flow_in_nested_flow(self):
        flow = lf.Flow('lf')
@@ -323,9 +363,10 @@ class PatternCompileTest(test.TestCase):
        compilation = compiler.PatternCompiler(flow).compile()
        g = compilation.execution_graph

        self.assertTrue(g.has_edge(a, c))
        self.assertTrue(g.has_edge(c, d))
        self.assertTrue(g.has_edge(d, b))
        for source, target in [(flow, a), (a, flow2),
                               (flow2, c), (c, empty_flow),
                               (empty_flow, d), (d, b)]:
            self.assertTrue(g.has_edge(source, target))

    def test_empty_flow_in_graph_flow(self):
        flow = lf.Flow('lf')
@@ -336,19 +377,9 @@ class PatternCompileTest(test.TestCase):

        compilation = compiler.PatternCompiler(flow).compile()
        g = compilation.execution_graph
        self.assertTrue(g.has_edge(a, b))

    def test_empty_flow_in_graph_flow_empty_linkage(self):
        flow = gf.Flow('lf')
        a = test_utils.ProvidesRequiresTask('a', provides=[], requires=[])
        b = test_utils.ProvidesRequiresTask('b', provides=[], requires=[])
        empty_flow = lf.Flow("empty")
        flow.add(a, empty_flow, b)
        flow.link(empty_flow, b)

        compilation = compiler.PatternCompiler(flow).compile()
        g = compilation.execution_graph
        self.assertEqual(0, len(g.edges()))
        self.assertTrue(g.has_edge(flow, a))
        self.assertTrue(g.has_edge(a, empty_flow))
        self.assertTrue(g.has_edge(empty_flow, b))

    def test_empty_flow_in_graph_flow_linkage(self):
        flow = gf.Flow('lf')
@@ -360,60 +391,66 @@ class PatternCompileTest(test.TestCase):

        compilation = compiler.PatternCompiler(flow).compile()
        g = compilation.execution_graph
        self.assertEqual(1, len(g.edges()))
        self.assertTrue(g.has_edge(a, b))
        self.assertTrue(g.has_edge(flow, a))
        self.assertTrue(g.has_edge(flow, empty_flow))

    def test_checks_for_dups(self):
        flo = gf.Flow("test").add(
            test_utils.DummyTask(name="a"),
            test_utils.DummyTask(name="a")
        )
        e = engines.load(flo)
        self.assertRaisesRegexp(exc.Duplicate,
                                '^Atoms with duplicate names',
                                compiler.PatternCompiler(flo).compile)
                                e.compile)

    def test_checks_for_dups_globally(self):
        flo = gf.Flow("test").add(
            gf.Flow("int1").add(test_utils.DummyTask(name="a")),
            gf.Flow("int2").add(test_utils.DummyTask(name="a")))
        e = engines.load(flo)
        self.assertRaisesRegexp(exc.Duplicate,
                                '^Atoms with duplicate names',
                                compiler.PatternCompiler(flo).compile)
                                e.compile)

    def test_retry_in_linear_flow(self):
        flo = lf.Flow("test", retry.AlwaysRevert("c"))
        compilation = compiler.PatternCompiler(flo).compile()
        g = compilation.execution_graph
        self.assertEqual(1, len(g))
        self.assertEqual(0, g.number_of_edges())
        self.assertEqual(2, len(g))
        self.assertEqual(1, g.number_of_edges())

    def test_retry_in_unordered_flow(self):
        flo = uf.Flow("test", retry.AlwaysRevert("c"))
        compilation = compiler.PatternCompiler(flo).compile()
        g = compilation.execution_graph
        self.assertEqual(1, len(g))
        self.assertEqual(0, g.number_of_edges())
        self.assertEqual(2, len(g))
        self.assertEqual(1, g.number_of_edges())

    def test_retry_in_graph_flow(self):
        flo = gf.Flow("test", retry.AlwaysRevert("c"))
        compilation = compiler.PatternCompiler(flo).compile()
        g = compilation.execution_graph
        self.assertEqual(1, len(g))
        self.assertEqual(0, g.number_of_edges())
        self.assertEqual(2, len(g))
        self.assertEqual(1, g.number_of_edges())

    def test_retry_in_nested_flows(self):
        c1 = retry.AlwaysRevert("c1")
        c2 = retry.AlwaysRevert("c2")
        flo = lf.Flow("test", c1).add(lf.Flow("test2", c2))
        inner_flo = lf.Flow("test2", c2)
        flo = lf.Flow("test", c1).add(inner_flo)
        compilation = compiler.PatternCompiler(flo).compile()
        g = compilation.execution_graph

        self.assertEqual(2, len(g))
        self.assertEqual(4, len(g))
        self.assertItemsEqual(g.edges(data=True), [
            (c1, c2, {'retry': True})
            (flo, c1, {'invariant': True}),
            (c1, inner_flo, {'invariant': True, 'retry': True}),
            (inner_flo, c2, {'invariant': True}),
        ])
        self.assertIs(c1, g.node[c2]['retry'])
        self.assertItemsEqual([c1], g.no_predecessors_iter())
        self.assertItemsEqual([flo], g.no_predecessors_iter())
        self.assertItemsEqual([c2], g.no_successors_iter())

    def test_retry_in_linear_flow_with_tasks(self):
@@ -423,13 +460,14 @@ class PatternCompileTest(test.TestCase):
        compilation = compiler.PatternCompiler(flo).compile()
        g = compilation.execution_graph

        self.assertEqual(3, len(g))
        self.assertEqual(4, len(g))
        self.assertItemsEqual(g.edges(data=True), [
            (flo, c, {'invariant': True}),
            (a, b, {'invariant': True}),
            (c, a, {'retry': True})
            (c, a, {'invariant': True, 'retry': True})
        ])

        self.assertItemsEqual([c], g.no_predecessors_iter())
        self.assertItemsEqual([flo], g.no_predecessors_iter())
        self.assertItemsEqual([b], g.no_successors_iter())
        self.assertIs(c, g.node[a]['retry'])
        self.assertIs(c, g.node[b]['retry'])
@@ -441,13 +479,14 @@ class PatternCompileTest(test.TestCase):
        compilation = compiler.PatternCompiler(flo).compile()
        g = compilation.execution_graph

        self.assertEqual(3, len(g))
        self.assertEqual(4, len(g))
        self.assertItemsEqual(g.edges(data=True), [
            (c, a, {'retry': True}),
            (c, b, {'retry': True})
            (flo, c, {'invariant': True}),
            (c, a, {'invariant': True, 'retry': True}),
            (c, b, {'invariant': True, 'retry': True}),
        ])

        self.assertItemsEqual([c], g.no_predecessors_iter())
        self.assertItemsEqual([flo], g.no_predecessors_iter())
        self.assertItemsEqual([a, b], g.no_successors_iter())
        self.assertIs(c, g.node[a]['retry'])
        self.assertIs(c, g.node[b]['retry'])
@@ -458,15 +497,16 @@ class PatternCompileTest(test.TestCase):
        flo = gf.Flow("test", r).add(a, b, c).link(b, c)
        compilation = compiler.PatternCompiler(flo).compile()
        g = compilation.execution_graph
        self.assertEqual(4, len(g))
        self.assertEqual(5, len(g))

        self.assertItemsEqual(g.edges(data=True), [
            (r, a, {'retry': True}),
            (r, b, {'retry': True}),
            (flo, r, {'invariant': True}),
            (r, a, {'invariant': True, 'retry': True}),
            (r, b, {'invariant': True, 'retry': True}),
            (b, c, {'manual': True})
        ])

        self.assertItemsEqual([r], g.no_predecessors_iter())
        self.assertItemsEqual([flo], g.no_predecessors_iter())
        self.assertItemsEqual([a, c], g.no_successors_iter())
        self.assertIs(r, g.node[a]['retry'])
        self.assertIs(r, g.node[b]['retry'])
@@ -476,18 +516,18 @@ class PatternCompileTest(test.TestCase):
        c1 = retry.AlwaysRevert("cp1")
        c2 = retry.AlwaysRevert("cp2")
        a, b, c, d = test_utils.make_many(4)
        flo = lf.Flow("test", c1).add(
            a,
            lf.Flow("test", c2).add(b, c),
            d)
        inner_flo = lf.Flow("test", c2).add(b, c)
        flo = lf.Flow("test", c1).add(a, inner_flo, d)
        compilation = compiler.PatternCompiler(flo).compile()
        g = compilation.execution_graph

        self.assertEqual(6, len(g))
        self.assertEqual(8, len(g))
        self.assertItemsEqual(g.edges(data=True), [
            (c1, a, {'retry': True}),
            (a, c2, {'invariant': True}),
            (c2, b, {'retry': True}),
            (flo, c1, {'invariant': True}),
            (c1, a, {'invariant': True, 'retry': True}),
            (a, inner_flo, {'invariant': True}),
            (inner_flo, c2, {'invariant': True}),
            (c2, b, {'invariant': True, 'retry': True}),
            (b, c, {'invariant': True}),
            (c, d, {'invariant': True}),
        ])
@@ -501,17 +541,17 @@ class PatternCompileTest(test.TestCase):
    def test_retry_subflows_hierarchy(self):
        c1 = retry.AlwaysRevert("cp1")
        a, b, c, d = test_utils.make_many(4)
        flo = lf.Flow("test", c1).add(
            a,
            lf.Flow("test").add(b, c),
            d)
        inner_flo = lf.Flow("test").add(b, c)
        flo = lf.Flow("test", c1).add(a, inner_flo, d)
        compilation = compiler.PatternCompiler(flo).compile()
        g = compilation.execution_graph

        self.assertEqual(5, len(g))
        self.assertEqual(7, len(g))
        self.assertItemsEqual(g.edges(data=True), [
            (c1, a, {'retry': True}),
            (a, b, {'invariant': True}),
            (flo, c1, {'invariant': True}),
            (c1, a, {'invariant': True, 'retry': True}),
            (a, inner_flo, {'invariant': True}),
            (inner_flo, b, {'invariant': True}),
            (b, c, {'invariant': True}),
            (c, d, {'invariant': True}),
        ])

@@ -245,7 +245,7 @@ class MixedPatternScopingTest(test.TestCase):
                first_subroot = i
                break
        self.assertGreater(first_subroot, first_root)
        self.assertEqual(scope[0][-2:], ['root.2', 'root.1'])
        self.assertEqual(['root.2', 'root.1'], scope[0][-2:])

    def test_shadow_graph(self):
        r = gf.Flow("root")

@@ -17,6 +17,7 @@
import contextlib
import threading

from kazoo.protocol import paths as k_paths
from kazoo.recipe import watchers
from oslo_serialization import jsonutils
from oslo_utils import uuidutils
@@ -25,12 +26,14 @@ import testtools
from zake import fake_client
from zake import utils as zake_utils

from taskflow import exceptions as excp
from taskflow.jobs.backends import impl_zookeeper
from taskflow import states
from taskflow import test
from taskflow.test import mock
from taskflow.tests.unit.jobs import base
from taskflow.tests import utils as test_utils
from taskflow.types import entity
from taskflow.utils import kazoo_utils
from taskflow.utils import misc
from taskflow.utils import persistence_utils as p_utils
@@ -97,7 +100,7 @@ class ZookeeperBoardTestMixin(base.BoardTestMixin):
    def test_board_iter(self):
        with base.connect_close(self.board):
            it = self.board.iterjobs()
            self.assertEqual(it.board, self.board)
            self.assertEqual(self.board, it.board)
            self.assertFalse(it.only_unclaimed)
            self.assertFalse(it.ensure_fresh)

@@ -222,8 +225,8 @@ class ZakeJobboardTest(test.TestCase, ZookeeperBoardTestMixin):
                    and not path.endswith(LOCK_POSTFIX)):
                jobs.append(path)

        self.assertEqual(len(trashed), 1)
        self.assertEqual(len(jobs), 0)
        self.assertEqual(1, len(trashed))
        self.assertEqual(0, len(jobs))

    def test_posting_received_raw(self):
        book = p_utils.temporary_log_book()
@@ -259,3 +262,34 @@ class ZakeJobboardTest(test.TestCase, ZookeeperBoardTestMixin):
            },
            'details': {},
        }, jsonutils.loads(misc.binary_decode(paths[path_key]['data'])))

    def test_register_entity(self):
        conductor_name = "conductor-abc@localhost:4123"
        entity_instance = entity.Entity("conductor",
                                        conductor_name,
                                        {})
        with base.connect_close(self.board):
            self.board.register_entity(entity_instance)
        # Check '.entity' node has been created
        self.assertTrue(self.board.entity_path in self.client.storage.paths)

        conductor_entity_path = k_paths.join(self.board.entity_path,
                                             'conductor',
                                             conductor_name)
        self.assertTrue(conductor_entity_path in self.client.storage.paths)
        conductor_data = (
            self.client.storage.paths[conductor_entity_path]['data'])
        self.assertTrue(len(conductor_data) > 0)
        self.assertDictEqual({
            'name': conductor_name,
            'kind': 'conductor',
            'metadata': {},
        }, jsonutils.loads(misc.binary_decode(conductor_data)))

        entity_instance_2 = entity.Entity("non-sense",
                                          "other_name",
                                          {})
        with base.connect_close(self.board):
            self.assertRaises(excp.NotImplementedError,
                              self.board.register_entity,
                              entity_instance_2)

@@ -30,21 +30,21 @@ class GraphFlowTest(test.TestCase):
    def test_graph_flow_starts_as_empty(self):
        f = gf.Flow('test')

        self.assertEqual(len(f), 0)
        self.assertEqual(list(f), [])
        self.assertEqual(list(f.iter_links()), [])
        self.assertEqual(0, len(f))
        self.assertEqual([], list(f))
        self.assertEqual([], list(f.iter_links()))

        self.assertEqual(f.requires, set())
        self.assertEqual(f.provides, set())
        self.assertEqual(set(), f.requires)
        self.assertEqual(set(), f.provides)

        expected = 'taskflow.patterns.graph_flow.Flow: test(len=0)'
        self.assertEqual(str(f), expected)
        self.assertEqual(expected, str(f))

    def test_graph_flow_add_nothing(self):
        f = gf.Flow('test')
        result = f.add()
        self.assertIs(f, result)
        self.assertEqual(len(f), 0)
        self.assertEqual(0, len(f))

    def test_graph_flow_one_task(self):
        f = gf.Flow('test')
@@ -53,45 +53,43 @@ class GraphFlowTest(test.TestCase):

        self.assertIs(f, result)

        self.assertEqual(len(f), 1)
        self.assertEqual(list(f), [task])
        self.assertEqual(list(f.iter_links()), [])
        self.assertEqual(f.requires, set(['a', 'b']))
        self.assertEqual(f.provides, set(['c', 'd']))
        self.assertEqual(1, len(f))
        self.assertEqual([task], list(f))
        self.assertEqual([], list(f.iter_links()))
        self.assertEqual(set(['a', 'b']), f.requires)
        self.assertEqual(set(['c', 'd']), f.provides)

    def test_graph_flow_two_independent_tasks(self):
        task1 = _task(name='task1')
        task2 = _task(name='task2')
        f = gf.Flow('test').add(task1, task2)

        self.assertEqual(len(f), 2)
        self.assertEqual(2, len(f))
        self.assertItemsEqual(f, [task1, task2])
        self.assertEqual(list(f.iter_links()), [])
        self.assertEqual([], list(f.iter_links()))

    def test_graph_flow_two_dependent_tasks(self):
        task1 = _task(name='task1', provides=['a'])
        task2 = _task(name='task2', requires=['a'])
        f = gf.Flow('test').add(task1, task2)

        self.assertEqual(len(f), 2)
        self.assertEqual(2, len(f))
        self.assertItemsEqual(f, [task1, task2])
        self.assertEqual(list(f.iter_links()), [
            (task1, task2, {'reasons': set(['a'])})
        ])
        self.assertEqual([(task1, task2, {'reasons': set(['a'])})],
                         list(f.iter_links()))

        self.assertEqual(f.requires, set())
        self.assertEqual(f.provides, set(['a']))
        self.assertEqual(set(), f.requires)
        self.assertEqual(set(['a']), f.provides)

    def test_graph_flow_two_dependent_tasks_two_different_calls(self):
        task1 = _task(name='task1', provides=['a'])
        task2 = _task(name='task2', requires=['a'])
        f = gf.Flow('test').add(task1).add(task2)

        self.assertEqual(len(f), 2)
        self.assertEqual(2, len(f))
        self.assertItemsEqual(f, [task1, task2])
        self.assertEqual(list(f.iter_links()), [
            (task1, task2, {'reasons': set(['a'])})
        ])
        self.assertEqual([(task1, task2, {'reasons': set(['a'])})],
                         list(f.iter_links()))

    def test_graph_flow_two_task_same_provide(self):
        task1 = _task(name='task1', provides=['a', 'b'])
@@ -136,10 +134,10 @@ class GraphFlowTest(test.TestCase):
        ret = retry.AlwaysRevert(requires=['a'], provides=['b'])
        f = gf.Flow('test', ret)
        self.assertIs(f.retry, ret)
        self.assertEqual(ret.name, 'test_retry')
        self.assertEqual('test_retry', ret.name)

        self.assertEqual(f.requires, set(['a']))
        self.assertEqual(f.provides, set(['b']))
        self.assertEqual(set(['a']), f.requires)
        self.assertEqual(set(['b']), f.provides)

    def test_graph_flow_ordering(self):
        task1 = _task('task1', provides=set(['a', 'b']))
@@ -212,6 +210,31 @@ class GraphFlowTest(test.TestCase):
        f = gf.Flow('test').add(task1, task2)
        self.assertRaises(exc.DependencyFailure, f.add, task3)

    def test_iter_nodes(self):
        task1 = _task('task1', provides=['a'], requires=['c'])
        task2 = _task('task2', provides=['b'], requires=['a'])
        task3 = _task('task3', provides=['c'])
        f1 = gf.Flow('nested')
        f1.add(task3)
        tasks = set([task1, task2, f1])
        f = gf.Flow('test').add(task1, task2, f1)
        for (n, data) in f.iter_nodes():
            self.assertTrue(n in tasks)
            self.assertDictEqual({}, data)

    def test_iter_links(self):
        task1 = _task('task1')
        task2 = _task('task2')
        task3 = _task('task3')
        f1 = gf.Flow('nested')
        f1.add(task3)
        tasks = set([task1, task2, f1])
        f = gf.Flow('test').add(task1, task2, f1)
        for (u, v, data) in f.iter_links():
            self.assertTrue(u in tasks)
            self.assertTrue(v in tasks)
            self.assertDictEqual({}, data)


class TargetedGraphFlowTest(test.TestCase):

@@ -223,7 +246,7 @@ class TargetedGraphFlowTest(test.TestCase):
        task4 = _task('task4', provides=[], requires=['b'])
        f.add(task1, task2, task3, task4)
        f.set_target(task3)
        self.assertEqual(len(f), 3)
        self.assertEqual(3, len(f))
        self.assertItemsEqual(f, [task1, task2, task3])
        self.assertNotIn('c', f.provides)

@@ -236,7 +259,7 @@ class TargetedGraphFlowTest(test.TestCase):
        f.add(task1, task2, task3, task4)
        f.set_target(task3)
        f.reset_target()
        self.assertEqual(len(f), 4)
        self.assertEqual(4, len(f))
        self.assertItemsEqual(f, [task1, task2, task3, task4])
        self.assertIn('c', f.provides)

@@ -253,7 +276,7 @@ class TargetedGraphFlowTest(test.TestCase):
        task1 = _task('task1', provides=['a'], requires=[])
        f.add(task1)
        f.set_target(task1)
        self.assertEqual(len(f), 1)
        self.assertEqual(1, len(f))
        self.assertItemsEqual(f, [task1])

    def test_recache_on_add(self):
@@ -286,6 +309,5 @@ class TargetedGraphFlowTest(test.TestCase):

        f.link(task2, task1)
        self.assertEqual(2, len(f))
        self.assertEqual(list(f.iter_links()), [
            (task2, task1, {'manual': True})
        ])
        self.assertEqual([(task2, task1, {'manual': True})],
                         list(f.iter_links()), )

@@ -29,21 +29,21 @@ class LinearFlowTest(test.TestCase):
    def test_linear_flow_starts_as_empty(self):
        f = lf.Flow('test')

        self.assertEqual(len(f), 0)
        self.assertEqual(list(f), [])
        self.assertEqual(list(f.iter_links()), [])
        self.assertEqual(0, len(f))
        self.assertEqual([], list(f))
        self.assertEqual([], list(f.iter_links()))

        self.assertEqual(f.requires, set())
        self.assertEqual(f.provides, set())
        self.assertEqual(set(), f.requires)
        self.assertEqual(set(), f.provides)

        expected = 'taskflow.patterns.linear_flow.Flow: test(len=0)'
        self.assertEqual(str(f), expected)
        self.assertEqual(expected, str(f))

    def test_linear_flow_add_nothing(self):
        f = lf.Flow('test')
        result = f.add()
        self.assertIs(f, result)
        self.assertEqual(len(f), 0)
        self.assertEqual(0, len(f))

    def test_linear_flow_one_task(self):
        f = lf.Flow('test')
@@ -52,47 +52,44 @@ class LinearFlowTest(test.TestCase):

        self.assertIs(f, result)

        self.assertEqual(len(f), 1)
        self.assertEqual(list(f), [task])
        self.assertEqual(list(f.iter_links()), [])
        self.assertEqual(f.requires, set(['a', 'b']))
        self.assertEqual(f.provides, set(['c', 'd']))
        self.assertEqual(1, len(f))
        self.assertEqual([task], list(f))
        self.assertEqual([], list(f.iter_links()))
        self.assertEqual(set(['a', 'b']), f.requires)
        self.assertEqual(set(['c', 'd']), f.provides)

    def test_linear_flow_two_independent_tasks(self):
        task1 = _task(name='task1')
        task2 = _task(name='task2')
        f = lf.Flow('test').add(task1, task2)

        self.assertEqual(len(f), 2)
        self.assertEqual(list(f), [task1, task2])
        self.assertEqual(list(f.iter_links()), [
            (task1, task2, {'invariant': True})
        ])
        self.assertEqual(2, len(f))
        self.assertEqual([task1, task2], list(f))
        self.assertEqual([(task1, task2, {'invariant': True})],
                         list(f.iter_links()))

    def test_linear_flow_two_dependent_tasks(self):
        task1 = _task(name='task1', provides=['a'])
        task2 = _task(name='task2', requires=['a'])
        f = lf.Flow('test').add(task1, task2)

        self.assertEqual(len(f), 2)
        self.assertEqual(list(f), [task1, task2])
        self.assertEqual(list(f.iter_links()), [
            (task1, task2, {'invariant': True})
        ])
        self.assertEqual(2, len(f))
        self.assertEqual([task1, task2], list(f))
        self.assertEqual([(task1, task2, {'invariant': True})],
                         list(f.iter_links()))

        self.assertEqual(f.requires, set())
        self.assertEqual(f.provides, set(['a']))
        self.assertEqual(set(), f.requires)
        self.assertEqual(set(['a']), f.provides)

    def test_linear_flow_two_dependent_tasks_two_different_calls(self):
        task1 = _task(name='task1', provides=['a'])
        task2 = _task(name='task2', requires=['a'])
        f = lf.Flow('test').add(task1).add(task2)

        self.assertEqual(len(f), 2)
        self.assertEqual(list(f), [task1, task2])
        self.assertEqual(list(f.iter_links()), [
            (task1, task2, {'invariant': True})
        ])
        self.assertEqual(2, len(f))
        self.assertEqual([task1, task2], list(f))
        self.assertEqual([(task1, task2, {'invariant': True})],
                         list(f.iter_links()), )

    def test_linear_flow_three_tasks(self):
        task1 = _task(name='task1')
@@ -100,21 +97,42 @@ class LinearFlowTest(test.TestCase):
        task3 = _task(name='task3')
        f = lf.Flow('test').add(task1, task2, task3)

        self.assertEqual(len(f), 3)
        self.assertEqual(list(f), [task1, task2, task3])
        self.assertEqual(list(f.iter_links()), [
        self.assertEqual(3, len(f))
        self.assertEqual([task1, task2, task3], list(f))
        self.assertEqual([
            (task1, task2, {'invariant': True}),
            (task2, task3, {'invariant': True})
        ])
        ], list(f.iter_links()))

        expected = 'taskflow.patterns.linear_flow.Flow: test(len=3)'
        self.assertEqual(str(f), expected)
        self.assertEqual(expected, str(f))

    def test_linear_flow_with_retry(self):
        ret = retry.AlwaysRevert(requires=['a'], provides=['b'])
        f = lf.Flow('test', ret)
        self.assertIs(f.retry, ret)
        self.assertEqual(ret.name, 'test_retry')
        self.assertEqual('test_retry', ret.name)

        self.assertEqual(f.requires, set(['a']))
        self.assertEqual(f.provides, set(['b']))
        self.assertEqual(set(['a']), f.requires)
        self.assertEqual(set(['b']), f.provides)

    def test_iter_nodes(self):
        task1 = _task(name='task1')
        task2 = _task(name='task2')
        task3 = _task(name='task3')
        f = lf.Flow('test').add(task1, task2, task3)
        tasks = set([task1, task2, task3])
        for (node, data) in f.iter_nodes():
            self.assertTrue(node in tasks)
            self.assertDictEqual({}, data)

    def test_iter_links(self):
        task1 = _task(name='task1')
        task2 = _task(name='task2')
        task3 = _task(name='task3')
        f = lf.Flow('test').add(task1, task2, task3)
        tasks = set([task1, task2, task3])
        for (u, v, data) in f.iter_links():
            self.assertTrue(u in tasks)
            self.assertTrue(v in tasks)
            self.assertDictEqual({'invariant': True}, data)

@@ -29,21 +29,21 @@ class UnorderedFlowTest(test.TestCase):
    def test_unordered_flow_starts_as_empty(self):
        f = uf.Flow('test')

        self.assertEqual(len(f), 0)
        self.assertEqual(list(f), [])
        self.assertEqual(list(f.iter_links()), [])
        self.assertEqual(0, len(f))
        self.assertEqual([], list(f))
        self.assertEqual([], list(f.iter_links()))

        self.assertEqual(f.requires, set())
        self.assertEqual(f.provides, set())
        self.assertEqual(set(), f.requires)
        self.assertEqual(set(), f.provides)

        expected = 'taskflow.patterns.unordered_flow.Flow: test(len=0)'
        self.assertEqual(str(f), expected)
        self.assertEqual(expected, str(f))

    def test_unordered_flow_add_nothing(self):
        f = uf.Flow('test')
        result = f.add()
        self.assertIs(f, result)
        self.assertEqual(len(f), 0)
        self.assertEqual(0, len(f))

    def test_unordered_flow_one_task(self):
        f = uf.Flow('test')
@@ -52,27 +52,27 @@ class UnorderedFlowTest(test.TestCase):

        self.assertIs(f, result)

        self.assertEqual(len(f), 1)
        self.assertEqual(list(f), [task])
        self.assertEqual(list(f.iter_links()), [])
        self.assertEqual(f.requires, set(['a', 'b']))
        self.assertEqual(f.provides, set(['c', 'd']))
        self.assertEqual(1, len(f))
        self.assertEqual([task], list(f))
        self.assertEqual([], list(f.iter_links()))
        self.assertEqual(set(['a', 'b']), f.requires)
        self.assertEqual(set(['c', 'd']), f.provides)

    def test_unordered_flow_two_tasks(self):
        task1 = _task(name='task1')
        task2 = _task(name='task2')
        f = uf.Flow('test').add(task1, task2)

        self.assertEqual(len(f), 2)
        self.assertEqual(set(f), set([task1, task2]))
        self.assertEqual(list(f.iter_links()), [])
        self.assertEqual(2, len(f))
        self.assertEqual(set([task1, task2]), set(f))
        self.assertEqual([], list(f.iter_links()))

    def test_unordered_flow_two_tasks_two_different_calls(self):
        task1 = _task(name='task1', provides=['a'])
        task2 = _task(name='task2', requires=['a'])
        f = uf.Flow('test').add(task1)
        f.add(task2)
        self.assertEqual(len(f), 2)
        self.assertEqual(2, len(f))
        self.assertEqual(set(['a']), f.requires)
        self.assertEqual(set(['a']), f.provides)

@@ -80,7 +80,7 @@ class UnorderedFlowTest(test.TestCase):
        task1 = _task(name='task1', provides=['a'])
        task2 = _task(name='task2', requires=['a'])
        f = uf.Flow('test').add(task2).add(task1)
        self.assertEqual(len(f), 2)
        self.assertEqual(2, len(f))
        self.assertEqual(set(['a']), f.requires)
        self.assertEqual(set(['a']), f.provides)

@@ -89,22 +89,40 @@ class UnorderedFlowTest(test.TestCase):
        task2 = _task(name='task2', provides=['a', 'c'])
        f = uf.Flow('test')
        f.add(task2, task1)
        self.assertEqual(len(f), 2)
        self.assertEqual(2, len(f))

    def test_unordered_flow_with_retry(self):
        ret = retry.AlwaysRevert(requires=['a'], provides=['b'])
        f = uf.Flow('test', ret)
        self.assertIs(f.retry, ret)
        self.assertEqual(ret.name, 'test_retry')
        self.assertEqual('test_retry', ret.name)

        self.assertEqual(f.requires, set(['a']))
        self.assertEqual(f.provides, set(['b']))
        self.assertEqual(set(['a']), f.requires)
        self.assertEqual(set(['b']), f.provides)

    def test_unordered_flow_with_retry_fully_satisfies(self):
        ret = retry.AlwaysRevert(provides=['b', 'a'])
        f = uf.Flow('test', ret)
        f.add(_task(name='task1', requires=['a']))
        self.assertIs(f.retry, ret)
        self.assertEqual(ret.name, 'test_retry')
        self.assertEqual(f.requires, set([]))
        self.assertEqual(f.provides, set(['b', 'a']))
        self.assertEqual('test_retry', ret.name)
        self.assertEqual(set([]), f.requires)
        self.assertEqual(set(['b', 'a']), f.provides)

    def test_iter_nodes(self):
        task1 = _task(name='task1', provides=['a', 'b'])
        task2 = _task(name='task2', provides=['a', 'c'])
        tasks = set([task1, task2])
        f = uf.Flow('test')
        f.add(task2, task1)
        for (node, data) in f.iter_nodes():
            self.assertTrue(node in tasks)
            self.assertDictEqual({}, data)

    def test_iter_links(self):
        task1 = _task(name='task1', provides=['a', 'b'])
        task2 = _task(name='task2', provides=['a', 'c'])
        f = uf.Flow('test')
        f.add(task2, task1)
        for (u, v, data) in f.iter_links():
            raise AssertionError('links iterator should be empty')

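Outside the test harness, the ``iter_nodes``/``iter_links`` behavior exercised above looks roughly like this; a minimal sketch using the public pattern API (the flow and task names are hypothetical):

    from taskflow.patterns import linear_flow as lf
    from taskflow import task


    class Noop(task.Task):
        def execute(self):
            pass


    flo = lf.Flow('demo').add(Noop('t1'), Noop('t2'))
    # Nodes are yielded with their (empty, by default) metadata dicts...
    for node, meta in flo.iter_nodes():
        print(node.name, meta)
    # ...and each link carries compiler metadata ('invariant' for linear flows).
    for u, v, meta in flo.iter_links():
        print(u.name, '->', v.name, meta)
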
@@ -69,6 +69,27 @@ class PersistenceTestMixin(object):
        self.assertIsNotNone(lb2.find(fd.uuid))
        self.assertIsNotNone(lb2.find(fd2.uuid))

    def test_logbook_save_retrieve_many(self):
        lb_ids = {}
        for i in range(0, 10):
            lb_id = uuidutils.generate_uuid()
            lb_name = 'lb-%s-%s' % (i, lb_id)
            lb = models.LogBook(name=lb_name, uuid=lb_id)
            lb_ids[lb_id] = True

            # Should not already exist
            with contextlib.closing(self._get_connection()) as conn:
                self.assertRaises(exc.NotFound, conn.get_logbook, lb_id)
                conn.save_logbook(lb)

        # Now fetch them all
        with contextlib.closing(self._get_connection()) as conn:
            lbs = conn.get_logbooks()
            for lb in lbs:
                self.assertIn(lb.uuid, lb_ids)
                lb_ids.pop(lb.uuid)
            self.assertEqual(0, len(lb_ids))

    def test_logbook_save_retrieve(self):
        lb_id = uuidutils.generate_uuid()
        lb_meta = {'1': 2}
@@ -128,7 +149,7 @@ class PersistenceTestMixin(object):
        with contextlib.closing(self._get_connection()) as conn:
            lb2 = conn.get_logbook(lb_id)
        fd2 = lb2.find(fd.uuid)
        self.assertEqual(fd2.meta.get('test'), 43)
        self.assertEqual(43, fd2.meta.get('test'))

    def test_flow_detail_lazy_fetch(self):
        lb_id = uuidutils.generate_uuid()
@@ -191,7 +212,7 @@ class PersistenceTestMixin(object):
            lb2 = conn.get_logbook(lb_id)
        fd2 = lb2.find(fd.uuid)
        td2 = fd2.find(td.uuid)
        self.assertEqual(td2.meta.get('test'), 43)
        self.assertEqual(43, td2.meta.get('test'))
        self.assertIsInstance(td2, models.TaskDetail)

    def test_task_detail_with_failure(self):
@@ -219,9 +240,9 @@ class PersistenceTestMixin(object):
            lb2 = conn.get_logbook(lb_id)
        fd2 = lb2.find(fd.uuid)
        td2 = fd2.find(td.uuid)
        self.assertEqual(td2.failure.exception_str, 'Woot!')
        self.assertEqual('Woot!', td2.failure.exception_str)
        self.assertIs(td2.failure.check(RuntimeError), RuntimeError)
        self.assertEqual(td2.failure.traceback_str, td.failure.traceback_str)
        self.assertEqual(td.failure.traceback_str, td2.failure.traceback_str)
        self.assertIsInstance(td2, models.TaskDetail)

    def test_logbook_merge_flow_detail(self):
@@ -291,9 +312,9 @@ class PersistenceTestMixin(object):
        fd2 = lb2.find(fd.uuid)
        td2 = fd2.find(td.uuid)
        self.assertIsNot(td2, None)
        self.assertEqual(td2.name, 'detail-1')
        self.assertEqual(td2.version, '4.2')
        self.assertEqual(td2.intention, states.EXECUTE)
        self.assertEqual('detail-1', td2.name)
        self.assertEqual('4.2', td2.version)
        self.assertEqual(states.EXECUTE, td2.intention)

    def test_logbook_delete(self):
        lb_id = uuidutils.generate_uuid()
@@ -329,7 +350,7 @@ class PersistenceTestMixin(object):
        lb2 = conn.get_logbook(lb_id)
        fd2 = lb2.find(fd.uuid)
        rd2 = fd2.find(rd.uuid)
        self.assertEqual(rd2.intention, states.REVERT)
        self.assertEqual(states.REVERT, rd2.intention)
        self.assertIsInstance(rd2, models.RetryDetail)

    def test_retry_detail_save_with_task_failure(self):
@@ -384,5 +405,5 @@ class PersistenceTestMixin(object):
        lb2 = conn.get_logbook(lb_id)
        fd2 = lb2.find(fd.uuid)
        rd2 = fd2.find(rd.uuid)
        self.assertEqual(rd2.intention, states.REVERT)
        self.assertEqual(states.REVERT, rd2.intention)
        self.assertIsInstance(rd2, models.RetryDetail)

@@ -30,24 +30,24 @@ class ArgumentsPassingTest(utils.EngineTestBase):
flow = utils.TaskOneReturn(name='task1', provides='first_data')
engine = self._make_engine(flow)
engine.run()
self.assertEqual(engine.storage.fetch_all(), {'first_data': 1})
self.assertEqual({'first_data': 1}, engine.storage.fetch_all())

def test_save_all_in_one(self):
flow = utils.TaskMultiReturn(provides='all_data')
engine = self._make_engine(flow)
engine.run()
self.assertEqual(engine.storage.fetch_all(),
{'all_data': (1, 3, 5)})
self.assertEqual({'all_data': (1, 3, 5)},
engine.storage.fetch_all())

def test_save_several_values(self):
flow = utils.TaskMultiReturn(provides=('badger', 'mushroom', 'snake'))
engine = self._make_engine(flow)
engine.run()
self.assertEqual(engine.storage.fetch_all(), {
self.assertEqual({
'badger': 1,
'mushroom': 3,
'snake': 5
})
}, engine.storage.fetch_all())

def test_save_dict(self):
flow = utils.TaskMultiDict(provides=set(['badger',
@@ -55,11 +55,11 @@ class ArgumentsPassingTest(utils.EngineTestBase):
'snake']))
engine = self._make_engine(flow)
engine.run()
self.assertEqual(engine.storage.fetch_all(), {
self.assertEqual({
'badger': 0,
'mushroom': 1,
'snake': 2,
})
}, engine.storage.fetch_all())

def test_bad_save_as_value(self):
self.assertRaises(TypeError,
@@ -71,10 +71,10 @@ class ArgumentsPassingTest(utils.EngineTestBase):
engine = self._make_engine(flow)
engine.storage.inject({'x': 1, 'y': 4, 'z': 9, 'a': 17})
engine.run()
self.assertEqual(engine.storage.fetch_all(), {
self.assertEqual({
'x': 1, 'y': 4, 'z': 9, 'a': 17,
'result': 14,
})
}, engine.storage.fetch_all())

def test_arguments_missing(self):
flow = utils.TaskMultiArg()
@@ -88,19 +88,19 @@ class ArgumentsPassingTest(utils.EngineTestBase):
engine = self._make_engine(flow)
engine.storage.inject({'x': 1, 'y': 4, 'z': 9, 'a': 17})
engine.run()
self.assertEqual(engine.storage.fetch_all(), {
self.assertEqual({
'x': 1, 'y': 4, 'z': 9, 'a': 17,
'result': 30,
})
}, engine.storage.fetch_all())

def test_argument_injection(self):
flow = utils.TaskMultiArgOneReturn(provides='result',
inject={'x': 1, 'y': 4, 'z': 9})
engine = self._make_engine(flow)
engine.run()
self.assertEqual(engine.storage.fetch_all(), {
self.assertEqual({
'result': 14,
})
}, engine.storage.fetch_all())

def test_argument_injection_rebind(self):
flow = utils.TaskMultiArgOneReturn(provides='result',
@@ -108,9 +108,9 @@ class ArgumentsPassingTest(utils.EngineTestBase):
inject={'a': 1, 'b': 4, 'c': 9})
engine = self._make_engine(flow)
engine.run()
self.assertEqual(engine.storage.fetch_all(), {
self.assertEqual({
'result': 14,
})
}, engine.storage.fetch_all())

def test_argument_injection_required(self):
flow = utils.TaskMultiArgOneReturn(provides='result',
@@ -119,9 +119,9 @@ class ArgumentsPassingTest(utils.EngineTestBase):
'a': 0, 'b': 0, 'c': 0})
engine = self._make_engine(flow)
engine.run()
self.assertEqual(engine.storage.fetch_all(), {
self.assertEqual({
'result': 14,
})
}, engine.storage.fetch_all())

def test_all_arguments_mapping(self):
flow = utils.TaskMultiArgOneReturn(provides='result',
@@ -131,10 +131,10 @@ class ArgumentsPassingTest(utils.EngineTestBase):
'a': 1, 'b': 2, 'c': 3, 'x': 4, 'y': 5, 'z': 6
})
engine.run()
self.assertEqual(engine.storage.fetch_all(), {
self.assertEqual({
'a': 1, 'b': 2, 'c': 3, 'x': 4, 'y': 5, 'z': 6,
'result': 6,
})
}, engine.storage.fetch_all())

def test_invalid_argument_name_map(self):
flow = utils.TaskMultiArg(rebind={'z': 'b'})
@@ -159,9 +159,9 @@ class ArgumentsPassingTest(utils.EngineTestBase):
engine = self._make_engine(flow)
engine.storage.inject({'long_arg_name': 1})
engine.run()
self.assertEqual(engine.storage.fetch_all(), {
self.assertEqual({
'long_arg_name': 1, 'result': 1
})
}, engine.storage.fetch_all())


class SerialEngineTest(ArgumentsPassingTest, test.TestCase):

@@ -18,6 +18,8 @@ import collections
import contextlib
import threading

import futurist
import testscenarios
from zake import fake_client

from taskflow.conductors import backends
@@ -51,23 +53,39 @@ def test_factory(blowup):
return f


def single_factory():
return futurist.ThreadPoolExecutor(max_workers=1)


ComponentBundle = collections.namedtuple('ComponentBundle',
['board', 'client',
'persistence', 'conductor'])


class BlockingConductorTest(test_utils.EngineTestBase, test.TestCase):
KIND = 'blocking'
class ManyConductorTest(testscenarios.TestWithScenarios,
test_utils.EngineTestBase, test.TestCase):
scenarios = [
('blocking', {'kind': 'blocking',
'conductor_kwargs': {'wait_timeout': 0.1}}),
('nonblocking_many_thread',
{'kind': 'nonblocking', 'conductor_kwargs': {'wait_timeout': 0.1}}),
('nonblocking_one_thread', {'kind': 'nonblocking',
'conductor_kwargs': {
'executor_factory': single_factory,
'wait_timeout': 0.1,
}})
]

def make_components(self, name='testing', wait_timeout=0.1):
def make_components(self):
client = fake_client.FakeClient()
persistence = impl_memory.MemoryBackend()
board = impl_zookeeper.ZookeeperJobBoard(name, {},
board = impl_zookeeper.ZookeeperJobBoard('testing', {},
client=client,
persistence=persistence)
conductor = backends.fetch(self.KIND, name, board,
persistence=persistence,
wait_timeout=wait_timeout)
conductor_kwargs = self.conductor_kwargs.copy()
conductor_kwargs['persistence'] = persistence
conductor = backends.fetch(self.kind, 'testing', board,
**conductor_kwargs)
return ComponentBundle(board, client, persistence, conductor)

def test_connection(self):
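Reviewer note: the conductor tests above switch to testscenarios, which runs one test body once per entry in the scenarios list and injects each scenario dict's keys as instance attributes (hence self.kind and self.conductor_kwargs in make_components). A minimal sketch of that mechanism, assuming only the testscenarios package (class and attribute names are illustrative):

    import testscenarios


    class ScenarioDemo(testscenarios.TestWithScenarios):
        # Each named scenario re-runs every test method with the dict's
        # entries bound as attributes on the test instance.
        scenarios = [
            ('blocking', {'kind': 'blocking'}),
            ('nonblocking', {'kind': 'nonblocking'}),
        ]

        def test_kind_is_injected(self):
            self.assertIn(self.kind, ('blocking', 'nonblocking'))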
@@ -95,11 +113,25 @@ class BlockingConductorTest(test_utils.EngineTestBase, test.TestCase):
components = self.make_components()
components.conductor.connect()
consumed_event = threading.Event()
job_consumed_event = threading.Event()
job_abandoned_event = threading.Event()

def on_consume(state, details):
consumed_event.set()

def on_job_consumed(event, details):
if event == 'job_consumed':
job_consumed_event.set()

def on_job_abandoned(event, details):
if event == 'job_abandoned':
job_abandoned_event.set()

components.board.notifier.register(base.REMOVAL, on_consume)
components.conductor.notifier.register("job_consumed",
on_job_consumed)
components.conductor.notifier.register("job_abandoned",
on_job_abandoned)
with close_many(components.conductor, components.client):
t = threading_utils.daemon_thread(components.conductor.run)
t.start()
@@ -110,6 +142,8 @@ class BlockingConductorTest(test_utils.EngineTestBase, test.TestCase):
components.board.post('poke', lb,
details={'flow_uuid': fd.uuid})
self.assertTrue(consumed_event.wait(test_utils.WAIT_TIMEOUT))
self.assertTrue(job_consumed_event.wait(test_utils.WAIT_TIMEOUT))
self.assertFalse(job_abandoned_event.wait(1))
components.conductor.stop()
self.assertTrue(components.conductor.wait(test_utils.WAIT_TIMEOUT))
self.assertFalse(components.conductor.dispatching)
@@ -121,7 +155,7 @@ class BlockingConductorTest(test_utils.EngineTestBase, test.TestCase):
self.assertIsNotNone(fd)
self.assertEqual(st.SUCCESS, fd.state)

def test_fail_run(self):
def test_run_max_dispatches(self):
components = self.make_components()
components.conductor.connect()
consumed_event = threading.Event()
@@ -130,6 +164,48 @@ class BlockingConductorTest(test_utils.EngineTestBase, test.TestCase):
consumed_event.set()

components.board.notifier.register(base.REMOVAL, on_consume)
with close_many(components.client, components.conductor):
t = threading_utils.daemon_thread(
lambda: components.conductor.run(max_dispatches=5))
t.start()
lb, fd = pu.temporary_flow_detail(components.persistence)
engines.save_factory_details(fd, test_factory,
[False], {},
backend=components.persistence)
for _ in range(5):
components.board.post('poke', lb,
details={'flow_uuid': fd.uuid})
self.assertTrue(consumed_event.wait(
test_utils.WAIT_TIMEOUT))
components.board.post('poke', lb,
details={'flow_uuid': fd.uuid})
components.conductor.stop()
self.assertTrue(components.conductor.wait(test_utils.WAIT_TIMEOUT))
self.assertFalse(components.conductor.dispatching)

def test_fail_run(self):
components = self.make_components()
components.conductor.connect()
consumed_event = threading.Event()
job_consumed_event = threading.Event()
job_abandoned_event = threading.Event()

def on_consume(state, details):
consumed_event.set()

def on_job_consumed(event, details):
if event == 'job_consumed':
job_consumed_event.set()

def on_job_abandoned(event, details):
if event == 'job_abandoned':
job_abandoned_event.set()

components.board.notifier.register(base.REMOVAL, on_consume)
components.conductor.notifier.register("job_consumed",
on_job_consumed)
components.conductor.notifier.register("job_abandoned",
on_job_abandoned)
with close_many(components.conductor, components.client):
t = threading_utils.daemon_thread(components.conductor.run)
t.start()
@@ -140,6 +216,8 @@ class BlockingConductorTest(test_utils.EngineTestBase, test.TestCase):
components.board.post('poke', lb,
details={'flow_uuid': fd.uuid})
self.assertTrue(consumed_event.wait(test_utils.WAIT_TIMEOUT))
self.assertTrue(job_consumed_event.wait(test_utils.WAIT_TIMEOUT))
self.assertFalse(job_abandoned_event.wait(1))
components.conductor.stop()
self.assertTrue(components.conductor.wait(test_utils.WAIT_TIMEOUT))
self.assertFalse(components.conductor.dispatching)
@@ -150,3 +228,29 @@ class BlockingConductorTest(test_utils.EngineTestBase, test.TestCase):
fd = lb.find(fd.uuid)
self.assertIsNotNone(fd)
self.assertEqual(st.REVERTED, fd.state)


class NonBlockingExecutorTest(test.TestCase):
def test_bad_wait_timeout(self):
persistence = impl_memory.MemoryBackend()
client = fake_client.FakeClient()
board = impl_zookeeper.ZookeeperJobBoard('testing', {},
client=client,
persistence=persistence)
self.assertRaises(ValueError,
backends.fetch,
'nonblocking', 'testing', board,
persistence=persistence,
wait_timeout='testing')

def test_bad_factory(self):
persistence = impl_memory.MemoryBackend()
client = fake_client.FakeClient()
board = impl_zookeeper.ZookeeperJobBoard('testing', {},
client=client,
persistence=persistence)
self.assertRaises(ValueError,
backends.fetch,
'nonblocking', 'testing', board,
persistence=persistence,
executor_factory='testing')
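Reviewer note: the two tests above show that the nonblocking conductor rejects a non-numeric wait_timeout and a non-callable executor_factory with ValueError. A hedged sketch of valid usage, with the jobboard and persistence wiring copied from the test fixtures (import paths are assumed from the taskflow tree of this release):

    import futurist
    from zake import fake_client

    from taskflow.conductors import backends
    from taskflow.jobs.backends import impl_zookeeper
    from taskflow.persistence.backends import impl_memory


    def executor_factory():
        # Called by the conductor to obtain the executor used for
        # dispatching jobs; must be a callable, not a string.
        return futurist.ThreadPoolExecutor(max_workers=4)


    client = fake_client.FakeClient()
    persistence = impl_memory.MemoryBackend()
    board = impl_zookeeper.ZookeeperJobBoard('testing', {},
                                             client=client,
                                             persistence=persistence)
    conductor = backends.fetch('nonblocking', 'testing', board,
                               persistence=persistence,
                               executor_factory=executor_factory,
                               wait_timeout=0.1)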
@@ -85,7 +85,7 @@ class FlowFromDetailTestCase(test.TestCase):
return_value=lambda: 'RESULT') as mock_import:
result = taskflow.engines.flow_from_detail(flow_detail)
mock_import.assert_called_once_with(name)
self.assertEqual(result, 'RESULT')
self.assertEqual('RESULT', result)

def test_factory_with_arg(self):
name = 'some.test.factory'
@@ -96,7 +96,7 @@
return_value=lambda x: 'RESULT %s' % x) as mock_import:
result = taskflow.engines.flow_from_detail(flow_detail)
mock_import.assert_called_once_with(name)
self.assertEqual(result, 'RESULT foo')
self.assertEqual('RESULT foo', result)


def my_flow_factory(task_name):
@@ -121,12 +121,12 @@ class LoadFromFactoryTestCase(test.TestCase):
self.assertIsInstance(engine._flow, test_utils.DummyTask)

fd = engine.storage._flowdetail
self.assertEqual(fd.name, 'test1')
self.assertEqual(fd.meta.get('factory'), {
self.assertEqual('test1', fd.name)
self.assertEqual({
'name': '%s.my_flow_factory' % __name__,
'args': [],
'kwargs': {'task_name': 'test1'},
})
}, fd.meta.get('factory'))

def test_it_works_by_name(self):
factory_name = '%s.my_flow_factory' % __name__
@@ -135,9 +135,9 @@
self.assertIsInstance(engine._flow, test_utils.DummyTask)

fd = engine.storage._flowdetail
self.assertEqual(fd.name, 'test1')
self.assertEqual(fd.meta.get('factory'), {
self.assertEqual('test1', fd.name)
self.assertEqual({
'name': factory_name,
'args': [],
'kwargs': {'task_name': 'test1'},
})
}, fd.meta.get('factory'))

@@ -42,6 +42,13 @@ from taskflow.utils import persistence_utils as p_utils
from taskflow.utils import threading_utils as tu


# Expected engine transitions when empty workflows are run...
_EMPTY_TRANSITIONS = [
states.RESUMING, states.SCHEDULING, states.WAITING,
states.ANALYZING, states.SUCCESS,
]


class EngineTaskTest(object):

def test_run_task_as_flow(self):
@@ -72,14 +79,14 @@ class EngineTaskTest(object):
with utils.CaptureListener(engine, values=values) as capturer:
self.assertFailuresRegexp(RuntimeError, '^Woot', engine.run)
self.assertEqual(expected, capturer.values)
self.assertEqual(engine.storage.get_flow_state(), states.REVERTED)
self.assertEqual(states.REVERTED, engine.storage.get_flow_state())
with utils.CaptureListener(engine, values=values) as capturer:
self.assertFailuresRegexp(RuntimeError, '^Woot', engine.run)
now_expected = list(expected)
now_expected.extend(['fail.t PENDING', 'fail.f PENDING'])
now_expected.extend(expected)
self.assertEqual(now_expected, values)
self.assertEqual(engine.storage.get_flow_state(), states.REVERTED)
self.assertEqual(states.REVERTED, engine.storage.get_flow_state())

def test_invalid_flow_raises(self):

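Reviewer note: with this change an empty flow no longer raises exc.Empty; the engine instead walks the fixed state sequence captured in _EMPTY_TRANSITIONS above. A minimal sketch of observing those transitions (flow name is illustrative):

    from taskflow import engines
    from taskflow.patterns import linear_flow as lf

    flow = lf.Flow('empty-flow')
    engine = engines.load(flow)
    # run_iter() yields each engine state as it is entered; for an empty
    # flow this should match _EMPTY_TRANSITIONS (RESUMING, SCHEDULING,
    # WAITING, ANALYZING, SUCCESS).
    print(list(engine.run_iter()))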
@@ -123,33 +130,33 @@ class EngineOptionalRequirementsTest(utils.EngineTestBase):
engine = self._make_engine(flow_no_inject, store={'a': 3})
engine.run()
result = engine.storage.fetch_all()
self.assertEqual(result, {'a': 3, 'result': 15})
self.assertEqual({'a': 3, 'result': 15}, result)

engine = self._make_engine(flow_no_inject,
store={'a': 3, 'b': 7})
engine.run()
result = engine.storage.fetch_all()
self.assertEqual(result, {'a': 3, 'b': 7, 'result': 21})
self.assertEqual({'a': 3, 'b': 7, 'result': 21}, result)

engine = self._make_engine(flow_inject_a, store={'a': 3})
engine.run()
result = engine.storage.fetch_all()
self.assertEqual(result, {'a': 3, 'result': 50})
self.assertEqual({'a': 3, 'result': 50}, result)

engine = self._make_engine(flow_inject_a, store={'a': 3, 'b': 7})
engine.run()
result = engine.storage.fetch_all()
self.assertEqual(result, {'a': 3, 'b': 7, 'result': 70})
self.assertEqual({'a': 3, 'b': 7, 'result': 70}, result)

engine = self._make_engine(flow_inject_b, store={'a': 3})
engine.run()
result = engine.storage.fetch_all()
self.assertEqual(result, {'a': 3, 'result': 3000})
self.assertEqual({'a': 3, 'result': 3000}, result)

engine = self._make_engine(flow_inject_b, store={'a': 3, 'b': 7})
engine.run()
result = engine.storage.fetch_all()
self.assertEqual(result, {'a': 3, 'b': 7, 'result': 3000})
self.assertEqual({'a': 3, 'b': 7, 'result': 3000}, result)


class EngineMultipleResultsTest(utils.EngineTestBase):
@@ -160,7 +167,7 @@ class EngineMultipleResultsTest(utils.EngineTestBase):
engine = self._make_engine(flow)
engine.run()
result = engine.storage.fetch('x')
self.assertEqual(result, 1)
self.assertEqual(1, result)

def test_many_results_visible_to(self):
flow = lf.Flow("flow")
@@ -223,7 +230,7 @@
engine = self._make_engine(flow, store={'x': 0})
engine.run()
result = engine.storage.fetch('x')
self.assertEqual(result, 0)
self.assertEqual(0, result)

def test_fetch_all_with_a_single_result(self):
flow = lf.Flow("flow")
@@ -232,7 +239,7 @@
engine = self._make_engine(flow)
engine.run()
result = engine.storage.fetch_all()
self.assertEqual(result, {'x': 1})
self.assertEqual({'x': 1}, result)

def test_fetch_all_with_two_results(self):
flow = lf.Flow("flow")
@@ -241,7 +248,7 @@
engine = self._make_engine(flow, store={'x': 0})
engine.run()
result = engine.storage.fetch_all()
self.assertEqual(result, {'x': [0, 1]})
self.assertEqual({'x': [0, 1]}, result)

def test_task_can_update_value(self):
flow = lf.Flow("flow")
@@ -250,15 +257,15 @@
engine = self._make_engine(flow, store={'x': 0})
engine.run()
result = engine.storage.fetch_all()
self.assertEqual(result, {'x': [0, 1]})
self.assertEqual({'x': [0, 1]}, result)


class EngineLinearFlowTest(utils.EngineTestBase):

def test_run_empty_flow(self):
def test_run_empty_linear_flow(self):
flow = lf.Flow('flow-1')
engine = self._make_engine(flow)
self.assertRaises(exc.Empty, engine.run)
self.assertEqual(_EMPTY_TRANSITIONS, list(engine.run_iter()))

def test_overlap_parent_sibling_expected_result(self):
flow = lf.Flow('flow-1')
@@ -315,7 +322,7 @@ class EngineLinearFlowTest(utils.EngineTestBase):
expected = ['task1.t RUNNING', 'task1.t SUCCESS(5)',
'task2.t RUNNING', 'task2.t SUCCESS(5)']
self.assertEqual(expected, capturer.values)
self.assertEqual(len(flow), 2)
self.assertEqual(2, len(flow))

def test_sequential_flow_two_tasks_iter(self):
flow = lf.Flow('flow-2').add(
@@ -329,7 +336,7 @@
expected = ['task1.t RUNNING', 'task1.t SUCCESS(5)',
'task2.t RUNNING', 'task2.t SUCCESS(5)']
self.assertEqual(expected, capturer.values)
self.assertEqual(len(flow), 2)
self.assertEqual(2, len(flow))

def test_sequential_flow_iter_suspend_resume(self):
flow = lf.Flow('flow-2').add(
@@ -373,7 +380,7 @@
)
engine = self._make_engine(flow)
self.assertFailuresRegexp(RuntimeError, '^Woot', engine.run)
self.assertEqual(engine.storage.fetch_all(), {})
self.assertEqual({}, engine.storage.fetch_all())

def test_revert_provided(self):
flow = lf.Flow('revert').add(
@@ -382,7 +389,7 @@
)
engine = self._make_engine(flow, store={'value': 0})
self.assertFailuresRegexp(RuntimeError, '^Woot', engine.run)
self.assertEqual(engine.storage.get_revert_result('giver'), 2)
self.assertEqual(2, engine.storage.get_revert_result('giver'))

def test_nasty_revert(self):
flow = lf.Flow('revert').add(
@@ -456,10 +463,36 @@

class EngineParallelFlowTest(utils.EngineTestBase):

def test_run_empty_flow(self):
def test_run_empty_unordered_flow(self):
flow = uf.Flow('p-1')
engine = self._make_engine(flow)
self.assertRaises(exc.Empty, engine.run)
self.assertEqual(_EMPTY_TRANSITIONS, list(engine.run_iter()))

def test_parallel_flow_with_priority(self):
flow = uf.Flow('p-1')
for i in range(0, 10):
t = utils.ProgressingTask(name='task%s' % i)
t.priority = i
flow.add(t)
engine = self._make_engine(flow)
with utils.CaptureListener(engine, capture_flow=False) as capturer:
engine.run()
expected = [
'task9.t RUNNING',
'task8.t RUNNING',
'task7.t RUNNING',
'task6.t RUNNING',
'task5.t RUNNING',
'task4.t RUNNING',
'task3.t RUNNING',
'task2.t RUNNING',
'task1.t RUNNING',
'task0.t RUNNING',
]
# NOTE(harlowja): chop off the gathering of SUCCESS states, since we
# don't care if that's in order...
gotten = capturer.values[0:10]
self.assertEqual(expected, gotten)

def test_parallel_flow_one_task(self):
flow = uf.Flow('p-1').add(
@@ -470,7 +503,7 @@ class EngineParallelFlowTest(utils.EngineTestBase):
engine.run()
expected = ['task1.t RUNNING', 'task1.t SUCCESS(5)']
self.assertEqual(expected, capturer.values)
self.assertEqual(engine.storage.fetch_all(), {'a': 5})
self.assertEqual({'a': 5}, engine.storage.fetch_all())

def test_parallel_flow_two_tasks(self):
flow = uf.Flow('p-2').add(
@@ -533,8 +566,8 @@
engine.run()
expected = ['task2.t RUNNING', 'task2.t SUCCESS(5)']
self.assertEqual(expected, capturer.values)
self.assertEqual(engine.storage.fetch_all(),
{'x1': 17, 'x2': 5})
self.assertEqual({'x1': 17, 'x2': 5},
engine.storage.fetch_all())


class EngineLinearAndUnorderedExceptionsTest(utils.EngineTestBase):
@@ -638,16 +671,16 @@ class EngineLinearAndUnorderedExceptionsTest(utils.EngineTestBase):

class EngineGraphFlowTest(utils.EngineTestBase):

def test_run_empty_flow(self):
def test_run_empty_graph_flow(self):
flow = gf.Flow('g-1')
engine = self._make_engine(flow)
self.assertRaises(exc.Empty, engine.run)
self.assertEqual(_EMPTY_TRANSITIONS, list(engine.run_iter()))

def test_run_nested_empty_flows(self):
def test_run_empty_nested_graph_flows(self):
flow = gf.Flow('g-1').add(lf.Flow('l-1'),
gf.Flow('g-2'))
engine = self._make_engine(flow)
self.assertRaises(exc.Empty, engine.run)
self.assertEqual(_EMPTY_TRANSITIONS, list(engine.run_iter()))

def test_graph_flow_one_task(self):
flow = gf.Flow('g-1').add(
@@ -670,7 +703,7 @@ class EngineGraphFlowTest(utils.EngineTestBase):
expected = set(['task2.t SUCCESS(5)', 'task2.t RUNNING',
'task1.t RUNNING', 'task1.t SUCCESS(5)'])
self.assertEqual(expected, set(capturer.values))
self.assertEqual(len(flow), 2)
self.assertEqual(2, len(flow))

def test_graph_flow_two_tasks(self):
flow = gf.Flow('g-1-1').add(
@@ -728,7 +761,7 @@
'task1.t REVERTING',
'task1.t REVERTED(None)']
self.assertEqual(expected, capturer.values)
self.assertEqual(engine.storage.get_flow_state(), states.REVERTED)
self.assertEqual(states.REVERTED, engine.storage.get_flow_state())

def test_graph_flow_four_tasks_revert_failure(self):
flow = gf.Flow('g-3-nasty').add(
@@ -738,7 +771,7 @@

engine = self._make_engine(flow)
self.assertFailuresRegexp(RuntimeError, '^Gotcha', engine.run)
self.assertEqual(engine.storage.get_flow_state(), states.FAILURE)
self.assertEqual(states.FAILURE, engine.storage.get_flow_state())

def test_graph_flow_with_multireturn_and_multiargs_tasks(self):
flow = gf.Flow('g-3-multi').add(
@@ -751,14 +784,14 @@
engine = self._make_engine(flow)
engine.storage.inject({'x': 30})
engine.run()
self.assertEqual(engine.storage.fetch_all(), {
self.assertEqual({
'a': 1,
'b': 3,
'c': 5,
'x': 30,
'y': 38,
'z': 42
})
}, engine.storage.fetch_all())

def test_task_graph_property(self):
flow = gf.Flow('test').add(
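Reviewer note: the conditional tests that follow hinge on gf.Flow.link()'s decider argument: when the decider returns False, the linked subtree is not run and its atoms are recorded as IGNORE, as the expected capture lists show. A minimal sketch of the linkage, reusing the task helpers imported by these tests (the decider's history argument maps prior atom names to their results):

    from taskflow import engines
    from taskflow.patterns import graph_flow as gf
    from taskflow.patterns import linear_flow as lf
    from taskflow.test import utils

    task1 = utils.ProgressingTask(name='task1')
    task2 = utils.ProgressingTask(name='task2')
    subflow = lf.Flow('more-work').add(utils.ProgressingTask(name='task3'))

    flow = gf.Flow('main-work').add(task1, task2, subflow)
    flow.link(task1, task2)
    # The subflow only executes when the decider approves; returning
    # False marks task3 (and anything under the subflow) as IGNORE.
    flow.link(task2, subflow, decider=lambda history: 'task2' in history)
    engines.run(flow)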
@@ -933,6 +966,87 @@ class EngineResetTests(utils.EngineTestBase):

class EngineGraphConditionalFlowTest(utils.EngineTestBase):

def test_graph_flow_conditional_jumps_across_2(self):
histories = []

def should_go(history):
histories.append(history)
return False

task1 = utils.ProgressingTask(name='task1')
task2 = utils.ProgressingTask(name='task2')
task3 = utils.ProgressingTask(name='task3')
task4 = utils.ProgressingTask(name='task4')

subflow = lf.Flow("more-work")
subsub_flow = lf.Flow("more-more-work")
subsub_flow.add(task3, task4)
subflow.add(subsub_flow)

flow = gf.Flow("main-work")
flow.add(task1, task2)
flow.link(task1, task2)
flow.add(subflow)
flow.link(task2, subflow, decider=should_go)

engine = self._make_engine(flow)
with utils.CaptureListener(engine, capture_flow=False) as capturer:
engine.run()

expected = [
'task1.t RUNNING',
'task1.t SUCCESS(5)',

'task2.t RUNNING',
'task2.t SUCCESS(5)',

'task3.t IGNORE',
'task4.t IGNORE',
]
self.assertEqual(expected, capturer.values)
self.assertEqual(1, len(histories))
self.assertIn('task2', histories[0])

def test_graph_flow_conditional_jumps_across(self):
histories = []

def should_go(history):
histories.append(history)
return False

task1 = utils.ProgressingTask(name='task1')
task2 = utils.ProgressingTask(name='task2')
task3 = utils.ProgressingTask(name='task3')
task4 = utils.ProgressingTask(name='task4')

subflow = lf.Flow("more-work")
subflow.add(task3, task4)
flow = gf.Flow("main-work")
flow.add(task1, task2)
flow.link(task1, task2)
flow.add(subflow)
flow.link(task2, subflow, decider=should_go)
flow.link(task1, subflow, decider=should_go)

engine = self._make_engine(flow)
with utils.CaptureListener(engine, capture_flow=False) as capturer:
engine.run()

expected = [
'task1.t RUNNING',
'task1.t SUCCESS(5)',

'task2.t RUNNING',
'task2.t SUCCESS(5)',

'task3.t IGNORE',
'task4.t IGNORE',
]
self.assertEqual(expected, capturer.values)
self.assertEqual(1, len(histories))
self.assertIn('task1', histories[0])
self.assertIn('task2', histories[0])

def test_graph_flow_conditional(self):
flow = gf.Flow('root')

@@ -1110,11 +1224,11 @@ class EngineCheckingTaskTest(utils.EngineTestBase):
return 'RESULT'

def revert(m_self, result, flow_failures):
self.assertEqual(result, 'RESULT')
self.assertEqual(list(flow_failures.keys()), ['fail1'])
self.assertEqual('RESULT', result)
self.assertEqual(['fail1'], list(flow_failures.keys()))
fail = flow_failures['fail1']
self.assertIsInstance(fail, failure.Failure)
self.assertEqual(str(fail), 'Failure: RuntimeError: Woot!')
self.assertEqual('Failure: RuntimeError: Woot!', str(fail))

flow = lf.Flow('test').add(
CheckingTask(),

@@ -44,15 +44,15 @@ def _make_exc_info(msg):
class GeneralFailureObjTestsMixin(object):

def test_captures_message(self):
self.assertEqual(self.fail_obj.exception_str, 'Woot!')
self.assertEqual('Woot!', self.fail_obj.exception_str)

def test_str(self):
self.assertEqual(str(self.fail_obj),
'Failure: RuntimeError: Woot!')
self.assertEqual('Failure: RuntimeError: Woot!',
str(self.fail_obj))

def test_exception_types(self):
self.assertEqual(list(self.fail_obj),
test_utils.RUNTIME_ERROR_CLASSES[:-2])
self.assertEqual(test_utils.RUNTIME_ERROR_CLASSES[:-2],
list(self.fail_obj))

def test_pformat_no_traceback(self):
text = self.fail_obj.pformat()
@@ -60,11 +60,11 @@ class GeneralFailureObjTestsMixin(object):

def test_check_str(self):
val = 'Exception'
self.assertEqual(self.fail_obj.check(val), val)
self.assertEqual(val, self.fail_obj.check(val))

def test_check_str_not_there(self):
val = 'ValueError'
self.assertEqual(self.fail_obj.check(val), None)
self.assertEqual(None, self.fail_obj.check(val))

def test_check_type(self):
self.assertIs(self.fail_obj.check(RuntimeError), RuntimeError)
@@ -84,8 +84,8 @@ class CaptureFailureTestCase(test.TestCase, GeneralFailureObjTestsMixin):

def test_captures_exc_info(self):
exc_info = self.fail_obj.exc_info
self.assertEqual(len(exc_info), 3)
self.assertEqual(exc_info[0], RuntimeError)
self.assertEqual(3, len(exc_info))
self.assertEqual(RuntimeError, exc_info[0])
self.assertIs(exc_info[1], self.fail_obj.exception)

def test_reraises(self):
@@ -181,7 +181,7 @@ class FailureObjectTestCase(test.TestCase):
exc_type_names=['Exception'],
hi='hi there')
expected = "Failure.__init__ got unexpected keyword argument(s): hi"
self.assertEqual(str(exc), expected)
self.assertEqual(expected, str(exc))

def test_empty_does_not_reraise(self):
self.assertIs(failure.Failure.reraise_if_any([]), None)
@@ -198,7 +198,7 @@
]
exc = self.assertRaises(exceptions.WrappedFailure,
failure.Failure.reraise_if_any, fls)
self.assertEqual(list(exc), fls)
self.assertEqual(fls, list(exc))

def test_failure_copy(self):
fail_obj = _captured_failure('Woot!')
@@ -267,14 +267,14 @@ class WrappedFailureTestCase(test.TestCase):
def test_simple_iter(self):
fail_obj = _captured_failure('Woot!')
wf = exceptions.WrappedFailure([fail_obj])
self.assertEqual(len(wf), 1)
self.assertEqual(list(wf), [fail_obj])
self.assertEqual(1, len(wf))
self.assertEqual([fail_obj], list(wf))

def test_simple_check(self):
fail_obj = _captured_failure('Woot!')
wf = exceptions.WrappedFailure([fail_obj])
self.assertEqual(wf.check(RuntimeError), RuntimeError)
self.assertEqual(wf.check(ValueError), None)
self.assertEqual(RuntimeError, wf.check(RuntimeError))
self.assertEqual(None, wf.check(ValueError))

def test_two_failures(self):
fls = [
@@ -282,8 +282,8 @@
_captured_failure('Oh, not again!')
]
wf = exceptions.WrappedFailure(fls)
self.assertEqual(len(wf), 2)
self.assertEqual(list(wf), fls)
self.assertEqual(2, len(wf))
self.assertEqual(fls, list(wf))

def test_flattening(self):
f1 = _captured_failure('Wrap me')
@@ -295,7 +295,7 @@
fail_obj = failure.Failure()

wf = exceptions.WrappedFailure([fail_obj, f3])
self.assertEqual(list(wf), [f1, f2, f3])
self.assertEqual([f1, f2, f3], list(wf))


class NonAsciiExceptionsTestCase(test.TestCase):
@@ -304,8 +304,8 @@
bad_string = chr(200)
excp = ValueError(bad_string)
fail = failure.Failure.from_exception(excp)
self.assertEqual(fail.exception_str,
encodeutils.exception_to_unicode(excp))
self.assertEqual(encodeutils.exception_to_unicode(excp),
fail.exception_str)
# This is slightly different on py2 vs py3... due to how
# __str__ or __unicode__ is called and what is expected from
# both...
@@ -314,15 +314,15 @@
expected = 'Failure: ValueError: %s' % msg.encode('utf-8')
else:
expected = u'Failure: ValueError: \xc8'
self.assertEqual(str(fail), expected)
self.assertEqual(expected, str(fail))

def test_exception_non_ascii_unicode(self):
hi_ru = u'привет'
fail = failure.Failure.from_exception(ValueError(hi_ru))
self.assertEqual(fail.exception_str, hi_ru)
self.assertEqual(hi_ru, fail.exception_str)
self.assertIsInstance(fail.exception_str, six.text_type)
self.assertEqual(six.text_type(fail),
u'Failure: ValueError: %s' % hi_ru)
self.assertEqual(u'Failure: ValueError: %s' % hi_ru,
six.text_type(fail))

def test_wrapped_failure_non_ascii_unicode(self):
hi_cn = u'嗨'

@@ -66,29 +66,29 @@ class FlowDependenciesTest(test.TestCase):
flow = lf.Flow('lf').add(
utils.TaskOneArg('task1'),
utils.TaskMultiArg('task2'))
self.assertEqual(flow.requires, set(['x', 'y', 'z']))
self.assertEqual(flow.provides, set())
self.assertEqual(set(['x', 'y', 'z']), flow.requires)
self.assertEqual(set(), flow.provides)

def test_linear_flow_requires_rebind_values(self):
flow = lf.Flow('lf').add(
utils.TaskOneArg('task1', rebind=['q']),
utils.TaskMultiArg('task2'))
self.assertEqual(flow.requires, set(['x', 'y', 'z', 'q']))
self.assertEqual(flow.provides, set())
self.assertEqual(set(['x', 'y', 'z', 'q']), flow.requires)
self.assertEqual(set(), flow.provides)

def test_linear_flow_provides_values(self):
flow = lf.Flow('lf').add(
utils.TaskOneReturn('task1', provides='x'),
utils.TaskMultiReturn('task2', provides=['a', 'b', 'c']))
self.assertEqual(flow.requires, set())
self.assertEqual(flow.provides, set(['x', 'a', 'b', 'c']))
self.assertEqual(set(), flow.requires)
self.assertEqual(set(['x', 'a', 'b', 'c']), flow.provides)

def test_linear_flow_provides_required_values(self):
flow = lf.Flow('lf').add(
utils.TaskOneReturn('task1', provides='x'),
utils.TaskOneArg('task2'))
self.assertEqual(flow.requires, set())
self.assertEqual(flow.provides, set(['x']))
self.assertEqual(set(), flow.requires)
self.assertEqual(set(['x']), flow.provides)

def test_linear_flow_multi_provides_and_requires_values(self):
flow = lf.Flow('lf').add(
@@ -97,36 +97,36 @@
provides=['x', 'y', 'q']),
utils.TaskMultiArgMultiReturn('task2',
provides=['i', 'j', 'k']))
self.assertEqual(flow.requires, set(['a', 'b', 'c', 'z']))
self.assertEqual(flow.provides, set(['x', 'y', 'q', 'i', 'j', 'k']))
self.assertEqual(set(['a', 'b', 'c', 'z']), flow.requires)
self.assertEqual(set(['x', 'y', 'q', 'i', 'j', 'k']), flow.provides)

def test_unordered_flow_without_dependencies(self):
flow = uf.Flow('uf').add(
utils.TaskNoRequiresNoReturns('task1'),
utils.TaskNoRequiresNoReturns('task2'))
self.assertEqual(flow.requires, set())
self.assertEqual(flow.provides, set())
self.assertEqual(set(), flow.requires)
self.assertEqual(set(), flow.provides)

def test_unordered_flow_requires_values(self):
flow = uf.Flow('uf').add(
utils.TaskOneArg('task1'),
utils.TaskMultiArg('task2'))
self.assertEqual(flow.requires, set(['x', 'y', 'z']))
self.assertEqual(flow.provides, set())
self.assertEqual(set(['x', 'y', 'z']), flow.requires)
self.assertEqual(set(), flow.provides)

def test_unordered_flow_requires_rebind_values(self):
flow = uf.Flow('uf').add(
utils.TaskOneArg('task1', rebind=['q']),
utils.TaskMultiArg('task2'))
self.assertEqual(flow.requires, set(['x', 'y', 'z', 'q']))
self.assertEqual(flow.provides, set())
self.assertEqual(set(['x', 'y', 'z', 'q']), flow.requires)
self.assertEqual(set(), flow.provides)

def test_unordered_flow_provides_values(self):
flow = uf.Flow('uf').add(
utils.TaskOneReturn('task1', provides='x'),
utils.TaskMultiReturn('task2', provides=['a', 'b', 'c']))
self.assertEqual(flow.requires, set())
self.assertEqual(flow.provides, set(['x', 'a', 'b', 'c']))
self.assertEqual(set(), flow.requires)
self.assertEqual(set(['x', 'a', 'b', 'c']), flow.provides)

def test_unordered_flow_provides_required_values(self):
flow = uf.Flow('uf')
@@ -159,8 +159,8 @@
provides=['d', 'e', 'f']),
utils.TaskMultiArgMultiReturn('task2',
provides=['i', 'j', 'k']))
self.assertEqual(flow.requires, set(['a', 'b', 'c', 'x', 'y', 'z']))
self.assertEqual(flow.provides, set(['d', 'e', 'f', 'i', 'j', 'k']))
self.assertEqual(set(['a', 'b', 'c', 'x', 'y', 'z']), flow.requires)
self.assertEqual(set(['d', 'e', 'f', 'i', 'j', 'k']), flow.provides)

def test_unordered_flow_provides_same_values(self):
flow = uf.Flow('uf').add(utils.TaskOneReturn(provides='x'))
@@ -184,36 +184,36 @@
rebind=['b'], provides=['z']),
utils.TaskOneArgOneReturn('task4', rebind=['c'],
provides=['q'])))
self.assertEqual(flow.requires, set(['a', 'b', 'c']))
self.assertEqual(flow.provides, set(['x', 'y', 'z', 'q']))
self.assertEqual(set(['a', 'b', 'c']), flow.requires)
self.assertEqual(set(['x', 'y', 'z', 'q']), flow.provides)

def test_graph_flow_requires_values(self):
flow = gf.Flow('gf').add(
utils.TaskOneArg('task1'),
utils.TaskMultiArg('task2'))
self.assertEqual(flow.requires, set(['x', 'y', 'z']))
self.assertEqual(flow.provides, set())
self.assertEqual(set(['x', 'y', 'z']), flow.requires)
self.assertEqual(set(), flow.provides)

def test_graph_flow_requires_rebind_values(self):
flow = gf.Flow('gf').add(
utils.TaskOneArg('task1', rebind=['q']),
utils.TaskMultiArg('task2'))
self.assertEqual(flow.requires, set(['x', 'y', 'z', 'q']))
self.assertEqual(flow.provides, set())
self.assertEqual(set(['x', 'y', 'z', 'q']), flow.requires)
self.assertEqual(set(), flow.provides)

def test_graph_flow_provides_values(self):
flow = gf.Flow('gf').add(
utils.TaskOneReturn('task1', provides='x'),
utils.TaskMultiReturn('task2', provides=['a', 'b', 'c']))
self.assertEqual(flow.requires, set())
self.assertEqual(flow.provides, set(['x', 'a', 'b', 'c']))
self.assertEqual(set(), flow.requires)
self.assertEqual(set(['x', 'a', 'b', 'c']), flow.provides)

def test_graph_flow_provides_required_values(self):
flow = gf.Flow('gf').add(
utils.TaskOneReturn('task1', provides='x'),
utils.TaskOneArg('task2'))
self.assertEqual(flow.requires, set())
self.assertEqual(flow.provides, set(['x']))
self.assertEqual(set(), flow.requires)
self.assertEqual(set(['x']), flow.provides)

def test_graph_flow_provides_provided_value_other_call(self):
flow = gf.Flow('gf')
@@ -228,8 +228,8 @@
provides=['d', 'e', 'f']),
utils.TaskMultiArgMultiReturn('task2',
provides=['i', 'j', 'k']))
self.assertEqual(flow.requires, set(['a', 'b', 'c', 'x', 'y', 'z']))
self.assertEqual(flow.provides, set(['d', 'e', 'f', 'i', 'j', 'k']))
self.assertEqual(set(['a', 'b', 'c', 'x', 'y', 'z']), flow.requires)
self.assertEqual(set(['d', 'e', 'f', 'i', 'j', 'k']), flow.provides)

def test_graph_cyclic_dependency(self):
flow = gf.Flow('g-3-cyclic')
@@ -245,81 +245,81 @@
def test_task_requires_and_provides_same_values(self):
flow = lf.Flow('lf', utils.TaskOneArgOneReturn('rt', requires='x',
provides='x'))
self.assertEqual(flow.requires, set('x'))
self.assertEqual(flow.provides, set('x'))
self.assertEqual(set('x'), flow.requires)
self.assertEqual(set('x'), flow.provides)

def test_retry_in_linear_flow_no_requirements_no_provides(self):
flow = lf.Flow('lf', retry.AlwaysRevert('rt'))
self.assertEqual(flow.requires, set())
self.assertEqual(flow.provides, set())
self.assertEqual(set(), flow.requires)
self.assertEqual(set(), flow.provides)

def test_retry_in_linear_flow_with_requirements(self):
flow = lf.Flow('lf', retry.AlwaysRevert('rt', requires=['x', 'y']))
self.assertEqual(flow.requires, set(['x', 'y']))
self.assertEqual(flow.provides, set())
self.assertEqual(set(['x', 'y']), flow.requires)
self.assertEqual(set(), flow.provides)

def test_retry_in_linear_flow_with_provides(self):
flow = lf.Flow('lf', retry.AlwaysRevert('rt', provides=['x', 'y']))
self.assertEqual(flow.requires, set())
self.assertEqual(flow.provides, set(['x', 'y']))
self.assertEqual(set(), flow.requires)
self.assertEqual(set(['x', 'y']), flow.provides)

def test_retry_in_linear_flow_requires_and_provides(self):
flow = lf.Flow('lf', retry.AlwaysRevert('rt',
requires=['x', 'y'],
provides=['a', 'b']))
self.assertEqual(flow.requires, set(['x', 'y']))
self.assertEqual(flow.provides, set(['a', 'b']))
self.assertEqual(set(['x', 'y']), flow.requires)
self.assertEqual(set(['a', 'b']), flow.provides)

def test_retry_requires_and_provides_same_value(self):
flow = lf.Flow('lf', retry.AlwaysRevert('rt',
requires=['x', 'y'],
provides=['x', 'y']))
self.assertEqual(flow.requires, set(['x', 'y']))
self.assertEqual(flow.provides, set(['x', 'y']))
self.assertEqual(set(['x', 'y']), flow.requires)
self.assertEqual(set(['x', 'y']), flow.provides)

def test_retry_in_unordered_flow_no_requirements_no_provides(self):
flow = uf.Flow('uf', retry.AlwaysRevert('rt'))
self.assertEqual(flow.requires, set())
self.assertEqual(flow.provides, set())
self.assertEqual(set(), flow.requires)
self.assertEqual(set(), flow.provides)

def test_retry_in_unordered_flow_with_requirements(self):
flow = uf.Flow('uf', retry.AlwaysRevert('rt', requires=['x', 'y']))
self.assertEqual(flow.requires, set(['x', 'y']))
self.assertEqual(flow.provides, set())
self.assertEqual(set(['x', 'y']), flow.requires)
self.assertEqual(set(), flow.provides)

def test_retry_in_unordered_flow_with_provides(self):
flow = uf.Flow('uf', retry.AlwaysRevert('rt', provides=['x', 'y']))
self.assertEqual(flow.requires, set())
self.assertEqual(flow.provides, set(['x', 'y']))
self.assertEqual(set(), flow.requires)
self.assertEqual(set(['x', 'y']), flow.provides)

def test_retry_in_unordered_flow_requires_and_provides(self):
flow = uf.Flow('uf', retry.AlwaysRevert('rt',
requires=['x', 'y'],
provides=['a', 'b']))
self.assertEqual(flow.requires, set(['x', 'y']))
self.assertEqual(flow.provides, set(['a', 'b']))
self.assertEqual(set(['x', 'y']), flow.requires)
self.assertEqual(set(['a', 'b']), flow.provides)

def test_retry_in_graph_flow_no_requirements_no_provides(self):
flow = gf.Flow('gf', retry.AlwaysRevert('rt'))
self.assertEqual(flow.requires, set())
self.assertEqual(flow.provides, set())
self.assertEqual(set(), flow.requires)
self.assertEqual(set(), flow.provides)

def test_retry_in_graph_flow_with_requirements(self):
flow = gf.Flow('gf', retry.AlwaysRevert('rt', requires=['x', 'y']))
self.assertEqual(flow.requires, set(['x', 'y']))
self.assertEqual(flow.provides, set())
self.assertEqual(set(['x', 'y']), flow.requires)
self.assertEqual(set(), flow.provides)

def test_retry_in_graph_flow_with_provides(self):
flow = gf.Flow('gf', retry.AlwaysRevert('rt', provides=['x', 'y']))
self.assertEqual(flow.requires, set())
self.assertEqual(flow.provides, set(['x', 'y']))
self.assertEqual(set(), flow.requires)
self.assertEqual(set(['x', 'y']), flow.provides)

def test_retry_in_graph_flow_requires_and_provides(self):
flow = gf.Flow('gf', retry.AlwaysRevert('rt',
requires=['x', 'y'],
provides=['a', 'b']))
self.assertEqual(flow.requires, set(['x', 'y']))
self.assertEqual(flow.provides, set(['a', 'b']))
self.assertEqual(set(['x', 'y']), flow.requires)
self.assertEqual(set(['a', 'b']), flow.provides)

def test_linear_flow_retry_and_task(self):
flow = lf.Flow('lf', retry.AlwaysRevert('rt',
@@ -328,8 +328,8 @@
flow.add(utils.TaskMultiArgOneReturn(rebind=['a', 'x', 'c'],
provides=['z']))

self.assertEqual(flow.requires, set(['x', 'y', 'c']))
self.assertEqual(flow.provides, set(['a', 'b', 'z']))
self.assertEqual(set(['x', 'y', 'c']), flow.requires)
self.assertEqual(set(['a', 'b', 'z']), flow.provides)

def test_unordered_flow_retry_and_task(self):
flow = uf.Flow('uf', retry.AlwaysRevert('rt',
@@ -338,8 +338,8 @@
flow.add(utils.TaskMultiArgOneReturn(rebind=['a', 'x', 'c'],
provides=['z']))

self.assertEqual(flow.requires, set(['x', 'y', 'c']))
self.assertEqual(flow.provides, set(['a', 'b', 'z']))
self.assertEqual(set(['x', 'y', 'c']), flow.requires)
self.assertEqual(set(['a', 'b', 'z']), flow.provides)

def test_unordered_flow_retry_and_task_same_requires_provides(self):
flow = uf.Flow('uf', retry.AlwaysRevert('rt', requires=['x']))
@@ -365,8 +365,8 @@
flow.add(utils.TaskMultiArgOneReturn(rebind=['a', 'x', 'c'],
provides=['z']))

self.assertEqual(flow.requires, set(['x', 'y', 'c']))
self.assertEqual(flow.provides, set(['a', 'b', 'z']))
self.assertEqual(set(['x', 'y', 'c']), flow.requires)
self.assertEqual(set(['a', 'b', 'z']), flow.provides)

def test_graph_flow_retry_and_task_dependency_provide_require(self):
flow = gf.Flow('gf', retry.AlwaysRevert('rt', requires=['x']))
@@ -389,4 +389,4 @@
pass

flow = lf.Flow('lf', retry=FullArgsRetry(requires='a'))
self.assertEqual(flow.requires, set(['a']))
self.assertEqual(set(['a']), flow.requires)

102
taskflow/tests/unit/test_formatters.py
Normal file
@@ -0,0 +1,102 @@
# -*- coding: utf-8 -*-

# Copyright (C) 2015 Yahoo! Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from taskflow import engines
from taskflow import formatters
from taskflow.listeners import logging as logging_listener
from taskflow.patterns import linear_flow
from taskflow import states
from taskflow import test
from taskflow.test import mock
from taskflow.test import utils as test_utils


class FormattersTest(test.TestCase):

    @staticmethod
    def _broken_atom_matcher(node):
        return node.item.name == 'Broken'

    def _make_test_flow(self):
        b = test_utils.TaskWithFailure("Broken")
        h_1 = test_utils.ProgressingTask("Happy-1")
        h_2 = test_utils.ProgressingTask("Happy-2")
        flo = linear_flow.Flow("test")
        flo.add(h_1, h_2, b)
        return flo

    def test_exc_info_format(self):
        flo = self._make_test_flow()
        e = engines.load(flo)
        self.assertRaises(RuntimeError, e.run)

        fails = e.storage.get_execute_failures()
        self.assertEqual(1, len(fails))
        self.assertIn('Broken', fails)
        fail = fails['Broken']

        f = formatters.FailureFormatter(e)
        (exc_info, details) = f.format(fail, self._broken_atom_matcher)
        self.assertEqual(3, len(exc_info))
        self.assertEqual("", details)

    @mock.patch('taskflow.formatters.FailureFormatter._format_node')
    def test_exc_info_with_details_format(self, mock_format_node):
        mock_format_node.return_value = 'A node'

        flo = self._make_test_flow()
        e = engines.load(flo)
        self.assertRaises(RuntimeError, e.run)
        fails = e.storage.get_execute_failures()
        self.assertEqual(1, len(fails))
        self.assertIn('Broken', fails)
        fail = fails['Broken']

        # Doing this allows the details to be shown...
        e.storage.set_atom_intention("Broken", states.EXECUTE)
        f = formatters.FailureFormatter(e)
        (exc_info, details) = f.format(fail, self._broken_atom_matcher)
        self.assertEqual(3, len(exc_info))
        self.assertTrue(mock_format_node.called)

    @mock.patch('taskflow.storage.Storage.get_execute_result')
    def test_exc_info_with_details_format_hidden(self, mock_get_execute):
        flo = self._make_test_flow()
        e = engines.load(flo)
        self.assertRaises(RuntimeError, e.run)
        fails = e.storage.get_execute_failures()
        self.assertEqual(1, len(fails))
        self.assertIn('Broken', fails)
        fail = fails['Broken']

        # Doing this allows the details to be shown...
        e.storage.set_atom_intention("Broken", states.EXECUTE)
        hide_inputs_outputs_of = ['Broken', "Happy-1", "Happy-2"]
        f = formatters.FailureFormatter(
            e, hide_inputs_outputs_of=hide_inputs_outputs_of)
        (exc_info, details) = f.format(fail, self._broken_atom_matcher)
        self.assertEqual(3, len(exc_info))
        self.assertFalse(mock_get_execute.called)

    @mock.patch('taskflow.formatters.FailureFormatter._format_node')
    def test_formatted_via_listener(self, mock_format_node):
        mock_format_node.return_value = 'A node'

        flo = self._make_test_flow()
        e = engines.load(flo)
        with logging_listener.DynamicLoggingListener(e):
            self.assertRaises(RuntimeError, e.run)
        self.assertTrue(mock_format_node.called)
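Reviewer note: the new test_formatters.py above exercises taskflow.formatters.FailureFormatter, whose format() call returns an (exc_info, details) pair for a captured failure. A condensed sketch of the same call pattern the tests use (the task name and matcher mirror the test fixtures):

    from taskflow import engines
    from taskflow import formatters
    from taskflow.patterns import linear_flow
    from taskflow.test import utils as test_utils

    flow = linear_flow.Flow('test')
    flow.add(test_utils.TaskWithFailure('Broken'))
    engine = engines.load(flow)
    try:
        engine.run()
    except RuntimeError:
        pass

    # Look up the captured failure and format it; exc_info is the usual
    # (type, value, traceback) triple and details is a formatted string.
    fail = engine.storage.get_execute_failures()['Broken']
    formatter = formatters.FailureFormatter(engine)
    exc_info, details = formatter.format(
        fail, lambda node: node.item.name == 'Broken')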
@@ -49,11 +49,11 @@ class FunctorTaskTest(test.TestCase):

def test_simple(self):
task = base.FunctorTask(add)
self.assertEqual(task.name, __name__ + '.add')
self.assertEqual(__name__ + '.add', task.name)

def test_other_name(self):
task = base.FunctorTask(add, name='my task')
self.assertEqual(task.name, 'my task')
self.assertEqual('my task', task.name)

def test_it_runs(self):
values = []
@@ -67,7 +67,7 @@
)
self.assertRaisesRegexp(RuntimeError, '^Woot',
taskflow.engines.run, flow)
self.assertEqual(values, ['one', 'fail', 'revert one'])
self.assertEqual(['one', 'fail', 'revert one'], values)

def test_lambda_functors(self):
t = base.FunctorTask

@@ -117,11 +117,11 @@ class TestProgress(test.TestCase):
end_progress = e.storage.get_task_progress("test")
self.assertEqual(1.0, end_progress)
end_details = e.storage.get_task_progress_details("test")
self.assertEqual(end_details.get('at_progress'), 0.5)
self.assertEqual(end_details.get('details'), {
self.assertEqual(0.5, end_details.get('at_progress'))
self.assertEqual({
'test': 'test data',
'foo': 'bar'
})
}, end_details.get('details'))

def test_dual_storage_progress(self):
fired_events = []

@@ -53,19 +53,19 @@ class RetryTest(utils.EngineTestBase):
flow = lf.Flow('flow-1', utils.OneReturnRetry(provides='x'))
engine = self._make_engine(flow)
engine.run()
self.assertEqual(engine.storage.fetch_all(), {'x': 1})
self.assertEqual({'x': 1}, engine.storage.fetch_all())

def test_run_empty_unordered_flow(self):
flow = uf.Flow('flow-1', utils.OneReturnRetry(provides='x'))
engine = self._make_engine(flow)
engine.run()
self.assertEqual(engine.storage.fetch_all(), {'x': 1})
self.assertEqual({'x': 1}, engine.storage.fetch_all())

def test_run_empty_graph_flow(self):
flow = gf.Flow('flow-1', utils.OneReturnRetry(provides='x'))
engine = self._make_engine(flow)
engine.run()
self.assertEqual(engine.storage.fetch_all(), {'x': 1})
self.assertEqual({'x': 1}, engine.storage.fetch_all())

def test_states_retry_success_linear_flow(self):
flow = lf.Flow('flow-1', retry.Times(4, 'r1', provides='x')).add(
@@ -76,7 +76,7 @@ class RetryTest(utils.EngineTestBase):
engine.storage.inject({'y': 2})
with utils.CaptureListener(engine) as capturer:
engine.run()
self.assertEqual(engine.storage.fetch_all(), {'y': 2, 'x': 2})
self.assertEqual({'y': 2, 'x': 2}, engine.storage.fetch_all())
expected = ['flow-1.f RUNNING',
'r1.r RUNNING', 'r1.r SUCCESS(1)',
'task1.t RUNNING', 'task1.t SUCCESS(5)',
@@ -105,7 +105,7 @@ class RetryTest(utils.EngineTestBase):
engine.storage.inject({'y': 4})
with utils.CaptureListener(engine) as capturer:
self.assertRaisesRegexp(RuntimeError, '^Woot', engine.run)
self.assertEqual(engine.storage.fetch_all(), {'y': 4})
self.assertEqual({'y': 4}, engine.storage.fetch_all())
expected = ['flow-1.f RUNNING',
'r1.r RUNNING',
'r1.r SUCCESS(1)',
@@ -144,7 +144,7 @@ class RetryTest(utils.EngineTestBase):
engine.storage.inject({'y': 4})
with utils.CaptureListener(engine) as capturer:
self.assertRaisesRegexp(RuntimeError, '^Gotcha', engine.run)
self.assertEqual(engine.storage.fetch_all(), {'y': 4, 'x': 1})
self.assertEqual({'y': 4, 'x': 1}, engine.storage.fetch_all())
expected = ['flow-1.f RUNNING',
'r1.r RUNNING',
'r1.r SUCCESS(1)',
@@ -172,7 +172,7 @@ class RetryTest(utils.EngineTestBase):
engine.storage.inject({'y': 2})
with utils.CaptureListener(engine) as capturer:
engine.run()
self.assertEqual(engine.storage.fetch_all(), {'y': 2, 'x': 2})
self.assertEqual({'y': 2, 'x': 2}, engine.storage.fetch_all())
expected = ['flow-1.f RUNNING',
'r1.r RUNNING',
'r1.r SUCCESS(None)',
@@ -202,6 +202,69 @@ class RetryTest(utils.EngineTestBase):
|
||||
'flow-1.f SUCCESS']
|
||||
self.assertEqual(expected, capturer.values)
|
||||
|
||||
def test_new_revert_vs_old(self):
|
||||
flow = lf.Flow('flow-1').add(
|
||||
utils.TaskNoRequiresNoReturns("task1"),
|
||||
lf.Flow('flow-2', retry.Times(1, 'r1', provides='x')).add(
|
||||
utils.TaskNoRequiresNoReturns("task2"),
|
||||
utils.ConditionalTask("task3")
|
||||
),
|
||||
utils.TaskNoRequiresNoReturns("task4")
|
||||
)
|
||||
engine = self._make_engine(flow)
|
||||
engine.storage.inject({'y': 2})
|
||||
with utils.CaptureListener(engine) as capturer:
|
||||
try:
|
||||
engine.run()
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
expected = ['flow-1.f RUNNING',
|
||||
'task1.t RUNNING',
|
||||
'task1.t SUCCESS(None)',
|
||||
'r1.r RUNNING',
|
||||
'r1.r SUCCESS(1)',
|
||||
'task2.t RUNNING',
|
||||
'task2.t SUCCESS(None)',
|
||||
'task3.t RUNNING',
|
||||
'task3.t FAILURE(Failure: RuntimeError: Woot!)',
|
||||
'task3.t REVERTING',
|
||||
'task3.t REVERTED(None)',
|
||||
'task2.t REVERTING',
|
||||
'task2.t REVERTED(None)',
|
||||
'r1.r REVERTING',
|
||||
'r1.r REVERTED(None)',
|
||||
'flow-1.f REVERTED']
|
||||
self.assertEqual(expected, capturer.values)
|
||||
|
||||
engine = self._make_engine(flow, defer_reverts=True)
|
||||
engine.storage.inject({'y': 2})
|
||||
with utils.CaptureListener(engine) as capturer:
|
||||
try:
|
||||
engine.run()
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
expected = ['flow-1.f RUNNING',
|
||||
'task1.t RUNNING',
|
||||
'task1.t SUCCESS(None)',
|
||||
'r1.r RUNNING',
|
||||
'r1.r SUCCESS(1)',
|
||||
'task2.t RUNNING',
|
||||
'task2.t SUCCESS(None)',
|
||||
'task3.t RUNNING',
|
||||
'task3.t FAILURE(Failure: RuntimeError: Woot!)',
|
||||
'task3.t REVERTING',
|
||||
'task3.t REVERTED(None)',
|
||||
'task2.t REVERTING',
|
||||
'task2.t REVERTED(None)',
|
||||
'r1.r REVERTING',
|
||||
'r1.r REVERTED(None)',
|
||||
'task1.t REVERTING',
|
||||
'task1.t REVERTED(None)',
|
||||
'flow-1.f REVERTED']
|
||||
self.assertEqual(expected, capturer.values)
|
||||
|
||||
def test_states_retry_failure_parent_flow_fails(self):
|
||||
flow = lf.Flow('flow-1', retry.Times(3, 'r1', provides='x1')).add(
|
||||
utils.TaskNoRequiresNoReturns("task1"),
|
||||
@@ -215,8 +278,9 @@ class RetryTest(utils.EngineTestBase):
|
||||
engine.storage.inject({'y': 2})
|
||||
with utils.CaptureListener(engine) as capturer:
|
||||
engine.run()
|
||||
self.assertEqual(engine.storage.fetch_all(), {'y': 2, 'x1': 2,
|
||||
'x2': 1})
|
||||
self.assertEqual({'y': 2, 'x1': 2,
|
||||
'x2': 1},
|
||||
engine.storage.fetch_all())
|
||||
expected = ['flow-1.f RUNNING',
|
||||
'r1.r RUNNING',
|
||||
'r1.r SUCCESS(1)',
|
||||
@@ -270,7 +334,7 @@ class RetryTest(utils.EngineTestBase):
|
||||
engine.storage.inject({'y': 2})
|
||||
with utils.CaptureListener(engine) as capturer:
|
||||
engine.run()
|
||||
self.assertEqual(engine.storage.fetch_all(), {'y': 2, 'x': 2})
|
||||
self.assertEqual({'y': 2, 'x': 2}, engine.storage.fetch_all())
|
||||
expected = ['flow-1.f RUNNING',
|
||||
'r.r RUNNING',
|
||||
'r.r SUCCESS(1)',
|
||||
@@ -305,7 +369,7 @@ class RetryTest(utils.EngineTestBase):
|
||||
engine.storage.inject({'y': 2})
|
||||
with utils.CaptureListener(engine) as capturer:
|
||||
engine.run()
|
||||
self.assertEqual(engine.storage.fetch_all(), {'y': 2, 'x': 2, 'x2': 1})
|
||||
self.assertEqual({'y': 2, 'x': 2, 'x2': 1}, engine.storage.fetch_all())
|
||||
expected = ['flow-1.f RUNNING',
|
||||
'r1.r RUNNING',
|
||||
'r1.r SUCCESS(1)',
|
||||
@@ -350,7 +414,7 @@ class RetryTest(utils.EngineTestBase):
|
||||
engine.run()
|
||||
except Exception:
|
||||
pass
|
||||
self.assertEqual(engine.storage.fetch_all(), {'y': 2})
|
||||
self.assertEqual({'y': 2}, engine.storage.fetch_all())
|
||||
expected = ['flow-1.f RUNNING',
|
||||
'task1.t RUNNING',
|
||||
'task1.t SUCCESS(5)',
|
||||
@@ -379,7 +443,7 @@ class RetryTest(utils.EngineTestBase):
|
||||
engine.run()
|
||||
except Exception:
|
||||
pass
|
||||
self.assertEqual(engine.storage.fetch_all(), {'y': 2})
|
||||
self.assertEqual({'y': 2}, engine.storage.fetch_all())
|
||||
expected = ['flow-1.f RUNNING',
|
||||
'task1.t RUNNING',
|
||||
'task1.t SUCCESS(5)',
|
||||
@@ -406,7 +470,7 @@ class RetryTest(utils.EngineTestBase):
|
||||
engine.storage.inject({'y': 2})
|
||||
with utils.CaptureListener(engine) as capturer:
|
||||
self.assertRaisesRegexp(RuntimeError, '^Woot', engine.run)
|
||||
self.assertEqual(engine.storage.fetch_all(), {'y': 2})
|
||||
self.assertEqual({'y': 2}, engine.storage.fetch_all())
|
||||
expected = ['flow-1.f RUNNING',
|
||||
'r1.r RUNNING',
|
||||
'r1.r SUCCESS(1)',
|
||||
@@ -471,7 +535,7 @@ class RetryTest(utils.EngineTestBase):
|
||||
't3.t RUNNING',
|
||||
't3.t SUCCESS(5)',
|
||||
'flow-1.f SUCCESS']
|
||||
self.assertEqual(capturer.values, expected)
|
||||
self.assertEqual(expected, capturer.values)
|
||||
|
||||
def test_resume_flow_that_should_be_retried(self):
|
||||
flow = lf.Flow('flow-1', retry.Times(3, 'r1')).add(
|
||||
@@ -525,7 +589,7 @@ class RetryTest(utils.EngineTestBase):
|
||||
't1.t RUNNING',
|
||||
't1.t SUCCESS(5)',
|
||||
'flow-1.f SUCCESS']
|
||||
self.assertEqual(capturer.values, expected)
|
||||
self.assertEqual(expected, capturer.values)
|
||||
|
||||
def test_default_times_retry(self):
|
||||
flow = lf.Flow('flow-1', retry.Times(3, 'r1')).add(
|
||||
@@ -1040,7 +1104,7 @@ class RetryTest(utils.EngineTestBase):
|
||||
'task1.t RUNNING',
|
||||
'task1.t SUCCESS(5)',
|
||||
'flow-1.f SUCCESS']
|
||||
self.assertEqual(capturer.values, expected)
|
||||
self.assertEqual(expected, capturer.values)
|
||||
|
||||
def test_retry_fails(self):
|
||||
r = FailingRetry()
|
||||
@@ -1048,7 +1112,7 @@ class RetryTest(utils.EngineTestBase):
|
||||
engine = self._make_engine(flow)
|
||||
self.assertRaisesRegexp(ValueError, '^OMG', engine.run)
|
||||
self.assertEqual(1, len(engine.storage.get_retry_histories()))
|
||||
self.assertEqual(len(r.history), 0)
|
||||
self.assertEqual(0, len(r.history))
|
||||
self.assertEqual([], list(r.history.outcomes_iter()))
|
||||
self.assertIsNotNone(r.history.failure)
|
||||
self.assertTrue(r.history.caused_by(ValueError, include_retry=True))
|
||||
@@ -1088,7 +1152,7 @@ class RetryTest(utils.EngineTestBase):
|
||||
'c.t FAILURE(Failure: RuntimeError: Woot!)',
|
||||
'b.t REVERTED(None)',
|
||||
])
|
||||
self.assertEqual(engine.storage.get_flow_state(), st.REVERTED)
|
||||
self.assertEqual(st.REVERTED, engine.storage.get_flow_state())
|
||||
|
||||
def test_nested_provides_graph_retried_correctly(self):
|
||||
flow = gf.Flow("test").add(
|
||||
@@ -1123,7 +1187,7 @@ class RetryTest(utils.EngineTestBase):
|
||||
'a.t SUCCESS(5)',
|
||||
'c.t SUCCESS(5)']
|
||||
self.assertItemsEqual(expected, capturer.values[4:])
|
||||
self.assertEqual(engine.storage.get_flow_state(), st.SUCCESS)
|
||||
self.assertEqual(st.SUCCESS, engine.storage.get_flow_state())
|
||||
|
||||
|
||||
class RetryParallelExecutionTest(utils.EngineTestBase):
|
||||
@@ -1142,7 +1206,7 @@ class RetryParallelExecutionTest(utils.EngineTestBase):
|
||||
engine.storage.inject({'y': 2})
|
||||
with utils.CaptureListener(engine, capture_flow=False) as capturer:
|
||||
engine.run()
|
||||
self.assertEqual(engine.storage.fetch_all(), {'y': 2, 'x': 2})
|
||||
self.assertEqual({'y': 2, 'x': 2}, engine.storage.fetch_all())
|
||||
expected = ['r.r RUNNING',
|
||||
'r.r SUCCESS(1)',
|
||||
'task1.t RUNNING',
|
||||
@@ -1178,7 +1242,7 @@ class RetryParallelExecutionTest(utils.EngineTestBase):
|
||||
engine.storage.inject({'y': 2})
|
||||
with utils.CaptureListener(engine, capture_flow=False) as capturer:
|
||||
engine.run()
|
||||
self.assertEqual(engine.storage.fetch_all(), {'y': 2, 'x': 2})
|
||||
self.assertEqual({'y': 2, 'x': 2}, engine.storage.fetch_all())
|
||||
expected = ['r.r RUNNING',
|
||||
'r.r SUCCESS(1)',
|
||||
'task1.t RUNNING',
|
||||
@@ -1209,11 +1273,12 @@ class RetryParallelExecutionTest(utils.EngineTestBase):
|
||||
|
||||
|
||||
class SerialEngineTest(RetryTest, test.TestCase):
|
||||
def _make_engine(self, flow, flow_detail=None):
|
||||
def _make_engine(self, flow, defer_reverts=None, flow_detail=None):
|
||||
return taskflow.engines.load(flow,
|
||||
flow_detail=flow_detail,
|
||||
engine='serial',
|
||||
backend=self.backend)
|
||||
backend=self.backend,
|
||||
defer_reverts=defer_reverts)
|
||||
|
||||
|
||||
class ParallelEngineWithThreadsTest(RetryTest,
|
||||
@@ -1221,36 +1286,46 @@ class ParallelEngineWithThreadsTest(RetryTest,
|
||||
test.TestCase):
|
||||
_EXECUTOR_WORKERS = 2
|
||||
|
||||
def _make_engine(self, flow, flow_detail=None, executor=None):
|
||||
def _make_engine(self, flow, defer_reverts=None, flow_detail=None,
|
||||
executor=None):
|
||||
if executor is None:
|
||||
executor = 'threads'
|
||||
return taskflow.engines.load(flow, flow_detail=flow_detail,
|
||||
return taskflow.engines.load(flow,
|
||||
flow_detail=flow_detail,
|
||||
engine='parallel',
|
||||
backend=self.backend,
|
||||
executor=executor,
|
||||
max_workers=self._EXECUTOR_WORKERS)
|
||||
max_workers=self._EXECUTOR_WORKERS,
|
||||
defer_reverts=defer_reverts)
|
||||
|
||||
|
||||
@testtools.skipIf(not eu.EVENTLET_AVAILABLE, 'eventlet is not available')
|
||||
class ParallelEngineWithEventletTest(RetryTest, test.TestCase):
|
||||
|
||||
def _make_engine(self, flow, flow_detail=None, executor=None):
|
||||
def _make_engine(self, flow, defer_reverts=None, flow_detail=None,
|
||||
executor=None):
|
||||
if executor is None:
|
||||
executor = futurist.GreenThreadPoolExecutor()
|
||||
self.addCleanup(executor.shutdown)
|
||||
return taskflow.engines.load(flow, flow_detail=flow_detail,
|
||||
backend=self.backend, engine='parallel',
|
||||
executor=executor)
|
||||
return taskflow.engines.load(flow,
|
||||
flow_detail=flow_detail,
|
||||
backend=self.backend,
|
||||
engine='parallel',
|
||||
executor=executor,
|
||||
defer_reverts=defer_reverts)
|
||||
|
||||
|
||||
class ParallelEngineWithProcessTest(RetryTest, test.TestCase):
|
||||
_EXECUTOR_WORKERS = 2
|
||||
|
||||
def _make_engine(self, flow, flow_detail=None, executor=None):
|
||||
def _make_engine(self, flow, defer_reverts=None, flow_detail=None,
|
||||
executor=None):
|
||||
if executor is None:
|
||||
executor = 'processes'
|
||||
return taskflow.engines.load(flow, flow_detail=flow_detail,
|
||||
return taskflow.engines.load(flow,
|
||||
flow_detail=flow_detail,
|
||||
engine='parallel',
|
||||
backend=self.backend,
|
||||
executor=executor,
|
||||
max_workers=self._EXECUTOR_WORKERS)
|
||||
max_workers=self._EXECUTOR_WORKERS,
|
||||
defer_reverts=defer_reverts)
|
||||
|
||||
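The substantive change in this file is the new defer_reverts option threaded through every _make_engine() helper above. The test_new_revert_vs_old expectations show the difference: without it, a failure inside the retry-guarded subflow reverts only back to the retry controller (task1 stays SUCCESS); with defer_reverts=True the revert decision is deferred upward, so task1 is reverted too. A minimal sketch of passing the option through taskflow.engines.load(), with a throwaway task (NoopTask here is illustrative, not the test utility):

import taskflow.engines
from taskflow.patterns import linear_flow as lf
from taskflow import task


class NoopTask(task.Task):
    def execute(self):
        pass


flow = lf.Flow('flow-1').add(NoopTask('task1'))
# defer_reverts is forwarded as an engine option, exactly as the
# updated _make_engine() helpers above forward it; True makes a
# reverted subflow hand the revert decision up to its parent flow.
engine = taskflow.engines.load(flow, engine='serial', defer_reverts=True)
engine.run()
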
@@ -63,13 +63,13 @@ class StorageTestMixin(object):
    def test_flow_name_and_uuid(self):
        flow_detail = models.FlowDetail(name='test-fd', uuid='aaaa')
        s = self._get_storage(flow_detail)
        self.assertEqual(s.flow_name, 'test-fd')
        self.assertEqual(s.flow_uuid, 'aaaa')
        self.assertEqual('test-fd', s.flow_name)
        self.assertEqual('aaaa', s.flow_uuid)

    def test_ensure_task(self):
        s = self._get_storage()
        s.ensure_atom(test_utils.NoopTask('my task'))
        self.assertEqual(s.get_atom_state('my task'), states.PENDING)
        self.assertEqual(states.PENDING, s.get_atom_state('my task'))
        self.assertTrue(uuidutils.is_uuid_like(s.get_atom_uuid('my task')))

    def test_get_tasks_states(self):
@@ -81,7 +81,7 @@ class StorageTestMixin(object):
            'my task': (states.SUCCESS, states.EXECUTE),
            'my task2': (states.PENDING, states.EXECUTE),
        }
        self.assertEqual(s.get_atoms_states(['my task', 'my task2']), expected)
        self.assertEqual(expected, s.get_atoms_states(['my task', 'my task2']))

    def test_ensure_task_flow_detail(self):
        _lb, flow_detail = p_utils.temporary_flow_detail(self.backend)
@@ -91,9 +91,9 @@ class StorageTestMixin(object):
        s.ensure_atom(t)
        td = flow_detail.find(s.get_atom_uuid('my task'))
        self.assertIsNotNone(td)
        self.assertEqual(td.name, 'my task')
        self.assertEqual(td.version, '3.11')
        self.assertEqual(td.state, states.PENDING)
        self.assertEqual('my task', td.name)
        self.assertEqual('3.11', td.version)
        self.assertEqual(states.PENDING, td.state)

    def test_get_without_save(self):
        _lb, flow_detail = p_utils.temporary_flow_detail(self.backend)
@@ -114,26 +114,26 @@ class StorageTestMixin(object):
        s = self._get_storage()
        s.ensure_atom(test_utils.NoopTask('my task'))
        s.save('my task', 5)
        self.assertEqual(s.get('my task'), 5)
        self.assertEqual(s.fetch_all(), {})
        self.assertEqual(s.get_atom_state('my task'), states.SUCCESS)
        self.assertEqual(5, s.get('my task'))
        self.assertEqual({}, s.fetch_all())
        self.assertEqual(states.SUCCESS, s.get_atom_state('my task'))

    def test_save_and_get_cached_failure(self):
        a_failure = failure.Failure.from_exception(RuntimeError('Woot!'))
        s = self._get_storage()
        s.ensure_atom(test_utils.NoopTask('my task'))
        s.save('my task', a_failure, states.FAILURE)
        self.assertEqual(s.get('my task'), a_failure)
        self.assertEqual(s.get_atom_state('my task'), states.FAILURE)
        self.assertEqual(a_failure, s.get('my task'))
        self.assertEqual(states.FAILURE, s.get_atom_state('my task'))
        self.assertTrue(s.has_failures())
        self.assertEqual(s.get_failures(), {'my task': a_failure})
        self.assertEqual({'my task': a_failure}, s.get_failures())

    def test_save_and_get_non_cached_failure(self):
        a_failure = failure.Failure.from_exception(RuntimeError('Woot!'))
        s = self._get_storage()
        s.ensure_atom(test_utils.NoopTask('my task'))
        s.save('my task', a_failure, states.FAILURE)
        self.assertEqual(s.get('my task'), a_failure)
        self.assertEqual(a_failure, s.get('my task'))
        s._failures['my task'] = {}
        self.assertTrue(a_failure.matches(s.get('my task')))

@@ -145,10 +145,10 @@ class StorageTestMixin(object):
        s.save('my task', a_failure, states.FAILURE)

        s.set_atom_state('my task', states.REVERTING)
        self.assertEqual(s.get('my task'), a_failure)
        self.assertEqual(a_failure, s.get('my task'))

        s.set_atom_state('my task', states.REVERTED)
        self.assertEqual(s.get('my task'), a_failure)
        self.assertEqual(a_failure, s.get('my task'))

    def test_get_failure_after_reload(self):
        a_failure = failure.Failure.from_exception(RuntimeError('Woot!'))
@@ -159,7 +159,7 @@ class StorageTestMixin(object):
        self.assertTrue(s2.has_failures())
        self.assertEqual(1, len(s2.get_failures()))
        self.assertTrue(a_failure.matches(s2.get('my task')))
        self.assertEqual(s2.get_atom_state('my task'), states.FAILURE)
        self.assertEqual(states.FAILURE, s2.get_atom_state('my task'))

    def test_get_non_existing_var(self):
        s = self._get_storage()
@@ -171,21 +171,21 @@ class StorageTestMixin(object):
        s.ensure_atom(test_utils.NoopTask('my task'))
        s.save('my task', 5)
        s.reset('my task')
        self.assertEqual(s.get_atom_state('my task'), states.PENDING)
        self.assertEqual(states.PENDING, s.get_atom_state('my task'))
        self.assertRaises(exceptions.NotFound, s.get, 'my task')

    def test_reset_unknown_task(self):
        s = self._get_storage()
        s.ensure_atom(test_utils.NoopTask('my task'))
        self.assertEqual(s.reset('my task'), None)
        self.assertEqual(None, s.reset('my task'))

    def test_fetch_by_name(self):
        s = self._get_storage()
        name = 'my result'
        s.ensure_atom(test_utils.NoopTask('my task', provides=name))
        s.save('my task', 5)
        self.assertEqual(s.fetch(name), 5)
        self.assertEqual(s.fetch_all(), {name: 5})
        self.assertEqual(5, s.fetch(name))
        self.assertEqual({name: 5}, s.fetch_all())

    def test_fetch_unknown_name(self):
        s = self._get_storage()
@@ -203,108 +203,108 @@ class StorageTestMixin(object):
        s = self._get_storage()
        s.ensure_atom(test_utils.NoopTask('my task'))
        s.update_atom_metadata('my task', None)
        self.assertEqual(s.get_task_progress('my task'), 0.0)
        self.assertEqual(0.0, s.get_task_progress('my task'))
        s.set_task_progress('my task', 0.5)
        self.assertEqual(s.get_task_progress('my task'), 0.5)
        self.assertEqual(0.5, s.get_task_progress('my task'))
        s.update_atom_metadata('my task', None)
        self.assertEqual(s.get_task_progress('my task'), 0.5)
        self.assertEqual(0.5, s.get_task_progress('my task'))

    def test_default_task_progress(self):
        s = self._get_storage()
        s.ensure_atom(test_utils.NoopTask('my task'))
        self.assertEqual(s.get_task_progress('my task'), 0.0)
        self.assertEqual(s.get_task_progress_details('my task'), None)
        self.assertEqual(0.0, s.get_task_progress('my task'))
        self.assertEqual(None, s.get_task_progress_details('my task'))

    def test_task_progress(self):
        s = self._get_storage()
        s.ensure_atom(test_utils.NoopTask('my task'))

        s.set_task_progress('my task', 0.5, {'test_data': 11})
        self.assertEqual(s.get_task_progress('my task'), 0.5)
        self.assertEqual(s.get_task_progress_details('my task'), {
        self.assertEqual(0.5, s.get_task_progress('my task'))
        self.assertEqual({
            'at_progress': 0.5,
            'details': {'test_data': 11}
        })
        }, s.get_task_progress_details('my task'))

        s.set_task_progress('my task', 0.7, {'test_data': 17})
        self.assertEqual(s.get_task_progress('my task'), 0.7)
        self.assertEqual(s.get_task_progress_details('my task'), {
        self.assertEqual(0.7, s.get_task_progress('my task'))
        self.assertEqual({
            'at_progress': 0.7,
            'details': {'test_data': 17}
        })
        }, s.get_task_progress_details('my task'))

        s.set_task_progress('my task', 0.99)
        self.assertEqual(s.get_task_progress('my task'), 0.99)
        self.assertEqual(s.get_task_progress_details('my task'), {
        self.assertEqual(0.99, s.get_task_progress('my task'))
        self.assertEqual({
            'at_progress': 0.7,
            'details': {'test_data': 17}
        })
        }, s.get_task_progress_details('my task'))

    def test_task_progress_erase(self):
        s = self._get_storage()
        s.ensure_atom(test_utils.NoopTask('my task'))

        s.set_task_progress('my task', 0.8, {})
        self.assertEqual(s.get_task_progress('my task'), 0.8)
        self.assertEqual(s.get_task_progress_details('my task'), None)
        self.assertEqual(0.8, s.get_task_progress('my task'))
        self.assertEqual(None, s.get_task_progress_details('my task'))

    def test_fetch_result_not_ready(self):
        s = self._get_storage()
        name = 'my result'
        s.ensure_atom(test_utils.NoopTask('my task', provides=name))
        self.assertRaises(exceptions.NotFound, s.get, name)
        self.assertEqual(s.fetch_all(), {})
        self.assertEqual({}, s.fetch_all())

    def test_save_multiple_results(self):
        s = self._get_storage()
        s.ensure_atom(test_utils.NoopTask('my task', provides=['foo', 'bar']))
        s.save('my task', ('spam', 'eggs'))
        self.assertEqual(s.fetch_all(), {
        self.assertEqual({
            'foo': 'spam',
            'bar': 'eggs',
        })
        }, s.fetch_all())

    def test_mapping_none(self):
        s = self._get_storage()
        s.ensure_atom(test_utils.NoopTask('my task'))
        s.save('my task', 5)
        self.assertEqual(s.fetch_all(), {})
        self.assertEqual({}, s.fetch_all())

    def test_inject(self):
        s = self._get_storage()
        s.inject({'foo': 'bar', 'spam': 'eggs'})
        self.assertEqual(s.fetch('spam'), 'eggs')
        self.assertEqual(s.fetch_all(), {
        self.assertEqual('eggs', s.fetch('spam'))
        self.assertEqual({
            'foo': 'bar',
            'spam': 'eggs',
        })
        }, s.fetch_all())

    def test_inject_twice(self):
        s = self._get_storage()
        s.inject({'foo': 'bar'})
        self.assertEqual(s.fetch_all(), {'foo': 'bar'})
        self.assertEqual({'foo': 'bar'}, s.fetch_all())
        s.inject({'spam': 'eggs'})
        self.assertEqual(s.fetch_all(), {
        self.assertEqual({
            'foo': 'bar',
            'spam': 'eggs',
        })
        }, s.fetch_all())

    def test_inject_resumed(self):
        s = self._get_storage()
        s.inject({'foo': 'bar', 'spam': 'eggs'})
        # verify it's there
        self.assertEqual(s.fetch_all(), {
        self.assertEqual({
            'foo': 'bar',
            'spam': 'eggs',
        })
        }, s.fetch_all())
        # imagine we are resuming, so we need to make new
        # storage from same flow details
        s2 = self._get_storage(s._flowdetail)
        # injected data should still be there:
        self.assertEqual(s2.fetch_all(), {
        self.assertEqual({
            'foo': 'bar',
            'spam': 'eggs',
        })
        }, s2.fetch_all())

    def test_many_thread_ensure_same_task(self):
        s = self._get_storage()
@@ -341,8 +341,8 @@ class StorageTestMixin(object):
    def test_fetch_mapped_args(self):
        s = self._get_storage()
        s.inject({'foo': 'bar', 'spam': 'eggs'})
        self.assertEqual(s.fetch_mapped_args({'viking': 'spam'}),
                         {'viking': 'eggs'})
        self.assertEqual({'viking': 'eggs'},
                         s.fetch_mapped_args({'viking': 'spam'}))

    def test_fetch_not_found_args(self):
        s = self._get_storage()
@@ -353,23 +353,23 @@ class StorageTestMixin(object):
    def test_fetch_optional_args_found(self):
        s = self._get_storage()
        s.inject({'foo': 'bar', 'spam': 'eggs'})
        self.assertEqual(s.fetch_mapped_args({'viking': 'spam'},
                                             optional_args=set(['viking'])),
                         {'viking': 'eggs'})
        self.assertEqual({'viking': 'eggs'},
                         s.fetch_mapped_args({'viking': 'spam'},
                                             optional_args=set(['viking'])))

    def test_fetch_optional_args_not_found(self):
        s = self._get_storage()
        s.inject({'foo': 'bar', 'spam': 'eggs'})
        self.assertEqual(s.fetch_mapped_args({'viking': 'helmet'},
                                             optional_args=set(['viking'])),
                         {})
        self.assertEqual({},
                         s.fetch_mapped_args({'viking': 'helmet'},
                                             optional_args=set(['viking'])))

    def test_set_and_get_task_state(self):
        s = self._get_storage()
        state = states.PENDING
        s.ensure_atom(test_utils.NoopTask('my task'))
        s.set_atom_state('my task', state)
        self.assertEqual(s.get_atom_state('my task'), state)
        self.assertEqual(state, s.get_atom_state('my task'))

    def test_get_state_of_unknown_task(self):
        s = self._get_storage()
@@ -418,7 +418,7 @@ class StorageTestMixin(object):

    def test_initial_flow_state(self):
        s = self._get_storage()
        self.assertEqual(s.get_flow_state(), states.PENDING)
        self.assertEqual(states.PENDING, s.get_flow_state())

    def test_get_flow_state(self):
        _lb, flow_detail = p_utils.temporary_flow_detail(backend=self.backend)
@@ -426,12 +426,12 @@ class StorageTestMixin(object):
        with contextlib.closing(self.backend.get_connection()) as conn:
            flow_detail.update(conn.update_flow_details(flow_detail))
        s = self._get_storage(flow_detail)
        self.assertEqual(s.get_flow_state(), states.FAILURE)
        self.assertEqual(states.FAILURE, s.get_flow_state())

    def test_set_and_get_flow_state(self):
        s = self._get_storage()
        s.set_flow_state(states.SUCCESS)
        self.assertEqual(s.get_flow_state(), states.SUCCESS)
        self.assertEqual(states.SUCCESS, s.get_flow_state())

    def test_result_is_checked(self):
        s = self._get_storage()
@@ -451,7 +451,7 @@ class StorageTestMixin(object):
        s = self._get_storage()
        s.ensure_atom(test_utils.NoopTask('my task', provides=['a', 'b']))
        s.save('my task', ['result'])
        self.assertEqual(s.fetch('a'), 'result')
        self.assertEqual('result', s.fetch('a'))
        self.assertRaisesRegexp(exceptions.NotFound,
                                '^Unable to find result', s.fetch, 'b')

@@ -495,9 +495,9 @@ class StorageTestMixin(object):
        s.save('my retry', 'b')
        s.cleanup_retry_history('my retry', states.REVERTED)
        history = s.get_retry_history('my retry')
        self.assertEqual(list(history), [])
        self.assertEqual([], list(history))
        self.assertEqual(0, len(history))
        self.assertEqual(s.fetch_all(), {})
        self.assertEqual({}, s.fetch_all())

    def test_cached_retry_failure(self):
        a_failure = failure.Failure.from_exception(RuntimeError('Woot!'))
@@ -511,7 +511,7 @@ class StorageTestMixin(object):
        self.assertIsNotNone(history.failure)
        self.assertEqual(1, len(history))
        self.assertTrue(s.has_failures())
        self.assertEqual(s.get_failures(), {'my retry': a_failure})
        self.assertEqual({'my retry': a_failure}, s.get_failures())

    def test_logbook_get_unknown_atom_type(self):
        self.assertRaisesRegexp(TypeError,
@@ -523,14 +523,14 @@ class StorageTestMixin(object):
        s.ensure_atom(test_utils.NoopTask('my task'))
        s.set_atom_intention('my task', states.REVERT)
        intention = s.get_atom_intention('my task')
        self.assertEqual(intention, states.REVERT)
        self.assertEqual(states.REVERT, intention)

    def test_save_retry_intention(self):
        s = self._get_storage()
        s.ensure_atom(test_utils.NoopTask('my retry'))
        s.set_atom_intention('my retry', states.RETRY)
        intention = s.get_atom_intention('my retry')
        self.assertEqual(intention, states.RETRY)
        self.assertEqual(states.RETRY, intention)

    def test_inject_persistent_missing(self):
        t = test_utils.ProgressingTask('my retry', requires=['x'])

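One behavior worth calling out from the hunks above: fetch_mapped_args() resolves a {argument name: result name} mapping against stored values, and names listed in optional_args are silently dropped, rather than raising NotFound, when nothing backs them. A sketch in the same test style (it assumes the StorageTestMixin fixtures above, so it is not standalone):

    def test_optional_args_sketch(self):
        # Sketch only -- reuses the mixin's _get_storage() fixture.
        s = self._get_storage()
        s.inject({'foo': 'bar', 'spam': 'eggs'})
        # 'viking' maps to the stored name 'spam', so it resolves.
        self.assertEqual({'viking': 'eggs'},
                         s.fetch_mapped_args({'viking': 'spam'}))
        # 'helmet' is stored nowhere; being optional, it is dropped.
        self.assertEqual({}, s.fetch_mapped_args(
            {'viking': 'helmet'}, optional_args=set(['viking'])))
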
@@ -49,13 +49,13 @@ class SuspendTest(utils.EngineTestBase):
        with SuspendingListener(engine, task_name='b',
                                task_state=states.SUCCESS) as capturer:
            engine.run()
        self.assertEqual(engine.storage.get_flow_state(), states.SUCCESS)
        self.assertEqual(states.SUCCESS, engine.storage.get_flow_state())
        expected = ['a.t RUNNING', 'a.t SUCCESS(5)']
        self.assertEqual(expected, capturer.values)
        with SuspendingListener(engine, task_name='b',
                                task_state=states.SUCCESS) as capturer:
            engine.run()
        self.assertEqual(engine.storage.get_flow_state(), states.SUCCESS)
        self.assertEqual(states.SUCCESS, engine.storage.get_flow_state())
        expected = []
        self.assertEqual(expected, capturer.values)

@@ -69,13 +69,13 @@ class SuspendTest(utils.EngineTestBase):
        with SuspendingListener(engine, task_name='b',
                                task_state=states.SUCCESS) as capturer:
            engine.run()
        self.assertEqual(engine.storage.get_flow_state(), states.SUSPENDED)
        self.assertEqual(states.SUSPENDED, engine.storage.get_flow_state())
        expected = ['a.t RUNNING', 'a.t SUCCESS(5)',
                    'b.t RUNNING', 'b.t SUCCESS(5)']
        self.assertEqual(expected, capturer.values)
        with utils.CaptureListener(engine, capture_flow=False) as capturer:
            engine.run()
        self.assertEqual(engine.storage.get_flow_state(), states.SUCCESS)
        self.assertEqual(states.SUCCESS, engine.storage.get_flow_state())
        expected = ['c.t RUNNING', 'c.t SUCCESS(5)']
        self.assertEqual(expected, capturer.values)

@@ -89,7 +89,7 @@ class SuspendTest(utils.EngineTestBase):
        with SuspendingListener(engine, task_name='b',
                                task_state=states.REVERTED) as capturer:
            engine.run()
        self.assertEqual(engine.storage.get_flow_state(), states.SUSPENDED)
        self.assertEqual(states.SUSPENDED, engine.storage.get_flow_state())
        expected = ['a.t RUNNING',
                    'a.t SUCCESS(5)',
                    'b.t RUNNING',
@@ -103,7 +103,7 @@ class SuspendTest(utils.EngineTestBase):
        self.assertEqual(expected, capturer.values)
        with utils.CaptureListener(engine, capture_flow=False) as capturer:
            self.assertRaisesRegexp(RuntimeError, '^Woot', engine.run)
        self.assertEqual(engine.storage.get_flow_state(), states.REVERTED)
        self.assertEqual(states.REVERTED, engine.storage.get_flow_state())
        expected = ['a.t REVERTING', 'a.t REVERTED(None)']
        self.assertEqual(expected, capturer.values)

@@ -133,7 +133,7 @@ class SuspendTest(utils.EngineTestBase):
        engine2 = self._make_engine(flow, engine.storage._flowdetail)
        with utils.CaptureListener(engine2, capture_flow=False) as capturer2:
            self.assertRaisesRegexp(RuntimeError, '^Woot', engine2.run)
        self.assertEqual(engine2.storage.get_flow_state(), states.REVERTED)
        self.assertEqual(states.REVERTED, engine2.storage.get_flow_state())
        expected = ['a.t REVERTING',
                    'a.t REVERTED(None)']
        self.assertEqual(expected, capturer2.values)
@@ -170,9 +170,9 @@ class SuspendTest(utils.EngineTestBase):
        engine2 = self._make_engine(flow2, engine.storage._flowdetail)
        with utils.CaptureListener(engine2, capture_flow=False) as capturer2:
            self.assertRaisesRegexp(RuntimeError, '^Woot', engine2.run)
        self.assertEqual(engine2.storage.get_flow_state(), states.REVERTED)
        self.assertEqual(states.REVERTED, engine2.storage.get_flow_state())
        expected = ['a.t REVERTING', 'a.t REVERTED(None)']
        self.assertEqual(capturer2.values, expected)
        self.assertEqual(expected, capturer2.values)

    def test_storage_is_rechecked(self):
        flow = lf.Flow('linear').add(
@@ -184,7 +184,7 @@ class SuspendTest(utils.EngineTestBase):
        with SuspendingListener(engine, task_name='b',
                                task_state=states.SUCCESS):
            engine.run()
        self.assertEqual(engine.storage.get_flow_state(), states.SUSPENDED)
        self.assertEqual(states.SUSPENDED, engine.storage.get_flow_state())
        # uninject everything:
        engine.storage.save(engine.storage.injector_name,
                            {}, states.SUCCESS)

@@ -390,6 +390,18 @@ CEO
        root = tree.Node("josh")
        self.assertTrue(root.empty())

    def test_after_frozen(self):
        root = tree.Node("josh")
        root.add(tree.Node("josh.1"))
        root.freeze()
        self.assertTrue(
            all(n.frozen for n in root.dfs_iter(include_self=True)))
        self.assertRaises(tree.FrozenNode,
                          root.remove, "josh.1")
        self.assertRaises(tree.FrozenNode, root.disassociate)
        self.assertRaises(tree.FrozenNode, root.add,
                          tree.Node("josh.2"))

    def test_removal(self):
        root = self._make_species()
        self.assertIsNotNone(root.remove('reptile'))
@@ -467,24 +479,59 @@ CEO
        self.assertEqual(set(['animal', 'reptile', 'mammal', 'horse',
                              'primate', 'monkey', 'human']), set(things))

    def test_dfs_itr_order(self):
    def test_dfs_itr_left_to_right(self):
        root = self._make_species()
        it = root.dfs_iter(include_self=False, right_to_left=False)
        things = list([n.item for n in it])
        self.assertEqual(['reptile', 'mammal', 'primate',
                          'human', 'monkey', 'horse'], things)

    def test_dfs_itr_no_self(self):
        root = self._make_species()
        things = list([n.item for n in root.dfs_iter(include_self=True)])
        self.assertEqual(['animal', 'mammal', 'horse', 'primate',
                          'monkey', 'human', 'reptile'], things)
        things = list([n.item for n in root.dfs_iter(include_self=False)])
        self.assertEqual(['mammal', 'horse', 'primate',
                          'monkey', 'human', 'reptile'], things)

    def test_bfs_iter(self):
    def test_bfs_itr(self):
        root = self._make_species()
        things = list([n.item for n in root.bfs_iter(include_self=True)])
        self.assertEqual(['animal', 'reptile', 'mammal', 'primate',
                          'horse', 'human', 'monkey'], things)

    def test_bfs_itr_no_self(self):
        root = self._make_species()
        things = list([n.item for n in root.bfs_iter(include_self=False)])
        self.assertEqual(['reptile', 'mammal', 'primate',
                          'horse', 'human', 'monkey'], things)

    def test_bfs_itr_right_to_left(self):
        root = self._make_species()
        it = root.bfs_iter(include_self=False, right_to_left=True)
        things = list([n.item for n in it])
        self.assertEqual(['mammal', 'reptile', 'horse',
                          'primate', 'monkey', 'human'], things)

    def test_to_diagraph(self):
        root = self._make_species()
        g = root.to_digraph()
        self.assertEqual(root.child_count(only_direct=False) + 1, len(g))
        for node in root.dfs_iter(include_self=True):
            self.assertIn(node.item, g)
        self.assertEqual([], g.predecessors('animal'))
        self.assertEqual(['animal'], g.predecessors('reptile'))
        self.assertEqual(['primate'], g.predecessors('human'))
        self.assertEqual(['mammal'], g.predecessors('primate'))
        self.assertEqual(['animal'], g.predecessors('mammal'))
        self.assertEqual(['mammal', 'reptile'], g.successors('animal'))

    def test_to_digraph_retains_metadata(self):
        root = tree.Node("chickens", alive=True)
        dead_chicken = tree.Node("chicken.1", alive=False)
        root.add(dead_chicken)
        g = root.to_digraph()
        self.assertEqual(g.node['chickens'], {'alive': True})
        self.assertEqual(g.node['chicken.1'], {'alive': False})


class OrderedSetTest(test.TestCase):


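The renamed and added iterator tests above pin down the tree traversal contracts: dfs_iter() walks depth-first, bfs_iter() level by level, and both accept include_self and right_to_left toggles. A small runnable sketch against taskflow.types.tree (node names here are illustrative):

from taskflow.types import tree

root = tree.Node('animal')
mammal = tree.Node('mammal')
root.add(mammal)
root.add(tree.Node('reptile'))
mammal.add(tree.Node('horse'))

# Depth-first finishes a whole subtree before moving to a sibling;
# breadth-first finishes a level first. The exact sibling order
# depends on insertion order and the right_to_left flag (see the
# expectations in the tests above).
print([n.item for n in root.dfs_iter(include_self=True)])
print([n.item for n in root.bfs_iter(include_self=True)])
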
@@ -179,19 +179,19 @@ class TestSequenceMinus(test.TestCase):

    def test_simple_case(self):
        result = misc.sequence_minus([1, 2, 3, 4], [2, 3])
        self.assertEqual(result, [1, 4])
        self.assertEqual([1, 4], result)

    def test_subtrahend_has_extra_elements(self):
        result = misc.sequence_minus([1, 2, 3, 4], [2, 3, 5, 7, 13])
        self.assertEqual(result, [1, 4])
        self.assertEqual([1, 4], result)

    def test_some_items_are_equal(self):
        result = misc.sequence_minus([1, 1, 1, 1], [1, 1, 3])
        self.assertEqual(result, [1, 1])
        self.assertEqual([1, 1], result)

    def test_equal_items_not_continious(self):
        result = misc.sequence_minus([1, 2, 3, 1], [1, 3])
        self.assertEqual(result, [2, 1])
        self.assertEqual([2, 1], result)


class TestReversedEnumerate(testscenarios.TestWithScenarios, test.TestCase):
@@ -301,11 +301,11 @@ class TestMergeUri(test.TestCase):
class TestClamping(test.TestCase):
    def test_simple_clamp(self):
        result = misc.clamp(1.0, 2.0, 3.0)
        self.assertEqual(result, 2.0)
        self.assertEqual(2.0, result)
        result = misc.clamp(4.0, 2.0, 3.0)
        self.assertEqual(result, 3.0)
        self.assertEqual(3.0, result)
        result = misc.clamp(3.0, 4.0, 4.0)
        self.assertEqual(result, 4.0)
        self.assertEqual(4.0, result)

    def test_invalid_clamp(self):
        self.assertRaises(ValueError, misc.clamp, 0.0, 2.0, 1.0)
@@ -340,3 +340,27 @@ class TestIterable(test.TestCase):

    def test_dict(self):
        self.assertTrue(misc.is_iterable(dict()))


class TestEnsureDict(testscenarios.TestWithScenarios):
    scenarios = [
        ('none', {'original': None, 'expected': {}}),
        ('empty_dict', {'original': {}, 'expected': {}}),
        ('empty_list', {'original': [], 'expected': {}}),
        ('dict', {'original': {'a': 1, 'b': 2}, 'expected': {'a': 1, 'b': 2}}),
    ]

    def test_expected(self):
        self.assertEqual(self.expected, misc.ensure_dict(self.original))
        self.assertFalse(self.expected is misc.ensure_dict(self.original))


class TestEnsureDictRaises(testscenarios.TestWithScenarios):
    scenarios = [
        ('list', {'original': [1, 2], 'exception': TypeError}),
        ('tuple', {'original': (1, 2), 'exception': TypeError}),
        ('set', {'original': set([1, 2]), 'exception': TypeError}),
    ]

    def test_exceptions(self):
        self.assertRaises(self.exception, misc.ensure_dict, self.original)

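The helpers exercised above have simple contracts that the flipped assertions preserve: sequence_minus() removes each element of the subtrahend at most once while keeping the remaining order, clamp(value, minimum, maximum) pins a value into an inclusive range, and the new ensure_dict() coerces None and empty containers to {} while rejecting non-empty non-dicts. A runnable recap, assuming taskflow is importable:

from taskflow.utils import misc

# Multiset-style subtraction: each match is removed once, order kept.
assert misc.sequence_minus([1, 2, 3, 1], [1, 3]) == [2, 1]
# Values outside [minimum, maximum] are pinned to the nearest bound.
assert misc.clamp(4.0, 2.0, 3.0) == 3.0
# None and empty containers normalize to a fresh empty dict.
assert misc.ensure_dict(None) == {}
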
@@ -32,7 +32,7 @@ class BinaryEncodeTest(test.TestCase):
    def _check(self, data, expected_result):
        result = misc.binary_encode(data)
        self.assertIsInstance(result, six.binary_type)
        self.assertEqual(result, expected_result)
        self.assertEqual(expected_result, result)

    def test_simple_binary(self):
        data = _bytes('hello')
@@ -51,7 +51,7 @@ class BinaryEncodeTest(test.TestCase):
    def test_unicode_other_encoding(self):
        result = misc.binary_encode(u'mañana', 'latin-1')
        self.assertIsInstance(result, six.binary_type)
        self.assertEqual(result, u'mañana'.encode('latin-1'))
        self.assertEqual(u'mañana'.encode('latin-1'), result)


class BinaryDecodeTest(test.TestCase):
@@ -59,7 +59,7 @@ class BinaryDecodeTest(test.TestCase):
    def _check(self, data, expected_result):
        result = misc.binary_decode(data)
        self.assertIsInstance(result, six.text_type)
        self.assertEqual(result, expected_result)
        self.assertEqual(expected_result, result)

    def test_simple_text(self):
        data = u'hello'
@@ -79,18 +79,18 @@ class BinaryDecodeTest(test.TestCase):
        data = u'mañana'.encode('latin-1')
        result = misc.binary_decode(data, 'latin-1')
        self.assertIsInstance(result, six.text_type)
        self.assertEqual(result, u'mañana')
        self.assertEqual(u'mañana', result)


class DecodeJsonTest(test.TestCase):

    def test_it_works(self):
        self.assertEqual(misc.decode_json(_bytes('{"foo": 1}')),
                         {"foo": 1})
        self.assertEqual({"foo": 1},
                         misc.decode_json(_bytes('{"foo": 1}')))

    def test_it_works_with_unicode(self):
        data = _bytes('{"foo": "фуу"}')
        self.assertEqual(misc.decode_json(data), {"foo": u'фуу'})
        self.assertEqual({"foo": u'фуу'}, misc.decode_json(data))

    def test_handles_invalid_unicode(self):
        self.assertRaises(ValueError, misc.decode_json,

@@ -30,6 +30,51 @@ def forever_it():


class IterUtilsTest(test.TestCase):
    def test_fill_empty(self):
        self.assertEqual([], list(iter_utils.fill([1, 2, 3], 0)))

    def test_bad_unique_seen(self):
        iters = [
            ['a', 'b'],
            2,
            None,
        ]
        self.assertRaises(ValueError,
                          iter_utils.unique_seen, *iters)

    def test_unique_seen(self):
        iters = [
            ['a', 'b'],
            ['a', 'c', 'd'],
            ['a', 'e', 'f'],
            ['f', 'm', 'n'],
        ]
        self.assertEqual(['a', 'b', 'c', 'd', 'e', 'f', 'm', 'n'],
                         list(iter_utils.unique_seen(*iters)))

    def test_bad_fill(self):
        self.assertRaises(ValueError, iter_utils.fill, 2, 2)

    def test_fill_many_empty(self):
        result = list(iter_utils.fill(compat_range(0, 50), 500))
        self.assertEqual(450, sum(1 for x in result if x is None))
        self.assertEqual(50, sum(1 for x in result if x is not None))

    def test_fill_custom_filler(self):
        self.assertEqual("abcd",
                         "".join(iter_utils.fill("abc", 4, filler='d')))

    def test_fill_less_needed(self):
        self.assertEqual("ab", "".join(iter_utils.fill("abc", 2)))

    def test_fill(self):
        self.assertEqual([None, None], list(iter_utils.fill([], 2)))
        self.assertEqual((None, None), tuple(iter_utils.fill([], 2)))

    def test_bad_find_first_match(self):
        self.assertRaises(ValueError,
                          iter_utils.find_first_match, 2, lambda v: False)

    def test_find_first_match(self):
        it = forever_it()
        self.assertEqual(100, iter_utils.find_first_match(it,
@@ -40,6 +85,9 @@ class IterUtilsTest(test.TestCase):
        self.assertIsNone(iter_utils.find_first_match(it,
                                                      lambda v: v == ''))

    def test_bad_count(self):
        self.assertRaises(ValueError, iter_utils.count, 2)

    def test_count(self):
        self.assertEqual(0, iter_utils.count([]))
        self.assertEqual(1, iter_utils.count(['a']))
@@ -48,6 +96,9 @@ class IterUtilsTest(test.TestCase):
        self.assertEqual(0, iter_utils.count(compat_range(0)))
        self.assertEqual(0, iter_utils.count(compat_range(-1)))

    def test_bad_while_is_not(self):
        self.assertRaises(ValueError, iter_utils.while_is_not, 2, 'a')

    def test_while_is_not(self):
        it = iter(string.ascii_lowercase)
        self.assertEqual(['a'],

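The new IterUtilsTest cases above document iter_utils in one place: fill() pads or truncates an iterable to an exact length, unique_seen() chains several iterables yielding each item only once, count() consumes and counts, and the bad_* cases show that plain non-iterables are rejected with ValueError. A runnable recap, assuming taskflow is importable:

from taskflow.utils import iter_utils

# Pad with a filler (default None) up to the requested length...
assert "".join(iter_utils.fill("abc", 4, filler='d')) == "abcd"
# ...or truncate when the iterable is longer than needed.
assert "".join(iter_utils.fill("abc", 2)) == "ab"
# Chained de-duplication across several iterables.
assert list(iter_utils.unique_seen(['a', 'b'], ['a', 'c'])) == ['a', 'b', 'c']
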
@@ -52,7 +52,7 @@ class TestWorkerBasedActionEngine(test.MockTestCase):
|
||||
transition_timeout=mock.ANY,
|
||||
retry_options=None)
|
||||
]
|
||||
self.assertEqual(self.master_mock.mock_calls, expected_calls)
|
||||
self.assertEqual(expected_calls, self.master_mock.mock_calls)
|
||||
|
||||
def test_creation_custom(self):
|
||||
executor_mock, executor_inst_mock = self._patch_in_executor()
|
||||
@@ -77,7 +77,7 @@ class TestWorkerBasedActionEngine(test.MockTestCase):
|
||||
transition_timeout=200,
|
||||
retry_options={})
|
||||
]
|
||||
self.assertEqual(self.master_mock.mock_calls, expected_calls)
|
||||
self.assertEqual(expected_calls, self.master_mock.mock_calls)
|
||||
|
||||
def test_creation_custom_executor(self):
|
||||
ex = executor.WorkerTaskExecutor('a', 'test-exchange', ['test-topic'])
|
||||
|
||||
@@ -44,16 +44,16 @@ class TestEndpoint(test.TestCase):
|
||||
|
||||
def test_creation(self):
|
||||
task = self.task_ep.generate()
|
||||
self.assertEqual(self.task_ep.name, self.task_cls_name)
|
||||
self.assertEqual(self.task_cls_name, self.task_ep.name)
|
||||
self.assertIsInstance(task, self.task_cls)
|
||||
self.assertEqual(task.name, self.task_cls_name)
|
||||
self.assertEqual(self.task_cls_name, task.name)
|
||||
|
||||
def test_creation_with_task_name(self):
|
||||
task_name = 'test'
|
||||
task = self.task_ep.generate(name=task_name)
|
||||
self.assertEqual(self.task_ep.name, self.task_cls_name)
|
||||
self.assertEqual(self.task_cls_name, self.task_ep.name)
|
||||
self.assertIsInstance(task, self.task_cls)
|
||||
self.assertEqual(task.name, task_name)
|
||||
self.assertEqual(task_name, task.name)
|
||||
|
||||
def test_creation_task_with_constructor_args(self):
|
||||
# NOTE(skudriashev): Exception is expected here since task
|
||||
@@ -62,7 +62,7 @@ class TestEndpoint(test.TestCase):
|
||||
self.assertRaises(TypeError, endpoint.generate)
|
||||
|
||||
def test_to_str(self):
|
||||
self.assertEqual(str(self.task_ep), self.task_cls_name)
|
||||
self.assertEqual(self.task_cls_name, str(self.task_ep))
|
||||
|
||||
def test_execute(self):
|
||||
task = self.task_ep.generate(self.task_cls_name)
|
||||
@@ -70,7 +70,7 @@ class TestEndpoint(test.TestCase):
|
||||
task_uuid=self.task_uuid,
|
||||
arguments=self.task_args,
|
||||
progress_callback=None)
|
||||
self.assertEqual(result, self.task_result)
|
||||
self.assertEqual(self.task_result, result)
|
||||
|
||||
def test_revert(self):
|
||||
task = self.task_ep.generate(self.task_cls_name)
|
||||
@@ -80,4 +80,4 @@ class TestEndpoint(test.TestCase):
|
||||
progress_callback=None,
|
||||
result=self.task_result,
|
||||
failures={})
|
||||
self.assertEqual(result, None)
|
||||
self.assertEqual(None, result)
|
||||
|
||||
@@ -91,7 +91,7 @@ class TestWorkerTaskExecutor(test.MockTestCase):
|
||||
type_handlers=mock.ANY),
|
||||
mock.call.proxy.dispatcher.type_handlers.update(mock.ANY),
|
||||
]
|
||||
self.assertEqual(self.master_mock.mock_calls, master_mock_calls)
|
||||
self.assertEqual(master_mock_calls, self.master_mock.mock_calls)
|
||||
|
||||
def test_on_message_response_state_running(self):
|
||||
response = pr.Response(pr.RUNNING)
|
||||
@@ -126,7 +126,7 @@ class TestWorkerTaskExecutor(test.MockTestCase):
|
||||
ex._requests_cache[self.task_uuid] = self.request_inst_mock
|
||||
ex._process_response(response.to_dict(), self.message_mock)
|
||||
|
||||
self.assertEqual(len(ex._requests_cache), 0)
|
||||
self.assertEqual(0, len(ex._requests_cache))
|
||||
expected_calls = [
|
||||
mock.call.transition_and_log_error(pr.FAILURE, logger=mock.ANY),
|
||||
mock.call.set_result(result=test_utils.FailureMatcher(a_failure))
|
||||
@@ -152,7 +152,7 @@ class TestWorkerTaskExecutor(test.MockTestCase):
|
||||
ex._requests_cache[self.task_uuid] = self.request_inst_mock
|
||||
ex._process_response(response.to_dict(), self.message_mock)
|
||||
|
||||
self.assertEqual(self.request_inst_mock.mock_calls, [])
|
||||
self.assertEqual([], self.request_inst_mock.mock_calls)
|
||||
|
||||
def test_on_message_response_unknown_task(self):
|
||||
self.message_mock.properties['correlation_id'] = '<unknown>'
|
||||
@@ -161,7 +161,7 @@ class TestWorkerTaskExecutor(test.MockTestCase):
|
||||
ex._requests_cache[self.task_uuid] = self.request_inst_mock
|
||||
ex._process_response(response.to_dict(), self.message_mock)
|
||||
|
||||
self.assertEqual(self.request_inst_mock.mock_calls, [])
|
||||
self.assertEqual([], self.request_inst_mock.mock_calls)
|
||||
|
||||
def test_on_message_response_no_correlation_id(self):
|
||||
self.message_mock.properties = {'type': pr.RESPONSE}
|
||||
@@ -170,15 +170,15 @@ class TestWorkerTaskExecutor(test.MockTestCase):
|
||||
ex._requests_cache[self.task_uuid] = self.request_inst_mock
|
||||
ex._process_response(response.to_dict(), self.message_mock)
|
||||
|
||||
self.assertEqual(self.request_inst_mock.mock_calls, [])
|
||||
self.assertEqual([], self.request_inst_mock.mock_calls)
|
||||
|
||||
def test_on_wait_task_not_expired(self):
|
||||
ex = self.executor()
|
||||
ex._requests_cache[self.task_uuid] = self.request_inst_mock
|
||||
|
||||
self.assertEqual(len(ex._requests_cache), 1)
|
||||
self.assertEqual(1, len(ex._requests_cache))
|
||||
ex._on_wait()
|
||||
self.assertEqual(len(ex._requests_cache), 1)
|
||||
self.assertEqual(1, len(ex._requests_cache))
|
||||
|
||||
def test_on_wait_task_expired(self):
|
||||
now = timeutils.utcnow()
|
||||
@@ -191,24 +191,24 @@ class TestWorkerTaskExecutor(test.MockTestCase):
|
||||
ex = self.executor()
|
||||
ex._requests_cache[self.task_uuid] = self.request_inst_mock
|
||||
|
||||
self.assertEqual(len(ex._requests_cache), 1)
|
||||
self.assertEqual(1, len(ex._requests_cache))
|
||||
ex._on_wait()
|
||||
self.assertEqual(len(ex._requests_cache), 0)
|
||||
self.assertEqual(0, len(ex._requests_cache))
|
||||
|
||||
def test_remove_task_non_existent(self):
|
||||
ex = self.executor()
|
||||
ex._requests_cache[self.task_uuid] = self.request_inst_mock
|
||||
|
||||
self.assertEqual(len(ex._requests_cache), 1)
|
||||
self.assertEqual(1, len(ex._requests_cache))
|
||||
del ex._requests_cache[self.task_uuid]
|
||||
self.assertEqual(len(ex._requests_cache), 0)
|
||||
self.assertEqual(0, len(ex._requests_cache))
|
||||
|
||||
# delete non-existent
|
||||
try:
|
||||
del ex._requests_cache[self.task_uuid]
|
||||
except KeyError:
|
||||
pass
|
||||
self.assertEqual(len(ex._requests_cache), 0)
|
||||
self.assertEqual(0, len(ex._requests_cache))
|
||||
|
||||
def test_execute_task(self):
|
||||
ex = self.executor()
|
||||
@@ -255,7 +255,7 @@ class TestWorkerTaskExecutor(test.MockTestCase):
|
||||
mock.call.Request(self.task, self.task_uuid, 'execute',
|
||||
self.task_args, self.timeout),
|
||||
]
|
||||
self.assertEqual(self.master_mock.mock_calls, expected_calls)
|
||||
self.assertEqual(expected_calls, self.master_mock.mock_calls)
|
||||
|
||||
def test_execute_task_publish_error(self):
|
||||
self.proxy_inst_mock.publish.side_effect = Exception('Woot!')
|
||||
@@ -316,7 +316,7 @@ class TestWorkerTaskExecutor(test.MockTestCase):
|
||||
def test_stop_not_running(self):
|
||||
self.executor().stop()
|
||||
|
||||
self.assertEqual(self.master_mock.mock_calls, [])
|
||||
self.assertEqual([], self.master_mock.mock_calls)
|
||||
|
||||
def test_stop_not_alive(self):
|
||||
self.proxy_inst_mock.start.side_effect = None
|
||||
|
||||
@@ -133,34 +133,34 @@ class TestProtocol(test.TestCase):
|
||||
|
||||
def test_creation(self):
|
||||
request = self.request()
|
||||
self.assertEqual(request.uuid, self.task_uuid)
|
||||
self.assertEqual(request.task, self.task)
|
||||
self.assertEqual(self.task_uuid, request.uuid)
|
||||
self.assertEqual(self.task, request.task)
|
||||
self.assertIsInstance(request.result, futurist.Future)
|
||||
self.assertFalse(request.result.done())
|
||||
|
||||
def test_to_dict_default(self):
|
||||
self.assertEqual(self.request().to_dict(), self.request_to_dict())
|
||||
self.assertEqual(self.request_to_dict(), self.request().to_dict())
|
||||
|
||||
def test_to_dict_with_result(self):
|
||||
self.assertEqual(self.request(result=333).to_dict(),
|
||||
self.request_to_dict(result=('success', 333)))
|
||||
self.assertEqual(self.request_to_dict(result=('success', 333)),
|
||||
self.request(result=333).to_dict())
|
||||
|
||||
def test_to_dict_with_result_none(self):
|
||||
self.assertEqual(self.request(result=None).to_dict(),
|
||||
self.request_to_dict(result=('success', None)))
|
||||
self.assertEqual(self.request_to_dict(result=('success', None)),
|
||||
self.request(result=None).to_dict())
|
||||
|
||||
def test_to_dict_with_result_failure(self):
|
||||
a_failure = failure.Failure.from_exception(RuntimeError('Woot!'))
|
||||
expected = self.request_to_dict(result=('failure',
|
||||
a_failure.to_dict()))
|
||||
self.assertEqual(self.request(result=a_failure).to_dict(), expected)
|
||||
self.assertEqual(expected, self.request(result=a_failure).to_dict())
|
||||
|
||||
def test_to_dict_with_failures(self):
|
||||
a_failure = failure.Failure.from_exception(RuntimeError('Woot!'))
|
||||
request = self.request(failures={self.task.name: a_failure})
|
||||
expected = self.request_to_dict(
|
||||
failures={self.task.name: a_failure.to_dict()})
|
||||
self.assertEqual(request.to_dict(), expected)
|
||||
self.assertEqual(expected, request.to_dict())
|
||||
|
||||
@mock.patch('oslo_utils.timeutils.now')
|
||||
def test_pending_not_expired(self, now):
|
||||
@@ -189,4 +189,4 @@ class TestProtocol(test.TestCase):
|
||||
request = self.request()
|
||||
request.set_result(111)
|
||||
result = request.result.result()
|
||||
self.assertEqual(result, (executor.EXECUTED, 111))
|
||||
self.assertEqual((executor.EXECUTED, 111), result)
|
||||
|
||||
@@ -138,7 +138,7 @@ class TestProxy(test.MockTestCase):
|
||||
durable=False,
|
||||
auto_delete=True)
|
||||
]
|
||||
self.assertEqual(self.master_mock.mock_calls, master_mock_calls)
|
||||
self.assertEqual(master_mock_calls, self.master_mock.mock_calls)
|
||||
|
||||
def test_creation_custom(self):
|
||||
transport_opts = {'context': 'context'}
|
||||
@@ -151,7 +151,7 @@ class TestProxy(test.MockTestCase):
|
||||
durable=False,
|
||||
auto_delete=True)
|
||||
]
|
||||
self.assertEqual(self.master_mock.mock_calls, master_mock_calls)
|
||||
self.assertEqual(master_mock_calls, self.master_mock.mock_calls)
|
||||
|
||||
def test_publish(self):
|
||||
msg_mock = mock.MagicMock()
|
||||
|
||||
@@ -91,7 +91,7 @@ class TestServer(test.MockTestCase):
|
||||
retry_options=mock.ANY)
|
||||
]
|
||||
self.master_mock.assert_has_calls(master_mock_calls)
|
||||
self.assertEqual(len(s._endpoints), 3)
|
||||
self.assertEqual(3, len(s._endpoints))
|
||||
|
||||
def test_creation_with_endpoints(self):
|
||||
s = self.server(endpoints=self.endpoints)
|
||||
@@ -104,34 +104,34 @@ class TestServer(test.MockTestCase):
|
||||
retry_options=mock.ANY)
|
||||
]
|
||||
self.master_mock.assert_has_calls(master_mock_calls)
|
||||
self.assertEqual(len(s._endpoints), len(self.endpoints))
|
||||
self.assertEqual(len(self.endpoints), len(s._endpoints))
|
||||
|
||||
def test_parse_request(self):
|
||||
request = self.make_request()
|
||||
bundle = pr.Request.from_dict(request)
|
||||
task_cls, task_name, action, task_args = bundle
|
||||
self.assertEqual((task_cls, task_name, action, task_args),
|
||||
(self.task.name, self.task.name, self.task_action,
|
||||
dict(arguments=self.task_args)))
|
||||
self.assertEqual((self.task.name, self.task.name, self.task_action,
|
||||
dict(arguments=self.task_args)),
|
||||
(task_cls, task_name, action, task_args))
|
||||
|
||||
def test_parse_request_with_success_result(self):
|
||||
request = self.make_request(action='revert', result=1)
|
||||
bundle = pr.Request.from_dict(request)
|
||||
task_cls, task_name, action, task_args = bundle
|
||||
self.assertEqual((task_cls, task_name, action, task_args),
|
||||
(self.task.name, self.task.name, 'revert',
|
||||
self.assertEqual((self.task.name, self.task.name, 'revert',
|
||||
dict(arguments=self.task_args,
|
||||
result=1)))
|
||||
result=1)),
|
||||
(task_cls, task_name, action, task_args))
|
||||
|
||||
def test_parse_request_with_failure_result(self):
|
||||
a_failure = failure.Failure.from_exception(Exception('test'))
|
||||
request = self.make_request(action='revert', result=a_failure)
|
||||
bundle = pr.Request.from_dict(request)
|
||||
task_cls, task_name, action, task_args = bundle
|
||||
self.assertEqual((task_cls, task_name, action, task_args),
|
||||
(self.task.name, self.task.name, 'revert',
|
||||
self.assertEqual((self.task.name, self.task.name, 'revert',
|
||||
dict(arguments=self.task_args,
|
||||
result=utils.FailureMatcher(a_failure))))
|
||||
result=utils.FailureMatcher(a_failure))),
|
||||
(task_cls, task_name, action, task_args))
|
||||
|
||||
def test_parse_request_with_failures(self):
|
||||
failures = {'0': failure.Failure.from_exception(Exception('test1')),
|
||||
@@ -140,11 +140,11 @@ class TestServer(test.MockTestCase):
|
||||
bundle = pr.Request.from_dict(request)
|
||||
task_cls, task_name, action, task_args = bundle
|
||||
self.assertEqual(
|
||||
(task_cls, task_name, action, task_args),
|
||||
(self.task.name, self.task.name, 'revert',
|
||||
dict(arguments=self.task_args,
|
||||
failures=dict((i, utils.FailureMatcher(f))
|
||||
for i, f in six.iteritems(failures)))))
|
||||
for i, f in six.iteritems(failures)))),
|
||||
(task_cls, task_name, action, task_args))
|
||||
|
||||
@mock.patch("taskflow.engines.worker_based.server.LOG.critical")
|
||||
def test_reply_publish_failure(self, mocked_exception):
|
||||
|
||||
@@ -99,7 +99,7 @@ class TestWorker(test.MockTestCase):
                       transport=mock.ANY,
                       retry_options=mock.ANY)
         ]
-        self.assertEqual(self.master_mock.mock_calls, master_mock_calls)
+        self.assertEqual(master_mock_calls, self.master_mock.mock_calls)

     def test_run_with_no_tasks(self):
         self.worker(reset_master_mock=True).run()
@@ -107,7 +107,7 @@ class TestWorker(test.MockTestCase):
         master_mock_calls = [
             mock.call.server.start()
         ]
-        self.assertEqual(self.master_mock.mock_calls, master_mock_calls)
+        self.assertEqual(master_mock_calls, self.master_mock.mock_calls)

     def test_run_with_tasks(self):
         self.worker(reset_master_mock=True,
@@ -116,7 +116,7 @@ class TestWorker(test.MockTestCase):
         master_mock_calls = [
             mock.call.server.start()
         ]
-        self.assertEqual(self.master_mock.mock_calls, master_mock_calls)
+        self.assertEqual(master_mock_calls, self.master_mock.mock_calls)

     def test_run_with_custom_executor(self):
         executor_mock = mock.MagicMock(name='executor')
@@ -126,7 +126,7 @@ class TestWorker(test.MockTestCase):
         master_mock_calls = [
             mock.call.server.start()
         ]
-        self.assertEqual(self.master_mock.mock_calls, master_mock_calls)
+        self.assertEqual(master_mock_calls, self.master_mock.mock_calls)

     def test_wait(self):
         w = self.worker(reset_master_mock=True)
@@ -137,7 +137,7 @@ class TestWorker(test.MockTestCase):
             mock.call.server.start(),
             mock.call.server.wait()
         ]
-        self.assertEqual(self.master_mock.mock_calls, master_mock_calls)
+        self.assertEqual(master_mock_calls, self.master_mock.mock_calls)

     def test_stop(self):
         self.worker(reset_master_mock=True).stop()
@@ -146,20 +146,20 @@ class TestWorker(test.MockTestCase):
             mock.call.server.stop(),
             mock.call.executor.shutdown()
         ]
-        self.assertEqual(self.master_mock.mock_calls, master_mock_calls)
+        self.assertEqual(master_mock_calls, self.master_mock.mock_calls)

     def test_derive_endpoints_from_string_tasks(self):
         endpoints = worker.Worker._derive_endpoints(
             ['taskflow.tests.utils:DummyTask'])

-        self.assertEqual(len(endpoints), 1)
+        self.assertEqual(1, len(endpoints))
         self.assertIsInstance(endpoints[0], endpoint.Endpoint)
-        self.assertEqual(endpoints[0].name, self.task_name)
+        self.assertEqual(self.task_name, endpoints[0].name)

     def test_derive_endpoints_from_string_modules(self):
         endpoints = worker.Worker._derive_endpoints(['taskflow.tests.utils'])

-        self.assertEqual(len(endpoints), self.endpoint_count)
+        self.assertEqual(self.endpoint_count, len(endpoints))

     def test_derive_endpoints_from_string_non_existent_module(self):
         tasks = ['non.existent.module']
@@ -179,9 +179,9 @@ class TestWorker(test.MockTestCase):
     def test_derive_endpoints_from_tasks(self):
         endpoints = worker.Worker._derive_endpoints([self.task_cls])

-        self.assertEqual(len(endpoints), 1)
+        self.assertEqual(1, len(endpoints))
         self.assertIsInstance(endpoints[0], endpoint.Endpoint)
-        self.assertEqual(endpoints[0].name, self.task_name)
+        self.assertEqual(self.task_name, endpoints[0].name)

     def test_derive_endpoints_from_non_task_class(self):
         self.assertRaises(TypeError, worker.Worker._derive_endpoints,
@@ -190,7 +190,7 @@ class TestWorker(test.MockTestCase):
     def test_derive_endpoints_from_modules(self):
         endpoints = worker.Worker._derive_endpoints([utils])

-        self.assertEqual(len(endpoints), self.endpoint_count)
+        self.assertEqual(self.endpoint_count, len(endpoints))

     def test_derive_endpoints_unexpected_task_type(self):
         self.assertRaises(TypeError, worker.Worker._derive_endpoints, [111])
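The assertEqual churn above is purely an (expected, actual) argument-order swap; testtools-formatted mismatch output labels the first argument as the expected value, so reversing the arguments produces misleading failure messages. A minimal illustration (names here are made up):

    import unittest


    class OrderDemo(unittest.TestCase):
        def test_order(self):
            observed = 1 + 1
            # With (expected, actual) ordering, a failure is reported
            # with 3 labeled as the expectation rather than the other
            # way around.
            self.assertEqual(3, observed)


    if __name__ == '__main__':
        unittest.main()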
taskflow/types/entity.py (new file, 43 lines)
@@ -0,0 +1,43 @@
+# -*- coding: utf-8 -*-
+
+#    Copyright (C) 2015 Rackspace Inc. All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+
+class Entity(object):
+    """Entity object that identifies some resource/item/other.
+
+    :ivar kind: **immutable** type/kind that identifies this
+                entity (typically unique to a library/application)
+    :type kind: string
+    :ivar name: **immutable** name that can be used to uniquely
+                identify this entity among many other entities
+    :type name: string
+    :ivar metadata: **immutable** dictionary of metadata that is
+                    associated with this entity (and typically
+                    has keys/values that further describe this
+                    entity)
+    :type metadata: dict
+    """
+    def __init__(self, kind, name, metadata):
+        self.kind = kind
+        self.name = name
+        self.metadata = metadata
+
+    def to_dict(self):
+        return {
+            'kind': self.kind,
+            'name': self.name,
+            'metadata': self.metadata
+        }
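The new Entity type is a plain value object; a minimal usage sketch (the values below are illustrative):

    from taskflow.types import entity

    # Identify a hypothetical worker resource by kind + name + metadata.
    w = entity.Entity('worker', 'worker-1', {'topic': 'test-topic'})
    print(w.to_dict())
    # {'kind': 'worker', 'name': 'worker-1', 'metadata': {'topic': 'test-topic'}}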
@@ -24,6 +24,7 @@ from oslo_utils import reflection
 import six

 from taskflow import exceptions as exc
+from taskflow.utils import iter_utils
 from taskflow.utils import mixins
 from taskflow.utils import schema_utils as su

@@ -40,23 +41,6 @@ def _copy_exc_info(exc_info):
     return (exc_type, copy.copy(exc_value), tb)


-def _fill_iter(it, desired_len, filler=None):
-    """Iterates over a provided iterator up to the desired length.
-
-    If the source iterator does not have enough values then the filler
-    value is yielded until the desired length is reached.
-    """
-    count = 0
-    for value in it:
-        if count >= desired_len:
-            return
-        yield value
-        count += 1
-    while count < desired_len:
-        yield filler
-        count += 1
-
-
 def _are_equal_exc_info_tuples(ei1, ei2):
     if ei1 == ei2:
         return True
@@ -86,7 +70,7 @@ class Failure(mixins.StrMixin):
     re-used later to re-raise, inspect, examine, log, print, serialize,
     deserialize...

-    One example where they are dependened upon is in the WBE engine. When a
+    One example where they are depended upon is in the WBE engine. When a
     remote worker throws an exception, the WBE based engine will receive that
     exception and desire to reraise it to the user/caller of the WBE based
     engine for appropriate handling (this matches the behavior of non-remote
@@ -444,7 +428,7 @@ class Failure(mixins.StrMixin):
             # what the twisted people have done, see for example
             # twisted-13.0.0/twisted/python/failure.py#L89 for how they
             # created a fake traceback object...
-            self._exc_info = tuple(_fill_iter(dct['exc_info'], 3))
+            self._exc_info = tuple(iter_utils.fill(dct['exc_info'], 3))
         else:
             self._exc_info = None
         causes = dct.get('causes')
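The deserialization path above now pads short exc_info tuples via the shared iter_utils.fill helper instead of the removed private one. A sketch of the round-trip it supports (assuming Failure.to_dict() mirrors the from_dict() shown here):

    from taskflow.types import failure

    try:
        raise ValueError("boom")
    except ValueError:
        f = failure.Failure()  # captures the active exc_info

    # Serialized forms may carry fewer than three exc_info entries;
    # fill() pads the tuple back to (type, value, traceback) shape
    # with None placeholders before it is stored.
    f2 = failure.Failure.from_dict(f.to_dict())
    assert f2.exception_str == f.exception_str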
@@ -21,8 +21,52 @@ import networkx as nx
 import six

+
+def _common_format(g, edge_notation):
+    lines = []
+    lines.append("Name: %s" % g.name)
+    lines.append("Type: %s" % type(g).__name__)
+    lines.append("Frozen: %s" % nx.is_frozen(g))
+    lines.append("Density: %0.3f" % nx.density(g))
+    lines.append("Nodes: %s" % g.number_of_nodes())
+    for n, n_data in g.nodes_iter(data=True):
+        if n_data:
+            lines.append("  - %s (%s)" % (n, n_data))
+        else:
+            lines.append("  - %s" % n)
+    lines.append("Edges: %s" % g.number_of_edges())
+    for (u, v, e_data) in g.edges_iter(data=True):
+        if e_data:
+            lines.append("  %s %s %s (%s)" % (u, edge_notation, v, e_data))
+        else:
+            lines.append("  %s %s %s" % (u, edge_notation, v))
+    return lines
+
+
+class Graph(nx.Graph):
+    """A graph subclass with useful utility functions."""
+
+    def __init__(self, data=None, name=''):
+        super(Graph, self).__init__(name=name, data=data)
+        self.frozen = False
+
+    def freeze(self):
+        """Freezes the graph so that no more mutations can occur."""
+        if not self.frozen:
+            nx.freeze(self)
+        return self
+
+    def export_to_dot(self):
+        """Exports the graph to a dot format (requires pydot library)."""
+        return nx.to_pydot(self).to_string()
+
+    def pformat(self):
+        """Pretty formats your graph into a string."""
+        return os.linesep.join(_common_format(self, "<->"))
+
+
 class DiGraph(nx.DiGraph):
     """A directed graph subclass with useful utility functions."""

     def __init__(self, data=None, name=''):
         super(DiGraph, self).__init__(name=name, data=data)
         self.frozen = False
@@ -56,20 +100,7 @@ class DiGraph(nx.DiGraph):
         details about your graph, including; name, type, frozeness, node count,
         nodes, edge count, edges, graph density and graph cycles (if any).
         """
-        lines = []
-        lines.append("Name: %s" % self.name)
-        lines.append("Type: %s" % type(self).__name__)
-        lines.append("Frozen: %s" % nx.is_frozen(self))
-        lines.append("Nodes: %s" % self.number_of_nodes())
-        for n in self.nodes_iter():
-            lines.append("  - %s" % n)
-        lines.append("Edges: %s" % self.number_of_edges())
-        for (u, v, e_data) in self.edges_iter(data=True):
-            if e_data:
-                lines.append("  %s -> %s (%s)" % (u, v, e_data))
-            else:
-                lines.append("  %s -> %s" % (u, v))
-        lines.append("Density: %0.3f" % nx.density(self))
+        lines = _common_format(self, "->")
         cycles = list(nx.cycles.recursive_simple_cycles(self))
         lines.append("Cycles: %s" % len(cycles))
         for cycle in cycles:
@@ -122,6 +153,18 @@ class DiGraph(nx.DiGraph):
             queue.append(pred_pred)


+class OrderedDiGraph(DiGraph):
+    """A directed graph subclass with useful utility functions.
+
+    This derivative retains node, edge, insertation and iteration
+    ordering (so that the iteration order matches the insertation
+    order).
+    """
+    node_dict_factory = collections.OrderedDict
+    adjlist_dict_factory = collections.OrderedDict
+    edge_attr_dict_factory = collections.OrderedDict
+
+
 def merge_graphs(graph, *graphs, **kwargs):
     """Merges a bunch of graphs into a new graph.

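A short sketch of the refactored formatting and the new ordered graph type (output details depend on the installed networkx 1.x release):

    from taskflow.types import graph

    g = graph.OrderedDiGraph(name='example')
    g.add_node('a')
    g.add_node('b')
    g.add_edge('a', 'b')
    # pformat() now routes through _common_format() and emits the
    # Name/Type/Frozen/Density/Nodes/Edges listing plus cycle info.
    print(g.pformat())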
@@ -72,9 +72,7 @@ class OrderedSet(collections.Set, collections.Hashable):

     def copy(self):
         """Return a shallow copy of a set."""
-        it = iter(self)
-        c = self._from_iterable(it)
-        return c
+        return self._from_iterable(iter(self))

     def intersection(self, *sets):
         """Return the intersection of two or more sets as a new set.
@@ -91,9 +89,7 @@ class OrderedSet(collections.Set, collections.Hashable):
                        break
            if matches == len(sets):
                yield value
-        it = absorb_it(sets)
-        c = self._from_iterable(it)
-        return c
+        return self._from_iterable(absorb_it(sets))

     def issuperset(self, other):
         """Report whether this set contains another set."""
@@ -123,14 +119,11 @@ class OrderedSet(collections.Set, collections.Hashable):
                        break
            if not seen:
                yield value
-        it = absorb_it(sets)
-        c = self._from_iterable(it)
-        return c
+        return self._from_iterable(absorb_it(sets))

     def union(self, *sets):
         """Return the union of sets as a new set.

         (i.e. all elements that are in either set.)
         """
-        it = itertools.chain(iter(self), *sets)
-        return self._from_iterable(it)
+        return self._from_iterable(itertools.chain(iter(self), *sets))
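The OrderedSet rewrites above are behavior-preserving simplifications; a usage sketch (module path assumed to be taskflow.types.sets):

    from taskflow.types import sets

    s = sets.OrderedSet([3, 1, 2])
    print(list(s.union([2, 4])))         # [3, 1, 2, 4] -- order retained
    print(list(s.intersection([1, 2])))  # [1, 2]
    print(list(s.copy()))                # [3, 1, 2]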
@@ -31,11 +31,11 @@ class Timeout(object):
     This object has the ability to be interrupted before the actual timeout
     is reached.
     """
-    def __init__(self, timeout):
+    def __init__(self, timeout, event_factory=threading.Event):
         if timeout < 0:
             raise ValueError("Timeout must be >= 0 and not %s" % (timeout))
         self._timeout = timeout
-        self._event = threading.Event()
+        self._event = event_factory()

     def interrupt(self):
         self._event.set()
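The new event_factory hook lets callers substitute another Event-like primitive (for example a green-thread implementation) without subclassing; a sketch (module path assumed to be taskflow.types.timing):

    import threading

    from taskflow.types import timing

    t = timing.Timeout(5.0)  # defaults to threading.Event
    t2 = timing.Timeout(5.0, event_factory=threading.Event)  # explicit factory
    t.interrupt()  # trips the event so any waiter wakes before the timeout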
@@ -22,6 +22,7 @@ import os

 import six

+from taskflow.types import graph
 from taskflow.utils import iter_utils
 from taskflow.utils import misc

@@ -36,8 +37,9 @@ class FrozenNode(Exception):
 class _DFSIter(object):
     """Depth first iterator (non-recursive) over the child nodes."""

-    def __init__(self, root, include_self=False):
+    def __init__(self, root, include_self=False, right_to_left=True):
         self.root = root
+        self.right_to_left = bool(right_to_left)
         self.include_self = bool(include_self)

     def __iter__(self):
@@ -45,20 +47,28 @@ class _DFSIter(object):
         if self.include_self:
             stack.append(self.root)
         else:
-            stack.extend(self.root.reverse_iter())
+            if self.right_to_left:
+                stack.extend(self.root.reverse_iter())
+            else:
+                # Traverse the left nodes first to the right nodes.
+                stack.extend(iter(self.root))
         while stack:
-            node = stack.pop()
+            # Visit the node.
+            node = stack.pop()
             yield node
-            # Traverse the left & right subtree.
-            stack.extend(node.reverse_iter())
+            if self.right_to_left:
+                stack.extend(node.reverse_iter())
+            else:
+                # Traverse the left nodes first to the right nodes.
+                stack.extend(iter(node))


 class _BFSIter(object):
     """Breadth first iterator (non-recursive) over the child nodes."""

-    def __init__(self, root, include_self=False):
+    def __init__(self, root, include_self=False, right_to_left=False):
         self.root = root
+        self.right_to_left = bool(right_to_left)
         self.include_self = bool(include_self)

     def __iter__(self):
@@ -66,13 +76,20 @@ class _BFSIter(object):
         if self.include_self:
             q.append(self.root)
         else:
-            q.extend(self.root.reverse_iter())
+            if self.right_to_left:
+                q.extend(iter(self.root))
+            else:
+                # Traverse the left nodes first to the right nodes.
+                q.extend(self.root.reverse_iter())
         while q:
-            node = q.popleft()
+            # Visit the node.
+            node = q.popleft()
             yield node
-            # Traverse the left & right subtree.
-            q.extend(node.reverse_iter())
+            if self.right_to_left:
+                q.extend(iter(node))
+            else:
+                # Traverse the left nodes first to the right nodes.
+                q.extend(node.reverse_iter())


 class Node(object):
@@ -184,6 +201,7 @@ class Node(object):
                              only_direct=only_direct,
                              include_self=include_self)

+    @misc.disallow_when_frozen(FrozenNode)
     def disassociate(self):
         """Removes this node from its parent (if any).

@@ -203,6 +221,7 @@ class Node(object):
                 occurrences += 1
         return occurrences

+    @misc.disallow_when_frozen(FrozenNode)
     def remove(self, item, only_direct=False, include_self=True):
         """Removes a item from this nodes children.

@@ -361,10 +380,31 @@ class Node(object):
             raise ValueError("%s is not contained in any child" % (item))
         return index_at

-    def dfs_iter(self, include_self=False):
+    def dfs_iter(self, include_self=False, right_to_left=True):
         """Depth first iteration (non-recursive) over the child nodes."""
-        return _DFSIter(self, include_self=include_self)
+        return _DFSIter(self,
+                        include_self=include_self,
+                        right_to_left=right_to_left)

-    def bfs_iter(self, include_self=False):
+    def bfs_iter(self, include_self=False, right_to_left=False):
         """Breadth first iteration (non-recursive) over the child nodes."""
-        return _BFSIter(self, include_self=include_self)
+        return _BFSIter(self,
+                        include_self=include_self,
+                        right_to_left=right_to_left)

+    def to_digraph(self):
+        """Converts this node + its children into a ordered directed graph.
+
+        The graph returned will have the same structure as the
+        this node and its children (and tree node metadata will be translated
+        into graph node metadata).
+
+        :returns: a directed graph
+        :rtype: :py:class:`taskflow.types.graph.OrderedDiGraph`
+        """
+        g = graph.OrderedDiGraph()
+        for node in self.bfs_iter(include_self=True, right_to_left=True):
+            g.add_node(node.item, attr_dict=node.metadata)
+            if node is not self:
+                g.add_edge(node.parent.item, node.item)
+        return g
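A sketch of the new traversal flag and to_digraph() (Node.add() is assumed from the existing tree API):

    from taskflow.types import tree

    root = tree.Node('root')
    root.add(tree.Node('a'))
    root.add(tree.Node('b'))

    # Left-to-right depth-first walk via the new right_to_left flag.
    print([n.item for n in root.dfs_iter(include_self=True,
                                         right_to_left=False)])

    g = root.to_digraph()  # OrderedDiGraph mirroring the tree structure
    print(g.pformat())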
@@ -16,12 +16,76 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.

 import collections
 import itertools

 import six
 from six.moves import range as compat_range

+
+def _ensure_iterable(func):
+
+    @six.wraps(func)
+    def wrapper(it, *args, **kwargs):
+        if not isinstance(it, collections.Iterable):
+            raise ValueError("Iterable expected, but '%s' is not"
+                             " iterable" % it)
+        return func(it, *args, **kwargs)
+
+    return wrapper
+
+
+@_ensure_iterable
+def fill(it, desired_len, filler=None):
+    """Iterates over a provided iterator up to the desired length.
+
+    If the source iterator does not have enough values then the filler
+    value is yielded until the desired length is reached.
+    """
+    if desired_len > 0:
+        count = 0
+        for value in it:
+            yield value
+            count += 1
+            if count >= desired_len:
+                return
+        while count < desired_len:
+            yield filler
+            count += 1
+
+
+@_ensure_iterable
+def count(it):
+    """Returns how many values in the iterator (depletes the iterator)."""
+    return sum(1 for _value in it)
+
+
+def unique_seen(it, *its):
+    """Yields unique values from iterator(s) (and retains order)."""
+
+    def _gen_it(all_its):
+        # NOTE(harlowja): Generation is delayed so that validation
+        # can happen before generation/iteration... (instead of
+        # during generation/iteration)
+        seen = set()
+        while all_its:
+            it = all_its.popleft()
+            for value in it:
+                if value not in seen:
+                    yield value
+                    seen.add(value)
+
+    all_its = collections.deque([it])
+    if its:
+        all_its.extend(its)
+    for it in all_its:
+        if not isinstance(it, collections.Iterable):
+            raise ValueError("Iterable expected, but '%s' is"
+                             " not iterable" % it)
+    return _gen_it(all_its)
+
+
+@_ensure_iterable
 def find_first_match(it, matcher, not_found_value=None):
     """Searches iterator for first value that matcher callback returns true."""
     for value in it:
@@ -30,6 +94,7 @@ def find_first_match(it, matcher, not_found_value=None):
     return not_found_value


+@_ensure_iterable
 def while_is_not(it, stop_value):
     """Yields given values from iterator until stop value is passed.

@@ -40,3 +105,17 @@ def while_is_not(it, stop_value):
         yield value
         if value is stop_value:
             break
+
+
+def iter_forever(limit):
+    """Yields values from iterator until a limit is reached.
+
+    if limit is negative, we iterate forever.
+    """
+    if limit < 0:
+        i = itertools.count()
+        while True:
+            yield next(i)
+    else:
+        for i in compat_range(0, limit):
+            yield i
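Quick examples of the helpers added above:

    from taskflow.utils import iter_utils

    print(list(iter_utils.fill([1, 2], 4)))       # [1, 2, None, None]
    print(list(iter_utils.fill([1, 2, 3], 2)))    # [1, 2]
    print(iter_utils.count(iter('abc')))          # 3 (consumes the iterator)
    print(list(iter_utils.unique_seen([1, 2, 2], [3, 1])))  # [1, 2, 3]
    print(list(iter_utils.iter_forever(3)))       # [0, 1, 2]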
@@ -22,6 +22,7 @@ import errno
 import inspect
 import os
 import re
+import socket
 import sys
 import threading
 import types
@@ -34,7 +35,6 @@ from oslo_utils import importutils
 from oslo_utils import netutils
 from oslo_utils import reflection
 import six
-from six.moves import map as compat_map
 from six.moves import range as compat_range

 from taskflow.types import failure
@@ -42,6 +42,7 @@ from taskflow.types import notifier
 from taskflow.utils import deprecation


+UNKNOWN_HOSTNAME = "<unknown>"
 NUMERIC_TYPES = six.integer_types + (float,)

 # NOTE(imelnikov): regular expression to get scheme from URI,
@@ -60,6 +61,26 @@ class StrEnum(str, enum.Enum):
         return super(StrEnum, cls).__new__(cls, *args, **kwargs)


+class StringIO(six.StringIO):
+    """String buffer with some small additions."""
+
+    def write_nl(self, value, linesep=os.linesep):
+        self.write(value)
+        self.write(linesep)
+
+
+def get_hostname(unknown_hostname=UNKNOWN_HOSTNAME):
+    """Gets the machines hostname; if not able to returns an invalid one."""
+    try:
+        hostname = socket.getfqdn()
+        if not hostname:
+            return unknown_hostname
+        else:
+            return hostname
+    except socket.error:
+        return unknown_hostname
+
+
 def match_type(obj, matchers):
     """Matches a given object using the given matchers list/iterable.

@@ -431,18 +452,6 @@ def sequence_minus(seq1, seq2):
     return result


-def get_duplicate_keys(iterable, key=None):
-    if key is not None:
-        iterable = compat_map(key, iterable)
-    keys = set()
-    duplicates = set()
-    for item in iterable:
-        if item in keys:
-            duplicates.add(item)
-        keys.add(item)
-    return duplicates
-
-
 class ExponentialBackoff(object):
     """An iterable object that will yield back an exponential delay sequence.

@@ -573,3 +582,11 @@ def is_iterable(obj):
     """
     return (not isinstance(obj, six.string_types) and
             isinstance(obj, collections.Iterable))
+
+
+def ensure_dict(obj):
+    """Copy an existing dictionary or default to empty dict...."""
+    if not obj:
+        return {}
+    # default to a shallow copy to avoid most ownership issues
+    return dict(obj)
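Sketches of the new misc helpers:

    from taskflow.utils import misc

    buf = misc.StringIO()
    buf.write_nl("hello")  # write + platform line separator
    buf.write_nl("world")

    print(misc.get_hostname())         # fqdn, or "<unknown>" on failure
    print(misc.ensure_dict(None))      # {}
    print(misc.ensure_dict({'a': 1}))  # shallow copy: {'a': 1}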
@@ -15,6 +15,7 @@
 #    under the License.

 import collections
+import multiprocessing
 import threading

 import six
@@ -35,6 +36,17 @@ def get_ident():
     return _thread.get_ident()


+def get_optimal_thread_count(default=2):
+    """Try to guess optimal thread count for current system."""
+    try:
+        return multiprocessing.cpu_count() + 1
+    except NotImplementedError:
+        # NOTE(harlowja): apparently may raise so in this case we will
+        # just setup two threads since it's hard to know what else we
+        # should do in this situation.
+        return default
+
+
 def daemon_thread(target, *args, **kwargs):
     """Makes a daemon thread that calls the given target when started."""
     thread = threading.Thread(target=target, args=args, kwargs=kwargs)
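get_optimal_thread_count() is a sizing heuristic (cpu_count() + 1, falling back to the default when the count is unavailable); a sketch pairing it with a standard executor (the executor choice is illustrative):

    from concurrent import futures  # 'futures' backport on python 2

    from taskflow.utils import threading_utils

    workers = threading_utils.get_optimal_thread_count()
    executor = futures.ThreadPoolExecutor(max_workers=workers)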
@@ -32,4 +32,4 @@ eventlet>=0.17.4

 # Docs build jobs need these packages.
 sphinx!=1.2.0,!=1.3b1,<1.3,>=1.1.2
-oslosphinx>=2.5.0 # Apache-2.0
+oslosphinx!=3.4.0,>=2.5.0 # Apache-2.0
@@ -12,7 +12,7 @@ if [ ! -d "$PWD/.diagram-tools" ]; then
     git clone "https://github.com/vidarh/diagram-tools.git" "$PWD/.diagram-tools"
 fi

-script_dir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
+script_dir=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
 img_dir="$script_dir/../doc/source/img"

 echo "---- Updating task state diagram ----"
tox.ini (14 changed lines)
@@ -1,21 +1,15 @@
 [tox]
 minversion = 1.6
-skipsdist = True
 envlist = cover,
     docs,
     pep8,
-    py26,
     py27,
     py34,
     pylint,
     update-states

 [testenv]
 usedevelop = True
-install_command = pip install {opts} {packages}
-setenv = VIRTUAL_ENV={envdir}
-deps = -r{toxinidir}/requirements.txt
-       -r{toxinidir}/test-requirements.txt
+deps = -r{toxinidir}/test-requirements.txt
 commands = python setup.py testr --slowest --testr-args='{posargs}'

 [testenv:docs]
@@ -49,7 +43,7 @@ commands = {posargs}

 [flake8]
 builtins = _
-exclude = .venv,.tox,dist,doc,./taskflow/openstack/common,*egg,.git,build,tools
+exclude = .venv,.tox,dist,doc,*egg,.git,build,tools

 [hacking]
 import_exceptions = six.moves
@@ -57,7 +51,7 @@ import_exceptions = six.moves
     unittest.mock

 [testenv:py27]
-commands =
+commands =
     python setup.py testr --slowest --testr-args='{posargs}'
     sphinx-build -b doctest doc/source doc/build
-    doc8 doc/source
+    doc8 --ignore-path "doc/source/history.rst" doc/source