Persistence cleanup part one

- Convert the various functions that take a task detail into
  ones that take atom details (since that is now the generic
  type they should take).
- Don't expose the detail type strings as part of the atom
  detail API; leave those as private hidden strings and provide
  conversion functions from string<->class instead (see the
  first sketch after this list).
- Have the logbook objects contain the following new methods
  to reduce the dependence on persistence_utils to do the same
  (see the second sketch after this list).
  - to_dict(), which converts the current object into a dict
  - from_dict(), which converts the provided dict into an object
  - merge(), which merges an incoming object's data with the
    current object's
- Have the persistence backends + storage + action engine use these
  new methods instead of their current usage.
- Don't compare to logbook.RETRY_DETAIL or logbook.TASK_DETAIL; since
  python has the isinstance function, just use it (ideally we should
  fix the code so that this isn't even needed, as usage of isinstance
  means something is not designed/structured right).
- In storage tests we can't assume that failures will be non-lossy,
  since under certain backends information about the internally held
  exc_info is lost when a failure is stored; take this into account
  when testing by using matches() where applicable (see the last
  sketch after this list).
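
A minimal sketch of the string<->class conversion helpers (helper and
class names are taken from the diffs below; the uuid value is only
illustrative):

    from taskflow.persistence import logbook

    detail = logbook.TaskDetail('boot-vm', uuid='fake-uuid')
    type_str = logbook.atom_detail_type(detail)   # -> private type string
    detail_cls = logbook.atom_detail_class(type_str)  # string -> class
    assert detail_cls is logbook.TaskDetail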
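
A sketch of the new logbook object methods (signatures as they appear in
the LogBook changes below):

    from taskflow.persistence import logbook

    book = logbook.LogBook('my-book')
    data = book.to_dict(marshal_time=True)  # plain dict; timestamps marshalled
    clone = logbook.LogBook.from_dict(data, unmarshal_time=True)
    # merge() only merges metadata and timestamps, not the contained
    # flow details.
    clone.merge(book, deep_copy=True)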
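
And a hedged sketch of the lossy failure comparison in tests (this assumes
misc.Failure captures the active exception when constructed inside an
except block, and that matches() ignores the unpersistable exc_info):

    from taskflow.utils import misc

    try:
        raise RuntimeError('woot')
    except RuntimeError:
        failure = misc.Failure()  # captures the active exc_info

    # A dict round-trip (what a backend save/load does) drops exc_info, so
    # full equality can fail; matches() compares only the recoverable parts.
    stored = misc.Failure.from_dict(failure.to_dict())
    assert failure.matches(stored)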

Change-Id: Ie8a274cfd4cb4e64e87c355dc99d466d74a4e82c
Joshua Harlow
2014-03-18 15:59:58 -07:00
parent 690047d7b7
commit 58a5a0932d
27 changed files with 960 additions and 863 deletions

View File

@@ -100,7 +100,7 @@ class GraphAnalyzer(object):
         for prev_task in self._graph.predecessors(task):
             task_names.append(prev_task.name)
-        task_states = self._storage.get_tasks_states(task_names)
+        task_states = self._storage.get_atoms_states(task_names)
         return all(state == st.SUCCESS and intention == st.EXECUTE
                    for state, intention in six.itervalues(task_states))
@@ -117,7 +117,7 @@ class GraphAnalyzer(object):
         for prev_task in self._graph.successors(task):
             task_names.append(prev_task.name)
-        task_states = self._storage.get_tasks_states(task_names)
+        task_states = self._storage.get_atoms_states(task_names)
         return all(state in (st.PENDING, st.REVERTED)
                    for state, intention in six.itervalues(task_states))
@@ -151,4 +151,4 @@ class GraphAnalyzer(object):
         return True

     def get_state(self, node):
-        return self._storage.get_task_state(node.name)
+        return self._storage.get_atom_state(node.name)

View File

@@ -37,7 +37,7 @@ class RetryAction(object):
         return kwargs

     def change_state(self, retry, state, result=None):
-        old_state = self._storage.get_task_state(retry.name)
+        old_state = self._storage.get_atom_state(retry.name)
         if old_state == state:
             return state != states.PENDING
         if state in SAVE_RESULT_STATES:
@@ -45,9 +45,8 @@ class RetryAction(object):
         elif state == states.REVERTED:
             self._storage.cleanup_retry_history(retry.name, state)
         else:
-            self._storage.set_task_state(retry.name, state)
-
-        retry_uuid = self._storage.get_task_uuid(retry.name)
+            self._storage.set_atom_state(retry.name, state)
+        retry_uuid = self._storage.get_atom_uuid(retry.name)
         details = dict(retry_name=retry.name,
                        retry_uuid=retry_uuid,
                        result=result)

View File

@@ -32,17 +32,16 @@ class TaskAction(object):
         self._notifier = notifier

     def change_state(self, task, state, result=None, progress=None):
-        old_state = self._storage.get_task_state(task.name)
+        old_state = self._storage.get_atom_state(task.name)
         if old_state == state:
             return state != states.PENDING
         if state in SAVE_RESULT_STATES:
             self._storage.save(task.name, result, state)
         else:
-            self._storage.set_task_state(task.name, state)
+            self._storage.set_atom_state(task.name, state)
         if progress is not None:
             self._storage.set_task_progress(task.name, progress)
-
-        task_uuid = self._storage.get_task_uuid(task.name)
+        task_uuid = self._storage.get_atom_uuid(task.name)
         details = dict(task_name=task.name,
                        task_uuid=task_uuid,
                        result=result)
@@ -65,7 +64,7 @@ class TaskAction(object):
         if not self.change_state(task, states.RUNNING, progress=0.0):
             return
         kwargs = self._storage.fetch_mapped_args(task.rebind)
-        task_uuid = self._storage.get_task_uuid(task.name)
+        task_uuid = self._storage.get_atom_uuid(task.name)
         return self._task_executor.execute_task(task, task_uuid, kwargs,
                                                 self._on_update_progress)
@@ -80,7 +79,7 @@ class TaskAction(object):
         if not self.change_state(task, states.REVERTING, progress=0.0):
             return
         kwargs = self._storage.fetch_mapped_args(task.rebind)
-        task_uuid = self._storage.get_task_uuid(task.name)
+        task_uuid = self._storage.get_atom_uuid(task.name)
         task_result = self._storage.get(task.name)
         failures = self._storage.get_failures()
         future = self._task_executor.revert_task(task, task_uuid, kwargs,

View File

@@ -22,7 +22,6 @@ from concurrent import futures
 from taskflow.engines.action_engine import executor
 from taskflow.utils import misc
-from taskflow.utils import persistence_utils as pu
 from taskflow.utils import reflection

 # NOTE(skudriashev): This is protocol events, not related to the task states.
@@ -146,14 +145,14 @@ class Request(Message):
         if 'result' in self._kwargs:
             result = self._kwargs['result']
             if isinstance(result, misc.Failure):
-                request['result'] = ('failure', pu.failure_to_dict(result))
+                request['result'] = ('failure', result.to_dict())
             else:
                 request['result'] = ('success', result)
         if 'failures' in self._kwargs:
             failures = self._kwargs['failures']
             request['failures'] = {}
             for task, failure in six.iteritems(failures):
-                request['failures'][task] = pu.failure_to_dict(failure)
+                request['failures'][task] = failure.to_dict()
         return request

     def set_result(self, result):
@@ -183,7 +182,7 @@ class Response(Message):
         state = data['state']
         data = data['data']
         if state == FAILURE and 'result' in data:
-            data['result'] = pu.failure_from_dict(data['result'])
+            data['result'] = misc.Failure.from_dict(data['result'])
         return cls(state, **data)

     @property

View File

@@ -22,7 +22,6 @@ from kombu import exceptions as kombu_exc
 from taskflow.engines.worker_based import protocol as pr
 from taskflow.engines.worker_based import proxy
 from taskflow.utils import misc
-from taskflow.utils import persistence_utils as pu

 LOG = logging.getLogger(__name__)
@@ -86,13 +85,13 @@ class Server(object):
         if result is not None:
             data_type, data = result
             if data_type == 'failure':
-                action_args['result'] = pu.failure_from_dict(data)
+                action_args['result'] = misc.Failure.from_dict(data)
             else:
                 action_args['result'] = data
         if failures is not None:
             action_args['failures'] = {}
             for k, v in failures.items():
-                action_args['failures'][k] = pu.failure_from_dict(v)
+                action_args['failures'][k] = misc.Failure.from_dict(v)
         return task_cls, action, action_args

     @staticmethod
@@ -164,7 +163,7 @@ class Server(object):
         except ValueError:
             with misc.capture_failure() as failure:
                 LOG.exception("Failed to parse request")
-                reply_callback(result=pu.failure_to_dict(failure))
+                reply_callback(result=failure.to_dict())
             return

         # get task endpoint
@@ -174,7 +173,7 @@ class Server(object):
             with misc.capture_failure() as failure:
                 LOG.exception("The '%s' task endpoint does not exist",
                               task_cls)
-                reply_callback(result=pu.failure_to_dict(failure))
+                reply_callback(result=failure.to_dict())
             return
         else:
             reply_callback(state=pr.RUNNING)
@@ -185,10 +184,10 @@ class Server(object):
         except Exception:
             with misc.capture_failure() as failure:
                 LOG.exception("The %s task execution failed", endpoint)
-                reply_callback(result=pu.failure_to_dict(failure))
+                reply_callback(result=failure.to_dict())
         else:
             if isinstance(result, misc.Failure):
-                reply_callback(result=pu.failure_to_dict(result))
+                reply_callback(result=result.to_dict())
             else:
                 reply_callback(state=pr.SUCCESS, result=result)

View File

@@ -48,7 +48,7 @@ class TimingListener(base.ListenerBase):
         }
         try:
             # Don't let storage failures throw exceptions in a listener method.
-            self._engine.storage.update_task_metadata(task_name, meta_update)
+            self._engine.storage.update_atom_metadata(task_name, meta_update)
         except excp.StorageError:
             LOG.warn("Failure to store duration update %s for task %s",
                      meta_update, task_name, exc_info=True)

View File

@@ -18,6 +18,8 @@ import abc

 import six

+from taskflow.persistence import logbook
+

 @six.add_metaclass(abc.ABCMeta)
 class Backend(object):
@@ -75,11 +77,11 @@ class Connection(object):
         pass

     @abc.abstractmethod
-    def update_task_details(self, task_detail):
-        """Updates a given task details and returns the updated version.
+    def update_atom_details(self, atom_detail):
+        """Updates a given atom details and returns the updated version.

         NOTE(harlowja): the details that is to be updated must already have
-        been created by saving a flow details with the given task detail inside
+        been created by saving a flow details with the given atom detail inside
         of it.
         """
         pass
@@ -113,3 +115,10 @@
     def get_logbooks(self):
         """Return an iterable of logbook objects."""
         pass
+
+
+def _format_atom(atom_detail):
+    return {
+        'atom': atom_detail.to_dict(),
+        'type': logbook.atom_detail_type(atom_detail),
+    }

View File

@@ -25,9 +25,9 @@ import six
 from taskflow import exceptions as exc
 from taskflow.openstack.common import jsonutils
 from taskflow.persistence.backends import base
+from taskflow.persistence import logbook
 from taskflow.utils import lock_utils
 from taskflow.utils import misc
-from taskflow.utils import persistence_utils as p_utils

 LOG = logging.getLogger(__name__)
@@ -64,7 +64,7 @@ class Connection(base.Connection):
         self._backend = backend
         self._file_cache = self._backend._file_cache
         self._flow_path = os.path.join(self._backend.base_path, 'flows')
-        self._task_path = os.path.join(self._backend.base_path, 'tasks')
+        self._atom_path = os.path.join(self._backend.base_path, 'atoms')
         self._book_path = os.path.join(self._backend.base_path, 'books')

     def validate(self):
@@ -73,7 +73,7 @@ class Connection(base.Connection):
             self._backend.base_path,
             self._backend.lock_path,
             self._flow_path,
-            self._task_path,
+            self._atom_path,
             self._book_path,
         ]
         for p in paths:
@@ -141,37 +141,38 @@
     def close(self):
         pass

-    def _save_task_details(self, task_detail, ignore_missing):
-        # See if we have an existing task detail to merge with.
-        e_td = None
+    def _save_atom_details(self, atom_detail, ignore_missing):
+        # See if we have an existing atom detail to merge with.
+        e_ad = None
         try:
-            e_td = self._get_task_details(task_detail.uuid, lock=False)
+            e_ad = self._get_atom_details(atom_detail.uuid, lock=False)
         except EnvironmentError:
             if not ignore_missing:
-                raise exc.NotFound("No task details found with id: %s"
-                                   % task_detail.uuid)
-        if e_td is not None:
-            task_detail = p_utils.task_details_merge(e_td, task_detail)
-        td_path = os.path.join(self._task_path, task_detail.uuid)
-        td_data = p_utils.format_task_detail(task_detail)
-        self._write_to(td_path, jsonutils.dumps(td_data))
-        return task_detail
+                raise exc.NotFound("No atom details found with id: %s"
+                                   % atom_detail.uuid)
+        if e_ad is not None:
+            atom_detail = e_ad.merge(atom_detail)
+        ad_path = os.path.join(self._atom_path, atom_detail.uuid)
+        ad_data = base._format_atom(atom_detail)
+        self._write_to(ad_path, jsonutils.dumps(ad_data))
+        return atom_detail

-    def update_task_details(self, task_detail):
-        return self._run_with_process_lock("task",
-                                           self._save_task_details,
-                                           task_detail,
-                                           ignore_missing=False)
+    def update_atom_details(self, atom_detail):
+        return self._run_with_process_lock("atom",
+                                           self._save_atom_details,
+                                           atom_detail,
+                                           ignore_missing=False)

-    def _get_task_details(self, uuid, lock=True):
+    def _get_atom_details(self, uuid, lock=True):
         def _get():
-            td_path = os.path.join(self._task_path, uuid)
-            td_data = misc.decode_json(self._read_from(td_path))
-            return p_utils.unformat_task_detail(uuid, td_data)
+            ad_path = os.path.join(self._atom_path, uuid)
+            ad_data = misc.decode_json(self._read_from(ad_path))
+            ad_cls = logbook.atom_detail_class(ad_data['type'])
+            return ad_cls.from_dict(ad_data['atom'])
         if lock:
-            return self._run_with_process_lock('task', _get)
+            return self._run_with_process_lock('atom', _get)
         else:
             return _get()
@@ -181,17 +182,17 @@ class Connection(base.Connection):
             fd_path = os.path.join(self._flow_path, uuid)
             meta_path = os.path.join(fd_path, 'metadata')
             meta = misc.decode_json(self._read_from(meta_path))
-            fd = p_utils.unformat_flow_detail(uuid, meta)
-            td_to_load = []
-            td_path = os.path.join(fd_path, 'tasks')
+            fd = logbook.FlowDetail.from_dict(meta)
+            ad_to_load = []
+            ad_path = os.path.join(fd_path, 'atoms')
             try:
-                td_to_load = [f for f in os.listdir(td_path)
-                              if os.path.islink(os.path.join(td_path, f))]
+                ad_to_load = [f for f in os.listdir(ad_path)
+                              if os.path.islink(os.path.join(ad_path, f))]
             except EnvironmentError as e:
                 if e.errno != errno.ENOENT:
                     raise
-            for t_uuid in td_to_load:
-                fd.add(self._get_task_details(t_uuid))
+            for ad_uuid in ad_to_load:
+                fd.add(self._get_atom_details(ad_uuid))
             return fd

         if lock:
@@ -199,13 +200,13 @@ class Connection(base.Connection):
         else:
             return _get()

-    def _save_tasks_and_link(self, task_details, local_task_path):
-        for task_detail in task_details:
-            self._save_task_details(task_detail, ignore_missing=True)
-            src_td_path = os.path.join(self._task_path, task_detail.uuid)
-            target_td_path = os.path.join(local_task_path, task_detail.uuid)
+    def _save_atoms_and_link(self, atom_details, local_atom_path):
+        for atom_detail in atom_details:
+            self._save_atom_details(atom_detail, ignore_missing=True)
+            src_ad_path = os.path.join(self._atom_path, atom_detail.uuid)
+            target_ad_path = os.path.join(local_atom_path, atom_detail.uuid)
             try:
-                os.symlink(src_td_path, target_td_path)
+                os.symlink(src_ad_path, target_ad_path)
             except EnvironmentError as e:
                 if e.errno != errno.EEXIST:
                     raise
@@ -220,22 +221,21 @@ class Connection(base.Connection):
                 raise exc.NotFound("No flow details found with id: %s"
                                    % flow_detail.uuid)
         if e_fd is not None:
-            e_fd = p_utils.flow_details_merge(e_fd, flow_detail)
-            for td in flow_detail:
-                if e_fd.find(td.uuid) is None:
-                    e_fd.add(td)
+            e_fd = e_fd.merge(flow_detail)
+            for ad in flow_detail:
+                if e_fd.find(ad.uuid) is None:
+                    e_fd.add(ad)
             flow_detail = e_fd
         flow_path = os.path.join(self._flow_path, flow_detail.uuid)
         misc.ensure_tree(flow_path)
-        self._write_to(
-            os.path.join(flow_path, 'metadata'),
-            jsonutils.dumps(p_utils.format_flow_detail(flow_detail)))
+        self._write_to(os.path.join(flow_path, 'metadata'),
+                       jsonutils.dumps(flow_detail.to_dict()))
         if len(flow_detail):
-            task_path = os.path.join(flow_path, 'tasks')
-            misc.ensure_tree(task_path)
-            self._run_with_process_lock('task',
-                                        self._save_tasks_and_link,
-                                        list(flow_detail), task_path)
+            atom_path = os.path.join(flow_path, 'atoms')
+            misc.ensure_tree(atom_path)
+            self._run_with_process_lock('atom',
+                                        self._save_atoms_and_link,
+                                        list(flow_detail), atom_path)
         return flow_detail

     def update_flow_details(self, flow_detail):
@@ -263,18 +263,15 @@ class Connection(base.Connection):
             except exc.NotFound:
                 pass
         if e_lb is not None:
-            e_lb = p_utils.logbook_merge(e_lb, book)
+            e_lb = e_lb.merge(book)
             for fd in book:
                 if e_lb.find(fd.uuid) is None:
                     e_lb.add(fd)
             book = e_lb
         book_path = os.path.join(self._book_path, book.uuid)
         misc.ensure_tree(book_path)
-        created_at = None
-        if e_lb is not None:
-            created_at = e_lb.created_at
-        self._write_to(os.path.join(book_path, 'metadata'), jsonutils.dumps(
-            p_utils.format_logbook(book, created_at=created_at)))
+        self._write_to(os.path.join(book_path, 'metadata'),
+                       jsonutils.dumps(book.to_dict(marshal_time=True)))
         if len(book):
             flow_path = os.path.join(book_path, 'flows')
             misc.ensure_tree(flow_path)
@@ -290,7 +287,7 @@ class Connection(base.Connection):
     def upgrade(self):
         def _step_create():
-            for path in (self._book_path, self._flow_path, self._task_path):
+            for path in (self._book_path, self._flow_path, self._atom_path):
                 try:
                     misc.ensure_tree(path)
                 except EnvironmentError as e:
@@ -310,15 +307,15 @@ class Connection(base.Connection):
     def clear_all(self):
         def _step_clear():
-            for d in (self._book_path, self._flow_path, self._task_path):
+            for d in (self._book_path, self._flow_path, self._atom_path):
                 if os.path.isdir(d):
                     shutil.rmtree(d)

-        def _step_task():
-            self._run_with_process_lock("task", _step_clear)
+        def _step_atom():
+            self._run_with_process_lock("atom", _step_clear)

         def _step_flow():
-            self._run_with_process_lock("flow", _step_task)
+            self._run_with_process_lock("flow", _step_atom)

         def _step_book():
             self._run_with_process_lock("book", _step_flow)
@@ -328,21 +325,21 @@ class Connection(base.Connection):
     def destroy_logbook(self, book_uuid):
-        def _destroy_tasks(task_details):
-            for task_detail in task_details:
-                task_path = os.path.join(self._task_path, task_detail.uuid)
+        def _destroy_atoms(atom_details):
+            for atom_detail in atom_details:
+                atom_path = os.path.join(self._atom_path, atom_detail.uuid)
                 try:
-                    shutil.rmtree(task_path)
+                    shutil.rmtree(atom_path)
                 except EnvironmentError as e:
                     if e.errno != errno.ENOENT:
-                        raise exc.StorageFailure("Unable to remove task"
-                                                 " directory %s" % task_path,
+                        raise exc.StorageFailure("Unable to remove atom"
+                                                 " directory %s" % atom_path,
                                                  e)

         def _destroy_flows(flow_details):
             for flow_detail in flow_details:
                 flow_path = os.path.join(self._flow_path, flow_detail.uuid)
-                self._run_with_process_lock("task", _destroy_tasks,
+                self._run_with_process_lock("atom", _destroy_atoms,
                                             list(flow_detail))
                 try:
                     shutil.rmtree(flow_path)
@@ -376,7 +373,7 @@ class Connection(base.Connection):
                 raise exc.NotFound("No logbook found with id: %s" % book_uuid)
             else:
                 raise
-        lb = p_utils.unformat_logbook(book_uuid, meta)
+        lb = logbook.LogBook.from_dict(meta, unmarshal_time=True)
         fd_path = os.path.join(book_path, 'flows')
         fd_uuids = []
         try:

View File

@@ -19,11 +19,11 @@
 import logging

+import six
+
 from taskflow import exceptions as exc
-from taskflow.openstack.common import timeutils
 from taskflow.persistence.backends import base
 from taskflow.persistence import logbook
-from taskflow.utils import persistence_utils as p_utils

 LOG = logging.getLogger(__name__)
@@ -36,7 +36,7 @@ class MemoryBackend(base.Backend):
         super(MemoryBackend, self).__init__(conf)
         self._log_books = {}
         self._flow_details = {}
-        self._task_details = {}
+        self._atom_details = {}

     @property
     def log_books(self):
@@ -47,8 +47,8 @@ class MemoryBackend(base.Backend):
         return self._flow_details

     @property
-    def task_details(self):
-        return self._task_details
+    def atom_details(self):
+        return self._atom_details

     def get_connection(self):
         return Connection(self)
@@ -76,8 +76,8 @@ class Connection(base.Connection):
     def clear_all(self):
         count = 0
-        for uuid in list(self.backend.log_books.keys()):
-            self.destroy_logbook(uuid)
+        for book_uuid in list(six.iterkeys(self.backend.log_books)):
+            self.destroy_logbook(book_uuid)
             count += 1
         return count
@@ -87,33 +87,27 @@ class Connection(base.Connection):
             lb = self.backend.log_books.pop(book_uuid)
             for fd in lb:
                 self.backend.flow_details.pop(fd.uuid, None)
-                for td in fd:
-                    self.backend.task_details.pop(td.uuid, None)
+                for ad in fd:
+                    self.backend.atom_details.pop(ad.uuid, None)
         except KeyError:
             raise exc.NotFound("No logbook found with id: %s" % book_uuid)

-    def update_task_details(self, task_detail):
+    def update_atom_details(self, atom_detail):
         try:
-            e_td = self.backend.task_details[task_detail.uuid]
+            e_ad = self.backend.atom_details[atom_detail.uuid]
         except KeyError:
-            raise exc.NotFound("No task details found with id: %s"
-                               % task_detail.uuid)
-        return p_utils.task_details_merge(e_td, task_detail, deep_copy=True)
+            raise exc.NotFound("No atom details found with id: %s"
+                               % atom_detail.uuid)
+        return e_ad.merge(atom_detail, deep_copy=True)

-    def _save_flowdetail_tasks(self, e_fd, flow_detail):
-        for task_detail in flow_detail:
-            e_td = e_fd.find(task_detail.uuid)
-            if e_td is None:
-                if task_detail.atom_type == logbook.TASK_DETAIL:
-                    e_td = logbook.TaskDetail(name=task_detail.name,
-                                              uuid=task_detail.uuid)
-                else:
-                    e_td = logbook.RetryDetail(name=task_detail.name,
-                                               uuid=task_detail.uuid)
-                e_fd.add(e_td)
-                if task_detail.uuid not in self.backend.task_details:
-                    self.backend.task_details[task_detail.uuid] = e_td
-            p_utils.task_details_merge(e_td, task_detail, deep_copy=True)
+    def _save_flowdetail_atoms(self, e_fd, flow_detail):
+        for atom_detail in flow_detail:
+            e_ad = e_fd.find(atom_detail.uuid)
+            if e_ad is None:
+                e_fd.add(atom_detail)
+                self.backend.atom_details[atom_detail.uuid] = atom_detail
+            else:
+                e_ad.merge(atom_detail, deep_copy=True)

     def update_flow_details(self, flow_detail):
         try:
@@ -121,8 +115,8 @@ class Connection(base.Connection):
         except KeyError:
             raise exc.NotFound("No flow details found with id: %s"
                                % flow_detail.uuid)
-        p_utils.flow_details_merge(e_fd, flow_detail, deep_copy=True)
-        self._save_flowdetail_tasks(e_fd, flow_detail)
+        e_fd.merge(flow_detail, deep_copy=True)
+        self._save_flowdetail_atoms(e_fd, flow_detail)
         return e_fd

     def save_logbook(self, book):
@@ -130,28 +124,21 @@ class Connection(base.Connection):
         try:
             e_lb = self.backend.log_books[book.uuid]
         except KeyError:
-            e_lb = logbook.LogBook(book.name, book.uuid,
-                                   updated_at=book.updated_at,
-                                   created_at=timeutils.utcnow())
+            e_lb = logbook.LogBook(book.name, uuid=book.uuid)
             self.backend.log_books[e_lb.uuid] = e_lb
-        else:
-            # TODO(harlowja): figure out a better way to set this property
-            # without actually setting a 'private' property.
-            e_lb._updated_at = timeutils.utcnow()
-        p_utils.logbook_merge(e_lb, book, deep_copy=True)
+        e_lb.merge(book, deep_copy=True)
         # Add anything in to the new logbook that isn't already in the existing
         # logbook.
         for flow_detail in book:
             try:
                 e_fd = self.backend.flow_details[flow_detail.uuid]
             except KeyError:
-                e_fd = logbook.FlowDetail(name=flow_detail.name,
-                                          uuid=flow_detail.uuid)
-                e_lb.add(flow_detail)
-                self.backend.flow_details[flow_detail.uuid] = e_fd
-            p_utils.flow_details_merge(e_fd, flow_detail, deep_copy=True)
-            self._save_flowdetail_tasks(e_fd, flow_detail)
+                e_fd = logbook.FlowDetail(flow_detail.name, flow_detail.uuid)
+                e_lb.add(e_fd)
+                self.backend.flow_details[e_fd.uuid] = e_fd
+            e_fd.merge(flow_detail, deep_copy=True)
+            self._save_flowdetail_atoms(e_fd, flow_detail)
         return e_lb

     def get_logbook(self, book_uuid):
@@ -160,9 +147,6 @@ class Connection(base.Connection):
         except KeyError:
             raise exc.NotFound("No logbook found with id: %s" % book_uuid)

-    def _get_logbooks(self):
-        return list(self.backend.log_books.values())
-
     def get_logbooks(self):
-        for lb in self._get_logbooks():
+        for lb in list(six.itervalues(self.backend.log_books)):
             yield lb

View File

@@ -37,7 +37,6 @@ from taskflow.persistence.backends.sqlalchemy import models
 from taskflow.persistence import logbook
 from taskflow.utils import eventlet_utils
 from taskflow.utils import misc
-from taskflow.utils import persistence_utils

 LOG = logging.getLogger(__name__)
@@ -347,17 +346,17 @@ class Connection(base.Connection):
     def clear_all(self):
         return self._run_in_session(self._clear_all)

-    def _update_task_details(self, session, td):
-        # Must already exist since a tasks details has a strong connection to
-        # a flow details, and tasks details can not be saved on there own since
-        # they *must* have a connection to an existing flow details.
-        td_m = _task_details_get_model(td.uuid, session=session)
-        td_m = _taskdetails_merge(td_m, td)
-        td_m = session.merge(td_m)
-        return _convert_td_to_external(td_m)
+    def _update_atom_details(self, session, ad):
+        # Must already exist since a atoms details has a strong connection to
+        # a flow details, and atom details can not be saved on there own since
+        # they *must* have a connection to an existing flow detail.
+        ad_m = _atom_details_get_model(ad.uuid, session=session)
+        ad_m = _atomdetails_merge(ad_m, ad)
+        ad_m = session.merge(ad_m)
+        return _convert_ad_to_external(ad_m)

-    def update_task_details(self, task_detail):
-        return self._run_in_session(self._update_task_details, td=task_detail)
+    def update_atom_details(self, atom_detail):
+        return self._run_in_session(self._update_atom_details, ad=atom_detail)

     def _update_flow_details(self, session, fd):
         # Must already exist since a flow details has a strong connection to
@@ -432,12 +431,63 @@ class Connection(base.Connection):
 ###

+def _atomdetails_merge(ad_m, ad):
+    atom_type = logbook.atom_detail_type(ad)
+    if atom_type != ad_m.atom_type:
+        raise exc.StorageError("Can not merge differing atom types (%s != %s)"
+                               % (atom_type, ad_m.atom_type))
+    ad_d = ad.to_dict()
+    ad_m.state = ad_d['state']
+    ad_m.intention = ad_d['intention']
+    ad_m.results = ad_d['results']
+    ad_m.version = ad_d['version']
+    ad_m.failure = ad_d['failure']
+    ad_m.meta = ad_d['meta']
+    ad_m.name = ad_d['name']
+    return ad_m
+
+
+def _flowdetails_merge(fd_m, fd):
+    fd_d = fd.to_dict()
+    fd_m.state = fd_d['state']
+    fd_m.name = fd_d['name']
+    fd_m.meta = fd_d['meta']
+    for ad in fd:
+        existing_ad = False
+        for ad_m in fd_m.atomdetails:
+            if ad_m.uuid == ad.uuid:
+                existing_ad = True
+                ad_m = _atomdetails_merge(ad_m, ad)
+                break
+        if not existing_ad:
+            ad_m = _convert_ad_to_internal(ad, fd_m.uuid)
+            fd_m.atomdetails.append(ad_m)
+    return fd_m
+
+
+def _logbook_merge(lb_m, lb):
+    lb_d = lb.to_dict()
+    lb_m.meta = lb_d['meta']
+    lb_m.name = lb_d['name']
+    lb_m.created_at = lb_d['created_at']
+    lb_m.updated_at = lb_d['updated_at']
+    for fd in lb:
+        existing_fd = False
+        for fd_m in lb_m.flowdetails:
+            if fd_m.uuid == fd.uuid:
+                existing_fd = True
+                fd_m = _flowdetails_merge(fd_m, fd)
+        if not existing_fd:
+            lb_m.flowdetails.append(_convert_fd_to_internal(fd, lb_m.uuid))
+    return lb_m
+
+
 def _convert_fd_to_external(fd):
     fd_c = logbook.FlowDetail(fd.name, uuid=fd.uuid)
     fd_c.meta = fd.meta
     fd_c.state = fd.state
-    for td in fd.taskdetails:
-        fd_c.add(_convert_td_to_external(td))
+    for ad_m in fd.atomdetails:
+        fd_c.add(_convert_ad_to_external(ad_m))
     return fd_c
@@ -445,47 +495,40 @@ def _convert_fd_to_internal(fd, parent_uuid):
     fd_m = models.FlowDetail(name=fd.name, uuid=fd.uuid,
                              parent_uuid=parent_uuid, meta=fd.meta,
                              state=fd.state)
-    fd_m.taskdetails = []
-    for td in fd:
-        fd_m.taskdetails.append(_convert_td_to_internal(td, fd_m.uuid))
+    fd_m.atomdetails = []
+    for ad in fd:
+        fd_m.atomdetails.append(_convert_ad_to_internal(ad, fd_m.uuid))
     return fd_m


-def _convert_td_to_internal(td, parent_uuid):
-    results = td.results
-    if td.atom_type == logbook.RETRY_DETAIL:
-        results = persistence_utils.encode_retry_results(results)
-    return models.TaskDetail(name=td.name, uuid=td.uuid,
-                             atom_type=td.atom_type,
-                             intention=td.intention,
-                             state=td.state, results=results,
-                             failure=td.failure, meta=td.meta,
-                             version=td.version, parent_uuid=parent_uuid)
+def _convert_ad_to_internal(ad, parent_uuid):
+    converted = ad.to_dict()
+    converted['atom_type'] = logbook.atom_detail_type(ad)
+    converted['parent_uuid'] = parent_uuid
+    return models.AtomDetail(**converted)


-def _convert_td_to_external(td):
+def _convert_ad_to_external(ad):
     # Convert from sqlalchemy model -> external model, this allows us
     # to change the internal sqlalchemy model easily by forcing a defined
     # interface (that isn't the sqlalchemy model itself).
-    results = td.results
-    if td.atom_type == logbook.RETRY_DETAIL:
-        results = persistence_utils.decode_retry_results(results)
-    atom_cls = logbook.get_atom_detail_class(td.atom_type)
-    td_c = atom_cls(td.name, uuid=td.uuid)
-    td_c.state = td.state
-    td_c.intention = td.intention
-    td_c.results = results
-    td_c.failure = td.failure
-    td_c.meta = td.meta
-    td_c.version = td.version
-    return td_c
+    atom_cls = logbook.atom_detail_class(ad.atom_type)
+    return atom_cls.from_dict({
+        'state': ad.state,
+        'intention': ad.intention,
+        'results': ad.results,
+        'failure': ad.failure,
+        'meta': ad.meta,
+        'version': ad.version,
+        'name': ad.name,
+        'uuid': ad.uuid,
+    })


 def _convert_lb_to_external(lb_m):
-    """Don't expose the internal sqlalchemy ORM model to the external api."""
-    lb_c = logbook.LogBook(lb_m.name, lb_m.uuid,
-                           updated_at=lb_m.updated_at,
-                           created_at=lb_m.created_at)
+    lb_c = logbook.LogBook(lb_m.name, lb_m.uuid)
+    lb_c.updated_at = lb_m.updated_at
+    lb_c.created_at = lb_m.created_at
     lb_c.meta = lb_m.meta
     for fd_m in lb_m.flowdetails:
         lb_c.add(_convert_fd_to_external(fd_m))
@@ -493,7 +536,6 @@ def _convert_lb_to_external(lb_m):
 def _convert_lb_to_internal(lb_c):
-    """Don't expose the external model to the sqlalchemy ORM model."""
     lb_m = models.LogBook(uuid=lb_c.uuid, meta=lb_c.meta, name=lb_c.name)
     lb_m.flowdetails = []
     for fd_c in lb_c:
@@ -508,48 +550,15 @@ def _logbook_get_model(lb_id, session):
     return entry


-def _flow_details_get_model(f_id, session):
-    entry = session.query(models.FlowDetail).filter_by(uuid=f_id).first()
+def _flow_details_get_model(flow_id, session):
+    entry = session.query(models.FlowDetail).filter_by(uuid=flow_id).first()
     if entry is None:
-        raise exc.NotFound("No flow details found with id: %s" % f_id)
+        raise exc.NotFound("No flow details found with id: %s" % flow_id)
     return entry


-def _task_details_get_model(t_id, session):
-    entry = session.query(models.TaskDetail).filter_by(uuid=t_id).first()
+def _atom_details_get_model(atom_id, session):
+    entry = session.query(models.AtomDetail).filter_by(uuid=atom_id).first()
     if entry is None:
-        raise exc.NotFound("No task details found with id: %s" % t_id)
+        raise exc.NotFound("No atom details found with id: %s" % atom_id)
     return entry
-
-
-def _logbook_merge(lb_m, lb):
-    lb_m = persistence_utils.logbook_merge(lb_m, lb)
-    for fd in lb:
-        existing_fd = False
-        for fd_m in lb_m.flowdetails:
-            if fd_m.uuid == fd.uuid:
-                existing_fd = True
-                fd_m = _flowdetails_merge(fd_m, fd)
-        if not existing_fd:
-            lb_m.flowdetails.append(_convert_fd_to_internal(fd, lb_m.uuid))
-    return lb_m
-
-
-def _flowdetails_merge(fd_m, fd):
-    fd_m = persistence_utils.flow_details_merge(fd_m, fd)
-    for td in fd:
-        existing_td = False
-        for td_m in fd_m.taskdetails:
-            if td_m.uuid == td.uuid:
-                existing_td = True
-                td_m = _taskdetails_merge(td_m, td)
-                break
-        if not existing_td:
-            td_m = _convert_td_to_internal(td, fd_m.uuid)
-            fd_m.taskdetails.append(td_m)
-    return fd_m
-
-
-def _taskdetails_merge(td_m, td):
-    td_i = _convert_td_to_internal(td, td_m.parent_uuid)
-    return persistence_utils.task_details_merge(td_m, td_i)

View File

@@ -26,7 +26,6 @@ from taskflow.persistence.backends import base
 from taskflow.persistence import logbook
 from taskflow.utils import kazoo_utils as k_utils
 from taskflow.utils import misc
-from taskflow.utils import persistence_utils as p_utils

 LOG = logging.getLogger(__name__)
@@ -87,7 +86,7 @@ class ZkConnection(base.Connection):
         self._client = client
         self._book_path = paths.join(self._backend.path, "books")
         self._flow_path = paths.join(self._backend.path, "flow_details")
-        self._task_path = paths.join(self._backend.path, "task_details")
+        self._atom_path = paths.join(self._backend.path, "atom_details")
         with self._exc_wrapper():
             # NOOP if already started.
             self._client.start()
@@ -113,8 +112,8 @@ class ZkConnection(base.Connection):
         return self._flow_path

     @property
-    def task_path(self):
-        return self._task_path
+    def atom_path(self):
+        return self._atom_path

     def close(self):
         pass
@@ -122,7 +121,7 @@ class ZkConnection(base.Connection):
     def upgrade(self):
         """Creates the initial paths (if they already don't exist)."""
         with self._exc_wrapper():
-            for path in (self.book_path, self.flow_path, self.task_path):
+            for path in (self.book_path, self.flow_path, self.atom_path):
                 self._client.ensure_path(path)

     @contextlib.contextmanager
@@ -144,56 +143,61 @@ class ZkConnection(base.Connection):
         except (k_exc.KazooException, k_exc.ZookeeperError) as e:
             raise exc.StorageFailure("Storage backend internal error", e)

-    def update_task_details(self, td):
-        """Update a task_detail transactionally."""
+    def update_atom_details(self, ad):
+        """Update a atom detail transactionally."""
         with self._exc_wrapper():
             with self._client.transaction() as txn:
-                return self._update_task_details(td, txn)
+                return self._update_atom_details(ad, txn)

-    def _update_task_details(self, td, txn, create_missing=False):
+    def _update_atom_details(self, ad, txn, create_missing=False):
         # Determine whether the desired data exists or not.
-        td_path = paths.join(self.task_path, td.uuid)
+        ad_path = paths.join(self.atom_path, ad.uuid)
+        e_ad = None
         try:
-            td_data, _zstat = self._client.get(td_path)
+            ad_data, _zstat = self._client.get(ad_path)
         except k_exc.NoNodeError:
             # Not-existent: create or raise exception.
-            if create_missing:
-                txn.create(td_path)
-                e_td = logbook.TaskDetail(name=td.name, uuid=td.uuid)
-            else:
-                raise exc.NotFound("No task details found with id: %s"
-                                   % td.uuid)
+            raise exc.NotFound("No atom details found with id: %s" % ad.uuid)
         else:
             # Existent: read it out.
-            e_td = p_utils.unformat_task_detail(td.uuid,
-                                                misc.decode_json(td_data))
+            try:
+                ad_data = misc.decode_json(ad_data)
+                ad_cls = logbook.atom_detail_class(ad_data['type'])
+                e_ad = ad_cls.from_dict(ad_data['atom'])
+            except KeyError:
+                pass

         # Update and write it back
-        e_td = p_utils.task_details_merge(e_td, td)
-        td_data = p_utils.format_task_detail(e_td)
-        txn.set_data(td_path, misc.binary_encode(jsonutils.dumps(td_data)))
-        return e_td
+        if e_ad:
+            e_ad = e_ad.merge(ad)
+        else:
+            e_ad = ad
+        ad_data = base._format_atom(e_ad)
+        txn.set_data(ad_path,
+                     misc.binary_encode(jsonutils.dumps(ad_data)))
+        return e_ad

-    def get_task_details(self, td_uuid):
-        """Read a taskdetail.
+    def get_atom_details(self, ad_uuid):
+        """Read a atom detail.

         *Read-only*, so no need of zk transaction.
         """
         with self._exc_wrapper():
-            return self._get_task_details(td_uuid)
+            return self._get_atom_details(ad_uuid)

-    def _get_task_details(self, td_uuid):
-        td_path = paths.join(self.task_path, td_uuid)
+    def _get_atom_details(self, ad_uuid):
+        ad_path = paths.join(self.atom_path, ad_uuid)
         try:
-            td_data, _zstat = self._client.get(td_path)
+            ad_data, _zstat = self._client.get(ad_path)
         except k_exc.NoNodeError:
-            raise exc.NotFound("No task details found with id: %s" % td_uuid)
+            raise exc.NotFound("No atom details found with id: %s" % ad_uuid)
         else:
-            return p_utils.unformat_task_detail(td_uuid,
-                                                misc.decode_json(td_data))
+            ad_data = misc.decode_json(ad_data)
+            ad_cls = logbook.atom_detail_class(ad_data['type'])
+            return ad_cls.from_dict(ad_data['atom'])

     def update_flow_details(self, fd):
-        """Update a flowdetail transactionally."""
+        """Update a flow detail transactionally."""
         with self._exc_wrapper():
             with self._client.transaction() as txn:
                 return self._update_flow_details(fd, txn)
@@ -213,25 +217,24 @@ class ZkConnection(base.Connection):
                                    % fd.uuid)
         else:
             # Existent: read it out
-            e_fd = p_utils.unformat_flow_detail(fd.uuid,
-                                                misc.decode_json(fd_data))
+            e_fd = logbook.FlowDetail.from_dict(misc.decode_json(fd_data))

         # Update and write it back
-        e_fd = p_utils.flow_details_merge(e_fd, fd)
-        fd_data = p_utils.format_flow_detail(e_fd)
+        e_fd = e_fd.merge(fd)
+        fd_data = e_fd.to_dict()
         txn.set_data(fd_path, misc.binary_encode(jsonutils.dumps(fd_data)))
-        for td in fd:
-            td_path = paths.join(fd_path, td.uuid)
+        for ad in fd:
+            ad_path = paths.join(fd_path, ad.uuid)
             # NOTE(harlowja): create an entry in the flow detail path
-            # for the provided task detail so that a reference exists
-            # from the flow detail to its task details.
-            if not self._client.exists(td_path):
-                txn.create(td_path)
-            e_fd.add(self._update_task_details(td, txn, create_missing=True))
+            # for the provided atom detail so that a reference exists
+            # from the flow detail to its atom details.
+            if not self._client.exists(ad_path):
+                txn.create(ad_path)
+            e_fd.add(self._update_atom_details(ad, txn, create_missing=True))
         return e_fd

     def get_flow_details(self, fd_uuid):
-        """Read a flowdetail.
+        """Read a flow detail.

         *Read-only*, so no need of zk transaction.
         """
@@ -245,16 +248,16 @@ class ZkConnection(base.Connection):
         except k_exc.NoNodeError:
             raise exc.NotFound("No flow details found with id: %s" % fd_uuid)

-        fd = p_utils.unformat_flow_detail(fd_uuid, misc.decode_json(fd_data))
-        for td_uuid in self._client.get_children(fd_path):
-            fd.add(self._get_task_details(td_uuid))
+        fd = logbook.FlowDetail.from_dict(misc.decode_json(fd_data))
+        for ad_uuid in self._client.get_children(fd_path):
+            fd.add(self._get_atom_details(ad_uuid))
         return fd

     def save_logbook(self, lb):
         """Save (update) a log_book transactionally."""

         def _create_logbook(lb_path, txn):
-            lb_data = p_utils.format_logbook(lb, created_at=None)
+            lb_data = lb.to_dict(marshal_time=True)
             txn.create(lb_path, misc.binary_encode(jsonutils.dumps(lb_data)))
             for fd in lb:
                 # NOTE(harlowja): create an entry in the logbook path
@@ -262,22 +265,24 @@ class ZkConnection(base.Connection):
                 # from the logbook to its flow details.
                 txn.create(paths.join(lb_path, fd.uuid))
                 fd_path = paths.join(self.flow_path, fd.uuid)
-                fd_data = jsonutils.dumps(p_utils.format_flow_detail(fd))
+                fd_data = jsonutils.dumps(fd.to_dict())
                 txn.create(fd_path, misc.binary_encode(fd_data))
-                for td in fd:
+                for ad in fd:
                     # NOTE(harlowja): create an entry in the flow detail path
-                    # for the provided task detail so that a reference exists
-                    # from the flow detail to its task details.
-                    txn.create(paths.join(fd_path, td.uuid))
-                    td_path = paths.join(self.task_path, td.uuid)
-                    td_data = jsonutils.dumps(p_utils.format_task_detail(td))
-                    txn.create(td_path, misc.binary_encode(td_data))
+                    # for the provided atom detail so that a reference exists
+                    # from the flow detail to its atom details.
+                    txn.create(paths.join(fd_path, ad.uuid))
+                    ad_path = paths.join(self.atom_path, ad.uuid)
+                    ad_data = base._format_atom(ad)
+                    txn.create(ad_path,
+                               misc.binary_encode(jsonutils.dumps(ad_data)))
             return lb

         def _update_logbook(lb_path, lb_data, txn):
-            e_lb = p_utils.unformat_logbook(lb.uuid, misc.decode_json(lb_data))
-            e_lb = p_utils.logbook_merge(e_lb, lb)
-            lb_data = p_utils.format_logbook(e_lb, created_at=lb.created_at)
+            e_lb = logbook.LogBook.from_dict(misc.decode_json(lb_data),
+                                             unmarshal_time=True)
+            e_lb = e_lb.merge(lb)
+            lb_data = e_lb.to_dict(marshal_time=True)
             txn.set_data(lb_path, misc.binary_encode(jsonutils.dumps(lb_data)))
             for fd in lb:
                 fd_path = paths.join(lb_path, fd.uuid)
@@ -312,8 +317,8 @@ class ZkConnection(base.Connection):
             except k_exc.NoNodeError:
                 raise exc.NotFound("No logbook found with id: %s" % lb_uuid)
             else:
-                lb = p_utils.unformat_logbook(lb_uuid,
-                                              misc.decode_json(lb_data))
+                lb = logbook.LogBook.from_dict(misc.decode_json(lb_data),
+                                               unmarshal_time=True)
                 for fd_uuid in self._client.get_children(lb_path):
                     lb.add(self._get_flow_details(fd_uuid))
                 return lb
@@ -338,21 +343,21 @@ class ZkConnection(base.Connection):
     def destroy_logbook(self, lb_uuid):
         """Destroy (delete) a log_book transactionally."""

-        def _destroy_task_details(td_uuid, txn):
-            td_path = paths.join(self.task_path, td_uuid)
-            if not self._client.exists(td_path):
-                raise exc.NotFound("No task details found with id: %s"
-                                   % td_uuid)
-            txn.delete(td_path)
+        def _destroy_atom_details(ad_uuid, txn):
+            ad_path = paths.join(self.atom_path, ad_uuid)
+            if not self._client.exists(ad_path):
+                raise exc.NotFound("No atom details found with id: %s"
+                                   % ad_uuid)
+            txn.delete(ad_path)

         def _destroy_flow_details(fd_uuid, txn):
             fd_path = paths.join(self.flow_path, fd_uuid)
             if not self._client.exists(fd_path):
                 raise exc.NotFound("No flow details found with id: %s"
                                    % fd_uuid)
-            for td_uuid in self._client.get_children(fd_path):
-                _destroy_task_details(td_uuid, txn)
-                txn.delete(paths.join(fd_path, td_uuid))
+            for ad_uuid in self._client.get_children(fd_path):
+                _destroy_atom_details(ad_uuid, txn)
+                txn.delete(paths.join(fd_path, ad_uuid))
             txn.delete(fd_path)

         def _destroy_logbook(lb_uuid, txn):
@@ -380,20 +385,20 @@ class ZkConnection(base.Connection):
                 txn.delete(paths.join(lb_path, fd_uuid))
                 txn.delete(lb_path)

-            # Delete all data under flowdetail path.
+            # Delete all data under flow detail path.
             for fd_uuid in self._client.get_children(self.flow_path):
                 fd_path = paths.join(self.flow_path, fd_uuid)
-                for td_uuid in self._client.get_children(fd_path):
-                    txn.delete(paths.join(fd_path, td_uuid))
+                for ad_uuid in self._client.get_children(fd_path):
+                    txn.delete(paths.join(fd_path, ad_uuid))
                 txn.delete(fd_path)

-            # Delete all data under taskdetail path.
-            for td_uuid in self._client.get_children(self.task_path):
-                td_path = paths.join(self.task_path, td_uuid)
-                txn.delete(td_path)
+            # Delete all data under atom detail path.
+            for ad_uuid in self._client.get_children(self.atom_path):
+                ad_path = paths.join(self.atom_path, ad_uuid)
+                txn.delete(ad_path)

             # Delete containing directories.
             if delete_dirs:
                 txn.delete(self.book_path)
-                txn.delete(self.task_path)
+                txn.delete(self.atom_path)
                 txn.delete(self.flow_path)

View File

@@ -0,0 +1,37 @@
+# -*- coding: utf-8 -*-
+
+#    Copyright (C) 2014 Yahoo! Inc. All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+"""Rename taskdetails to atomdetails
+
+Revision ID: 589dccdf2b6e
+Revises: 14b227d79a87
+Create Date: 2014-03-19 11:49:16.533227
+
+"""
+
+# revision identifiers, used by Alembic.
+revision = '589dccdf2b6e'
+down_revision = '14b227d79a87'
+
+from alembic import op
+
+
+def upgrade():
+    op.rename_table("taskdetails", "atomdetails")
+
+
+def downgrade():
+    op.rename_table("atomdetails", "taskdetails")

View File

@@ -33,8 +33,7 @@ from taskflow.persistence import logbook
 def upgrade():
     atom_types = sa.Enum(*logbook.ATOM_TYPES, name='atom_types')
-    column = sa.Column('atom_type', atom_types,
-                       server_default=logbook.TASK_DETAIL)
+    column = sa.Column('atom_type', atom_types)
     bind = op.get_bind()
     impl = atom_types.dialect_impl(bind.dialect)
     impl.create(bind, checkfirst=True)

View File

@@ -28,7 +28,6 @@ from taskflow.openstack.common import uuidutils
 from taskflow.persistence import logbook
 from taskflow import states
-from taskflow.utils import persistence_utils

 BASE = declarative_base()
@@ -49,27 +48,6 @@ class Json(types.TypeDecorator):
         return jsonutils.loads(value)


-class Failure(types.TypeDecorator):
-    """Put misc.Failure object into database column.
-
-    We convert Failure object to dict, serialize that dict into
-    JSON and save it. None is stored as NULL.
-
-    The conversion is lossy since we cannot save exc_info.
-    """
-    impl = types.Text
-
-    def process_bind_param(self, value, dialect):
-        if value is None:
-            return None
-        return jsonutils.dumps(persistence_utils.failure_to_dict(value))
-
-    def process_result_value(self, value, dialect):
-        if value is None:
-            return None
-        return persistence_utils.failure_from_dict(jsonutils.loads(value))
-
-
 class ModelBase(TimestampMixin):
     """Base model for all taskflow objects."""
     uuid = Column(String, default=uuidutils.generate_uuid,
@@ -98,23 +76,23 @@ class FlowDetail(BASE, ModelBase):
     # Relationships
     parent_uuid = Column(String, ForeignKey('logbooks.uuid'))
-    taskdetails = relationship("TaskDetail",
+    atomdetails = relationship("AtomDetail",
                                single_parent=True,
                                backref=backref("flowdetails",
                                                cascade="save-update, delete, "
                                                        "merge"))


-class TaskDetail(BASE, ModelBase):
-    __tablename__ = 'taskdetails'
+class AtomDetail(BASE, ModelBase):
+    __tablename__ = 'atomdetails'
+    atom_type = Column(Enum(*logbook.ATOM_TYPES, name='atom_types'))

     # Member variables
-    atom_type = Column(Enum(*logbook.ATOM_TYPES, name='atom_types'))
     state = Column(String)
     intention = Column(Enum(*states.INTENTIONS, name='intentions'))
     results = Column(Json)
-    failure = Column(Failure)
-    version = Column(String)
+    failure = Column(Json)
+    version = Column(Json)

     # Relationships
     parent_uuid = Column(String, ForeignKey('flowdetails.uuid'))

View File

@@ -15,20 +15,37 @@
# License for the specific language governing permissions and limitations # License for the specific language governing permissions and limitations
# under the License. # under the License.
import abc
import copy
import logging import logging
import abc
import six import six
from taskflow.openstack.common import timeutils
from taskflow.openstack.common import uuidutils from taskflow.openstack.common import uuidutils
from taskflow import states from taskflow import states
from taskflow.utils import misc
LOG = logging.getLogger(__name__) LOG = logging.getLogger(__name__)
TASK_DETAIL = 'TASK_DETAIL'
RETRY_DETAIL = 'RETRY_DETAIL'
ATOM_TYPES = [TASK_DETAIL, RETRY_DETAIL] def _copy_function(deep_copy):
if deep_copy:
return copy.deepcopy
else:
return lambda x: x
def _safe_marshal_time(when):
if not when:
return None
return timeutils.marshall_now(now=when)
def _safe_unmarshal_time(when):
if not when:
return None
return timeutils.unmarshall_time(when)
 class LogBook(object):
@@ -41,35 +58,75 @@ class LogBook(object):
     storage in real time. The data in this class will only be guaranteed to be
     persisted when a save occurs via some backend connection.
     """

-    def __init__(self, name, uuid=None, updated_at=None, created_at=None):
+    def __init__(self, name, uuid=None):
         if uuid:
             self._uuid = uuid
         else:
             self._uuid = uuidutils.generate_uuid()
         self._name = name
         self._flowdetails_by_id = {}
-        self._updated_at = updated_at
-        self._created_at = created_at
+        self.created_at = timeutils.utcnow()
+        self.updated_at = None
         self.meta = None

-    @property
-    def created_at(self):
-        return self._created_at
-
-    @property
-    def updated_at(self):
-        return self._updated_at
-
     def add(self, fd):
         """Adds a new entry to the underlying logbook.

         Does not *guarantee* that the details will be immediately saved.
         """
         self._flowdetails_by_id[fd.uuid] = fd
+        self.updated_at = timeutils.utcnow()

     def find(self, flow_uuid):
         return self._flowdetails_by_id.get(flow_uuid, None)

+    def merge(self, lb, deep_copy=False):
+        """Merges the current object's state with the given one's state.
+
+        NOTE(harlowja): Does not merge the flow details contained in either.
+        """
+        if lb is self:
+            return self
+        copy_fn = _copy_function(deep_copy)
+        if self.meta != lb.meta:
+            self.meta = copy_fn(lb.meta)
+        if lb.created_at != self.created_at:
+            self.created_at = copy_fn(lb.created_at)
+        if lb.updated_at != self.updated_at:
+            self.updated_at = copy_fn(lb.updated_at)
+        return self
+
+    def to_dict(self, marshal_time=False):
+        """Translates the internal state of this object to a dictionary.
+
+        NOTE(harlowja): Does not include the contained flow details.
+        """
+        if not marshal_time:
+            marshal_fn = lambda x: x
+        else:
+            marshal_fn = _safe_marshal_time
+        data = {
+            'name': self.name,
+            'meta': self.meta,
+            'uuid': self.uuid,
+            'updated_at': marshal_fn(self.updated_at),
+            'created_at': marshal_fn(self.created_at),
+        }
+        return data
+
+    @classmethod
+    def from_dict(cls, data, unmarshal_time=False):
+        """Translates the given data into an instance of this class."""
+        if not unmarshal_time:
+            unmarshal_fn = lambda x: x
+        else:
+            unmarshal_fn = _safe_unmarshal_time
+        obj = cls(data['name'], uuid=data['uuid'])
+        obj.updated_at = unmarshal_fn(data['updated_at'])
+        obj.created_at = unmarshal_fn(data['created_at'])
+        obj.meta = data.get('meta')
+        return obj
+
     @property
     def uuid(self):
         return self._uuid
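Taken together these methods let a backend round-trip a logbook without going through persistence_utils. A short sketch (marshal_time converts the datetime fields into primitive types; 'example' metadata is hypothetical):

from taskflow.persistence import logbook

lb = logbook.LogBook('my-book')
lb.meta = {'owner': 'example'}

data = lb.to_dict(marshal_time=True)
lb2 = logbook.LogBook.from_dict(data, unmarshal_time=True)

# merge() only reconciles metadata/timestamps; flow details are left alone.
lb.merge(lb2, deep_copy=True)
assert lb.uuid == lb2.uuid and lb2.meta == {'owner': 'example'}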
@@ -87,7 +144,7 @@ class LogBook(object):
 class FlowDetail(object):
-    """This class contains a dict of task detail entries for a given
+    """This class contains a dict of atom detail entries for a given
     flow along with any metadata associated with that flow.

     The data contained within this class need *not* be backed by the backend
@@ -97,7 +154,7 @@ class FlowDetail(object):
     def __init__(self, name, uuid):
         self._uuid = uuid
         self._name = name
-        self._taskdetails_by_id = {}
+        self._atomdetails_by_id = {}
         self.state = None

         # Any other metadata to include about this flow while storing. For
         # example timing information could be stored here, other misc. flow
@@ -107,16 +164,52 @@ class FlowDetail(object):
     def update(self, fd):
         """Updates the object's state to be the same as the given one."""
         if fd is self:
-            return
-        self._taskdetails_by_id = dict(fd._taskdetails_by_id)
+            return self
+        self._atomdetails_by_id = dict(fd._atomdetails_by_id)
         self.state = fd.state
         self.meta = fd.meta
+        return self

-    def add(self, td):
-        self._taskdetails_by_id[td.uuid] = td
-
-    def find(self, td_uuid):
-        return self._taskdetails_by_id.get(td_uuid)
+    def merge(self, fd, deep_copy=False):
+        """Merges the current object's state with the given one's state.
+
+        NOTE(harlowja): Does not merge the atom details contained in either.
+        """
+        if fd is self:
+            return self
+        copy_fn = _copy_function(deep_copy)
+        if self.meta != fd.meta:
+            self.meta = copy_fn(fd.meta)
+        if self.state != fd.state:
+            # NOTE(imelnikov): states are just strings, no need to copy.
+            self.state = fd.state
+        return self
+
+    def to_dict(self):
+        """Translates the internal state of this object to a dictionary.
+
+        NOTE(harlowja): Does not include the contained atom details.
+        """
+        return {
+            'name': self.name,
+            'meta': self.meta,
+            'state': self.state,
+            'uuid': self.uuid,
+        }
+
+    @classmethod
+    def from_dict(cls, data):
+        """Translates the given data into an instance of this class."""
+        obj = cls(data['name'], data['uuid'])
+        obj.state = data.get('state')
+        obj.meta = data.get('meta')
+        return obj
+
+    def add(self, ad):
+        self._atomdetails_by_id[ad.uuid] = ad
+
+    def find(self, ad_uuid):
+        return self._atomdetails_by_id.get(ad_uuid)

     @property
     def uuid(self):
@@ -127,11 +220,11 @@ class FlowDetail(object):
         return self._name

     def __iter__(self):
-        for td in six.itervalues(self._taskdetails_by_id):
-            yield td
+        for ad in six.itervalues(self._atomdetails_by_id):
+            yield ad

     def __len__(self):
-        return len(self._taskdetails_by_id)
+        return len(self._atomdetails_by_id)
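Note the division of labor here: update() swaps in the other object's full state (including the atom details mapping), while merge() only reconciles state/meta, and to_dict()/from_dict() skip the contained atom details entirely. Roughly (hypothetical uuid):

from taskflow.persistence import logbook

fd = logbook.FlowDetail('my-flow', uuid='f-uuid')
fd2 = logbook.FlowDetail.from_dict(fd.to_dict())
fd2.state = 'RUNNING'

fd.merge(fd2)                # copies state/meta only
assert fd.state == 'RUNNING'
assert len(fd) == 0          # atom details were not merged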
 @six.add_metaclass(abc.ABCMeta)
@@ -172,16 +265,71 @@ class AtomDetail(object):
         # information can be associated with.
         self.version = None

-    def update(self, td):
+    def update(self, ad):
         """Updates the object's state to be the same as the given one."""
-        if td is self:
-            return
-        self.state = td.state
-        self.intention = td.intention
-        self.meta = td.meta
-        self.failure = td.failure
-        self.results = td.results
-        self.version = td.version
+        if ad is self:
+            return self
+        self.state = ad.state
+        self.intention = ad.intention
+        self.meta = ad.meta
+        self.failure = ad.failure
+        self.results = ad.results
+        self.version = ad.version
+        return self
+
+    @abc.abstractmethod
+    def merge(self, other, deep_copy=False):
+        """Merges the current object's state with the given one's state."""
+        copy_fn = _copy_function(deep_copy)
+        # NOTE(imelnikov): states and intentions are just strings,
+        # so there is no need to copy them (strings are immutable in python).
+        self.state = other.state
+        self.intention = other.intention
+        if self.failure != other.failure:
+            # NOTE(imelnikov): we can't just deep copy Failures, as they
+            # contain tracebacks, which are not copyable.
+            if other.failure:
+                if deep_copy:
+                    self.failure = other.failure.copy()
+                else:
+                    self.failure = other.failure
+            else:
+                self.failure = None
+        if self.meta != other.meta:
+            self.meta = copy_fn(other.meta)
+        if self.version != other.version:
+            self.version = copy_fn(other.version)
+        return self
+
+    @abc.abstractmethod
+    def to_dict(self):
+        """Translates the internal state of this object to a dictionary."""
+
+    def _to_dict_shared(self):
+        if self.failure:
+            failure = self.failure.to_dict()
+        else:
+            failure = None
+        return {
+            'failure': failure,
+            'meta': self.meta,
+            'name': self.name,
+            'results': self.results,
+            'state': self.state,
+            'version': self.version,
+            'intention': self.intention,
+            'uuid': self.uuid,
+        }
+
+    def _from_dict_shared(self, data):
+        self.state = data.get('state')
+        self.intention = data.get('intention')
+        self.results = data.get('results')
+        self.version = data.get('version')
+        self.meta = data.get('meta')
+        failure = data.get('failure')
+        if failure:
+            self.failure = misc.Failure.from_dict(failure)

     @property
     def uuid(self):
@@ -191,10 +339,6 @@ class AtomDetail(object):
     def name(self):
         return self._name

-    @abc.abstractproperty
-    def atom_type(self):
-        """Identifies atom type represented by this detail."""
-
     @abc.abstractmethod
     def reset(self, state):
         """Resets detail results and failures."""
@@ -205,16 +349,34 @@ class TaskDetail(AtomDetail):

     def __init__(self, name, uuid):
         super(TaskDetail, self).__init__(name, uuid)

-    @property
-    def atom_type(self):
-        return TASK_DETAIL
-
     def reset(self, state):
         self.results = None
         self.failure = None
         self.state = state
         self.intention = states.EXECUTE

+    @classmethod
+    def from_dict(cls, data):
+        """Translates the given data into an instance of this class."""
+        obj = cls(data['name'], data['uuid'])
+        obj._from_dict_shared(data)
+        return obj
+
+    def to_dict(self):
+        """Translates the internal state of this object to a dictionary."""
+        return self._to_dict_shared()
+
+    def merge(self, other, deep_copy=False):
+        if not isinstance(other, TaskDetail):
+            raise NotImplementedError("Can only merge with other task"
+                                      " details")
+        if other is self:
+            return self
+        super(TaskDetail, self).merge(other, deep_copy=deep_copy)
+        copy_fn = _copy_function(deep_copy)
+        if self.results != other.results:
+            self.results = copy_fn(other.results)
+        return self
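Since _to_dict_shared() flattens any held failure via misc.Failure.to_dict(), a round trip drops the live exc_info; the restored failure should therefore be compared with matches() rather than equality. Sketch:

from taskflow.persistence import logbook
from taskflow.utils import misc

td = logbook.TaskDetail('boot', uuid='42')
td.failure = misc.Failure.from_exception(RuntimeError('Woot!'))

td2 = logbook.TaskDetail.from_dict(td.to_dict())
assert td2.failure.matches(td.failure)   # strict equality may not hold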
 class RetryDetail(AtomDetail):
     """This class represents a retry detail for retry controller object."""
@@ -222,21 +384,89 @@ class RetryDetail(AtomDetail):
         super(RetryDetail, self).__init__(name, uuid)
         self.results = []

-    @property
-    def atom_type(self):
-        return RETRY_DETAIL
-
     def reset(self, state):
         self.results = []
         self.failure = None
         self.state = state
         self.intention = states.EXECUTE

+    @classmethod
+    def from_dict(cls, data):
+        """Translates the given data into an instance of this class."""
+
+        def decode_results(results):
+            if not results:
+                return []
+            new_results = []
+            for (data, failures) in results:
+                new_failures = {}
+                for (key, failure_data) in six.iteritems(failures):
+                    new_failures[key] = misc.Failure.from_dict(failure_data)
+                new_results.append((data, new_failures))
+            return new_results
+
+        obj = cls(data['name'], data['uuid'])
+        obj._from_dict_shared(data)
+        obj.results = decode_results(obj.results)
+        return obj
+
+    def to_dict(self):
+        """Translates the internal state of this object to a dictionary."""
+
+        def encode_results(results):
+            if not results:
+                return []
+            new_results = []
+            for (data, failures) in results:
+                new_failures = {}
+                for (key, failure) in six.iteritems(failures):
+                    new_failures[key] = failure.to_dict()
+                new_results.append((data, new_failures))
+            return new_results
+
+        base = self._to_dict_shared()
+        base['results'] = encode_results(base.get('results'))
+        return base
+
+    def merge(self, other, deep_copy=False):
+        if not isinstance(other, RetryDetail):
+            raise NotImplementedError("Can only merge with other retry"
+                                      " details")
+        if other is self:
+            return self
+        super(RetryDetail, self).merge(other, deep_copy=deep_copy)
+        results = []
+        # NOTE(imelnikov): we can't just deep copy Failures, as they
+        # contain tracebacks, which are not copyable.
+        for (data, failures) in other.results:
+            copied_failures = {}
+            for (key, failure) in six.iteritems(failures):
+                if deep_copy:
+                    copied_failures[key] = failure.copy()
+                else:
+                    copied_failures[key] = failure
+            results.append((data, copied_failures))
+        self.results = results
+        return self

-def get_atom_detail_class(atom_type):
-    if atom_type == TASK_DETAIL:
-        return TaskDetail
-    elif atom_type == RETRY_DETAIL:
-        return RetryDetail
-    else:
-        raise TypeError("Unknown atom type")
+
+_DETAIL_TO_NAME = {
+    RetryDetail: 'RETRY_DETAIL',
+    TaskDetail: 'TASK_DETAIL',
+}
+_NAME_TO_DETAIL = dict((name, cls)
+                       for (cls, name) in six.iteritems(_DETAIL_TO_NAME))
+ATOM_TYPES = list(six.iterkeys(_NAME_TO_DETAIL))
+
+
+def atom_detail_class(atom_type):
+    try:
+        return _NAME_TO_DETAIL[atom_type]
+    except KeyError:
+        raise TypeError("Unknown atom type: %s" % (atom_type))
+
+
+def atom_detail_type(atom_detail):
+    try:
+        return _DETAIL_TO_NAME[type(atom_detail)]
+    except KeyError:
+        raise TypeError("Unknown atom type: %s" % type(atom_detail))

View File

@@ -36,10 +36,11 @@ STATES_WITH_RESULTS = (states.SUCCESS, states.REVERTING, states.FAILURE)
 class Storage(object):
     """Interface between engines and logbook.

-    This class provides a simple interface to save tasks of a given flow and
+    This class provides a simple interface to save atoms of a given flow and
     associated activity and results to the persistence layer (logbook,
-    task_details, flow_details) for use by engines, making it easier to
-    interact with the underlying storage & backend mechanism.
+    atom_details, flow_details) for use by engines. This makes it easier to
+    interact with the underlying storage & backend mechanism through this
+    interface rather than accessing those objects directly.
     """

     injector_name = '_TaskFlow_INJECTOR'
@@ -54,15 +55,17 @@ class Storage(object):
         # NOTE(imelnikov): failure serialization loses information,
         # so we cache failures here, in a task name -> misc.Failure mapping.
         self._failures = {}
-        for td in self._flowdetail:
-            if td.failure is not None:
-                self._failures[td.name] = td.failure
+        for ad in self._flowdetail:
+            if ad.failure is not None:
+                self._failures[ad.name] = ad.failure

-        self._task_name_to_uuid = dict((td.name, td.uuid)
-                                       for td in self._flowdetail)
+        self._atom_name_to_uuid = dict((ad.name, ad.uuid)
+                                       for ad in self._flowdetail)
         try:
-            injector_td = self._taskdetail_by_name(self.injector_name)
+            injector_td = self._atomdetail_by_name(
+                self.injector_name,
+                expected_type=logbook.TaskDetail)
         except exceptions.NotFound:
             pass
         else:
@@ -100,16 +103,16 @@ class Storage(object):
         """
         with self._lock.write_lock():
             try:
-                task_id = self._task_name_to_uuid[task_name]
+                task_id = self._atom_name_to_uuid[task_name]
             except KeyError:
                 task_id = uuidutils.generate_uuid()
                 self._create_atom_detail(logbook.TaskDetail, task_name,
                                          task_id, task_version)
             else:
-                td = self._flowdetail.find(task_id)
-                if td.atom_type != logbook.TASK_DETAIL:
+                ad = self._flowdetail.find(task_id)
+                if not isinstance(ad, logbook.TaskDetail):
                     raise exceptions.Duplicate(
-                        "Task detail %s already exists in flow detail %s." %
+                        "Atom detail %s already exists in flow detail %s." %
                         (task_name, self._flowdetail.name))
             self._set_result_mapping(task_name, result_mapping)
         return task_id
@@ -128,16 +131,16 @@ class Storage(object):
         """
         with self._lock.write_lock():
             try:
-                retry_id = self._task_name_to_uuid[retry_name]
+                retry_id = self._atom_name_to_uuid[retry_name]
             except KeyError:
                 retry_id = uuidutils.generate_uuid()
                 self._create_atom_detail(logbook.RetryDetail, retry_name,
                                          retry_id, retry_version)
             else:
-                td = self._flowdetail.find(retry_id)
-                if td.atom_type != logbook.RETRY_DETAIL:
+                ad = self._flowdetail.find(retry_id)
+                if not isinstance(ad, logbook.RetryDetail):
                     raise exceptions.Duplicate(
-                        "Task detail %s already exists in flow detail %s." %
+                        "Atom detail %s already exists in flow detail %s." %
                         (retry_name, self._flowdetail.name))
             self._set_result_mapping(retry_name, result_mapping)
         return retry_id
@@ -153,7 +156,7 @@ class Storage(object):
         ad.version = task_version
         self._flowdetail.add(ad)
         self._with_connection(self._save_flow_detail)
-        self._task_name_to_uuid[ad.name] = ad.uuid
+        self._atom_name_to_uuid[ad.name] = ad.uuid

     @property
     def flow_name(self):
@@ -171,73 +174,93 @@ class Storage(object):
         # added item to the flow detail).
         self._flowdetail.update(conn.update_flow_details(self._flowdetail))

-    def _taskdetail_by_name(self, task_name):
+    def _atomdetail_by_name(self, atom_name, expected_type=None):
         try:
-            return self._flowdetail.find(self._task_name_to_uuid[task_name])
+            ad = self._flowdetail.find(self._atom_name_to_uuid[atom_name])
         except KeyError:
-            raise exceptions.NotFound("Unknown task name: %s" % task_name)
+            raise exceptions.NotFound("Unknown atom name: %s" % atom_name)
+        else:
+            # TODO(harlowja): we need to figure out how to get away from doing
+            # these kinds of type checks in general (since they likely mean
+            # we aren't doing something right).
+            if expected_type and not isinstance(ad, expected_type):
+                raise TypeError("Atom %s is not of the expected type: %s"
+                                % (atom_name,
+                                   reflection.get_class_name(expected_type)))
+            return ad

-    def _save_task_detail(self, conn, task_detail):
-        # NOTE(harlowja): we need to update our contained task detail if
+    def _save_atom_detail(self, conn, atom_detail):
+        # NOTE(harlowja): we need to update our contained atom detail if
         # the result of the update actually added more (aka another process
-        # is also modifying the task detail).
-        task_detail.update(conn.update_task_details(task_detail))
+        # is also modifying the atom detail), since python is by reference
+        # and the contained atom detail will reflect the old state if we
+        # don't do this update.
+        atom_detail.update(conn.update_atom_details(atom_detail))

-    def get_task_uuid(self, task_name):
-        """Get task uuid by given name."""
+    def get_atom_uuid(self, atom_name):
+        """Gets an atom's uuid given the atom's name."""
         with self._lock.read_lock():
-            td = self._taskdetail_by_name(task_name)
-            return td.uuid
+            ad = self._atomdetail_by_name(atom_name)
+            return ad.uuid

-    def set_task_state(self, task_name, state):
-        """Set task or retry state."""
+    def set_atom_state(self, atom_name, state):
+        """Sets an atom's state."""
         with self._lock.write_lock():
-            td = self._taskdetail_by_name(task_name)
-            td.state = state
-            self._with_connection(self._save_task_detail, td)
+            ad = self._atomdetail_by_name(atom_name)
+            ad.state = state
+            self._with_connection(self._save_atom_detail, ad)

-    def get_task_state(self, task_name):
-        """Get state of task with given name."""
+    def get_atom_state(self, atom_name):
+        """Gets the state of an atom given the atom's name."""
         with self._lock.read_lock():
-            td = self._taskdetail_by_name(task_name)
-            return td.state
+            ad = self._atomdetail_by_name(atom_name)
+            return ad.state

     def set_atom_intention(self, atom_name, intention):
-        """Set intention for atom with given name."""
-        td = self._taskdetail_by_name(atom_name)
-        td.intention = intention
-        self._with_connection(self._save_task_detail, task_detail=td)
+        """Sets the intention of an atom given the atom's name."""
+        ad = self._atomdetail_by_name(atom_name)
+        ad.intention = intention
+        self._with_connection(self._save_atom_detail, ad)

     def get_atom_intention(self, atom_name):
-        """Get intention of atom with given name."""
-        return self._taskdetail_by_name(atom_name).intention
+        """Gets the intention of an atom given the atom's name."""
+        ad = self._atomdetail_by_name(atom_name)
+        return ad.intention

-    def get_tasks_states(self, task_names):
-        """Gets all task states."""
+    def get_atoms_states(self, atom_names):
+        """Gets the states (and intentions) of all the named atoms."""
         with self._lock.read_lock():
-            return dict((name, (self.get_task_state(name),
+            return dict((name, (self.get_atom_state(name),
                                 self.get_atom_intention(name)))
-                        for name in task_names)
+                        for name in atom_names)

-    def update_task_metadata(self, task_name, update_with):
-        """Updates a tasks metadata."""
+    def _update_atom_metadata(self, atom_name, update_with,
+                              expected_type=None):
         if not update_with:
-            return
+            update_with = {}
         with self._lock.write_lock():
-            td = self._taskdetail_by_name(task_name)
-            if not td.meta:
-                td.meta = {}
-            td.meta.update(update_with)
-            self._with_connection(self._save_task_detail, td)
+            ad = self._atomdetail_by_name(atom_name,
+                                          expected_type=expected_type)
+            if not ad.meta:
+                ad.meta = {}
+            ad.meta.update(update_with)
+            self._with_connection(self._save_atom_detail, ad)
+
+    def update_atom_metadata(self, atom_name, update_with):
+        """Updates an atom's metadata given another dictionary or a list of
+        (key, value) pairs to include in the updated metadata (newer keys
+        will overwrite older keys).
+        """
+        self._update_atom_metadata(atom_name, update_with)

     def set_task_progress(self, task_name, progress, details=None):
-        """Set task progress.
+        """Sets a task's progress.

         :param task_name: task name
-        :param progress: task progress
-        :param details: task specific progress information
+        :param progress: the task's progress (0.0 <-> 1.0)
+        :param details: any task specific progress details
         """
-        metadata_update = {
+        update_with = {
             'progress': progress,
         }
         if details is not None:
@@ -245,109 +268,118 @@ class Storage(object):
             # updating details (e.g. automatically from engine)
             # we save progress value with details, too.
             if details:
-                metadata_update['progress_details'] = {
+                update_with['progress_details'] = {
                     'at_progress': progress,
                     'details': details,
                 }
             else:
-                metadata_update['progress_details'] = None
-        self.update_task_metadata(task_name, metadata_update)
+                update_with['progress_details'] = None
+        self._update_atom_metadata(task_name, update_with,
+                                   expected_type=logbook.TaskDetail)

     def get_task_progress(self, task_name):
-        """Get progress of task with given name.
+        """Gets the progress of a task given the task's name.

-        :param task_name: task name
+        :param task_name: the task's name
         :returns: current task progress value
         """
         with self._lock.read_lock():
-            td = self._taskdetail_by_name(task_name)
-            if not td.meta:
-                return 0.0
-            return td.meta.get('progress', 0.0)
+            ad = self._atomdetail_by_name(task_name,
+                                          expected_type=logbook.TaskDetail)
+            try:
+                return ad.meta['progress']
+            except (TypeError, KeyError):
+                return 0.0

     def get_task_progress_details(self, task_name):
-        """Get progress details of task with given name.
+        """Gets the progress details of a task given the task's name.

         :param task_name: task name
         :returns: None if progress_details not defined, else progress_details
                   dict
         """
         with self._lock.read_lock():
-            td = self._taskdetail_by_name(task_name)
-            if not td.meta:
-                return None
-            return td.meta.get('progress_details')
+            ad = self._atomdetail_by_name(task_name,
+                                          expected_type=logbook.TaskDetail)
+            try:
+                return ad.meta['progress_details']
+            except (TypeError, KeyError):
+                return None
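The progress helpers stay task-specific, which is why they pass expected_type=logbook.TaskDetail; asking for the progress of a retry atom now raises TypeError. Typical use, sketched against a throwaway in-memory flow detail (this assumes persistence_utils.temporary_flow_detail() works without an explicit backend argument):

from taskflow import storage
from taskflow.utils import persistence_utils as p_utils

_lb, flow_detail = p_utils.temporary_flow_detail()
s = storage.SingleThreadedStorage(flow_detail=flow_detail)
s.ensure_task('my task')
s.set_task_progress('my task', 0.5, details='halfway there')
assert s.get_task_progress('my task') == 0.5
assert s.get_task_progress_details('my task')['at_progress'] == 0.5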

-    def _check_all_results_provided(self, task_name, data):
-        """Warn if task did not provide some of results.
+    def _check_all_results_provided(self, atom_name, data):
+        """Warn if an atom did not provide some of its expected results.

-        This may happen if task returns shorter tuple or list or dict
-        without all needed keys. It may also happen if task returns
-        result of wrong type.
+        This may happen if an atom returns a shorter tuple or list or a dict
+        without all of the needed keys. It may also happen if an atom returns
+        a result of the wrong type.
         """
-        result_mapping = self._result_mappings.get(task_name)
+        result_mapping = self._result_mappings.get(atom_name)
         if not result_mapping:
             return
         for name, index in six.iteritems(result_mapping):
             try:
                 misc.item_from(data, index, name=name)
             except exceptions.NotFound:
-                LOG.warning("Task %s did not supply result "
-                            "with index %r (name %s)", task_name, index, name)
+                LOG.warning("Atom %s did not supply result "
+                            "with index %r (name %s)", atom_name, index, name)

-    def save(self, task_name, data, state=states.SUCCESS):
-        """Put result for task with id 'uuid' to storage."""
+    def save(self, atom_name, data, state=states.SUCCESS):
+        """Puts the result for an atom with the given name into storage."""
         with self._lock.write_lock():
-            td = self._taskdetail_by_name(task_name)
-            td.state = state
+            ad = self._atomdetail_by_name(atom_name)
+            ad.state = state
             if state == states.FAILURE and isinstance(data, misc.Failure):
+                # FIXME(harlowja): this seems like it should be internal logic
+                # in the atom detail object and not in here. Fix that soon...
+                #
                 # Do not clean retry history
-                if td.atom_type != logbook.RETRY_DETAIL:
-                    td.results = None
-                td.failure = data
-                self._failures[td.name] = data
+                if not isinstance(ad, logbook.RetryDetail):
+                    ad.results = None
+                ad.failure = data
+                self._failures[ad.name] = data
             else:
-                if td.atom_type == logbook.RETRY_DETAIL:
-                    td.results.append((data, {}))
+                # FIXME(harlowja): this seems like it should be internal logic
+                # in the atom detail object and not in here. Fix that soon...
+                if isinstance(ad, logbook.RetryDetail):
+                    ad.results.append((data, {}))
                 else:
-                    td.results = data
-                    td.failure = None
-                self._check_all_results_provided(td.name, data)
-            self._with_connection(self._save_task_detail, task_detail=td)
+                    ad.results = data
+                    ad.failure = None
+                self._check_all_results_provided(ad.name, data)
+            self._with_connection(self._save_atom_detail, ad)

     def save_retry_failure(self, retry_name, failed_atom_name, failure):
         """Save subflow failure to retry controller history."""
         with self._lock.write_lock():
-            td = self._taskdetail_by_name(retry_name)
-            if td.atom_type != logbook.RETRY_DETAIL:
-                raise TypeError(
-                    "Atom %s is not a retry controller." % retry_name)
-            failures = td.results[-1][1]
+            ad = self._atomdetail_by_name(retry_name,
+                                          expected_type=logbook.RetryDetail)
+            failures = ad.results[-1][1]
             if failed_atom_name not in failures:
                 failures[failed_atom_name] = failure
-                self._with_connection(self._save_task_detail, task_detail=td)
+                self._with_connection(self._save_atom_detail, ad)

     def cleanup_retry_history(self, retry_name, state):
-        """Cleanup history of retry with given name."""
+        """Cleans up the history of a retry atom with the given name."""
         with self._lock.write_lock():
-            td = self._taskdetail_by_name(retry_name)
-            td.state = state
-            td.results = []
-            self._with_connection(self._save_task_detail, task_detail=td)
+            ad = self._atomdetail_by_name(retry_name,
+                                          expected_type=logbook.RetryDetail)
+            ad.state = state
+            ad.results = []
+            self._with_connection(self._save_atom_detail, ad)

-    def get(self, task_name):
-        """Get result for task with name 'task_name' to storage."""
+    def get(self, atom_name):
+        """Gets the result for an atom with a given name from storage."""
         with self._lock.read_lock():
-            td = self._taskdetail_by_name(task_name)
-            if td.failure is not None:
-                cached = self._failures.get(task_name)
-                if td.failure.matches(cached):
+            ad = self._atomdetail_by_name(atom_name)
+            if ad.failure is not None:
+                cached = self._failures.get(atom_name)
+                if ad.failure.matches(cached):
                     return cached
-                return td.failure
-            if td.state not in STATES_WITH_RESULTS:
-                raise exceptions.NotFound("Result for task %s is not known"
-                                          % task_name)
-            return td.results
+                return ad.failure
+            if ad.state not in STATES_WITH_RESULTS:
+                raise exceptions.NotFound("Result for atom %s is not"
+                                          " currently known" % atom_name)
+            return ad.results

     def get_failures(self):
         """Get list of failures that happened with this flow.
@@ -362,21 +394,21 @@ class Storage(object):
         with self._lock.read_lock():
             return bool(self._failures)

-    def _reset_task(self, td, state):
-        if td.name == self.injector_name:
+    def _reset_atom(self, ad, state):
+        if ad.name == self.injector_name:
             return False
-        if td.state == state:
+        if ad.state == state:
             return False
-        td.reset(state)
-        self._failures.pop(td.name, None)
+        ad.reset(state)
+        self._failures.pop(ad.name, None)
         return True

-    def reset(self, task_name, state=states.PENDING):
-        """Remove result for task with id 'uuid' from storage."""
+    def reset(self, atom_name, state=states.PENDING):
+        """Resets the named atom (unless it is already in the given state)."""
         with self._lock.write_lock():
-            td = self._taskdetail_by_name(task_name)
-            if self._reset_task(td, state):
-                self._with_connection(self._save_task_detail, td)
+            ad = self._atomdetail_by_name(atom_name)
+            if self._reset_atom(ad, state):
+                self._with_connection(self._save_atom_detail, ad)

     def inject(self, pairs):
         """Add values into storage.
@@ -386,23 +418,26 @@ class Storage(object):
         """
         with self._lock.write_lock():
             try:
-                td = self._taskdetail_by_name(self.injector_name)
+                ad = self._atomdetail_by_name(
+                    self.injector_name,
+                    expected_type=logbook.TaskDetail)
             except exceptions.NotFound:
                 uuid = uuidutils.generate_uuid()
                 self._create_atom_detail(logbook.TaskDetail,
                                          self.injector_name, uuid)
-                td = self._taskdetail_by_name(self.injector_name)
-                td.results = dict(pairs)
-                td.state = states.SUCCESS
+                ad = self._atomdetail_by_name(self.injector_name,
+                                              expected_type=logbook.TaskDetail)
+                ad.results = dict(pairs)
+                ad.state = states.SUCCESS
             else:
-                td.results.update(pairs)
-            self._with_connection(self._save_task_detail, td)
-            names = six.iterkeys(td.results)
+                ad.results.update(pairs)
+            self._with_connection(self._save_atom_detail, ad)
+            names = six.iterkeys(ad.results)
             self._set_result_mapping(self.injector_name,
                                      dict((name, name) for name in names))
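inject() parks its values on a special injector task detail and registers an identity result mapping, so every injected key becomes fetchable by name. A self-contained sketch (same temporary_flow_detail() assumption as above):

from taskflow import storage
from taskflow.utils import persistence_utils as p_utils

_lb, flow_detail = p_utils.temporary_flow_detail()
s = storage.SingleThreadedStorage(flow_detail=flow_detail)
s.inject({'a': 1, 'b': 2})
assert s.fetch('a') == 1
assert s.fetch_all() == {'a': 1, 'b': 2}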

-    def _set_result_mapping(self, task_name, mapping):
-        """Set mapping for naming task results.
+    def _set_result_mapping(self, atom_name, mapping):
+        """Sets the result mapping for an atom.

         The result saved with given name would be accessible by names
         defined in mapping. Mapping is a dict name => index. If index
@@ -411,42 +446,42 @@ class Storage(object):
         """
         if not mapping:
             return
-        self._result_mappings[task_name] = mapping
+        self._result_mappings[atom_name] = mapping
         for name, index in six.iteritems(mapping):
             entries = self._reverse_mapping.setdefault(name, [])
             # NOTE(imelnikov): We support setting same result mapping for
-            # the same task twice (e.g when we are injecting 'a' and then
+            # the same atom twice (e.g when we are injecting 'a' and then
             # injecting 'a' again), so we should not log warning below in
             # that case and we should have only one item for each pair
-            # (task_name, index) in entries. It should be put to the end of
+            # (atom_name, index) in entries. It should be put to the end of
             # entries list because order matters on fetching.
             try:
-                entries.remove((task_name, index))
+                entries.remove((atom_name, index))
             except ValueError:
                 pass
-            entries.append((task_name, index))
+            entries.append((atom_name, index))
             if len(entries) > 1:
                 LOG.warning("Multiple provider mappings being created for %r",
                             name)

     def fetch(self, name):
-        """Fetch named task result."""
+        """Fetch a named atom's result."""
         with self._lock.read_lock():
             try:
                 indexes = self._reverse_mapping[name]
             except KeyError:
                 raise exceptions.NotFound("Name %r is not mapped" % name)
             # Return the first one that is found.
-            for (task_name, index) in reversed(indexes):
+            for (atom_name, index) in reversed(indexes):
                 try:
-                    result = self.get(task_name)
-                    td = self._taskdetail_by_name(task_name)
+                    result = self.get(atom_name)
+                    ad = self._atomdetail_by_name(atom_name)
                     # If it is a retry's result then fetch values from the
-                    # latest retry run.
-                    if td.atom_type == logbook.RETRY_DETAIL:
+                    # latest retry run only.
+                    if isinstance(ad, logbook.RetryDetail):
                         if result:
                             result = result[-1][0]
                         else:
@@ -457,7 +492,7 @@ class Storage(object):
             raise exceptions.NotFound("Unable to find result %r" % name)

     def fetch_all(self):
-        """Fetch all named task results known so far.
+        """Fetch all named atom results known so far.

         Should be used for debugging and testing purposes mostly.
         """
@@ -471,7 +506,7 @@ class Storage(object):
         return results

     def fetch_mapped_args(self, args_mapping):
-        """Fetch arguments for the task using arguments mapping."""
+        """Fetch arguments for an atom using the atom's arguments mapping."""
         with self._lock.read_lock():
             return dict((key, self.fetch(name))
                         for key, name in six.iteritems(args_mapping))
@@ -490,19 +525,20 @@ class Storage(object):
             state = states.PENDING
         return state

-    def get_retry_history(self, name):
+    def get_retry_history(self, retry_name):
         """Fetch retry results history."""
         with self._lock.read_lock():
-            td = self._taskdetail_by_name(name)
-            if td.failure is not None:
-                cached = self._failures.get(name)
-                history = list(td.results)
-                if td.failure.matches(cached):
+            ad = self._atomdetail_by_name(retry_name,
+                                          expected_type=logbook.RetryDetail)
+            if ad.failure is not None:
+                cached = self._failures.get(retry_name)
+                history = list(ad.results)
+                if ad.failure.matches(cached):
                     history.append((cached, {}))
                 else:
-                    history.append((td.failure, {}))
+                    history.append((ad.failure, {}))
                 return history
-            return td.results
+            return ad.results


 class MultiThreadedStorage(Storage):
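get_retry_history() appends a trailing failure to the returned history, preferring the cached (non-lossy) Failure whenever it still matches what was persisted. A sketch mirroring the tests further below (same temporary_flow_detail() assumption as earlier):

from taskflow import states
from taskflow import storage
from taskflow.utils import misc
from taskflow.utils import persistence_utils as p_utils

_lb, flow_detail = p_utils.temporary_flow_detail()
s = storage.SingleThreadedStorage(flow_detail=flow_detail)
s.ensure_retry('my retry')
s.save('my retry', 'a')                        # first attempt succeeded
failure = misc.Failure.from_exception(RuntimeError('Woot!'))
s.save('my retry', failure, states.FAILURE)    # second attempt failed
assert s.get_retry_history('my retry') == [('a', {}), (failure, {})]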

View File

@@ -102,13 +102,13 @@ class PersistenceTestMixin(object):
         # their parent existing).
         with contextlib.closing(self._get_connection()) as conn:
             self.assertRaises(exc.NotFound, conn.update_flow_details, fd)
-            self.assertRaises(exc.NotFound, conn.update_task_details, td)
+            self.assertRaises(exc.NotFound, conn.update_atom_details, td)

         # Ok now we should be able to save them.
         with contextlib.closing(self._get_connection()) as conn:
             conn.save_logbook(lb)
             conn.update_flow_details(fd)
-            conn.update_task_details(td)
+            conn.update_atom_details(td)

     def test_task_detail_meta_update(self):
         lb_id = uuidutils.generate_uuid()
@@ -123,18 +123,18 @@ class PersistenceTestMixin(object):
         with contextlib.closing(self._get_connection()) as conn:
             conn.save_logbook(lb)
             conn.update_flow_details(fd)
-            conn.update_task_details(td)
+            conn.update_atom_details(td)

         td.meta['test'] = 43
         with contextlib.closing(self._get_connection()) as conn:
-            conn.update_task_details(td)
+            conn.update_atom_details(td)

         with contextlib.closing(self._get_connection()) as conn:
             lb2 = conn.get_logbook(lb_id)
         fd2 = lb2.find(fd.uuid)
         td2 = fd2.find(td.uuid)
         self.assertEqual(td2.meta.get('test'), 43)
-        self.assertEqual(td2.atom_type, logbook.TASK_DETAIL)
+        self.assertIsInstance(td2, logbook.TaskDetail)

     def test_task_detail_with_failure(self):
         lb_id = uuidutils.generate_uuid()
@@ -154,7 +154,7 @@ class PersistenceTestMixin(object):
         with contextlib.closing(self._get_connection()) as conn:
             conn.save_logbook(lb)
             conn.update_flow_details(fd)
-            conn.update_task_details(td)
+            conn.update_atom_details(td)

         # Read failure back
         with contextlib.closing(self._get_connection()) as conn:
@@ -165,7 +165,7 @@ class PersistenceTestMixin(object):
         self.assertEqual(failure.exception_str, 'Woot!')
         self.assertIs(failure.check(RuntimeError), RuntimeError)
         self.assertEqual(failure.traceback_str, td.failure.traceback_str)
-        self.assertEqual(td2.atom_type, logbook.TASK_DETAIL)
+        self.assertIsInstance(td2, logbook.TaskDetail)

     def test_logbook_merge_flow_detail(self):
         lb_id = uuidutils.generate_uuid()
@@ -246,21 +246,21 @@ class PersistenceTestMixin(object):
         lb = logbook.LogBook(name=lb_name, uuid=lb_id)
         fd = logbook.FlowDetail('test', uuid=uuidutils.generate_uuid())
         lb.add(fd)
-        td = logbook.RetryDetail("detail-1", uuid=uuidutils.generate_uuid())
-        td.intention = states.REVERT
-        fd.add(td)
+        rd = logbook.RetryDetail("detail-1", uuid=uuidutils.generate_uuid())
+        rd.intention = states.REVERT
+        fd.add(rd)

         with contextlib.closing(self._get_connection()) as conn:
             conn.save_logbook(lb)
             conn.update_flow_details(fd)
-            conn.update_task_details(td)
+            conn.update_atom_details(rd)

         with contextlib.closing(self._get_connection()) as conn:
             lb2 = conn.get_logbook(lb_id)
         fd2 = lb2.find(fd.uuid)
-        td2 = fd2.find(td.uuid)
-        self.assertEqual(td2.atom_type, logbook.RETRY_DETAIL)
-        self.assertEqual(td2.intention, states.REVERT)
+        rd2 = fd2.find(rd.uuid)
+        self.assertEqual(rd2.intention, states.REVERT)
+        self.assertIsInstance(rd2, logbook.RetryDetail)

     def test_retry_detail_save_with_task_failure(self):
         lb_id = uuidutils.generate_uuid()
@@ -268,23 +268,24 @@ class PersistenceTestMixin(object):
         lb = logbook.LogBook(name=lb_name, uuid=lb_id)
         fd = logbook.FlowDetail('test', uuid=uuidutils.generate_uuid())
         lb.add(fd)
-        td = logbook.RetryDetail("retry-1", uuid=uuidutils.generate_uuid())
+        rd = logbook.RetryDetail("retry-1", uuid=uuidutils.generate_uuid())
         fail = misc.Failure.from_exception(RuntimeError('fail'))
-        td.results.append((42, {'some-task': fail}))
-        fd.add(td)
+        rd.results.append((42, {'some-task': fail}))
+        fd.add(rd)

         # save it
         with contextlib.closing(self._get_connection()) as conn:
             conn.save_logbook(lb)
             conn.update_flow_details(fd)
-            conn.update_task_details(td)
+            conn.update_atom_details(rd)

         # now read it back
         with contextlib.closing(self._get_connection()) as conn:
             lb2 = conn.get_logbook(lb_id)
         fd2 = lb2.find(fd.uuid)
-        td2 = fd2.find(td.uuid)
-        fail2 = td2.results[0][1].get('some-task')
+        rd2 = fd2.find(rd.uuid)
+        self.assertIsInstance(rd2, logbook.RetryDetail)
+        fail2 = rd2.results[0][1].get('some-task')
         self.assertIsInstance(fail2, misc.Failure)
         self.assertTrue(fail.matches(fail2))
@@ -294,23 +295,24 @@ class PersistenceTestMixin(object):
         lb = logbook.LogBook(name=lb_name, uuid=lb_id)
         fd = logbook.FlowDetail('test', uuid=uuidutils.generate_uuid())
         lb.add(fd)
-        td = logbook.RetryDetail("retry-1", uuid=uuidutils.generate_uuid())
-        fd.add(td)
+        rd = logbook.RetryDetail("retry-1", uuid=uuidutils.generate_uuid())
+        fd.add(rd)

         # save it
         with contextlib.closing(self._get_connection()) as conn:
             conn.save_logbook(lb)
             conn.update_flow_details(fd)
-            conn.update_task_details(td)
+            conn.update_atom_details(rd)

         # change intention and save
-        td.intention = states.REVERT
+        rd.intention = states.REVERT
         with contextlib.closing(self._get_connection()) as conn:
-            conn.update_task_details(td)
+            conn.update_atom_details(rd)

         # now read it back
         with contextlib.closing(self._get_connection()) as conn:
             lb2 = conn.get_logbook(lb_id)
         fd2 = lb2.find(fd.uuid)
-        td2 = fd2.find(td.uuid)
-        self.assertEqual(td2.intention, states.REVERT)
+        rd2 = fd2.find(rd.uuid)
+        self.assertEqual(rd2.intention, states.REVERT)
+        self.assertIsInstance(rd2, logbook.RetryDetail)

View File

@@ -263,7 +263,7 @@ class EngineParallelFlowTest(utils.EngineTestBase):
         with contextlib.closing(self.backend.get_connection()) as conn:
             fd.update(conn.update_flow_details(fd))
-            td.update(conn.update_task_details(td))
+            td.update(conn.update_atom_details(td))

         engine = self._make_engine(flow, fd)
         engine.run()

View File

@@ -55,7 +55,7 @@ class TestDuration(test.TestCase):
         e = self.make_engine(flo, fd, be)
         with timing.TimingListener(e):
             e.run()
-        t_uuid = e.storage.get_task_uuid("test-1")
+        t_uuid = e.storage.get_atom_uuid("test-1")
         td = fd.find(t_uuid)
         self.assertIsNotNone(td)
         self.assertIsNotNone(td.meta)

View File

@@ -100,7 +100,7 @@ class TestProgress(test.TestCase):
             e.run()
         end_progress = e.storage.get_task_progress("test")
         self.assertEqual(1.0, end_progress)
-        task_uuid = e.storage.get_task_uuid("test")
+        task_uuid = e.storage.get_atom_uuid("test")
         td = fd.find(task_uuid)
         self.assertEqual(1.0, td.meta['progress'])
         self.assertFalse(td.meta['progress_details'])
@@ -135,7 +135,7 @@ class TestProgress(test.TestCase):
         end_progress = e.storage.get_task_progress("test")
         self.assertEqual(1.0, end_progress)
-        task_uuid = e.storage.get_task_uuid("test")
+        task_uuid = e.storage.get_atom_uuid("test")
         td = fd.find(task_uuid)
         self.assertEqual(1.0, td.meta['progress'])
         self.assertFalse(td.meta['progress_details'])

View File

@@ -395,10 +395,10 @@ class RetryTest(utils.EngineTestBase):
         engine = self._make_engine(flow)
         engine.compile()
         utils.register_notifiers(engine, self.values)
-        engine.storage.set_task_state('r1', st.RETRYING)
-        engine.storage.set_task_state('t1', st.PENDING)
-        engine.storage.set_task_state('t2', st.REVERTED)
-        engine.storage.set_task_state('t3', st.REVERTED)
+        engine.storage.set_atom_state('r1', st.RETRYING)
+        engine.storage.set_atom_state('t1', st.PENDING)
+        engine.storage.set_atom_state('t2', st.REVERTED)
+        engine.storage.set_atom_state('t3', st.REVERTED)

         engine.run()
         expected = ['flow RUNNING',
@@ -427,9 +427,9 @@ class RetryTest(utils.EngineTestBase):
         engine.compile()
         utils.register_notifiers(engine, self.values)
         engine.storage.set_atom_intention('r1', st.RETRY)
-        engine.storage.set_task_state('r1', st.SUCCESS)
-        engine.storage.set_task_state('t1', st.REVERTED)
-        engine.storage.set_task_state('t2', st.REVERTED)
+        engine.storage.set_atom_state('r1', st.SUCCESS)
+        engine.storage.set_atom_state('t1', st.REVERTED)
+        engine.storage.set_atom_state('t2', st.REVERTED)

         engine.run()
         expected = ['flow RUNNING',
@@ -575,7 +575,7 @@ class RetryTest(utils.EngineTestBase):
         if when == 'task updated':
             return engine

         # we schedule task1 for reversion
-        engine.storage.set_task_state('task1', st.REVERTING)
+        engine.storage.set_atom_state('task1', st.REVERTING)
         if when == 'revert scheduled':
             return engine
         raise ValueError('Invalid crash point: %s' % when)

View File

@@ -61,7 +61,7 @@ class StorageTestMixin(object):
_lb, flow_detail = p_utils.temporary_flow_detail(self.backend) _lb, flow_detail = p_utils.temporary_flow_detail(self.backend)
s = storage.SingleThreadedStorage(flow_detail=flow_detail) s = storage.SingleThreadedStorage(flow_detail=flow_detail)
s.ensure_task('my_task') s.ensure_task('my_task')
self.assertTrue(uuidutils.is_uuid_like(s.get_task_uuid('my_task'))) self.assertTrue(uuidutils.is_uuid_like(s.get_atom_uuid('my_task')))
def test_flow_name_and_uuid(self): def test_flow_name_and_uuid(self):
flow_detail = logbook.FlowDetail(name='test-fd', uuid='aaaa') flow_detail = logbook.FlowDetail(name='test-fd', uuid='aaaa')
@@ -72,8 +72,8 @@ class StorageTestMixin(object):
def test_ensure_task(self): def test_ensure_task(self):
s = self._get_storage() s = self._get_storage()
s.ensure_task('my task') s.ensure_task('my task')
self.assertEqual(s.get_task_state('my task'), states.PENDING) self.assertEqual(s.get_atom_state('my task'), states.PENDING)
self.assertTrue(uuidutils.is_uuid_like(s.get_task_uuid('my task'))) self.assertTrue(uuidutils.is_uuid_like(s.get_atom_uuid('my task')))
def test_get_tasks_states(self): def test_get_tasks_states(self):
s = self._get_storage() s = self._get_storage()
@@ -84,13 +84,13 @@ class StorageTestMixin(object):
'my task': (states.SUCCESS, states.EXECUTE), 'my task': (states.SUCCESS, states.EXECUTE),
'my task2': (states.PENDING, states.EXECUTE), 'my task2': (states.PENDING, states.EXECUTE),
} }
self.assertEqual(s.get_tasks_states(['my task', 'my task2']), expected) self.assertEqual(s.get_atoms_states(['my task', 'my task2']), expected)
def test_ensure_task_flow_detail(self): def test_ensure_task_flow_detail(self):
_lb, flow_detail = p_utils.temporary_flow_detail(self.backend) _lb, flow_detail = p_utils.temporary_flow_detail(self.backend)
s = self._get_storage(flow_detail) s = self._get_storage(flow_detail)
s.ensure_task('my task', '3.11') s.ensure_task('my task', '3.11')
td = flow_detail.find(s.get_task_uuid('my task')) td = flow_detail.find(s.get_atom_uuid('my task'))
self.assertIsNotNone(td) self.assertIsNotNone(td)
self.assertEqual(td.name, 'my task') self.assertEqual(td.name, 'my task')
self.assertEqual(td.version, '3.11') self.assertEqual(td.version, '3.11')
@@ -101,7 +101,7 @@ class StorageTestMixin(object):
td = logbook.TaskDetail(name='my_task', uuid='42') td = logbook.TaskDetail(name='my_task', uuid='42')
flow_detail.add(td) flow_detail.add(td)
s = self._get_storage(flow_detail) s = self._get_storage(flow_detail)
self.assertEqual('42', s.get_task_uuid('my_task')) self.assertEqual('42', s.get_atom_uuid('my_task'))
def test_ensure_existing_task(self): def test_ensure_existing_task(self):
_lb, flow_detail = p_utils.temporary_flow_detail(self.backend) _lb, flow_detail = p_utils.temporary_flow_detail(self.backend)
@@ -109,7 +109,7 @@ class StorageTestMixin(object):
flow_detail.add(td) flow_detail.add(td)
s = self._get_storage(flow_detail) s = self._get_storage(flow_detail)
s.ensure_task('my_task') s.ensure_task('my_task')
self.assertEqual('42', s.get_task_uuid('my_task')) self.assertEqual('42', s.get_atom_uuid('my_task'))
def test_save_and_get(self): def test_save_and_get(self):
s = self._get_storage() s = self._get_storage()
@@ -117,22 +117,22 @@ class StorageTestMixin(object):
s.save('my task', 5) s.save('my task', 5)
self.assertEqual(s.get('my task'), 5) self.assertEqual(s.get('my task'), 5)
self.assertEqual(s.fetch_all(), {}) self.assertEqual(s.fetch_all(), {})
self.assertEqual(s.get_task_state('my task'), states.SUCCESS) self.assertEqual(s.get_atom_state('my task'), states.SUCCESS)
def test_save_and_get_other_state(self): def test_save_and_get_other_state(self):
s = self._get_storage() s = self._get_storage()
s.ensure_task('my task') s.ensure_task('my task')
s.save('my task', 5, states.FAILURE) s.save('my task', 5, states.FAILURE)
self.assertEqual(s.get('my task'), 5) self.assertEqual(s.get('my task'), 5)
self.assertEqual(s.get_task_state('my task'), states.FAILURE) self.assertEqual(s.get_atom_state('my task'), states.FAILURE)
def test_save_and_get_failure(self): def test_save_and_get_cached_failure(self):
failure = misc.Failure.from_exception(RuntimeError('Woot!')) failure = misc.Failure.from_exception(RuntimeError('Woot!'))
s = self._get_storage() s = self._get_storage()
s.ensure_task('my task') s.ensure_task('my task')
s.save('my task', failure, states.FAILURE) s.save('my task', failure, states.FAILURE)
self.assertEqual(s.get('my task'), failure) self.assertEqual(s.get('my task'), failure)
self.assertEqual(s.get_task_state('my task'), states.FAILURE) self.assertEqual(s.get_atom_state('my task'), states.FAILURE)
self.assertTrue(s.has_failures()) self.assertTrue(s.has_failures())
self.assertEqual(s.get_failures(), {'my task': failure}) self.assertEqual(s.get_failures(), {'my task': failure})
@@ -143,18 +143,19 @@ class StorageTestMixin(object):
s.save('my task', failure, states.FAILURE) s.save('my task', failure, states.FAILURE)
self.assertEqual(s.get('my task'), failure) self.assertEqual(s.get('my task'), failure)
s._failures['my task'] = None s._failures['my task'] = None
self.assertEqual(s.get('my task'), failure) self.assertTrue(failure.matches(s.get('my task')))
def test_get_failure_from_reverted_task(self): def test_get_failure_from_reverted_task(self):
failure = misc.Failure.from_exception(RuntimeError('Woot!')) failure = misc.Failure.from_exception(RuntimeError('Woot!'))
s = self._get_storage() s = self._get_storage()
s.ensure_task('my task') s.ensure_task('my task')
s.save('my task', failure, states.FAILURE) s.save('my task', failure, states.FAILURE)
s.set_task_state('my task', states.REVERTING) s.set_atom_state('my task', states.REVERTING)
self.assertEqual(s.get('my task'), failure) self.assertEqual(s.get('my task'), failure)
s.set_task_state('my task', states.REVERTED) s.set_atom_state('my task', states.REVERTED)
self.assertEqual(s.get('my task'), failure) self.assertEqual(s.get('my task'), failure)
def test_get_failure_after_reload(self): def test_get_failure_after_reload(self):
@@ -163,10 +164,10 @@ class StorageTestMixin(object):
s.ensure_task('my task') s.ensure_task('my task')
s.save('my task', failure, states.FAILURE) s.save('my task', failure, states.FAILURE)
         s2 = self._get_storage(s._flowdetail)
-        self.assertIs(s2.has_failures(), True)
-        self.assertEqual(s2.get_failures(), {'my task': failure})
-        self.assertEqual(s2.get('my task'), failure)
-        self.assertEqual(s2.get_task_state('my task'), states.FAILURE)
+        self.assertTrue(s2.has_failures())
+        self.assertEqual(1, len(s2.get_failures()))
+        self.assertTrue(failure.matches(s2.get('my task')))
+        self.assertEqual(s2.get_atom_state('my task'), states.FAILURE)
 
     def test_get_non_existing_var(self):
         s = self._get_storage()

@@ -178,7 +179,7 @@ class StorageTestMixin(object):
         s.ensure_task('my task')
         s.save('my task', 5)
         s.reset('my task')
-        self.assertEqual(s.get_task_state('my task'), states.PENDING)
+        self.assertEqual(s.get_atom_state('my task'), states.PENDING)
         self.assertRaises(exceptions.NotFound, s.get, 'my task')
 
     def test_reset_unknown_task(self):

@@ -203,11 +204,11 @@ class StorageTestMixin(object):
     def test_task_metadata_update_with_none(self):
         s = self._get_storage()
         s.ensure_task('my task')
-        s.update_task_metadata('my task', None)
+        s.update_atom_metadata('my task', None)
         self.assertEqual(s.get_task_progress('my task'), 0.0)
         s.set_task_progress('my task', 0.5)
         self.assertEqual(s.get_task_progress('my task'), 0.5)
-        s.update_task_metadata('my task', None)
+        s.update_atom_metadata('my task', None)
         self.assertEqual(s.get_task_progress('my task'), 0.5)
 
     def test_default_task_progress(self):

@@ -357,24 +358,24 @@ class StorageTestMixin(object):
         s = self._get_storage()
         state = states.PENDING
         s.ensure_task('my task')
-        s.set_task_state('my task', state)
-        self.assertEqual(s.get_task_state('my task'), state)
+        s.set_atom_state('my task', state)
+        self.assertEqual(s.get_atom_state('my task'), state)
 
     def test_get_state_of_unknown_task(self):
         s = self._get_storage()
         self.assertRaisesRegexp(exceptions.NotFound, '^Unknown',
-                                s.get_task_state, 'my task')
+                                s.get_atom_state, 'my task')
 
     def test_task_by_name(self):
         s = self._get_storage()
         s.ensure_task('my task')
-        self.assertTrue(uuidutils.is_uuid_like(s.get_task_uuid('my task')))
+        self.assertTrue(uuidutils.is_uuid_like(s.get_atom_uuid('my task')))
 
     def test_unknown_task_by_name(self):
         s = self._get_storage()
         self.assertRaisesRegexp(exceptions.NotFound,
-                                '^Unknown task name:',
-                                s.get_task_uuid, '42')
+                                '^Unknown atom',
+                                s.get_atom_uuid, '42')
 
     def test_initial_flow_state(self):
         s = self._get_storage()

@@ -451,7 +452,7 @@ class StorageTestMixin(object):
         s = self._get_storage()
         s.ensure_task('my retry')
         self.assertRaisesRegexp(exceptions.Duplicate,
-                                '^Task detail', s.ensure_retry, 'my retry')
+                                '^Atom detail', s.ensure_retry, 'my retry')
 
     def test_save_retry_results(self):
         s = self._get_storage()

@@ -481,21 +482,21 @@ class StorageTestMixin(object):
         self.assertEqual(history, [])
         self.assertEqual(s.fetch_all(), {})
 
-    def test_retry_failure(self):
-        fail = misc.Failure(exc_info=(RuntimeError, RuntimeError(), None))
+    def test_cached_retry_failure(self):
+        failure = misc.Failure.from_exception(RuntimeError('Woot!'))
         s = self._get_storage()
         s.ensure_retry('my retry', result_mapping={'x': 0})
         s.save('my retry', 'a')
-        s.save('my retry', fail, states.FAILURE)
+        s.save('my retry', failure, states.FAILURE)
         history = s.get_retry_history('my retry')
-        self.assertEqual(history, [('a', {}), (fail, {})])
+        self.assertEqual(history, [('a', {}), (failure, {})])
         self.assertIs(s.has_failures(), True)
-        self.assertEqual(s.get_failures(), {'my retry': fail})
+        self.assertEqual(s.get_failures(), {'my retry': failure})
 
     def test_logbook_get_unknown_atom_type(self):
         self.assertRaisesRegexp(TypeError,
-                                'Unknown atom type',
-                                logbook.get_atom_detail_class, 'some_detail')
+                                'Unknown atom',
+                                logbook.atom_detail_class, 'some_detail')
 
     def test_save_task_intention(self):
         s = self._get_storage()
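Note: the switch to failure.matches() above (instead of a straight equality
check) is deliberate: some backends serialize failures on save, and the live
exc_info does not survive the round trip, so a stored-then-loaded failure can
only be compared by its string/type summary. A minimal sketch of that
asymmetry, assuming the Failure.to_dict()/from_dict() pair added later in
this change:

    from taskflow.utils import misc

    try:
        raise RuntimeError('Woot!')
    except RuntimeError:
        failure = misc.Failure()  # captures sys.exc_info() internally

    # Serializing keeps only exception_str/traceback_str/exc_type_names,
    # so the rehydrated copy has no live exc_info left to compare with.
    rebuilt = misc.Failure.from_dict(failure.to_dict())
    assert rebuilt.exc_info is None   # lossy: the traceback object is gone
    assert failure.matches(rebuilt)   # but both describe the same error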

View File

@@ -26,7 +26,6 @@ from taskflow.engines.worker_based import protocol as pr
 from taskflow import test
 from taskflow.tests import utils
 from taskflow.utils import misc
-from taskflow.utils import persistence_utils as pu
 
 
 class TestWorkerTaskExecutor(test.MockTestCase):

@@ -113,7 +112,7 @@ class TestWorkerTaskExecutor(test.MockTestCase):
     def test_on_message_response_state_failure(self):
         failure = misc.Failure.from_exception(Exception('test'))
-        failure_dict = pu.failure_to_dict(failure)
+        failure_dict = failure.to_dict()
         response = pr.Response(pr.FAILURE, result=failure_dict)
         ex = self.executor()
         ex._requests_cache.set(self.task_uuid, self.request_inst_mock)

View File

@@ -22,7 +22,6 @@ from taskflow.engines.worker_based import protocol as pr
 from taskflow import test
 from taskflow.tests import utils
 from taskflow.utils import misc
-from taskflow.utils import persistence_utils as pu
 
 
 class TestProtocol(test.TestCase):

@@ -83,15 +82,14 @@ class TestProtocol(test.TestCase):
     def test_to_dict_with_result_failure(self):
         failure = misc.Failure.from_exception(RuntimeError('Woot!'))
-        expected = self.request_to_dict(
-            result=('failure', pu.failure_to_dict(failure)))
+        expected = self.request_to_dict(result=('failure', failure.to_dict()))
         self.assertEqual(self.request(result=failure).to_dict(), expected)
 
     def test_to_dict_with_failures(self):
         failure = misc.Failure.from_exception(RuntimeError('Woot!'))
         request = self.request(failures={self.task.name: failure})
         expected = self.request_to_dict(
-            failures={self.task.name: pu.failure_to_dict(failure)})
+            failures={self.task.name: failure.to_dict()})
         self.assertEqual(request.to_dict(), expected)
 
 
 @mock.patch('taskflow.engines.worker_based.protocol.misc.wallclock')
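Note: the ('failure', failure.to_dict()) pair above is how a result that is
really a failure gets tagged for transport, so the receiving side can tell it
apart from an ordinary result. A hedged sketch of what a receiver could do
with such a value (decode_result is illustrative only, not the actual
protocol code):

    from taskflow.utils import misc

    def decode_result(result):
        # Failures travel as a ('failure', <dict>) pair; anything else is
        # an ordinary result and passes through untouched.
        if (isinstance(result, (list, tuple)) and len(result) == 2
                and result[0] == 'failure'):
            return misc.Failure.from_dict(result[1])
        return result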

View File

@@ -274,12 +274,15 @@ class TestServer(test.MockTestCase):
         self.assertEqual(self.master_mock.mock_calls, [])
         self.assertTrue(mocked_exception.called)
 
-    @mock.patch('taskflow.engines.worker_based.server.pu')
-    def test_process_request_parse_request_failure(self, pu_mock):
-        failure_dict = 'failure_dict'
+    @mock.patch.object(misc.Failure, 'from_dict')
+    @mock.patch.object(misc.Failure, 'to_dict')
+    def test_process_request_parse_request_failure(self, to_mock, from_mock):
+        failure_dict = {
+            'failure': 'failure',
+        }
         failure = misc.Failure.from_exception(RuntimeError('Woot!'))
-        pu_mock.failure_to_dict.return_value = failure_dict
-        pu_mock.failure_from_dict.side_effect = ValueError('Woot!')
+        to_mock.return_value = failure_dict
+        from_mock.side_effect = ValueError('Woot!')
         request = self.make_request(result=failure)
 
         # create server and process request

@@ -293,12 +296,14 @@ class TestServer(test.MockTestCase):
             self.reply_to,
             correlation_id=self.task_uuid)
         ]
-        self.assertEqual(self.master_mock.mock_calls, master_mock_calls)
+        self.assertEqual(master_mock_calls, self.master_mock.mock_calls)
 
-    @mock.patch('taskflow.engines.worker_based.server.pu')
-    def test_process_request_endpoint_not_found(self, pu_mock):
-        failure_dict = 'failure_dict'
-        pu_mock.failure_to_dict.return_value = failure_dict
+    @mock.patch.object(misc.Failure, 'to_dict')
+    def test_process_request_endpoint_not_found(self, to_mock):
+        failure_dict = {
+            'failure': 'failure',
+        }
+        to_mock.return_value = failure_dict
         request = self.make_request(task=mock.MagicMock(name='<unknown>'))
 
         # create server and process request

@@ -314,10 +319,12 @@ class TestServer(test.MockTestCase):
         ]
         self.assertEqual(self.master_mock.mock_calls, master_mock_calls)
 
-    @mock.patch('taskflow.engines.worker_based.server.pu')
-    def test_process_request_execution_failure(self, pu_mock):
-        failure_dict = 'failure_dict'
-        pu_mock.failure_to_dict.return_value = failure_dict
+    @mock.patch.object(misc.Failure, 'to_dict')
+    def test_process_request_execution_failure(self, to_mock):
+        failure_dict = {
+            'failure': 'failure',
+        }
+        to_mock.return_value = failure_dict
         request = self.make_request()
         request['action'] = '<unknown>'

@@ -337,10 +344,12 @@ class TestServer(test.MockTestCase):
         ]
         self.assertEqual(self.master_mock.mock_calls, master_mock_calls)
 
-    @mock.patch('taskflow.engines.worker_based.server.pu')
-    def test_process_request_task_failure(self, pu_mock):
-        failure_dict = 'failure_dict'
-        pu_mock.failure_to_dict.return_value = failure_dict
+    @mock.patch.object(misc.Failure, 'to_dict')
+    def test_process_request_task_failure(self, to_mock):
+        failure_dict = {
+            'failure': 'failure',
+        }
+        to_mock.return_value = failure_dict
         request = self.make_request(task=utils.TaskWithFailure(), arguments={})
 
         # create server and process request
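Note: with stacked mock.patch.object decorators, the decorator closest to the
function is applied first and therefore supplies the first mock argument,
which is why the signature above reads (self, to_mock, from_mock). A
standalone illustration (Thing is a made-up stand-in; these tests used the
external mock library, but unittest.mock behaves the same):

    from unittest import mock

    class Thing(object):
        def to_dict(self):
            return {'real': True}

        @classmethod
        def from_dict(cls, data):
            return cls()

    @mock.patch.object(Thing, 'from_dict')
    @mock.patch.object(Thing, 'to_dict')
    def check(to_mock, from_mock):
        # The bottom decorator (to_dict) was applied first, so it binds
        # to the first positional argument.
        to_mock.return_value = {'failure': 'failure'}
        from_mock.side_effect = ValueError('Woot!')
        assert Thing().to_dict() == {'failure': 'failure'}

    check()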

View File

@@ -531,6 +531,7 @@ class Failure(object):
     Failure objects encapsulate exception information so that
     it can be re-used later to re-raise or inspect.
     """
+    DICT_VERSION = 1
 
     def __init__(self, exc_info=None, **kwargs):
         if not kwargs:

@@ -663,6 +664,23 @@ class Failure(object):
         for et in self._exc_type_names:
             yield et
 
+    @classmethod
+    def from_dict(cls, data):
+        data = dict(data)
+        version = data.pop('version', None)
+        if version != cls.DICT_VERSION:
+            raise ValueError('Invalid dict version of failure object: %r'
+                             % version)
+        return cls(**data)
+
+    def to_dict(self):
+        return {
+            'exception_str': self.exception_str,
+            'traceback_str': self.traceback_str,
+            'exc_type_names': list(self),
+            'version': self.DICT_VERSION,
+        }
+
     def copy(self):
         return Failure(exc_info=copy_exc_info(self.exc_info),
                        exception_str=self.exception_str,
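Note: the DICT_VERSION guard makes from_dict() fail fast on stale or mangled
payloads instead of silently building a bogus failure. A quick sketch of the
intended behavior, assuming the methods land exactly as shown above:

    from taskflow.utils import misc

    failure = misc.Failure.from_exception(RuntimeError('Woot!'))
    data = failure.to_dict()
    assert data['version'] == misc.Failure.DICT_VERSION

    # Rehydration succeeds for the current version...
    assert failure.matches(misc.Failure.from_dict(data))

    # ...and anything else is rejected.
    data['version'] = 99
    try:
        misc.Failure.from_dict(data)
    except ValueError:
        pass  # invalid dict version of failure object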

View File

@@ -15,11 +15,8 @@
 # under the License.
 
 import contextlib
-import copy
 import logging
 
-import six
-
 from taskflow.openstack.common import timeutils
 from taskflow.openstack.common import uuidutils
 from taskflow.persistence import logbook
@@ -92,130 +89,6 @@ def create_flow_detail(flow, book=None, backend=None, meta=None):
     return flow_detail
 
 
-def _copy_function(deep_copy):
-    if deep_copy:
-        return copy.deepcopy
-    else:
-        return lambda x: x
-
-
-def task_details_merge(td_e, td_new, deep_copy=False):
-    """Merges an existing task details with a new task details object.
-
-    The new task details fields, if they differ will replace the existing
-    objects fields (except name, version, uuid which can not be replaced).
-
-    If 'deep_copy' is True, fields are copied deeply (by value) if possible.
-    """
-    if td_e is td_new:
-        return td_e
-    copy_fn = _copy_function(deep_copy)
-    # NOTE(imelnikov): states and intentions are just strings, no need to copy.
-    td_e.state = td_new.state
-    td_e.intention = td_new.intention
-    if td_e.results != td_new.results:
-        td_e.results = copy_fn(td_new.results)
-    if td_e.failure != td_new.failure:
-        # NOTE(imelnikov): we can't just deep copy Failures, as they
-        # contain tracebacks, which are not copyable.
-        if deep_copy:
-            td_e.failure = td_new.failure.copy()
-        else:
-            td_e.failure = td_new.failure
-    if td_e.meta != td_new.meta:
-        td_e.meta = copy_fn(td_new.meta)
-    if td_e.version != td_new.version:
-        td_e.version = copy_fn(td_new.version)
-    return td_e
-
-
-def flow_details_merge(fd_e, fd_new, deep_copy=False):
-    """Merges an existing flow details with a new flow details object.
-
-    The new flow details fields, if they differ will replace the existing
-    objects fields (except name and uuid which can not be replaced).
-
-    If 'deep_copy' is True, fields are copied deeply (by value) if possible.
-    """
-    if fd_e is fd_new:
-        return fd_e
-    copy_fn = _copy_function(deep_copy)
-    if fd_e.meta != fd_new.meta:
-        fd_e.meta = copy_fn(fd_new.meta)
-    if fd_e.state != fd_new.state:
-        # NOTE(imelnikov): states are just strings, no need to copy.
-        fd_e.state = fd_new.state
-    return fd_e
-
-
-def logbook_merge(lb_e, lb_new, deep_copy=False):
-    """Merges an existing logbook with a new logbook object.
-
-    The new logbook fields, if they differ will replace the existing
-    objects fields (except name and uuid which can not be replaced).
-
-    If 'deep_copy' is True, fields are copied deeply (by value) if possible.
-    """
-    if lb_e is lb_new:
-        return lb_e
-    copy_fn = _copy_function(deep_copy)
-    if lb_e.meta != lb_new.meta:
-        lb_e.meta = copy_fn(lb_new.meta)
-    return lb_e
-
-
-def failure_to_dict(failure):
-    """Convert misc.Failure object to JSON-serializable dict."""
-    if not failure:
-        return None
-    if not isinstance(failure, misc.Failure):
-        raise TypeError('Failure object expected, but got %r'
-                        % failure)
-    return {
-        'exception_str': failure.exception_str,
-        'traceback_str': failure.traceback_str,
-        'exc_type_names': list(failure),
-        'version': 1
-    }
-
-
-def failure_from_dict(data):
-    """Restore misc.Failure object from dict.
-
-    The dict should be similar to what failure_to_dict() function produces.
-    """
-    if not data:
-        return None
-    version = data.pop('version', None)
-    if version != 1:
-        raise ValueError('Invalid version of saved Failure object: %r'
-                         % version)
-    return misc.Failure(**data)
-
-
-def encode_retry_results(results):
-    new_results = []
-    for (data, failures) in results:
-        new_failures = {}
-        for key, value in six.iteritems(failures):
-            new_failures[key] = failure_to_dict(value)
-        new_results.append((data, new_failures))
-    return new_results
-
-
-def decode_retry_results(results):
-    new_results = []
-    for (data, failures) in results:
-        new_failures = {}
-        for key, value in six.iteritems(failures):
-            new_failures[key] = failure_from_dict(value)
-        new_results.append((data, new_failures))
-    return new_results
-
-
 def _format_meta(metadata, indent):
     """Format the common metadata dictionary in the same manner."""
     if not metadata:
@@ -245,18 +118,18 @@ def _format_shared(obj, indent):
     return lines
 
-def pformat_task_detail(task_detail, indent=0):
-    """Pretty formats a task detail."""
-    detail_type = task_detail.atom_type
-    lines = ["%s%s: '%s'" % (" " * (indent), detail_type, task_detail.name)]
-    lines.extend(_format_shared(task_detail, indent=indent + 1))
+def pformat_atom_detail(atom_detail, indent=0):
+    """Pretty formats a atom detail."""
+    detail_type = logbook.atom_detail_type(atom_detail)
+    lines = ["%s%s: '%s'" % (" " * (indent), detail_type, atom_detail.name)]
+    lines.extend(_format_shared(atom_detail, indent=indent + 1))
     lines.append("%s- version = %s"
-                 % (" " * (indent + 1), misc.get_version_string(task_detail)))
+                 % (" " * (indent + 1), misc.get_version_string(atom_detail)))
     lines.append("%s- results = %s"
-                 % (" " * (indent + 1), task_detail.results))
+                 % (" " * (indent + 1), atom_detail.results))
     lines.append("%s- failure = %s" % (" " * (indent + 1),
-                                       bool(task_detail.failure)))
-    lines.extend(_format_meta(task_detail.meta, indent=indent + 1))
+                                       bool(atom_detail.failure)))
+    lines.extend(_format_meta(atom_detail.meta, indent=indent + 1))
     return "\n".join(lines)
@@ -266,7 +139,7 @@ def pformat_flow_detail(flow_detail, indent=0):
     lines.extend(_format_shared(flow_detail, indent=indent + 1))
     lines.extend(_format_meta(flow_detail.meta, indent=indent + 1))
     for task_detail in flow_detail:
-        lines.append(pformat_task_detail(task_detail, indent=indent + 1))
+        lines.append(pformat_atom_detail(task_detail, indent=indent + 1))
     return "\n".join(lines)
@@ -286,86 +159,3 @@ def pformat(book, indent=0):
     for flow_detail in book:
         lines.append(pformat_flow_detail(flow_detail, indent=indent + 1))
     return "\n".join(lines)
-
-
-def _str_2_datetime(text):
-    """Converts an iso8601 string/text into a datetime object (or none)."""
-    if text is None:
-        return None
-    if not isinstance(text, six.string_types):
-        raise ValueError("Can only convert strings into a datetime object and"
-                         " not %r" % (text))
-    if not len(text):
-        return None
-    return timeutils.parse_isotime(text)
-
-
-def format_task_detail(td):
-    results = td.results
-    if td.atom_type == logbook.RETRY_DETAIL:
-        results = encode_retry_results(results)
-    return {
-        'failure': failure_to_dict(td.failure),
-        'meta': td.meta,
-        'name': td.name,
-        'results': results,
-        'state': td.state,
-        'version': td.version,
-        'atom_type': td.atom_type,
-        'intention': td.intention,
-    }
-
-
-def unformat_task_detail(uuid, td_data):
-    results = td_data.get('results')
-    if td_data['atom_type'] == logbook.RETRY_DETAIL:
-        results = decode_retry_results(results)
-    atom_cls = logbook.get_atom_detail_class(td_data['atom_type'])
-    td = atom_cls(name=td_data['name'], uuid=uuid)
-    td.state = td_data.get('state')
-    td.failure = failure_from_dict(td_data.get('failure'))
-    td.intention = td_data.get('intention')
-    td.results = results
-    td.meta = td_data.get('meta')
-    td.version = td_data.get('version')
-    return td
-
-
-def format_flow_detail(fd):
-    return {
-        'name': fd.name,
-        'meta': fd.meta,
-        'state': fd.state,
-    }
-
-
-def unformat_flow_detail(uuid, fd_data):
-    fd = logbook.FlowDetail(name=fd_data['name'], uuid=uuid)
-    fd.state = fd_data.get('state')
-    fd.meta = fd_data.get('meta')
-    return fd
-
-
-def format_logbook(lb, created_at=None):
-    lb_data = {
-        'name': lb.name,
-        'meta': lb.meta,
-    }
-    if created_at:
-        lb_data['created_at'] = timeutils.isotime(at=created_at)
-        lb_data['updated_at'] = timeutils.isotime()
-    else:
-        lb_data['created_at'] = timeutils.isotime()
-        lb_data['updated_at'] = None
-    return lb_data
-
-
-def unformat_logbook(uuid, lb_data):
-    lb = logbook.LogBook(name=lb_data['name'],
-                         uuid=uuid,
-                         updated_at=_str_2_datetime(lb_data['updated_at']),
-                         created_at=_str_2_datetime(lb_data['created_at']))
-    lb.meta = lb_data.get('meta')
-    return lb
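Note: the formatting/unformatting helpers deleted above are not simply gone;
their responsibilities appear to move onto the logbook objects themselves
(assumed to_dict()/from_dict()/merge() methods on those types), with the
detail type strings hidden behind conversion functions. The type round trip
that is visible in this diff looks like:

    from taskflow.persistence import logbook

    td = logbook.TaskDetail(name='my task', uuid='a-uuid')  # placeholder uuid

    # Instead of reading td.atom_type, derive the (now private) type string
    # and resolve it back to a class via the new conversion functions.
    type_name = logbook.atom_detail_type(td)
    assert logbook.atom_detail_class(type_name) is logbook.TaskDetail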