Files
deb-python-taskflow/taskflow/persistence/backends/impl_memory.py
Joshua Harlow 58a5a0932d Persistence cleanup part one
- Convert the various functions that take a task detail into
  ones that take atom details (since this is now the generic
  type they should take).
- Don't expose the detail type strings as part of the atom
  detail api, leave those as private hidden strings and provide
  conversion functions from string<->class instead.
- Have the logbook objects contain the following new methods
  to reduce the dependence on persistence_utils to do the same.
  - to_dict() which converts the current object into a dict
  - from_dict() which converts the provided dict into an object
  - merge() which merges an incoming object's data with the current
    object's
- Have the persistence backends + storage + action engine use these
  new methods instead of their current usage.
- Don't compare to logbook.RETRY_DETAIL or logbook.TASK_DETAIL since
  python has the isinstance function just use it (ideally we should
  fix the code so that this isn't even needed, usage of isinstance
  means something is not designed/structured right).
- In storage tests we can't assume that failures will be non-lossy
  since under certain backends when a failure is stored information
  about the internally held exc_info is lost, so take this into
  account when testing by using matches() where applicable.

Change-Id: Ie8a274cfd4cb4e64e87c355dc99d466d74a4e82c
2014-03-26 12:48:40 -07:00

153 lines
4.7 KiB
Python

# -*- coding: utf-8 -*-
# Copyright (C) 2012 Yahoo! Inc. All Rights Reserved.
# Copyright (C) 2013 Rackspace Hosting All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Implementation of in-memory backend."""
import logging
import six
from taskflow import exceptions as exc
from taskflow.persistence.backends import base
from taskflow.persistence import logbook
LOG = logging.getLogger(__name__)
class MemoryBackend(base.Backend):
"""A backend that writes logbooks, flow details, and task details to in
memory dictionaries.
"""
def __init__(self, conf):
super(MemoryBackend, self).__init__(conf)
self._log_books = {}
self._flow_details = {}
self._atom_details = {}
@property
def log_books(self):
return self._log_books
@property
def flow_details(self):
return self._flow_details
@property
def atom_details(self):
return self._atom_details
def get_connection(self):
return Connection(self)
def close(self):
pass
class Connection(base.Connection):
def __init__(self, backend):
self._backend = backend
def upgrade(self):
pass
def validate(self):
pass
@property
def backend(self):
return self._backend
def close(self):
pass
def clear_all(self):
count = 0
for book_uuid in list(six.iterkeys(self.backend.log_books)):
self.destroy_logbook(book_uuid)
count += 1
return count
def destroy_logbook(self, book_uuid):
try:
# Do the same cascading delete that the sql layer does.
lb = self.backend.log_books.pop(book_uuid)
for fd in lb:
self.backend.flow_details.pop(fd.uuid, None)
for ad in fd:
self.backend.atom_details.pop(ad.uuid, None)
except KeyError:
raise exc.NotFound("No logbook found with id: %s" % book_uuid)
def update_atom_details(self, atom_detail):
try:
e_ad = self.backend.atom_details[atom_detail.uuid]
except KeyError:
raise exc.NotFound("No atom details found with id: %s"
% atom_detail.uuid)
return e_ad.merge(atom_detail, deep_copy=True)
def _save_flowdetail_atoms(self, e_fd, flow_detail):
for atom_detail in flow_detail:
e_ad = e_fd.find(atom_detail.uuid)
if e_ad is None:
e_fd.add(atom_detail)
self.backend.atom_details[atom_detail.uuid] = atom_detail
else:
e_ad.merge(atom_detail, deep_copy=True)
def update_flow_details(self, flow_detail):
try:
e_fd = self.backend.flow_details[flow_detail.uuid]
except KeyError:
raise exc.NotFound("No flow details found with id: %s"
% flow_detail.uuid)
e_fd.merge(flow_detail, deep_copy=True)
self._save_flowdetail_atoms(e_fd, flow_detail)
return e_fd
def save_logbook(self, book):
# Get a existing logbook model (or create it if it isn't there).
try:
e_lb = self.backend.log_books[book.uuid]
except KeyError:
e_lb = logbook.LogBook(book.name, uuid=book.uuid)
self.backend.log_books[e_lb.uuid] = e_lb
e_lb.merge(book, deep_copy=True)
# Add anything in to the new logbook that isn't already in the existing
# logbook.
for flow_detail in book:
try:
e_fd = self.backend.flow_details[flow_detail.uuid]
except KeyError:
e_fd = logbook.FlowDetail(flow_detail.name, flow_detail.uuid)
e_lb.add(e_fd)
self.backend.flow_details[e_fd.uuid] = e_fd
e_fd.merge(flow_detail, deep_copy=True)
self._save_flowdetail_atoms(e_fd, flow_detail)
return e_lb
def get_logbook(self, book_uuid):
try:
return self.backend.log_books[book_uuid]
except KeyError:
raise exc.NotFound("No logbook found with id: %s" % book_uuid)
def get_logbooks(self):
for lb in list(six.itervalues(self.backend.log_books)):
yield lb