deb-mistral/mistral/workflow/data_flow.py
Renat Akhmerov 816bfd9dcc Refactor Mistral Engine
* Introduced class hierarchies Task and Action used by Mistral engine.
  Note: Action here is different from the executor Action; it rather
  represents actions of different types: regular Python action, ad-hoc
  action and workflow action (since, from a task's point of view,
  action and workflow are polymorphic)
* Refactored task_handler.py and action_handler.py with Task and Action
  hierarchies
* Rebuilt the call chain so that the entire action processing looks
  like a chain of calls Action -> Task -> Workflow, where each level
  knows only about the next level and can influence it (e.g. if an
  ad-hoc action fails due to a YAQL error in its 'output' transformer,
  the action itself fails its task); see the sketch after this list
* Refactored policies according to the new object model
* Fixed some of the tests to match the idea of having two types of
  exceptions, MistralException and MistralError, where the latter is
  considered either a harsh environmental problem or a logical issue
  in the system itself, so it must not be handled anywhere in the code
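
  To illustrate the chain, here is a heavily simplified sketch
  (hypothetical classes; names and signatures are illustrative only
  and do not match the actual engine code):

      class Workflow(object):
          def on_task_complete(self, task):
              # Decide what to run next or complete the workflow.
              ...

      class Task(object):
          def __init__(self, name, wf):
              self.name = name
              self.state = 'RUNNING'
              self.wf = wf

          def on_action_complete(self, result):
              # An action talks only to its task; the task, in turn,
              # notifies its workflow (the next level in the chain).
              self.state = 'ERROR' if result.is_error else 'SUCCESS'
              self.wf.on_task_complete(self)

      class Action(object):
          def __init__(self, task):
              self.task = task

          def complete(self, result):
              # E.g. a failed 'output' transformer in an ad-hoc action
              # surfaces here as an error result and fails the task.
              self.task.on_action_complete(result)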

TODO(in subsequent patches):
 * Refactor WithItemsTask w/o using with_items.py
 * Remove the DB transaction in Scheduler when making a delayed call,
   and helper policy methods like 'continue_workflow'
 * Refactor policies tests so that workflow definitions live right
   in the test methods
 * Refactor workflow_handler with Workflow abstraction
 * Get rid of the RunExistingTask workflow command; it should be just
   one command with various properties
 * Refactor resume and rerun with Task abstraction (same way as
   other methods, e.g. on_action_complete())
 * Add error handling to all required places such as
   task_handler.continue_task()
 * More tests for error handling

P.S. This patch is very big, but it was nearly impossible to split
it into multiple smaller patches simply because of how entangled
everything was in Mistral Engine.

Partially implements: blueprint mistral-engine-error-handling
Implements: blueprint mistral-action-result-processing-pipeline
Implements: blueprint mistral-refactor-task-handler
Closes-Bug: #1568909

Change-Id: I0668e695c60dde31efc690563fc891387d44d6ba
2016-05-31 14:08:36 +00:00


# Copyright 2013 - Mirantis, Inc.
# Copyright 2015 - StackStorm, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import copy

from oslo_config import cfg
from oslo_log import log as logging

from mistral import context as auth_ctx
from mistral.db.v2 import api as db_api
from mistral.db.v2.sqlalchemy import models
from mistral import expressions as expr
from mistral import utils
from mistral.utils import inspect_utils
from mistral.workbook import parser as spec_parser
from mistral.workflow import states
from mistral.workflow import with_items


LOG = logging.getLogger(__name__)

CONF = cfg.CONF


def evaluate_upstream_context(upstream_task_execs):
    published_vars = {}
    ctx = {}

    for t_ex in upstream_task_execs:
        # TODO(rakhmerov): These two merges look confusing. So it's a
        # temporary solution. There's still the bug
        # https://bugs.launchpad.net/mistral/+bug/1424461 that needs to be
        # fixed using context variable versioning.
        published_vars = utils.merge_dicts(
            published_vars,
            t_ex.published
        )

        utils.merge_dicts(ctx, evaluate_task_outbound_context(t_ex))

    return utils.merge_dicts(ctx, published_vars)
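
# Example for evaluate_upstream_context() above (hypothetical data,
# not executed). Assuming utils.merge_dicts() recursively merges its
# right argument into its left one, outbound contexts of two upstream
# tasks are merged first and the explicitly published variables then
# win over plain context values:
#
#     merged outbound contexts: {'a': 1, 'b': 2, 'c': 2}
#     merged published vars:    {'b': 'published'}
#     resulting context:        {'a': 1, 'b': 'published', 'c': 2}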


def _extract_execution_result(ex):
    if isinstance(ex, models.WorkflowExecution):
        return ex.output

    if ex.output:
        return ex.output['result']


def invalidate_task_execution_result(task_ex):
    for ex in task_ex.executions:
        ex.accepted = False


def get_task_execution_result(task_ex):
    # Use of task_ex.executions requires a session to lazy load the action
    # executions. This get_task_execution_result method is also invoked
    # from get_all in the task execution API controller. If there are a lot
    # of reads against the API, it will lead to a lot of unnecessary DB locks
    # which result in possible deadlocks and WF execution failures. Therefore,
    # use db_api.get_action_executions here to avoid session-less use cases.
    action_execs = db_api.get_action_executions(task_execution_id=task_ex.id)

    action_execs.sort(
        key=lambda x: x.runtime_context.get('index')
    )

    results = [
        _extract_execution_result(ex)
        for ex in action_execs
        if hasattr(ex, 'output') and ex.accepted
    ]

    task_spec = spec_parser.get_task_spec(task_ex.spec)

    if task_spec.get_with_items():
        if with_items.get_count(task_ex) > 0:
            return results
        else:
            return []

    return results[0] if len(results) == 1 else results
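
# Example for get_task_execution_result() above (hypothetical values,
# not executed). The tail of the function collapses accepted results
# depending on the task type:
#
#     plain task, one accepted result:  ['x']      -> 'x'
#     plain task, several results:      ['x', 'y'] -> ['x', 'y']
#     with-items task, count > 0:       ['x']      -> ['x']
#     with-items task, count == 0:      any        -> []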


def publish_variables(task_ex, task_spec):
    if task_ex.state != states.SUCCESS:
        return

    expr_ctx = task_ex.in_context

    if task_ex.name in expr_ctx:
        LOG.warning(
            'Shadowing context variable with task name while publishing: %s' %
            task_ex.name
        )

    task_ex.published = expr.evaluate_recursively(
        task_spec.get_publish(),
        expr_ctx
    )
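
# Example for publish_variables() above (hypothetical spec and context,
# not executed). expr.evaluate_recursively() walks the 'publish'
# structure and evaluates every expression string against the task's
# inbound context:
#
#     publish spec:      {'greeting': '<% $.first_name %> says hi'}
#     in_context:        {'first_name': 'Renat'}
#     task_ex.published: {'greeting': 'Renat says hi'}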


def evaluate_task_outbound_context(task_ex):
    """Evaluates task outbound Data Flow context.

    This method assumes that complete task output (after publisher etc.)
    has already been evaluated.

    :param task_ex: DB task.
    :return: Outbound task Data Flow context.
    """
    in_context = (copy.deepcopy(dict(task_ex.in_context))
                  if task_ex.in_context is not None else {})

    return utils.merge_dicts(in_context, task_ex.published)


def evaluate_workflow_output(wf_spec, ctx):
    """Evaluates workflow output.

    :param wf_spec: Workflow specification.
    :param ctx: Final Data Flow context (the outbound context of the
        task that caused the workflow to complete).
    """
    ctx = copy.deepcopy(ctx)

    output_dict = wf_spec.get_output()

    # Evaluate workflow 'output' clause using the final workflow context.
    output = expr.evaluate_recursively(output_dict, ctx)

    # TODO(rakhmerov): Many don't like that we return the whole context
    # if 'output' is not explicitly defined.
    return output or ctx
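
# Example for evaluate_workflow_output() above (hypothetical data,
# not executed):
#
#     final context:   {'vm_id': '123', 'debug': True}
#     'output' clause: {'id': '<% $.vm_id %>'}
#     result:          {'id': '123'}
#
# With no 'output' clause defined, get_output() yields an empty dict
# and the whole final context is returned instead.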


def add_openstack_data_to_context(wf_ex):
    wf_ex.context = wf_ex.context or {}

    if CONF.pecan.auth_enable:
        exec_ctx = auth_ctx.ctx()

        LOG.debug('Data flow security context: %s' % exec_ctx)

        if exec_ctx:
            wf_ex.context.update({'openstack': exec_ctx.to_dict()})


def add_execution_to_context(wf_ex):
    wf_ex.context = wf_ex.context or {}

    wf_ex.context['__execution'] = {
        'id': wf_ex.id,
        'spec': wf_ex.spec,
        'params': wf_ex.params,
        'input': wf_ex.input
    }


def add_environment_to_context(wf_ex):
    wf_ex.context = wf_ex.context or {}

    # If env variables are provided, add an evaluated copy into the context.
    if 'env' in wf_ex.params:
        env = copy.deepcopy(wf_ex.params['env'])

        # An env variable can be an expression of other env variables.
        wf_ex.context['__env'] = expr.evaluate_recursively(env, {'__env': env})
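
# Example for add_environment_to_context() above (hypothetical params,
# not executed). An env variable may be an expression referring to
# another env variable, so the env dict is evaluated against itself
# under the '__env' key:
#
#     env:  {'base_url': 'http://example.com',
#            'api_url': '<% $.__env.base_url %>/api'}
#     wf_ex.context['__env']:
#           {'base_url': 'http://example.com',
#            'api_url': 'http://example.com/api'}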


def add_workflow_variables_to_context(wf_ex, wf_spec):
    wf_ex.context = wf_ex.context or {}

    return utils.merge_dicts(
        wf_ex.context,
        expr.evaluate_recursively(wf_spec.get_vars(), wf_ex.context)
    )


def evaluate_object_fields(obj, context):
    fields = inspect_utils.get_public_fields(obj)

    evaluated_fields = expr.evaluate_recursively(fields, context)

    for k, v in evaluated_fields.items():
        setattr(obj, k, v)
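
# Example for evaluate_object_fields() above (hypothetical object,
# not executed). Only public fields are evaluated; values that are
# not expressions pass through unchanged:
#
#     class RetryPolicy(object):
#         delay = '<% $.retry_delay %>'   # expression
#         count = 3                       # plain value
#
#     evaluate_object_fields(policy, {'retry_delay': 5})
#     # => policy.delay == 5, policy.count == 3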