
Two new CLI commands were added:

- ``execution-get-sub-executions`` — returns the sub-executions of a given execution id.
- ``task-get-sub-executions`` — returns the sub-executions of a given task-execution id.

Both commands support the options:

- ``--errors_only`` — return only the error routes; default is False.
- ``--max_depth`` — the maximum depth of the returned executions; if a negative value is given, the API returns all sub-executions; default is -1.

Change-Id: Ifcd25cfdbfb99613ff1bdccf8b94b3929f02a71d
Implements: blueprint mistral-execution-origin
Signed-off-by: ali <ali.abdelal@nokia.com>
165 lines, 4.8 KiB, Python
# Copyright 2014 - Mirantis, Inc.
# Copyright 2015 - StackStorm, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
|
from oslo_serialization import jsonutils
|
|
|
|
from mistralclient.api.v2.executions import Execution
|
|
from mistralclient.api.v2 import tasks
|
|
from mistralclient.tests.unit.v2 import base
|
|
|
|
# TODO(everyone): later we need additional tests verifying all the errors etc.
|
|
|
|
TASK = {
|
|
'id': "1",
|
|
'workflow_execution_id': '123',
|
|
'name': 'my_task',
|
|
'workflow_name': 'my_wf',
|
|
'state': 'RUNNING',
|
|
'tags': ['deployment', 'demo'],
|
|
'result': {'some': 'result'}
|
|
}
|
|
|
|
SUB_WF_EXEC = {
|
|
'id': "456",
|
|
'workflow_id': '123e4567-e89b-12d3-a456-426655440000',
|
|
'workflow_name': 'my_sub_wf',
|
|
'workflow_namespace': '',
|
|
'task_execution_id': "1",
|
|
'description': '',
|
|
'state': 'RUNNING',
|
|
'input': {}
|
|
}
|
|
|
|
URL_TEMPLATE = '/tasks'
|
|
URL_TEMPLATE_ID = '/tasks/%s'
|
|
URL_TEMPLATE_SUB_EXECUTIONS = '/tasks/%s/executions%s'
|
|
|
|
|
|
class TestTasksV2(base.BaseClientV2Test):
|
|
def test_list(self):
|
|
self.requests_mock.get(self.TEST_URL + URL_TEMPLATE,
|
|
json={'tasks': [TASK]})
|
|
|
|
task_list = self.tasks.list()
|
|
|
|
self.assertEqual(1, len(task_list))
|
|
task = task_list[0]
|
|
|
|
self.assertEqual(
|
|
tasks.Task(self.tasks, TASK).to_dict(),
|
|
task.to_dict()
|
|
)
|
|
|
|
def test_list_with_fields(self):
|
|
field_params = "?fields=id,name"
|
|
|
|
self.requests_mock.get(self.TEST_URL + URL_TEMPLATE + field_params,
|
|
json={'tasks': [TASK]})
|
|
|
|
self.tasks.list(fields=["id,name"])
|
|
self.assertTrue(self.requests_mock.called_once)
|
|
|
|
def test_list_with_no_limit(self):
|
|
self.requests_mock.get(self.TEST_URL + URL_TEMPLATE,
|
|
json={'tasks': [TASK]})
|
|
|
|
task_list = self.tasks.list(limit=-1)
|
|
|
|
self.assertEqual(1, len(task_list))
|
|
|
|
last_request = self.requests_mock.last_request
|
|
|
|
self.assertNotIn('limit', last_request.qs)
|
|
|
|
def test_get(self):
|
|
url = self.TEST_URL + URL_TEMPLATE_ID % TASK['id']
|
|
self.requests_mock.get(url, json=TASK)
|
|
|
|
task = self.tasks.get(TASK['id'])
|
|
|
|
self.assertEqual(
|
|
tasks.Task(self.tasks, TASK).to_dict(),
|
|
task.to_dict()
|
|
)
|
|
|
|
def test_rerun(self):
|
|
url = self.TEST_URL + URL_TEMPLATE_ID % TASK['id']
|
|
self.requests_mock.put(url, json=TASK)
|
|
|
|
task = self.tasks.rerun(TASK['id'])
|
|
|
|
self.assertDictEqual(
|
|
tasks.Task(self.tasks, TASK).to_dict(),
|
|
task.to_dict()
|
|
)
|
|
|
|
body = {
|
|
'reset': True,
|
|
'state': 'RUNNING',
|
|
'id': TASK['id']
|
|
}
|
|
self.assertDictEqual(body, self.requests_mock.last_request.json())
|
|
|
|
def test_rerun_no_reset(self):
|
|
url = self.TEST_URL + URL_TEMPLATE_ID % TASK['id']
|
|
self.requests_mock.put(url, json=TASK)
|
|
|
|
task = self.tasks.rerun(TASK['id'], reset=False)
|
|
|
|
self.assertDictEqual(
|
|
tasks.Task(self.tasks, TASK).to_dict(),
|
|
task.to_dict()
|
|
)
|
|
|
|
body = {
|
|
'reset': False,
|
|
'state': 'RUNNING',
|
|
'id': TASK['id']
|
|
}
|
|
self.assertDictEqual(body, self.requests_mock.last_request.json())
|
|
|
|
def test_rerun_update_env(self):
|
|
url = self.TEST_URL + URL_TEMPLATE_ID % TASK['id']
|
|
self.requests_mock.put(url, json=TASK)
|
|
|
|
task = self.tasks.rerun(TASK['id'], env={'k1': 'foobar'})
|
|
|
|
self.assertDictEqual(
|
|
tasks.Task(self.tasks, TASK).to_dict(),
|
|
task.to_dict()
|
|
)
|
|
|
|
body = {
|
|
'reset': True,
|
|
'state': 'RUNNING',
|
|
'id': TASK['id'],
|
|
'env': jsonutils.dumps({'k1': 'foobar'})
|
|
}
|
|
self.assertDictEqual(body, self.requests_mock.last_request.json())
|
|
|
|
def test_get_sub_executions(self):
|
|
url = self.TEST_URL + URL_TEMPLATE_SUB_EXECUTIONS \
|
|
% (TASK['id'], '?max_depth=-1&errors_only=')
|
|
|
|
self.requests_mock.get(url, json={'executions': [SUB_WF_EXEC]})
|
|
|
|
sub_execution_list = self.tasks.get_task_sub_executions(TASK['id'])
|
|
|
|
self.assertEqual(1, len(sub_execution_list))
|
|
self.assertDictEqual(
|
|
Execution(self.executions, SUB_WF_EXEC).to_dict(),
|
|
sub_execution_list[0].to_dict()
|
|
)
|