Smoke test engine

1. What is the problem
Current smoke test needs improvement

2. What is the solution for the problem
Implement a task runner and a task engine. The implementation is
based on the specification document[1].

3. What features need to be implemented in Tricircle to
realize the solution
Smoke test engine is added

[1] https://github.com/openstack/tricircle/blob/master/specs/pike/smoke-test-engine.rst

Implements: blueprint smoke-test-engine
Change-Id: Ice098ce020c85f74d008c1952dd1dd36350dec1d
This commit is contained in:
zhiyuan_cai 2017-06-26 18:38:49 +08:00
parent 1b4ceafc44
commit 21e923aa22
10 changed files with 1071 additions and 255 deletions

View File

@ -0,0 +1,27 @@
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
import task_runner
if __name__ == '__main__':
    # Usage: run_yaml_test.py <yaml_path> <auth_url> <project> <user> <password>
    (yaml_file, keystone_url, tenant,
     username, passwd) = sys.argv[1:]
    sdk_runner = task_runner.SDKRunner(keystone_url, tenant,
                                       username, passwd)
    engine = task_runner.RunnerEngine(yaml_file, sdk_runner)
    # run_task_sets returns an error message on failure, None on success;
    # passing a non-empty message to sys.exit yields a non-zero exit status.
    failure = engine.run_task_sets()
    if failure:
        sys.exit(failure)

View File

@ -0,0 +1,537 @@
# Task set "preparation": builds the single-gateway topology — an external
# network/subnet, one router, tenant networks net1..net4 with servers and
# floating IPs.  Values of the form <task_id>@<field> are resolved by the
# task engine to a field of the referenced task's result; every such
# reference must also appear in the task's "depend" list.
- task_set_id: preparation
  tasks:
    - task_id: image1
      region: region1
      type: image
      query:
        get_one: true
    - task_id: image2
      region: region2
      type: image
      query:
        get_one: true
    - task_id: ext-net
      region: central
      type: network
      params:
        name: ext-net
        is_router_external: true
        provider_network_type: vlan
        provider_physical_network: extern
        availability_zone_hints: [RegionTwo]
    - task_id: ext-subnet
      region: central
      type: subnet
      depend: [ext-net]
      params:
        name: ext-subnet
        ip_version: 4
        cidr: 163.3.124.0/24
        is_dhcp_enabled: false
        network_id: ext-net@id
    - task_id: router
      region: central
      type: router
      params:
        name: router
    - task_id: add-gateway
      region: central
      type: router
      action:
        target: router@id
        method: update
      depend:
        - ext-net
        - ext-subnet
        - router
      params:
        external_gateway_info:
          network_id: ext-net@id
          enable_snat: true
    - task_id: net1
      region: central
      type: network
      params:
        name: net1
    - task_id: subnet1
      region: central
      type: subnet
      depend: [net1]
      params:
        name: subnet1
        ip_version: 4
        cidr: 10.0.1.0/24
        allocation_pools:
          - start: 10.0.1.10
            end: 10.0.1.90
        network_id: net1@id
    - task_id: net3
      region: central
      type: network
      params:
        name: net3
    # subnet3 shares subnet1's CIDR (different allocation pool) — presumably
    # to exercise overlapping-CIDR handling; see "create subnet3 that has
    # same CIDR with subnet1" in the shell script.
    - task_id: subnet3
      region: central
      type: subnet
      depend: [net3]
      params:
        name: subnet3
        ip_version: 4
        cidr: 10.0.1.0/24
        allocation_pools:
          - start: 10.0.1.110
            end: 10.0.1.190
        network_id: net3@id
    - task_id: port1
      region: central
      type: port
      depend:
        - net1
        - subnet1
      params:
        network_id: net1@id
    - task_id: add-subnet1
      region: central
      type: router
      depend:
        - subnet1
        - router
      action:
        target: router@id
        method: add_interface_to_router
      params:
        subnet_id: subnet1@id
    - task_id: fip1
      region: central
      type: fip
      depend:
        - ext-net
        - port1
        - add-gateway
        - add-subnet1
      params:
        port_id: port1@id
        floating_ip_address: 163.3.124.15
        floating_network_id: ext-net@id
    - task_id: vm1
      region: region1
      type: server
      depend:
        - fip1
        - port1
        - image1
      params:
        flavor_id: 1
        image_id: image1@id
        name: vm1
        networks:
          - port: port1@id
    - task_id: net2
      region: central
      type: network
      params:
        name: net2
    - task_id: subnet2
      region: central
      type: subnet
      depend: [net2]
      params:
        name: subnet2
        ip_version: 4
        cidr: 10.0.2.0/24
        network_id: net2@id
    - task_id: port2
      region: central
      type: port
      depend:
        - net2
        - subnet2
      params:
        network_id: net2@id
    - task_id: fip2
      region: central
      type: fip
      depend:
        - vm2
        - ext-net
        - port2
        - add-gateway
        - add-subnet2
      params:
        port_id: port2@id
        floating_ip_address: 163.3.124.20
        floating_network_id: ext-net@id
    - task_id: vm2
      region: region2
      type: server
      depend:
        - port2
        - image2
        - vm1
      params:
        flavor_id: 1
        image_id: image2@id
        name: vm2
        networks:
          - port: port2@id
    - task_id: add-subnet2
      region: central
      type: router
      depend:
        - vm2
        - subnet2
        - router
      action:
        target: router@id
        method: add_interface_to_router
      params:
        subnet_id: subnet2@id
    - task_id: net4
      region: central
      type: network
      params:
        name: net4
    # subnet4 has no gateway (gateway_ip: null), so vm3 boots on a
    # gateway-less network.
    - task_id: subnet4
      region: central
      type: subnet
      depend: [net4]
      params:
        name: subnet4
        ip_version: 4
        cidr: 10.0.4.0/24
        network_id: net4@id
        gateway_ip: null
    - task_id: vm3
      region: region1
      type: server
      depend:
        - net4
        - subnet4
        - image1
        - vm2
      params:
        flavor_id: 1
        image_id: image1@id
        name: vm3
        networks:
          - uuid: net4@id
# Task set "wait-for-job": polls the central job API (up to 10 retries)
# until every job reports SUCCESS.
- task_set_id: wait-for-job
  tasks:
    - task_id: check-job
      region: central
      type: job
      validate:
        predicate: all
        retries: 10
        condition:
          - status: SUCCESS
# Task set "check": verifies the topology created by "preparation" from
# each region's point of view (server status, router ports/routes, subnets
# and floating IPs).  References of the form preparation@<task>@<field>
# read results from the depended-on task set.
- task_set_id: check
  depend: [preparation]
  tasks:
    - task_id: router1
      region: region1
      type: router
      query:
        get_one: true
      params:
        name: preparation@router@id
    - task_id: router2
      region: region2
      type: router
      query:
        get_one: true
      params:
        name: preparation@router@id
    - task_id: check-servers1
      region: region1
      type: server
      validate:
        predicate: any
        condition:
          - status: ACTIVE
            name: vm1
          - status: ACTIVE
            name: vm3
    # Trailing "*" in a condition value is a prefix wildcard understood by
    # the runner's validate logic.
    - task_id: check-ports1
      region: region1
      type: port
      depend: [router1]
      validate:
        predicate: any
        condition:
          - fixed_ips:
              - ip_address: 10.0.1*
          - fixed_ips:
              - ip_address: 100.0.0*
      params:
        device_id: router1@id
    - task_id: check-routers1
      region: region1
      type: router
      validate:
        predicate: any
        condition:
          - routes:
              - destination: 0.0.0.0/0
                nexthop: 100.0.0.1
              - destination: 10.0.2*
                nexthop: 100.0.0*
    - task_id: check-subnets1
      region: region1
      type: subnet
      validate:
        predicate: any
        condition:
          - cidr: 10.0.1.0/24
          - cidr: 10.0.4.0/24
          - cidr: 100.0.0.0/24
    - task_id: check-servers2
      region: region2
      type: server
      validate:
        predicate: any
        condition:
          - status: ACTIVE
            name: vm2
    - task_id: check-ports2
      region: region2
      type: port
      depend: [router2]
      validate:
        predicate: any
        condition:
          - fixed_ips:
              - ip_address: 10.0.2*
          - fixed_ips:
              - ip_address: 100.0.0*
      params:
        device_id: router2@id
    - task_id: check-routers2
      region: region2
      type: router
      validate:
        predicate: any
        condition:
          - routes:
              - destination: 0.0.0.0/0
                nexthop: 100.0.0.1
              - destination: 10.0.1*
                nexthop: 100.0.0*
    - task_id: check-subnets2
      region: region2
      type: subnet
      validate:
        predicate: any
        condition:
          - cidr: 10.0.1.0/24
          - cidr: 10.0.2.0/24
          - cidr: 100.0.0.0/24
          - cidr: 163.3.124.0/24
    - task_id: check-fips2
      region: region2
      type: fip
      validate:
        predicate: any
        condition:
          - floating_ip_address: 163.3.124.15
          - floating_ip_address: 163.3.124.20
# Task set "clean": tears the topology down in reverse order — floating
# IPs, servers, router interfaces/gateway, router, ports, subnets,
# networks.  "retries" inside an action re-runs it on failure (resources
# may take time to become deletable).
- task_set_id: clean
  depend: [preparation]
  tasks:
    - task_id: delete-fip1
      region: central
      type: fip
      action:
        target: preparation@fip1@id
        method: delete
    - task_id: delete-fip2
      region: central
      type: fip
      action:
        target: preparation@fip2@id
        method: delete
    - task_id: delete-vm1
      region: region1
      type: server
      depend: [delete-fip1]
      action:
        target: preparation@vm1@id
        method: delete
    - task_id: delete-vm3
      region: region1
      type: server
      action:
        target: preparation@vm3@id
        method: delete
    - task_id: delete-vm2
      region: region2
      type: server
      depend: [delete-fip2]
      action:
        target: preparation@vm2@id
        method: delete
    - task_id: remove-gateway
      region: central
      type: router
      action:
        target: preparation@router@id
        method: update
      depend:
        - delete-fip1
        - delete-fip2
      params:
        external_gateway_info: null
    - task_id: remove-subnet1
      region: central
      type: router
      action:
        target: preparation@router@id
        method: remove_interface_from_router
      depend: [remove-gateway]
      params:
        subnet_id: preparation@subnet1@id
    - task_id: remove-subnet2
      region: central
      type: router
      action:
        target: preparation@router@id
        method: remove_interface_from_router
      depend: [remove-gateway]
      params:
        subnet_id: preparation@subnet2@id
    - task_id: delete-router
      region: central
      type: router
      action:
        target: preparation@router@id
        method: delete
        retries: 3
      depend:
        - remove-gateway
        - remove-subnet1
        - remove-subnet2
    - task_id: delete-port1
      region: central
      type: port
      action:
        target: preparation@port1@id
        method: delete
      depend: [delete-router]
    - task_id: delete-port2
      region: central
      type: port
      action:
        target: preparation@port2@id
        method: delete
      depend: [delete-router]
    - task_id: delete-subnet1
      region: central
      type: subnet
      action:
        target: preparation@subnet1@id
        method: delete
        retries: 3
      depend: [delete-port1]
    - task_id: delete-subnet2
      region: central
      type: subnet
      action:
        target: preparation@subnet2@id
        method: delete
        retries: 3
      depend: [delete-port2]
    - task_id: delete-subnet3
      region: central
      type: subnet
      action:
        target: preparation@subnet3@id
        method: delete
        retries: 3
    - task_id: delete-subnet4
      region: central
      type: subnet
      action:
        target: preparation@subnet4@id
        method: delete
        retries: 3
      depend: [delete-vm3]
    - task_id: delete-net1
      region: central
      type: network
      action:
        target: preparation@net1@id
        method: delete
      depend: [delete-subnet1]
    - task_id: delete-net2
      region: central
      type: network
      action:
        target: preparation@net2@id
        method: delete
      depend: [delete-subnet2]
    - task_id: delete-net3
      region: central
      type: network
      action:
        target: preparation@net3@id
        method: delete
      depend: [delete-subnet3]
    - task_id: delete-net4
      region: central
      type: network
      action:
        target: preparation@net4@id
        method: delete
      depend: [delete-subnet4]
    - task_id: delete-ext-subnet
      region: central
      type: subnet
      action:
        target: preparation@ext-subnet@id
        method: delete
      depend: [delete-router]
    - task_id: delete-ext-net
      region: central
      type: network
      action:
        target: preparation@ext-net@id
        method: delete
      depend: [delete-ext-subnet]
# Task set "clean-check": after cleanup, every remaining router/network
# must fail to match any real name ("invalid-name" matches nothing, so the
# "all" predicate over an empty result list passes only when the listing
# is empty), and all jobs must have finished with SUCCESS.
- task_set_id: clean-check
  tasks:
    - task_id: check-no-routers1
      region: region1
      type: router
      validate:
        predicate: all
        condition:
          - name: invalid-name
    - task_id: check-no-routers2
      region: region2
      type: router
      validate:
        predicate: all
        condition:
          - name: invalid-name
    - task_id: check-no-networks1
      region: region1
      type: network
      validate:
        predicate: all
        condition:
          - name: invalid-name
    - task_id: check-no-networks2
      region: region2
      type: network
      validate:
        predicate: all
        condition:
          - name: invalid-name
    - task_id: check-jobs
      region: central
      type: job
      validate:
        predicate: all
        retries: 10
        condition:
          - status: SUCCESS

View File

@ -5,153 +5,7 @@ DEVSTACK_DIR=$DEST/devstack
# Load admin credentials and build one openstack CLI alias per region.
source $DEVSTACK_DIR/openrc admin admin
unset OS_REGION_NAME
openstacktop="openstack --os-region-name CentralRegion"
openstackpod1="openstack --os-region-name RegionOne"
openstackpod2="openstack --os-region-name RegionTwo"
echo list networks before running
$openstacktop network list
# Build the single-gateway topology: external network/subnet, a router,
# tenant networks and servers.
echo create external network
$openstacktop network create --external --provider-network-type vlan \
--provider-physical-network extern --availability-zone-hint RegionTwo ext-net
echo show networks after running
for id in $($openstacktop network list -c ID -f value)
do $openstacktop network show $id
done
echo create external subnet
$openstacktop subnet create --subnet-range 163.3.124.0/24 --network ext-net \
--no-dhcp ext-subnet
echo create router
router_id=$($openstacktop router create router -c id -f value)
echo attach router to external network
$openstacktop router set --external-gateway ext-net \
--fixed-ip subnet=ext-subnet,ip-address=163.3.124.10 router
echo create network1
$openstacktop network create net1
echo create subnet1
$openstacktop subnet create --subnet-range 10.0.1.0/24 --network net1 \
--allocation-pool start=10.0.1.10,end=10.0.1.90 subnet1
echo create network3
$openstacktop network create net3
echo create subnet3 that has same CIDR with subnet1
$openstacktop subnet create --subnet-range 10.0.1.0/24 --network net3 \
--allocation-pool start=10.0.1.110,end=10.0.1.190 subnet3
echo create port1
port1_id=$($openstacktop port create --network net1 port1 -c id -f value)
echo attach subnet1 to router
$openstacktop router add subnet router subnet1
echo associate floating ip to port1
$openstacktop floating ip create --port $port1_id --floating-ip-address 163.3.124.15 ext-net -c id -f value
image1_id=$($openstackpod1 image list -c ID -f value)
echo create server1
$openstackpod1 server create --flavor 1 --image $image1_id --nic port-id=$port1_id vm1
echo create network2
$openstacktop network create net2
echo create subnet2
$openstacktop subnet create --subnet-range 10.0.2.0/24 --network net2 subnet2
echo create port2
port2_id=$($openstacktop port create --network net2 port2 -c id -f value)
image2_id=$($openstackpod2 image list -c ID -f value)
echo create server2
$openstackpod2 server create --flavor 1 --image $image2_id --nic port-id=$port2_id vm2
echo attach subnet2 to router
$openstacktop router add subnet router subnet2
echo create network4
net4_id=$($openstacktop network create net4 -c id -f value)
echo create subnet4 that has no gateway
$openstacktop subnet create --subnet-range 10.0.4.0/24 --network net4 \
--gateway None subnet4
echo create server3
$openstackpod1 server create --flavor 1 --image $image1_id --nic net-id=$net4_id vm3
sleep 10
echo associate floating ip to port2
$openstacktop floating ip create --port $port2_id --floating-ip-address 163.3.124.20 ext-net -c id -f value
sleep 20
# Re-source devstack helpers so die/database helpers are available.
TOP_DIR=$DEVSTACK_DIR
source $DEVSTACK_DIR/stackrc
source $DEVSTACK_DIR/inc/meta-config
extract_localrc_section $TOP_DIR/local.conf $TOP_DIR/localrc $TOP_DIR/.localrc.auto
source $DEVSTACK_DIR/functions-common
source $DEVSTACK_DIR/lib/database
initialize_database_backends
token=$(openstack token issue -c id -f value)
# Poll the Tricircle job API until all jobs succeed, up to 10 times with a
# 10-second pause; bail out on the 11th iteration.
for i in $(seq 1 11); do
if [ $i == 11 ]; then
echo "List fail jobs"
curl -X GET http://127.0.0.1:19999/v1.0/jobs?status=fail -H "Content-Type: application/json" -H "X-Auth-Token: $token"
die $LINENO "Smoke test fails, exceed max wait time for job"
fi
full_result=$(curl -X GET http://127.0.0.1:19999/v1.0/jobs -H "Content-Type: application/json" -H "X-Auth-Token: $token")
echo $full_result | python smoke_test_validation.py job 0
if [ $? != 0 ]; then
echo "Wait for job to finish"
sleep 10
else
break
fi
done
# Validate each region's view of the topology; any mismatch aborts.
$openstackpod1 server list -f json | python smoke_test_validation.py server 1
python run_yaml_test.py single_gw_topology_test.yaml "$OS_AUTH_URL" "$OS_TENANT_NAME" "$OS_USERNAME" "$OS_PASSWORD"
if [ $? != 0 ]; then
die $LINENO "Smoke test fails, error in server of RegionOne"
fi
$openstackpod2 server list -f json | python smoke_test_validation.py server 2
if [ $? != 0 ]; then
die $LINENO "Smoke test fails, error in server of RegionTwo"
fi
$openstackpod1 subnet list -f json | python smoke_test_validation.py subnet 1
if [ $? != 0 ]; then
die $LINENO "Smoke test fails, error in subnet of RegionOne"
fi
$openstackpod2 subnet list -f json | python smoke_test_validation.py subnet 2
if [ $? != 0 ]; then
die $LINENO "Smoke test fails, error in subnet of RegionTwo"
fi
$openstackpod1 port list --router $router_id -f json | python smoke_test_validation.py router_port 1
if [ $? != 0 ]; then
die $LINENO "Smoke test fails, error in router port of RegionOne"
fi
$openstackpod2 port list --router $router_id -f json | python smoke_test_validation.py router_port 2
if [ $? != 0 ]; then
die $LINENO "Smoke test fails, error in router port of RegionTwo"
fi
$openstackpod1 router show $router_id -c routes -f json | python smoke_test_validation.py router 1
if [ $? != 0 ]; then
die $LINENO "Smoke test fails, error in router of RegionOne"
fi
$openstackpod2 router show $router_id -c routes -f json | python smoke_test_validation.py router 2
if [ $? != 0 ]; then
die $LINENO "Smoke test fails, error in router of RegionTwo"
fi
$openstackpod2 floating ip list -f json | python smoke_test_validation.py fip 2
if [ $? != 0 ]; then
# NOTE(review): two die messages in this branch — presumably a diff/merge
# artifact; confirm which message is intended (the second looks unreachable
# if die exits).
die $LINENO "Smoke test fails, error in fip of RegionTwo"
die $LINENO "Smoke test fails, error in single gateway topology test"
fi

View File

@ -1,107 +0,0 @@
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import sys
class ContainedString(object):
    """Equality helper that matches any string containing a given substring.

    Instances compare equal to any string in which the wrapped text occurs,
    so they can stand in for exact values inside condition dictionaries.
    """
    def __init__(self, txt):
        # Substring expected to occur in the compared string.
        self.content = txt

    def __eq__(self, other):
        return self.content in other

    def __ne__(self, other):
        return self.content not in other
# Conditions that EVERY listed resource must satisfy, keyed by the "region"
# CLI argument ('0' is used for the job-status check).
ALL_CONDITIONS = {
    '0': {'job': [{'status': 'SUCCESS'}]}
}
# Conditions that AT LEAST ONE listed resource must satisfy, keyed by the
# "region" CLI argument ('1' and '2'); keys inside each condition dict are
# the column names of the CLI's JSON output.
ANY_CONDITIONS = {
    '1': {'server': [{'Name': 'vm1', 'Status': 'ACTIVE'},
                     {'Name': 'vm3', 'Status': 'ACTIVE'}],
          'subnet': [{'Subnet': '100.0.0.0/24'}, {'Subnet': '10.0.1.0/24'},
                     {'Subnet': '10.0.4.0/24'}],
          'router_port': [{'Fixed IP Addresses': ContainedString('10.0.1')},
                          {'Fixed IP Addresses': ContainedString('100.0.0')}],
          'router': [
              {'routes': ContainedString(
                  "destination='0.0.0.0/0', gateway='100.0.0.1'")},
              {'routes': ContainedString("destination='10.0.2")}]},
    '2': {'server': [{'Name': 'vm2', 'Status': 'ACTIVE'}],
          'subnet': [{'Subnet': '100.0.0.0/24'}, {'Subnet': '10.0.1.0/24'},
                     {'Subnet': '10.0.2.0/24'}, {'Subnet': '163.3.124.0/24'}],
          'router_port': [{'Fixed IP Addresses': ContainedString('10.0.2')},
                          {'Fixed IP Addresses': ContainedString('100.0.0')}],
          'router': [
              {'routes': ContainedString(
                  "destination='0.0.0.0/0', gateway='100.0.0.1'")},
              {'routes': ContainedString("destination='10.0.1")}],
          'fip': [{'Floating IP Address': '163.3.124.15'},
                  {'Floating IP Address': '163.3.124.20'}]}
}
def get_result_list(result):
    """Normalize a parsed JSON payload into a list of resource dicts.

    Accepts either a plain list, a single-resource dict, or a dict with one
    key whose value is the resource list (e.g. {'servers': [...]}).
    """
    if isinstance(result, list):
        return result
    # A dict with exactly one key may wrap the real resource list.
    if len(result) == 1:
        (value,) = result.values()
        if isinstance(value, list):
            return value
    # Otherwise treat the dict itself as a single resource.
    return [result]
def validate_any_condition(result, condition):
    """Return True if at least one resource matches every pair in condition."""
    return any(
        all(res[key] == value for (key, value) in condition.items())
        for res in get_result_list(result))
def validate_all_condition(result, condition):
    """Return True if every resource matches every pair in condition."""
    return all(
        all(res[key] == value for (key, value) in condition.items())
        for res in get_result_list(result))
def validate_result(result, region, res_type):
    """Check *result* against the conditions registered for region/res_type.

    Every "any" condition must be matched by some resource and every "all"
    condition must be matched by all resources; returns True when both hold
    (vacuously True when no conditions are registered).
    """
    any_conds = ANY_CONDITIONS.get(region, {}).get(res_type, [])
    for cond in any_conds:
        if not validate_any_condition(result, cond):
            return False
    all_conds = ALL_CONDITIONS.get(region, {}).get(res_type, [])
    for cond in all_conds:
        if not validate_all_condition(result, cond):
            return False
    return True
if __name__ == '__main__':
    # Usage: <cli output> | smoke_test_validation.py <res_type> <region>
    res_type, region = sys.argv[1:]
    payload = sys.stdin.read()
    passed = validate_result(json.loads(payload), region, res_type)
    # Shell treats 0 as success, so invert the boolean for the exit status.
    sys.exit(0 if passed else 1)

View File

@ -0,0 +1,426 @@
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import itertools
import logging
import time
import traceback
import yaml
from openstack import connection
from openstack import profile
from tricircle.tests.tricircle_sdk import multiregion_network_service
LOG = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
class DummyRunner(object):
    """Runner that fabricates resources and logs calls, for dry runs."""

    class DummyResource(object):
        # Minimal stand-in exposing only an ``id`` attribute.
        def __init__(self, _id):
            self.id = _id

    def __init__(self):
        # Per-type counters used to fabricate unique ids.
        self.id_pool = {}

    def _get_id(self, _type):
        """Return the next synthetic id for *_type*, e.g. 'net1_id'."""
        next_num = self.id_pool.get(_type, 0) + 1
        self.id_pool[_type] = next_num
        return '%s%d_id' % (_type, next_num)

    def create(self, region, _type, params):
        new_id = self._get_id(_type)
        LOG.info('create %s with id %s in %s, params: %s' % (
            _type, new_id, region, params))
        return self.DummyResource(new_id)

    def action(self, region, _type, target, method, params):
        LOG.info('%s %s with id %s in %s, params: %s' % (
            method, _type, target, region, params))

    def query(self, region, _type, get_one, params):
        if not get_one:
            return []
        return self.DummyResource(self._get_id(_type))

    def validate(self, region, _type, predicate, conditions, params):
        # Nothing to validate against for a dry run.
        pass
class SDKRunner(object):
    """Task runner that executes tasks through the OpenStack SDK.

    One SDK connection is created per region; each resource type is
    dispatched to the service proxy (network/compute/image/tricircle_sdk)
    that serves it.
    """
    # Logical region names used in the task YAML -> keystone region names.
    region_map = {'central': 'CentralRegion',
                  'region1': 'RegionOne',
                  'region2': 'RegionTwo'}
    # Service name -> resource types handled by that service's proxy.
    serv_reslist_map = {
        'network': ['network', 'subnet', 'port', 'router', 'fip'],
        'compute': ['server'],
        'image': ['image'],
        'tricircle_sdk': ['job']}
    # YAML type names whose SDK method names differ (fip -> create_ip/ips).
    res_alias_map = {
        'fip': 'ip'}

    def __init__(self, auth_url, project, username, password):
        # Invert serv_reslist_map into resource type -> service name.
        self.res_serv_map = {}
        for serv in self.serv_reslist_map:
            for res in self.serv_reslist_map[serv]:
                self.res_serv_map[res] = serv
        self.connection_map = {}
        param = {
            'auth_url': auth_url,
            'project_name': project,
            'username': username,
            'password': password}
        for region in ('CentralRegion', 'RegionOne', 'RegionTwo'):
            prof = profile.Profile()
            if region == 'CentralRegion':
                # The central region additionally exposes the Tricircle
                # multiregion network (job) service.
                serv = multiregion_network_service.MultiregionNetworkService(
                    version='v1')
                prof._add_service(serv)
            prof.set_region(profile.Profile.ALL, region)
            param['profile'] = prof
            conn = connection.Connection(**param)
            self.connection_map[region] = conn

    def create(self, region, _type, params):
        """Create one resource of *_type* in *region* and return it."""
        conn = self.connection_map[self.region_map[region]]
        serv = self.res_serv_map[_type]
        _type = self.res_alias_map.get(_type, _type)
        proxy = getattr(conn, serv)
        return getattr(proxy, 'create_%s' % _type)(**params)

    def action(self, region, _type, target, method, params):
        """Invoke *method* on the resource identified by *target*."""
        conn = self.connection_map[self.region_map[region]]
        serv = self.res_serv_map[_type]
        _type = self.res_alias_map.get(_type, _type)
        proxy = getattr(conn, serv)
        if method in ('update', 'delete'):
            # Generic verbs map to type-specific proxy calls.
            method = '%s_%s' % (method, _type)
        getattr(proxy, method)(target, **params)

    def query(self, region, _type, get_one, params):
        """List resources of *_type*; return only the first if *get_one*."""
        conn = self.connection_map[self.region_map[region]]
        serv = self.res_serv_map[_type]
        _type = self.res_alias_map.get(_type, _type)
        proxy = getattr(conn, serv)
        _list = list(getattr(proxy, '%ss' % _type)(**params))
        if get_one:
            return _list[0]
        return _list

    def validate(self, region, _type, predicate, conditions, params):
        """Query resources and check them against *conditions*.

        predicate 'any': each condition must match at least one result;
        predicate 'all': each condition must match every result.
        Raises Exception when the validation fails.
        """
        def validate_value(actual, expected):
            if isinstance(expected, list):
                # Lists match when some pairing of their elements matches.
                actual_len = len(actual)
                expected_len = len(expected)
                if actual_len != expected_len:
                    return False
                for actual_list in itertools.permutations(actual, actual_len):
                    for expected_list in itertools.permutations(expected,
                                                                expected_len):
                        match = True
                        for i, actual_ele in enumerate(actual_list):
                            if not validate_value(actual_ele,
                                                  expected_list[i]):
                                match = False
                                break
                        if match:
                            return True
                return False
            elif isinstance(expected, dict):
                # Only the keys present in the expectation are compared.
                for k in expected:
                    if not validate_value(actual[k], expected[k]):
                        return False
                return True
            elif isinstance(expected, str):
                if '*' not in expected:
                    # Plain string: exact comparison. Handling this first
                    # also avoids an IndexError on an empty expected value.
                    return actual == expected
                tokens = expected.split('*')
                if tokens[0] == '' and tokens[-1] == '':
                    return actual.find(tokens[1]) != -1
                elif tokens[0] == '':
                    return actual.endswith(tokens[-1])
                elif tokens[-1] == '':
                    return actual.startswith(tokens[0])
                # NOTE(review): an infix wildcard like 'a*b' is compared
                # literally; no task definition currently uses that form.
                return actual == expected
            else:
                return actual == expected

        def validate_any_condition(results, condition):
            for result in results:
                if all(validate_value(
                        getattr(result, key),
                        value) for (key, value) in condition.items()):
                    return True
            return False

        def validate_all_condition(results, condition):
            for result in results:
                if not all(validate_value(
                        getattr(result, key),
                        value) for (key, value) in condition.items()):
                    return False
            return True

        results = self.query(region, _type, False, params)
        if predicate == 'any':
            for condition in conditions:
                if not validate_any_condition(results, condition):
                    raise Exception(
                        'Validation fail, actual results: %s' % results)
        elif predicate == 'all':
            for condition in conditions:
                if not validate_all_condition(results, condition):
                    raise Exception(
                        'Validation fail, actual results: %s' % results)
class RunnerEngine(object):
    """Parse a task YAML file and run its task sets in dependency order."""

    def __init__(self, yaml_path, runner):
        # task_set_id -> (task_map, ordered_task_list, task_set_depend)
        self.task_set_map = {}
        # Task set ids in file order, iterated by run_task_sets.
        self.task_set_id_list = []
        self.runner = runner
        with open(yaml_path) as f:
            # safe_load suffices: the file contains only plain scalars,
            # lists and mappings, and it avoids arbitrary object creation.
            data = yaml.safe_load(f)
        self._parse_data(data)

    def _validate_task(self, task):
        """Check required fields and that every @-reference is declared.

        Raises Exception when a required field is missing, when a
        task_id@field reference is not covered by the depend list, or when
        a task_set_id@task_id@field reference points to an unknown set/task.
        """
        def collect_require_from_dict(requires, _dict):
            for v in _dict.values():
                if isinstance(v, list):
                    collect_require_from_list(requires, v)
                elif isinstance(v, dict):
                    collect_require_from_dict(requires, v)
                elif not isinstance(v, str):
                    continue
                elif '@' in v:
                    requires.append(v)

        def collect_require_from_list(requires, _list):
            for v in _list:
                if isinstance(v, list):
                    collect_require_from_list(requires, v)
                elif isinstance(v, dict):
                    collect_require_from_dict(requires, v)
                elif not isinstance(v, str):
                    continue
                elif '@' in v:
                    requires.append(v)

        for field in ('task_id', 'region', 'type'):
            if field not in task:
                raise Exception('Required field %s not set' % field)
        for sub_section, fields in [('action', ['target', 'method']),
                                    ('query', ['get_one']),
                                    ('validate', ['predicate', 'condition'])]:
            if sub_section in task:
                for field in fields:
                    if field not in task[sub_section]:
                        raise Exception('Required field %s for %s '
                                        'not set' % (field, sub_section))
        requires = []
        if 'params' in task:
            collect_require_from_dict(requires, task['params'])
        if 'action' in task:
            requires.append(task['action']['target'])
        depend = task.get('depend', [])
        for value in requires:
            tokens = value.split('@')
            if len(tokens) == 2 and tokens[0] not in depend:
                # task_id@field reference -> the task must be depended on.
                raise Exception(
                    'Depend list not complete for %s: %s not in %s' % (
                        task['task_id'], tokens[0], depend))
            elif len(tokens) == 3:
                # task_set_id@task_id@field reference -> the referenced
                # task set must already be parsed and contain the task.
                task_set_id, task_id = tokens[:2]
                if task_set_id not in self.task_set_map:
                    raise Exception(
                        'Depend task set %s for %s not found' % (
                            task_set_id, task['task_id']))
                task_map, _, _ = self.task_set_map[task_set_id]
                if task_id not in task_map:
                    raise Exception(
                        'Depend task %s for %s not found' % (
                            task_id, task['task_id']))

    @staticmethod
    def _parse_dependency(depend_map):
        """Topologically sort the ids in *depend_map* (id -> set of ids).

        Raises Exception when no progress can be made, i.e. the graph
        contains a cycle or a reference to a missing id.
        """
        depend_map = copy.deepcopy(depend_map)
        ordered_list = []
        while len(depend_map):
            pop_list = []
            for _id in depend_map:
                if not depend_map[_id]:
                    ordered_list.append(_id)
                    pop_list.append(_id)
            for _id in pop_list:
                depend_map.pop(_id)
            for depend in depend_map.values():
                for _id in pop_list:
                    if _id in depend:
                        depend.remove(_id)
            if not pop_list:
                # No id became free this round: cyclic or dangling depend.
                raise Exception('Unresolved dependency, '
                                'left: %s' % list(depend_map.keys()))
        return ordered_list

    def _parse_data(self, data):
        """Order task sets, then validate and order the tasks in each."""
        task_set_depend_map = {}
        task_set_tasks_map = {}
        for task_set in data:
            task_set_id = task_set['task_set_id']
            self.task_set_id_list.append(task_set_id)
            task_set_depend_map[task_set_id] = set(
                task_set.get('depend', []))
            task_set_tasks_map[task_set_id] = task_set['tasks']
        ordered_task_set_list = self._parse_dependency(task_set_depend_map)
        for task_set_id in ordered_task_set_list:
            task_map = {}
            task_depend_map = {}
            for task in task_set_tasks_map[task_set_id]:
                task_map[task['task_id']] = task
                task_depend_map[task['task_id']] = set(task.get('depend', []))
                self._validate_task(task)
            ordered_task_list = self._parse_dependency(task_depend_map)
            self.task_set_map[task_set_id] = (task_map, ordered_task_list,
                                              task_set_depend_map[task_set_id])

    @staticmethod
    def _fill_depend_field_in_list(_list, task_result_map,
                                   depend_task_result_map):
        """Replace id@field / set@id@field references found in *_list*."""
        if not _list:
            return
        for i, e in enumerate(_list):
            if isinstance(e, list):
                RunnerEngine._fill_depend_field_in_list(
                    e, task_result_map, depend_task_result_map)
            elif isinstance(e, dict):
                RunnerEngine._fill_depend_field_in_dict(
                    e, task_result_map, depend_task_result_map)
            if not isinstance(e, str):
                continue
            tokens = e.split('@')
            if len(tokens) == 2:
                task_id, task_field = tokens
                _list[i] = getattr(task_result_map[task_id], task_field)
            elif len(tokens) == 3:
                task_set_id, task_id, task_field = tokens
                _list[i] = getattr(
                    depend_task_result_map[task_set_id][task_id], task_field)

    @staticmethod
    def _fill_depend_field_in_dict(_dict, task_result_map,
                                   depend_task_result_map):
        """Replace id@field / set@id@field references found in *_dict*."""
        if not _dict:
            return
        for k, v in _dict.items():
            if isinstance(v, list):
                RunnerEngine._fill_depend_field_in_list(
                    v, task_result_map, depend_task_result_map)
            elif isinstance(v, dict):
                RunnerEngine._fill_depend_field_in_dict(
                    v, task_result_map, depend_task_result_map)
            if not isinstance(v, str):
                continue
            tokens = v.split('@')
            if len(tokens) == 2:
                task_id, task_field = tokens
                _dict[k] = getattr(task_result_map[task_id], task_field)
            elif len(tokens) == 3:
                task_set_id, task_id, task_field = tokens
                _dict[k] = getattr(
                    depend_task_result_map[task_set_id][task_id], task_field)

    @staticmethod
    def _fill_depend_field(params, task_result_map, depend_task_result_map):
        """Resolve all @-references inside a task's params/section dict."""
        RunnerEngine._fill_depend_field_in_dict(params, task_result_map,
                                                depend_task_result_map)

    @staticmethod
    def _retry(task_id, retry_num, func, *args):
        """Run func(*args); retry up to *retry_num* more times on failure,
        sleeping 10 seconds between attempts. Re-raises the last error."""
        run_time = retry_num + 1
        for i in range(run_time):
            try:
                func(*args)
                break
            except Exception:
                if i == run_time - 1:
                    raise
                else:
                    time.sleep(10)
                    LOG.info('Redo failed task %s', task_id)

    def run_tasks(self, task_set_id, depend_task_set_result=None):
        """Run one task set and return (task_result_map, error_msg).

        error_msg is None on success; on the first failing task the partial
        result map and a traceback-bearing message are returned.
        """
        # NOTE: a None default replaces the former shared mutable {} default.
        if depend_task_set_result is None:
            depend_task_set_result = {}
        if task_set_id not in self.task_set_map:
            raise Exception('Task set %s not found' % task_set_id)
        (task_map, ordered_task_list,
         task_set_depend) = self.task_set_map[task_set_id]
        for set_id in task_set_depend:
            if set_id not in depend_task_set_result:
                raise Exception('Task set %s fails, reason: result for depend '
                                'task set %s not given' % (task_set_id,
                                                           set_id))
        task_result_map = {}
        for task_id in ordered_task_list:
            task = task_map[task_id]
            params = task.get('params', {})
            self._fill_depend_field(params, task_result_map,
                                    depend_task_set_result)
            try:
                if 'action' in task:
                    self._fill_depend_field(task['action'], task_result_map,
                                            depend_task_set_result)
                    self._retry(task_id, task['action'].get('retries', 0),
                                self.runner.action, task['region'],
                                task['type'], task['action']['target'],
                                task['action']['method'], params)
                elif 'query' in task:
                    result = self.runner.query(
                        task['region'], task['type'],
                        task['query']['get_one'], params)
                    task_result_map[task_id] = result
                elif 'validate' in task:
                    self._fill_depend_field(task['validate'], task_result_map,
                                            depend_task_set_result)
                    self._retry(task_id, task['validate'].get('retries', 0),
                                self.runner.validate, task['region'],
                                task['type'], task['validate']['predicate'],
                                task['validate']['condition'], params)
                else:
                    # No action/query/validate section -> create a resource.
                    result = self.runner.create(task['region'],
                                                task['type'], params)
                    task_result_map[task_id] = result
                LOG.info('Task %s done\n', task_id)
            except Exception:
                error_msg = 'Task %s fails, reason: %s' % (
                    task_id, traceback.format_exc())
                return task_result_map, error_msg
        return task_result_map, None

    def run_task_sets(self):
        """Run every task set in file order; return an error message or None."""
        task_set_result_map = {}
        for task_set_id in self.task_set_id_list:
            _, _, task_set_depend = self.task_set_map[task_set_id]
            depend_task_set_result = dict(
                [(_id, task_set_result_map[_id]) for _id in task_set_depend])
            task_result_map, error_msg = self.run_tasks(
                task_set_id, depend_task_set_result)
            if error_msg:
                return error_msg
            task_set_result_map[task_set_id] = task_result_map

View File

@ -0,0 +1,24 @@
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from openstack import service_filter
class MultiregionNetworkService(service_filter.ServiceFilter):
    """Service filter describing the Tricircle multiregion network API.

    Lets the SDK locate the service in the catalog; only version 'v1'
    is accepted.
    """
    valid_versions = [service_filter.ValidVersion('v1')]

    def __init__(self, version=None):
        # TODO(zhiyuan) register a proper service type in keystone
        super(MultiregionNetworkService, self).__init__(
            service_type='tricircle', version=version)

View File

@ -0,0 +1,22 @@
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from openstack import proxy2
from tricircle.tests.tricircle_sdk.v1 import job
class Proxy(proxy2.BaseProxy):
    """Proxy exposing the Tricircle multiregion network resources."""
    def jobs(self, **query):
        """Return a generator of Job resources matching *query*."""
        return self._list(job.Job, pagination=False, **query)

View File

@ -0,0 +1,33 @@
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from openstack import resource2
from tricircle.tests.tricircle_sdk import multiregion_network_service
class Job(resource2.Resource):
    """SDK resource for Tricircle asynchronous jobs (GET /jobs)."""
    resource_key = 'job'
    resources_key = 'jobs'
    base_path = '/jobs'
    service = multiregion_network_service.MultiregionNetworkService()
    # Only listing and fetching are enabled for this resource.
    allow_list = True
    allow_get = True
    # Body attributes returned by the job API.
    resource = resource2.Body('resource', type=dict)
    type = resource2.Body('type')
    timestamp = resource2.Body('timestamp')
    project_id = resource2.Body('project_id')
    status = resource2.Body('status')