New task config and verification refactoring
*) Change task config format
   . Split "context" & "runner" sections
*) Refactor verification
   . Move validation to context.base, runner.base and scenario.base
   . Validate the whole config fully before starting any of the tasks
   . Optimize scenario args validation (create clients only once)
   . Optimize order of validation:
     1) Validate names of benchmarks
     2) Validate all static parameters, e.g. configuration of runner and context
     3) If everything is OK in all benchmarks, then start validation of scenario args
   . Store validation result (exception) in task["verification_log"]
   . Remove verification logic from BenchmarkEngine.__exit__
   . Remove scenario args verification results from task["results"]
*) Fix & switch doc/samples/tasks to the new format
   . Switch to the new format
   . Add missing task configuration
   . Better formatting
   . json & yaml samples
*) Refactor unit tests
   . tests.rally.benchmark.test_engine
   . tests.rally.benchmark.context.base
   . tests.orchestrator.test_api.start_task covers the validation step and the new config format as well
*) Refactor orchestrator api start_task
   . Remove benchmark engine context
   . Call verify explicitly
   . Do not raise any exception in case of a validation error
   . Catch any unexpected exception in start_task and set the deployment to an inconsistent state
*) Refactor CLI
   . Properly handle the new verification behaviour
   . Replace the table on task start with just a message
   . Add HINTs to the task detailed command
*) Add unit test for checking doc samples
*) Improve benchmark engine logging

blueprint benchmark-new-task-config

Change-Id: I23d3f6b3439fdb44946a7c2491d5a9b3559dc671
This commit is contained in:
parent 6a3d2be0e8
commit d7635776d7
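For quick reference, the shape change this commit makes to task configs, shown as Python dicts (a minimal sketch; the values are taken from the keystone sample in the diff below):

    # Old format: execution strategy and user/tenant counts were mixed
    # together in one flat "config" dict.
    old_task = {
        "Authenticate.keystone": [{
            "execution": "continuous",
            "config": {"times": 100, "active_users": 1,
                       "tenants": 3, "users_per_tenant": 50}
        }]
    }

    # New format: "runner" owns the execution strategy (validated by the
    # runner class), "context" owns environment preparation such as
    # user/tenant generation (validated by the context classes).
    new_task = {
        "Authenticate.keystone": [{
            "runner": {"type": "continuous", "times": 100, "active_users": 5},
            "context": {"users": {"tenants": 3, "users_per_tenant": 50}}
        }]
    }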
doc/samples/tasks/authenticate/keystone.json
@@ -1,6 +1,17 @@
 {
     "Authenticate.keystone": [
-        { "execution": "continuous",
-          "config": {"times": 100, "active_users": 1, "tenants": 3, "users_per_tenant": 50}}
+        {
+            "runner": {
+                "type": "continuous",
+                "times": 100,
+                "active_users": 5
+            },
+            "context": {
+                "users": {
+                    "tenants": 3,
+                    "users_per_tenant": 50
+                }
+            }
+        }
     ]
 }
doc/samples/tasks/authenticate/keystone.yaml (new file)
@@ -0,0 +1,11 @@
---
  Authenticate.keystone:
    -
      runner:
        type: "continuous"
        times: 100
        active_users: 5
      context:
        users:
          tenants: 3
          users_per_tenant: 50
doc/samples/tasks/cinder/create-and-delete-volume.json
@@ -1,10 +1,20 @@
 {
     "CinderVolumes.create_and_delete_volume": [
         {
-            "args": {"size": 1},
-            "execution": "continuous",
-            "config": {"times": 3, "active_users": 2,
-                       "tenants": 2, "users_per_tenant": 2}
+            "args": {
+                "size": 1
+            },
+            "runner": {
+                "type": "continuous",
+                "times": 3,
+                "active_users": 2
+            },
+            "context": {
+                "users": {
+                    "tenants": 2,
+                    "users_per_tenant": 2
+                }
+            }
         }
     ]
 }
doc/samples/tasks/cinder/create-and-delete-volume.yaml (new file)
@@ -0,0 +1,13 @@
---
  CinderVolumes.create_and_delete_volume:
    -
      args:
        size: 1
      runner:
        type: "continuous"
        times: 3
        active_users: 2
      context:
        users:
          tenants: 2
          users_per_tenant: 2
doc/samples/tasks/cinder/create-volume.json (new file)
@@ -0,0 +1,20 @@
{
    "CinderVolumes.create_volume": [
        {
            "args": {
                "size": 1
            },
            "runner": {
                "type": "continuous",
                "times": 3,
                "active_users": 2
            },
            "context": {
                "users": {
                    "tenants": 2,
                    "users_per_tenant": 2
                }
            }
        }
    ]
}
doc/samples/tasks/cinder/create-volume.yaml (new file)
@@ -0,0 +1,13 @@
---
  CinderVolumes.create_volume:
    -
      args:
        size: 1
      runner:
        type: "continuous"
        times: 3
        active_users: 2
      context:
        users:
          tenants: 2
          users_per_tenant: 2
doc/samples/tasks/glance/create-and-delete-image.json
@@ -1,11 +1,22 @@
 {
-
     "GlanceImages.create_and_delete_image": [
-        {"args": {"image_url": "http://download.cirros-cloud.net/0.3.1/cirros-0.3.1-x86_64-disk.img",
+        {
+            "args": {
+                "image_url": "http://download.cirros-cloud.net/0.3.1/cirros-0.3.1-x86_64-disk.img",
                 "container_format": "bare",
-                "disk_format": "qcow2"},
-         "execution": "continuous",
-         "config": {"times": 1, "active_users": 1,
-                    "tenants": 1, "users_per_tenant": 1}}
+                "disk_format": "qcow2"
+            },
+            "runner": {
+                "type": "continuous",
+                "times": 10,
+                "active_users": 2
+            },
+            "context": {
+                "users": {
+                    "tenants": 2,
+                    "users_per_tenant": 3
+                }
+            }
+        }
     ]
 }
doc/samples/tasks/glance/create-and-delete-image.yaml (new file)
@@ -0,0 +1,15 @@
---
  GlanceImages.create_and_delete_image:
    -
      args:
        image_url: "http://download.cirros-cloud.net/0.3.1/cirros-0.3.1-x86_64-disk.img"
        container_format: "bare"
        disk_format: "qcow2"
      runner:
        type: "continuous"
        times: 10
        active_users: 2
      context:
        users:
          tenants: 2
          users_per_tenant: 3
@@ -1,12 +1,24 @@
 {
     "GlanceImages.create_image_and_boot_instances": [
-        {"args": {"image_url": "http://download.cirros-cloud.net/0.3.1/cirros-0.3.1-x86_64-disk.img",
+        {
+            "args": {
+                "image_url": "http://download.cirros-cloud.net/0.3.1/cirros-0.3.1-x86_64-disk.img",
                 "container_format": "bare",
                 "disk_format": "qcow2",
                 "flavor_id": 42,
-                  "number_instances": 2},
-         "execution": "continuous",
-         "config": {"times": 1, "active_users": 1,
-                    "tenants": 1, "users_per_tenant": 1}}
+                "number_instances": 2
+            },
+            "runner": {
+                "type": "continuous",
+                "times": 10,
+                "active_users": 2
+            },
+            "context": {
+                "users": {
+                    "tenants": 3,
+                    "users_per_tenant": 5
+                }
+            }
+        }
     ]
 }
@@ -0,0 +1,17 @@
---
  GlanceImages.create_image_and_boot_instances:
    -
      args:
        image_url: "http://download.cirros-cloud.net/0.3.1/cirros-0.3.1-x86_64-disk.img"
        container_format: "bare"
        disk_format: "qcow2"
        flavor_id: 42
        number_instances: 2
      runner:
        type: "continuous"
        times: 10
        active_users: 2
      context:
        users:
          tenants: 3
          users_per_tenant: 5
doc/samples/tasks/keystone/create-and-delete-user.json
@@ -1,7 +1,14 @@
 {
     "KeystoneBasic.create_delete_user": [
-        {"args": {"name_length": 10},
-         "execution": "continuous",
-         "config": {"times": 100, "active_users": 10}}
+        {
+            "args": {
+                "name_length": 10
+            },
+            "runner": {
+                "type": "continuous",
+                "times": 100,
+                "active_users": 10
+            }
+        }
     ]
 }
doc/samples/tasks/keystone/create-and-delete-user.yaml (new file)
@@ -0,0 +1,9 @@
---
  KeystoneBasic.create_delete_user:
    -
      args:
        name_length: 10
      runner:
        type: "continuous"
        times: 100
        active_users: 10
doc/samples/tasks/keystone/create-user.json
@@ -1,7 +1,14 @@
 {
     "KeystoneBasic.create_user": [
-        {"args": {"name_length": 10},
-         "execution": "continuous",
-         "config": {"times": 100, "active_users": 10}}
+        {
+            "args": {
+                "name_length": 10
+            },
+            "runner": {
+                "type": "continuous",
+                "times": 100,
+                "active_users": 10
+            }
+        }
     ]
 }
doc/samples/tasks/keystone/create-user.yaml (new file)
@@ -0,0 +1,9 @@
---
  KeystoneBasic.create_user:
    -
      args:
        name_length: 10
      runner:
        type: "continuous"
        times: 100
        active_users: 10
@@ -1,11 +0,0 @@
-{
-    "NovaServers.boot_and_delete_server": [
-        {
-            "args": {"flavor_id": 1,
-                     "image_id": "73257560-c59b-4275-a1ec-ab140e5b9979"},
-            "execution": "periodic",
-            "config": {"times": 10, "period": 2, "tenants": 3,
-                       "users_per_tenant": 2}
-        }
-    ]
-}
doc/samples/tasks/nova/boot-and-delete.json
@@ -1,11 +1,21 @@
 {
     "NovaServers.boot_and_delete_server": [
         {
-            "args": {"flavor_id": 1,
-                     "image_id": "73257560-c59b-4275-a1ec-ab140e5b9979"},
-            "execution": "continuous",
-            "config": {"times": 10, "active_users": 2, "tenants": 3,
-                       "users_per_tenant": 2}
+            "args": {
+                "flavor_id": 1,
+                "image_id": "73257560-c59b-4275-a1ec-ab140e5b9979"
+            },
+            "runner": {
+                "type": "continuous",
+                "times": 10,
+                "active_users": 2
+            },
+            "context": {
+                "users": {
+                    "tenants": 3,
+                    "users_per_tenant": 2
+                }
+            }
         }
     ]
 }
doc/samples/tasks/nova/boot-and-delete.yaml (new file)
@@ -0,0 +1,14 @@
---
  NovaServers.boot_and_delete_server:
    -
      args:
        flavor_id: 1
        image_id: "73257560-c59b-4275-a1ec-ab140e5b9979"
      runner:
        type: "continuous"
        times: 10
        active_users: 2
      context:
        users:
          tenants: 3
          users_per_tenant: 2
doc/samples/tasks/nova/boot-bounce-delete.json (new file)
@@ -0,0 +1,27 @@
{
    "NovaServers.boot_and_bounce_server": [
        {
            "args": {
                "flavor_id": 1,
                "image_id": "3fa4482f-677a-4488-adaf-c48befac5e5a",
                "actions": [
                    {"hard_reboot": 1},
                    {"soft_reboot": 1},
                    {"stop_start": 1},
                    {"rescue_unrescue": 1}
                ]
            },
            "runner": {
                "type": "continuous",
                "times": 10,
                "active_users": 2
            },
            "context": {
                "users": {
                    "tenants": 3,
                    "users_per_tenant": 2
                }
            }
        }
    ]
}
doc/samples/tasks/nova/boot-bounce-delete.yaml (new file)
@@ -0,0 +1,23 @@
---
  NovaServers.boot_and_bounce_server:
    -
      args:
        flavor_id: 1
        image_id: "3fa4482f-677a-4488-adaf-c48befac5e5a"
        actions:
          -
            hard_reboot: 1
          -
            soft_reboot: 1
          -
            stop_start: 1
          -
            rescue_unrescue: 1
      runner:
        type: "continuous"
        times: 10
        active_users: 2
      context:
        users:
          tenants: 3
          users_per_tenant: 2
doc/samples/tasks/nova/boot-from-volume-and-delete.json (new file)
@@ -0,0 +1,22 @@
{
    "NovaServers.boot_server_from_volume_and_delete": [
        {
            "args": {
                "flavor_id": 1,
                "image_id": "73257560-c59b-4275-a1ec-ab140e5b9979",
                "volume_size": 10
            },
            "runner": {
                "type": "continuous",
                "times": 10,
                "active_users": 2
            },
            "context": {
                "users": {
                    "tenants": 3,
                    "users_per_tenant": 2
                }
            }
        }
    ]
}
doc/samples/tasks/nova/boot-from-volume-and-delete.yaml (new file)
@@ -0,0 +1,15 @@
---
  NovaServers.boot_server_from_volume_and_delete:
    -
      args:
        flavor_id: 1
        image_id: "73257560-c59b-4275-a1ec-ab140e5b9979"
        volume_size: 10
      runner:
        type: "continuous"
        times: 10
        active_users: 2
      context:
        users:
          tenants: 3
          users_per_tenant: 2
doc/samples/tasks/nova/boot-from-volume.json (new file)
@@ -0,0 +1,22 @@
{
    "NovaServers.boot_server_from_volume": [
        {
            "args": {
                "flavor_id": 1,
                "image_id": "73257560-c59b-4275-a1ec-ab140e5b9979",
                "volume_size": 10
            },
            "runner": {
                "type": "continuous",
                "times": 10,
                "active_users": 2
            },
            "context": {
                "users": {
                    "tenants": 3,
                    "users_per_tenant": 2
                }
            }
        }
    ]
}
doc/samples/tasks/nova/boot-from-volume.yaml (new file)
@@ -0,0 +1,15 @@
---
  NovaServers.boot_server_from_volume:
    -
      args:
        flavor_id: 1
        image_id: "73257560-c59b-4275-a1ec-ab140e5b9979"
        volume_size: 10
      runner:
        type: "continuous"
        times: 10
        active_users: 2
      context:
        users:
          tenants: 3
          users_per_tenant: 2
doc/samples/tasks/nova/boot-runcommand-delete.json
@@ -1,10 +1,24 @@
 {
     "NovaServers.boot_runcommand_delete_server": [
-        {"args": {"flavor_id": "your flavor id here",
-                  "image_id": "your image id here",
+        {
+            "args": {
+                "flavor_id": "1",
+                "image_id": "73257560-c59b-4275-a1ec-ab140e5b9979",
                 "script": "doc/samples/support/instance_dd_test.sh",
                 "interpreter": "/bin/sh",
-                  "username": "ubuntu"},
-         "config": {"times": 2, "active_users": 1}}
+                "username": "ubuntu"
+            },
+            "runner": {
+                "type": "continuous",
+                "times": 10,
+                "active_users": 2
+            },
+            "context": {
+                "users": {
+                    "tenants": 3,
+                    "users_per_tenant": 2
+                }
+            }
+        }
     ]
 }
doc/samples/tasks/nova/boot-runcommand-delete.yaml (new file)
@@ -0,0 +1,17 @@
---
  NovaServers.boot_runcommand_delete_server:
    -
      args:
        flavor_id: "1"
        image_id: "73257560-c59b-4275-a1ec-ab140e5b9979"
        script: "doc/samples/support/instance_dd_test.sh"
        interpreter: "/bin/sh"
        username: "ubuntu"
      runner:
        type: "continuous"
        times: 10
        active_users: 2
      context:
        users:
          tenants: 3
          users_per_tenant: 2
@@ -1,12 +0,0 @@
-{
-    "NovaServers.boot_server_from_volume_and_delete": [
-        {
-            "args": {"flavor_id": 1,
-                     "image_id": "73257560-c59b-4275-a1ec-ab140e5b9979",
-                     "volume_size": 10},
-            "execution": "continuous",
-            "config": {"times": 10, "active_users": 2, "tenants": 3,
-                       "users_per_tenant": 2}
-        }
-    ]
-}
doc/samples/tasks/nova/boot-snapshot-boot-delete.json (new file)
@@ -0,0 +1,21 @@
{
    "NovaServers.snapshot_server": [
        {
            "args": {
                "flavor_id": 1,
                "image_id": "73257560-c59b-4275-a1ec-ab140e5b9979"
            },
            "runner": {
                "type": "continuous",
                "times": 10,
                "active_users": 2
            },
            "context": {
                "users": {
                    "tenants": 3,
                    "users_per_tenant": 2
                }
            }
        }
    ]
}
doc/samples/tasks/nova/boot-snapshot-boot-delete.yaml (new file)
@@ -0,0 +1,14 @@
---
  NovaServers.snapshot_server:
    -
      args:
        flavor_id: 1
        image_id: "73257560-c59b-4275-a1ec-ab140e5b9979"
      runner:
        type: "continuous"
        times: 10
        active_users: 2
      context:
        users:
          tenants: 3
          users_per_tenant: 2
doc/samples/tasks/nova/boot.json (new file)
@@ -0,0 +1,21 @@
{
    "NovaServers.boot_server": [
        {
            "args": {
                "flavor_id": 1,
                "image_id": "73257560-c59b-4275-a1ec-ab140e5b9979"
            },
            "runner": {
                "type": "continuous",
                "times": 10,
                "active_users": 2
            },
            "context": {
                "users": {
                    "tenants": 3,
                    "users_per_tenant": 2
                }
            }
        }
    ]
}
doc/samples/tasks/nova/boot.yaml (new file)
@@ -0,0 +1,14 @@
---
  NovaServers.boot_server:
    -
      args:
        flavor_id: 1
        image_id: "73257560-c59b-4275-a1ec-ab140e5b9979"
      runner:
        type: "continuous"
        times: 10
        active_users: 2
      context:
        users:
          tenants: 3
          users_per_tenant: 2
@@ -1,10 +0,0 @@
-{
-    "NovaServers.boot_and_bounce_server": [
-        {
-            "args": {"flavor_id": 1,
-                     "image_id": "3fa4482f-677a-4488-adaf-c48befac5e5a",
-                     "actions": [{"rescue_unrescue": 1}]},
-            "config": {"times": 1, "active_users": 1}
-        }
-    ]
-}
@@ -1,11 +0,0 @@
-{
-    "NovaServers.boot_and_bounce_server": [
-        {
-            "args": {"flavor_id": 2,
-                     "image_id": "539ccae5-5982-4868-b176-23c41ff1195e",
-                     "actions": [{"soft_reboot": 4}]},
-            "execution": "continuous",
-            "config": {"times": 3, "active_users": 2}
-        }
-    ]
-}
@@ -16,4 +16,5 @@
 from rally import utils as rutils


 rutils.import_modules_from_package('rally.benchmark.context')
+rutils.import_modules_from_package('rally.benchmark.runners')
rally/benchmark/context/base.py
@@ -31,28 +31,30 @@ class Context(object):
     2) Validation of input args
     3) Common logging

-    Actually the same functionionallity as
+    Actually the same functionality as
     runners.base.ScenarioRunner and scenarios.base.Scenario
     """

-    __name__ = "basecontext"
+    __ctx_name__ = "base"

     CONFIG_SCHEMA = {}

     def __init__(self, context):
-        self.config = context.get("config", {}).get(self.__name__, {})
+        self.config = context.get("config", {}).get(self.__ctx_name__, {})
         self.context = context
         self.task = context["task"]

     @staticmethod
-    def validate(cls, context):
-        jsonschema.validate(context, cls.CONFIG_SCHEMA)
+    def validate(context):
+        for name, config in context.iteritems():
+            ctx = Context.get_by_name(name)
+            jsonschema.validate(config, ctx.CONFIG_SCHEMA)

     @staticmethod
     def get_by_name(name):
         """Returns Context class by name."""
         for context in utils.itersubclasses(Context):
-            if name == context.__name__:
+            if name == context.__ctx_name__:
                 return context
         raise exceptions.NoSuchContext(name=name)
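The reworked validate() above walks the "context" section of a benchmark entry and checks each sub-config against the CONFIG_SCHEMA of the context class registered under that name. A minimal usage sketch (the "users" context name and its keys come from this diff):

    from rally.benchmark.context import base

    # Each key must match some subclass's __ctx_name__ ("users", "cleaner",
    # "allow_ssh", ...). Unknown names raise NoSuchContext; configs that
    # violate the matching CONFIG_SCHEMA raise jsonschema.ValidationError.
    base.Context.validate({"users": {"tenants": 3, "users_per_tenant": 50}})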
rally/benchmark/context/cleaner.py
@@ -30,7 +30,7 @@ LOG = logging.getLogger(__name__)
 class ResourceCleaner(base.Context):
     """Context class for resource cleanup (both admin and non-admin)."""

-    __name__ = "cleaner"
+    __ctx_name__ = "cleaner"

     CONFIG_SCHEMA = {
         "type": "object",
rally/benchmark/context/secgroup.py
@@ -76,7 +76,7 @@ def _prepare_open_secgroup(endpoint):


 class AllowSSH(base.Context):
-    __name__ = "allow_ssh"
+    __ctx_name__ = "allow_ssh"

     def __init__(self, context):
         super(AllowSSH, self).__init__(context)
rally/benchmark/context/users.py
@@ -28,7 +28,7 @@ LOG = logging.getLogger(__name__)
 class UserGenerator(base.Context):
     """Context class for generating temporary users/tenants for benchmarks."""

-    __name__ = "users"
+    __ctx_name__ = "users"

     CONFIG_SCHEMA = {
         "type": "object",
rally/benchmark/engine.py
@@ -15,7 +15,9 @@

 import json
+import jsonschema
+import traceback

+from rally.benchmark.context import base as base_ctx
 from rally.benchmark.context import users as users_ctx
 from rally.benchmark.runners import base as base_runner
 from rally.benchmark.scenarios import base as base_scenario
@@ -33,28 +35,25 @@ LOG = logging.getLogger(__name__)

 CONFIG_SCHEMA = {
     "type": "object",
-    "$schema": "http://json-schema.org/draft-03/schema",
+    "$schema": "http://json-schema.org/draft-04/schema#",
     "patternProperties": {
         ".*": {
             "type": "array",
             "items": {
                 "type": "object",
                 "properties": {
-                    "args": {"type": "object"},
-                    "init": {"type": "object"},
-                    "execution": {"enum": ["continuous", "periodic"]},
-                    "config": {
+                    "args": {
+                        "type": "object"
+                    },
+                    "runner": {
                         "type": "object",
                         "properties": {
-                            "times": {"type": "integer"},
-                            "duration": {"type": "number"},
-                            "active_users": {"type": "integer"},
-                            "period": {"type": "number"},
-                            "tenants": {"type": "integer"},
-                            "users_per_tenant": {"type": "integer"},
-                            "timeout": {"type": "number"}
+                            "type": {"type": "string"}
                         },
-                        "additionalProperties": False
+                        "required": ["type"]
                     },
+                    "context": {
+                        "type": "object"
+                    }
                 },
                 "additionalProperties": False
@@ -67,7 +66,7 @@ CONFIG_SCHEMA = {
 class BenchmarkEngine(object):
     """The Benchmark engine class, an instance of which is initialized by the
     Orchestrator with the benchmarks configuration and then is used to execute
-    all specified benchmark scnearios.
+    all specified benchmark scenarios.
     .. note::

         Typical usage:
@@ -86,93 +85,72 @@ class BenchmarkEngine(object):
         """
         self.config = config
         self.task = task
-        self._validate_config()

     @rutils.log_task_wrapper(LOG.info,
-                             _("Benchmark config format validation."))
-    def _validate_config(self):
-        task_uuid = self.task['uuid']
-        # Perform schema validation
+                             _("Task validation of scenarios names."))
+    def _validate_config_scenarios_name(self, config):
+        available = set(base_scenario.Scenario.list_benchmark_scenarios())
+        specified = set(config.iterkeys())
+
+        if not specified.issubset(available):
+            names = ", ".join(specified - available)
+            raise exceptions.NotFoundScenarios(names=names)
+
+    @rutils.log_task_wrapper(LOG.info, _("Task validation of syntax."))
+    def _validate_config_syntax(self, config):
+        for scenario, values in config.iteritems():
+            for pos, kw in enumerate(values):
                 try:
-                    jsonschema.validate(self.config, CONFIG_SCHEMA)
-                except jsonschema.ValidationError as e:
-                    LOG.exception(_('Task %s: Error: %s') % (task_uuid, e.message))
-                    raise exceptions.InvalidConfigException(message=e.message)
+                    base_runner.ScenarioRunner.validate(kw.get("runner", {}))
+                    base_ctx.Context.validate(kw.get("context", {}))
+                except (exceptions.RallyException,
+                        jsonschema.ValidationError) as e:
+                    raise exceptions.InvalidBenchmarkConfig(name=scenario,
+                                                            pos=pos, args=kw,
+                                                            reason=e.message)

-        # Check for benchmark scenario names
-        available_scenarios = \
-            set(base_scenario.Scenario.list_benchmark_scenarios())
-        for scenario in self.config:
-            if scenario not in available_scenarios:
-                LOG.exception(_('Task %s: Error: the specified '
-                                'benchmark scenario does not exist: %s') %
-                              (task_uuid, scenario))
-                raise exceptions.NoSuchScenario(name=scenario)
-            # Check for conflicting config parameters
-            for run in self.config[scenario]:
-                if 'times' in run['config'] and 'duration' in run['config']:
-                    message = _("'times' and 'duration' cannot be set "
-                                "simultaneously for one continuous "
-                                "scenario run.")
-                    LOG.exception(_('Task %s: Error: %s') % (task_uuid,
-                                                             message))
-                    raise exceptions.InvalidConfigException(message=message)
-                if ((run.get('execution', 'continuous') == 'periodic' and
-                     'active_users' in run['config'])):
-                    message = _("'active_users' parameter cannot be set "
-                                "for periodic test runs.")
-                    LOG.exception(_('Task %s: Error: %s') % (task_uuid,
-                                                             message))
-                    raise exceptions.InvalidConfigException(message=message)
+    def _validate_config_sematic_helper(self, admin, user, name, pos, kwargs):
+        args = {} if not kwargs else kwargs.get("args", {})
+        try:
+            base_scenario.Scenario.validate(name, args, admin=admin,
+                                            users=[user])
+        except exceptions.InvalidScenarioArgument as e:
+            kw = {"name": name, "pos": pos, "args": args, "reason": e.message}
+            raise exceptions.InvalidBenchmarkConfig(**kw)

-    @rutils.log_task_wrapper(LOG.info,
-                             _("Benchmark config parameters validation."))
-    def _validate_scenario_args(self, name, kwargs):
-        cls_name, method_name = name.split(".")
-        cls = base_scenario.Scenario.get_by_name(cls_name)
-
-        method = getattr(cls, method_name)
-        validators = getattr(method, "validators", [])
-
-        args = kwargs.get("args", {})
-
-        # NOTE(msdubov): Some scenarios may require validation from admin,
-        #                while others use ordinary clients.
-        admin_validators = [v for v in validators
-                            if v.permission == consts.EndpointPermission.ADMIN]
-        user_validators = [v for v in validators
-                           if v.permission == consts.EndpointPermission.USER]
-
-        def validate(validators, clients):
-            for validator in validators:
-                result = validator(clients=clients, **args)
-                if not result.is_valid:
-                    raise exceptions.InvalidScenarioArgument(
-                        message=result.msg)
-
-        # NOTE(msdubov): In case of generated users (= admin mode) - validate
-        #                first the admin validators, then the user ones
-        #                (with one temporarily created user).
-        if self.admin_endpoint:
-            admin_client = osclients.Clients(self.admin_endpoint)
-            validate(admin_validators, admin_client)
+    @rutils.log_task_wrapper(LOG.info, _("Task validation of semantic."))
+    def _validate_config_semantic(self, config):
+        # NOTE(boris-42): In future we will have more complex context, because
+        #                 we will have pre-created users mode as well.
         context = {
             "task": self.task,
             "admin": {"endpoint": self.admin_endpoint}
         }
-        with users_ctx.UserGenerator(context) as generator:
-            # TODO(boris-42): refactor this peace
-            generator.setup()
-            user = context["users"][0]
-            user_client = osclients.Clients(user["endpoint"])
-            validate(user_validators, user_client)
-        # NOTE(msdubov): In case of pre-created users - validate
-        #                for all of them.
-        else:
-            for user in self.users:
-                user_client = osclients.Clients(user)
-                validate(user_validators, user_client)
+        with users_ctx.UserGenerator(context) as ctx:
+            ctx.setup()
+            admin = osclients.Clients(self.admin_endpoint)
+            user = osclients.Clients(context["users"][0]["endpoint"])
+
+            for name, values in config.iteritems():
+                for pos, kwargs in enumerate(values):
+                    self._validate_config_sematic_helper(admin, user, name,
+                                                         pos, kwargs)
+
+    @rutils.log_task_wrapper(LOG.info, _("Task validation."))
+    def validate(self):
+        """Perform full task configuration validation."""
+        self.task.update_status(consts.TaskStatus.VERIFYING)
+        try:
+            jsonschema.validate(self.config, CONFIG_SCHEMA)
+            self._validate_config_scenarios_name(self.config)
+            self._validate_config_syntax(self.config)
+            self._validate_config_semantic(self.config)
+        except Exception as e:
+            log = [str(type(e)), str(e), json.dumps(traceback.format_exc())]
+            self.task.set_failed(log=log)
+            raise exceptions.InvalidTaskException(message=str(e))
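The new validate() above is the single entry point that runs the whole-config schema check and then the three stages in the order given in the commit message; on any failure it stores a three-element log (exception type, message, JSON-encoded traceback) in task["verification_log"] before re-raising InvalidTaskException. A sketch of the resulting caller-side flow, matching the orchestrator change later in this diff:

    eng = engine.BenchmarkEngine(config, task)
    eng.bind(endpoints)
    try:
        eng.validate()   # schema -> scenario names -> syntax -> semantic
        eng.run()
    except exceptions.InvalidTaskException:
        # The task is already marked failed; the reason is stored in
        # task["verification_log"] as [type, message, traceback].
        pass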

     @rutils.log_task_wrapper(LOG.info, _("Benchmarking."))
     def run(self):
         """Runs the benchmarks according to the test configuration
         the benchmark engine was initialized with.
@@ -180,33 +158,21 @@ class BenchmarkEngine(object):
         :returns: List of dicts, each dict containing the results of all the
                   corresponding benchmark test launches
         """
-        self.task.update_status(consts.TaskStatus.TEST_TOOL_BENCHMARKING)
-
+        self.task.update_status(consts.TaskStatus.RUNNING)
         results = {}
         for name in self.config:
             for n, kwargs in enumerate(self.config[name]):
                 key = {'name': name, 'pos': n, 'kw': kwargs}
-                try:
-                    self._validate_scenario_args(name, kwargs)
-                    scenario_runner = base_runner.ScenarioRunner.get_runner(
-                        self.task, self.endpoints, kwargs)
-                    result = scenario_runner.run(name, kwargs)
-                    self.task.append_results(key, {"raw": result,
-                                                   "validation":
-                                                   {"is_valid": True}})
-                    results[json.dumps(key)] = result
-                except exceptions.InvalidScenarioArgument as e:
-                    self.task.append_results(key, {"raw": [],
-                                                   "validation":
-                                                   {"is_valid": False,
-                                                    "exc_msg": e.message}})
-                    self.task.set_failed()
-                    LOG.error(_("Scenario (%(pos)s, %(name)s) input arguments "
-                                "validation error: %(msg)s") %
-                              {"pos": n, "name": name, "msg": e.message})
+                runner = kwargs.get("runner", {}).get("type", "continuous")
+                scenario_runner = base_runner.ScenarioRunner.get_runner(
+                    self.task, self.endpoints, runner)
+                result = scenario_runner.run(name, kwargs)
+                self.task.append_results(key, {"raw": result})
+                results[json.dumps(key)] = result

         self.task.update_status(consts.TaskStatus.FINISHED)
         return results

     @rutils.log_task_wrapper(LOG.info, _("Check cloud."))
     def bind(self, endpoints):
         self.endpoints = [endpoint.Endpoint(**endpoint_dict)
                           for endpoint_dict in endpoints]
@@ -219,12 +185,3 @@ class BenchmarkEngine(object):
         clients = osclients.Clients(self.admin_endpoint)
         clients.verified_keystone()
         return self
-
-    def __enter__(self):
-        pass
-
-    def __exit__(self, exc_type, exc_value, exc_traceback):
-        if exc_type is not None:
-            self.task.set_failed()
-        else:
-            self.task.update_status(consts.TaskStatus.FINISHED)
rally/benchmark/runners/base.py
@@ -15,7 +15,7 @@

 import abc

+import jsonschema
 from oslo.config import cfg

 from rally.benchmark.context import cleaner as cleaner_ctx
@@ -24,6 +24,7 @@ from rally.benchmark.context import secgroup as secgroup_ctx
 from rally.benchmark.context import users as users_ctx
 from rally.benchmark.scenarios import base
 from rally.benchmark import utils
+from rally import exceptions
 from rally.openstack.common import log as logging
 from rally import osclients
 from rally import utils as rutils
@@ -73,6 +74,8 @@ class ScenarioRunner(object):
     in the_run_scenario() method.
     """

+    CONFIG_SCHEMA = {}
+
     def __init__(self, task, endpoints):
         base.Scenario.register()
@@ -85,13 +88,22 @@
         self.users = []

     @staticmethod
-    def get_runner(task, endpoint, config):
-        """Returns instance of a scenario runner for execution type."""
-        execution_type = config.get('execution', 'continuous')
+    def _get_cls(runner_type):
         for runner in rutils.itersubclasses(ScenarioRunner):
-            if execution_type == runner.__execution_type__:
-                new_runner = runner(task, endpoint)
-                return new_runner
+            if runner_type == runner.__execution_type__:
+                return runner
+        raise exceptions.NoSuchRunner(type=runner_type)
+
+    @staticmethod
+    def get_runner(task, endpoint, runner_type):
+        """Returns instance of a scenario runner for execution type."""
+        return ScenarioRunner._get_cls(runner_type)(task, endpoint)
+
+    @staticmethod
+    def validate(config):
+        """Validates runner's part of task config."""
+        runner = ScenarioRunner._get_cls(config.get("type", "continuous"))
+        jsonschema.validate(config, runner.CONFIG_SCHEMA)
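With this change every runner owns its schema: validate() resolves the runner class from config["type"] (defaulting to "continuous") and checks the remaining options against that class's CONFIG_SCHEMA. A usage sketch (the options match the ContinuousScenarioRunner schema added later in this diff):

    from rally.benchmark.runners import base

    base.ScenarioRunner.validate({"type": "continuous",
                                  "times": 10, "active_users": 2})
    # An unknown "type" raises NoSuchRunner; options outside the runner's
    # CONFIG_SCHEMA raise jsonschema.ValidationError.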

     @abc.abstractmethod
     def _run_scenario(self, cls, method_name, context, args, config):
@@ -110,11 +122,11 @@
         """

     def _prepare_and_run_scenario(self, context, name, kwargs):
-        cls_name, method_name = name.split(".")
+        cls_name, method_name = name.split(".", 1)
         cls = base.Scenario.get_by_name(cls_name)

         args = kwargs.get('args', {})
-        config = kwargs.get('config', {})
+        config = kwargs.get('runner', {})

         with secgroup_ctx.AllowSSH(context) as allow_ssh:
             allow_ssh.setup()
@@ -125,17 +137,10 @@
                                      args, config)

     def _run_as_admin(self, name, kwargs):
-        config = kwargs.get('config', {})
-
         context = {
             "task": self.task,
             "admin": {"endpoint": self.admin_user},
-            "config": {
-                "users": {
-                    "tenants": config.get("tenants", 1),
-                    "users_per_tenant": config.get("users_per_tenant", 1)
-                }
-            }
+            "config": kwargs.get("context", {})
         }

         with users_ctx.UserGenerator(context) as generator:
rally/benchmark/runners/continuous.py
@@ -42,6 +42,33 @@ class ContinuousScenarioRunner(base.ScenarioRunner):

     __execution_type__ = "continuous"

+    CONFIG_SCHEMA = {
+        "type": "object",
+        "$schema": "http://json-schema.org/draft-03/schema",
+        "properties": {
+            "type": {
+                "type": "string"
+            },
+            "active_users": {
+                "type": "number",
+                "minimum": 1
+            },
+            "times": {
+                "type": "number",
+                "minimum": 1
+            },
+            "duration": {
+                "type": "float",
+                "minimum": 1.0
+            },
+            "timeout": {
+                "type": "number",
+                "minimum": 1
+            }
+        },
+        "additionalProperties": False
+    }
+
     def _run_scenario_continuously_for_times(self, cls, method, context, args,
                                              times, concurrent, timeout):
         test_args = [(i, cls, method, context["admin"],
rally/benchmark/runners/periodic.py
@@ -39,6 +39,27 @@ class PeriodicScenarioRunner(base.ScenarioRunner):

     __execution_type__ = "periodic"

+    CONFIG_SCHEMA = {
+        "type": "object",
+        "$schema": "http://json-schema.org/draft-03/schema",
+        "properties": {
+            "type": "string",
+            "times": {
+                "type": "number",
+                "minimum": 1
+            },
+            "period": {
+                "type": "float",
+                "minimum": "0.000001"
+            },
+            "timeout": {
+                "type": "number",
+                "minimum": 1
+            }
+        },
+        "additionalProperties": False
+    }
+
     def _run_scenario(self, cls, method_name, context, args, config):

         times = config["times"]
rally/benchmark/scenarios/base.py
@@ -17,14 +17,15 @@ import itertools
 import random
 import time

+from rally import consts
 from rally import exceptions
 from rally import utils


 class Scenario(object):
     """This is base class for any benchmark scenario.
-    You should create subclass of this class. And you test scnerios will
-    be autodiscoverd and you will be able to specify it in test config.
+    You should create subclass of this class. And you test scenarios will
+    be auto discoverable and you will be able to specify it in test config.
     """
     registred = False
@@ -68,6 +69,38 @@
                                               benchmark_scenarios))
         return benchmark_scenarios_flattened

+    @staticmethod
+    def _validate_helper(validators, clients, args):
+        for validator in validators:
+            result = validator(clients=clients, **args)
+            if not result.is_valid:
+                raise exceptions.InvalidScenarioArgument(message=result.msg)
+
+    @staticmethod
+    def validate(name, args, admin=None, users=None):
+        """Semantic check of benchmark arguments."""
+        cls_name, method_name = name.split(".", 1)
+        cls = Scenario.get_by_name(cls_name)
+
+        method = getattr(cls, method_name)
+        validators = getattr(method, "validators", [])
+
+        if not validators:
+            return
+
+        admin_validators = [v for v in validators
+                            if v.permission == consts.EndpointPermission.ADMIN]
+        user_validators = [v for v in validators
+                           if v.permission == consts.EndpointPermission.USER]
+
+        # NOTE(boris-42): Potential bug, what if we don't have "admin" client
+        #                 and scenario have "admin" validators.
+        if admin:
+            Scenario._validate_helper(admin_validators, admin, args)
+        if users:
+            for user in users:
+                Scenario._validate_helper(user_validators, user, args)
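Scenario.validate() above is the semantic stage: it looks up the scenario method, splits its validators by required permission, and runs admin validators against the admin clients and user validators against each user's clients. A sketch of the call the engine's _validate_config_sematic_helper makes (the client objects stand for osclients.Clients instances; the scenario name and args are illustrative):

    from rally.benchmark.scenarios import base

    base.Scenario.validate("NovaServers.boot_server",
                           {"flavor_id": 1, "image_id": "some-image-uuid"},
                           admin=admin_clients, users=[user_clients])
    # Raises InvalidScenarioArgument as soon as any validator returns a
    # ValidationResult with is_valid == False.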

     def context(self):
         """Returns the context of the current benchmark scenario.

rally/cmd/commands/task.py
@@ -25,9 +25,10 @@ import pprint
 import prettytable
 import sys
 import webbrowser

 import yaml

+from oslo.config import cfg

 from rally.benchmark.processing import plot
 from rally.cmd import cliutils
 from rally.cmd import envutils
@@ -57,11 +58,14 @@ class TaskCommands(object):
         config_dict = yaml.safe_load(task_file.read())
         try:
             task = api.create_task(deploy_id, tag)
-            self.list(task_list=[task])
+            print("=" * 80)
+            print(_("Task %(tag)s %(uuid)s is started")
+                  % {"uuid": task["uuid"], "tag": task["tag"]})
+            print("-" * 80)
             api.start_task(deploy_id, config_dict, task=task)
             self.detailed(task_id=task['uuid'])
-        except exceptions.InvalidArgumentsException:
-            print(_("Reason: %s") % sys.exc_info()[1])
+        except exceptions.InvalidConfigException:
+            sys.exit(1)

     @cliutils.args('--task-id', type=str, dest='task_id', help='UUID of task')
     def abort(self, task_id):
@@ -169,11 +173,22 @@ class TaskCommands(object):

         print()
         print("=" * 80)
-        print(_("Task %(task_id)s is %(status)s. Failed: %(failed)s")
-              % {'task_id': task_id,
-                 'status': task['status'],
-                 'failed': task['failed']
-                 })
+        print(_("Task %(task_id)s is %(status)s.")
+              % {"task_id": task_id, "status": task["status"]})
+
+        if task["failed"]:
+            print("-" * 80)
+            verification = yaml.safe_load(task["verification_log"])
+
+            if not cfg.CONF.debug:
+                print(verification[0])
+                print(verification[1])
+                print()
+                print(_("For more details run:\nrally -vd task detailed %s")
+                      % task["uuid"])
+            else:
+                print(yaml.safe_load(verification[2]))
+            return

         for result in task["results"]:
             key = result["key"]
@@ -184,11 +199,6 @@ class TaskCommands(object):
             print("args values:")
             pprint.pprint(key["kw"])

-            if not result["data"]["validation"]["is_valid"]:
-                print("-" * 80)
-                print(result["data"]["validation"]["exc_msg"])
-                continue
-
             _print_atomic_actions_time(result["data"]["raw"])

             raw = result["data"]["raw"]
@@ -252,6 +262,14 @@ class TaskCommands(object):
             if result['scenario_output']['errors']:
                 print(result['scenario_output']['errors'])

+        print()
+        print("HINTS:")
+        print(_("* To plot HTML graphics with this data, run:"))
+        print("\trally task plot2html %s --out output.html" % task["uuid"])
+        print()
+        print(_("* To get raw JSON output of task results, run:"))
+        print("\trally task results %s\n" % task["uuid"])
+
     @cliutils.args('--task-id', type=str, dest='task_id', help='uuid of task')
     @cliutils.args('--pretty', type=str, help=('pretty print (pprint) '
                                                'or json print (json)'))
rally/consts.py
@@ -25,15 +25,13 @@ from rally import utils


 class _TaskStatus(utils.ImmutableMixin, utils.EnumMixin):
-    INIT = 'init'
-    CLEANUP = 'cleanup'
-    FINISHED = 'finished'
-    FAILED = 'failed'
-
-    TEST_TOOL_PATCHING_OPENSTACK = 'test_tool->patching_openstack'
-    TEST_TOOL_VERIFY_OPENSTACK = 'test_tool->verify_openstack'
-    TEST_TOOL_BENCHMARKING = 'test_tool->benchmarking'
-    TEST_TOOL_PROCESSING_RESULTS = 'test_tool->result_processing'
+    INIT = "init"
+    VERIFYING = "verifying"
+    SETTING_UP = "setting up"
+    RUNNING = "running"
+    CLEANING_UP = "cleaning up"
+    FINISHED = "finished"
+    FAILED = "failed"


 class _DeployStatus(utils.ImmutableMixin, utils.EnumMixin):
rally/db/sqlalchemy/models.py
@@ -126,16 +126,17 @@ class Resource(BASE, RallyBase):

 class Task(BASE, RallyBase):
     """Represents a Benchamrk task."""
-    __tablename__ = 'tasks'
+    __tablename__ = "tasks"
     __table_args__ = (
-        sa.Index('task_uuid', 'uuid', unique=True),
+        sa.Index("task_uuid", "uuid", unique=True),
     )

     id = sa.Column(sa.Integer, primary_key=True, autoincrement=True)
     uuid = sa.Column(sa.String(36), default=UUID, nullable=False)
     status = sa.Column(sa.Enum(*list(consts.TaskStatus),
-                               name='enum_tasks_status'),
-                       default=consts.TaskStatus.INIT, nullable=False)
+                               name="enum_tasks_status"),
+                       default=consts.TaskStatus.INIT,
+                       nullable=False)
     failed = sa.Column(sa.Boolean, default=False, nullable=False)
     verification_log = sa.Column(sa.Text, default='')
     tag = sa.Column(sa.String(64), default='')
@@ -147,7 +148,7 @@
     )
     deployment = sa.orm.relationship(
         Deployment,
-        backref=sa.orm.backref('tasks'),
+        backref=sa.orm.backref("tasks"),
         foreign_keys=deployment_uuid,
         primaryjoin=(deployment_uuid == Deployment.uuid),
     )
rally/exceptions.py
@@ -87,9 +87,28 @@ class InvalidArgumentsException(RallyException):


 class InvalidConfigException(RallyException):
     msg_fmt = _("This config has invalid schema: `%(message)s`")


+class InvalidTaskException(InvalidConfigException):
+    msg_fmt = _("This config is invalid: `%(message)s`")
+
+
+class InvalidTaskConfigException(InvalidTaskException):
+    msg_fmt = _("This config has invalid schema: `%(message)s`")
+
+
+class NotFoundScenarios(InvalidTaskException):
+    msg_fmt = _("There are no benchmark scenarios with names: `%(names)s`.")
+
+
+class InvalidBenchmarkConfig(InvalidTaskException):
+    msg_fmt = _("Task config is invalid.\n"
+                "\tBenchmark %(name)s has wrong configuration of args at"
+                " position %(pos)s: %(args)s"
+                "\n\tReason: %(reason)s")
+
+
 class TestException(RallyException):
     msg_fmt = _("Test failed: %(test_message)s")
@@ -110,6 +129,10 @@ class NoSuchScenario(NotFoundException):
     msg_fmt = _("There is no benchmark scenario with name `%(name)s`.")


+class NoSuchRunner(NotFoundException):
+    msg_fmt = _("There is no benchmark runner with type `%(type)s`.")
+
+
 class NoSuchContext(NotFoundException):
     msg_fmt = _("There is no benchmark context with name `%(name)s`.")
rally/objects/task.py
@@ -13,6 +13,8 @@
 # License for the specific language governing permissions and limitations
 # under the License.

+import json
+
 from rally import consts
 from rally import db
@@ -44,10 +46,12 @@ class Task(object):
         self._update({'status': status})

     def update_verification_log(self, log):
-        self._update({'verification_log': log})
+        self._update({'verification_log': json.dumps(log)})

-    def set_failed(self):
-        self._update({'failed': True, 'status': consts.TaskStatus.FAILED})
+    def set_failed(self, log=""):
+        self._update({'failed': True,
+                      'status': consts.TaskStatus.FAILED,
+                      'verification_log': json.dumps(log)})

     def append_results(self, key, value):
         db.task_result_create(self.task['uuid'], key, value)
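Because set_failed() now JSON-encodes the log before storing it, readers must decode verification_log again; the CLI change earlier in this diff does that with yaml.safe_load, which works because valid JSON is a subset of YAML. A minimal round-trip sketch (the log value is illustrative):

    task.set_failed(log=["ExceptionType", "error message", "\"traceback\""])
    # Later, e.g. in `rally task detailed`:
    verification = yaml.safe_load(task["verification_log"])
    print(verification[0])   # exception type
    print(verification[1])   # error message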
rally/orchestrator/api.py
@@ -20,6 +20,7 @@ from rally.benchmark import engine
 from rally import consts
 from rally import db
 from rally import deploy
+from rally import exceptions
 from rally import objects
 from rally.verification.verifiers.tempest import tempest
@@ -97,9 +98,15 @@ def start_task(deploy_uuid, config, task=None):
     task = task or objects.Task(deployment_uuid=deploy_uuid)
     benchmark_engine = engine.BenchmarkEngine(config, task)
     endpoint = deployment['endpoints']

     try:
-        with benchmark_engine.bind(endpoint):
-            benchmark_engine.run()
+        benchmark_engine.bind(endpoint)
+        benchmark_engine.validate()
+        benchmark_engine.run()
+    except exceptions.InvalidTaskException:
+        # NOTE(boris-42): We don't log anything, because it's normal situation
+        #                 that user put wrong config.
+        pass
+    except Exception:
+        deployment.update_status(consts.DeployStatus.DEPLOY_INCONSISTENT)
+        raise
tests for rally.benchmark.context.base:
@@ -14,12 +14,103 @@
 # under the License.


-#import mock
-
-#from rally.benchmark.context import base as base_ctx
+import jsonschema
+import mock

+from rally.benchmark.context import base
+from rally import exceptions
+from tests import fakes
 from tests import test


+class BaseContextTestCase(test.TestCase):
+
+    def test_init(self):
+        context = {
+            "config": {
+                "a": 1,
+                "fake": mock.MagicMock()
+            },
+            "task": mock.MagicMock()
+        }
+
+        ctx = fakes.FakeContext(context)
+        self.assertEqual(ctx.config, context["config"]["fake"])
+        self.assertEqual(ctx.task, context["task"])
+        self.assertEqual(ctx.context, context)
+
+    def test_init_empty_context(self):
+        context = {
+            "task": mock.MagicMock()
+        }
+        ctx = fakes.FakeContext(context)
+        self.assertEqual(ctx.config, {})
+        self.assertEqual(ctx.task, context["task"])
+        self.assertEqual(ctx.context, context)
+
+    def test_validate__context(self):
+        context = {
+            "fake": {"test": 2}
+        }
+        base.Context.validate(context)
+
+    def test_validate__wrong_context(self):
+        context = {
+            "fake": {"nonexisting": 2}
+        }
+        self.assertRaises(jsonschema.ValidationError,
+                          base.Context.validate, context)
+
+    def test_validate__non_existing_context(self):
+        config = {
+            "nonexisting": {"nonexisting": 2}
+        }
+        self.assertRaises(exceptions.NoSuchContext,
+                          base.Context.validate, config)
+
+    @mock.patch("rally.benchmark.context.base.utils.itersubclasses")
+    def test_get_by_name(self, mock_itersubclasses):
+        A = mock.MagicMock()
+        A.__ctx_name__ = "a"
+        B = mock.MagicMock()
+        B.__ctx_name__ = "b"
+        mock_itersubclasses.return_value = [A, B]
+
+        self.assertEqual(A, base.Context.get_by_name("a"))
+        self.assertEqual(B, base.Context.get_by_name("b"))
+
+    @mock.patch("rally.benchmark.context.base.utils.itersubclasses")
+    def test_get_by_name_non_existing(self, mock_itersubclasses):
+        mock_itersubclasses.return_value = []
+        self.assertRaises(exceptions.NoSuchContext,
+                          base.Context.get_by_name, "nonexisting")
+
+    def test_setup_is_abstract(self):
+
+        class A(base.Context):
+
+            def cleanup(self):
+                pass
+
+        self.assertRaises(TypeError, A)
+
+    def test_cleanup_is_abstract(self):
+
+        class A(base.Context):
+
+            def setup(self):
+                pass
+
+        self.assertRaises(TypeError, A)
+
+    def test_with_statement(self):
+        context = {
+            "task": mock.MagicMock()
+        }
+        ctx = fakes.FakeContext(context)
+        ctx.setup = mock.MagicMock()
+        ctx.cleanup = mock.MagicMock()
+
+        with ctx as entered_ctx:
+            self.assertEqual(ctx, entered_ctx)
+
+        ctx.cleanup.assert_called_once_with()
tests for rally.benchmark.runners.base:
@@ -37,9 +37,28 @@ class ScenarioRunnerTestCase(test.TestCase):
     def test_init_calls_register(self, mock_osclients, mock_base):
         mock_osclients.Clients.return_value = fakes.FakeClients()
         base.ScenarioRunner.get_runner(mock.MagicMock(), self.fake_endpoints,
-                                       {"execution": "continuous"})
+                                       "continuous")
         self.assertEqual(mock_base.mock_calls, [mock.call.Scenario.register()])

+    @mock.patch("rally.benchmark.runners.base.jsonschema.validate")
+    @mock.patch("rally.benchmark.runners.base.ScenarioRunner._get_cls")
+    def test_validate(self, mock_get_cls, mock_validate):
+        mock_get_cls.return_value = fakes.FakeRunner
+
+        config = {"type": "fake", "a": 10}
+        base.ScenarioRunner.validate(config)
+        mock_get_cls.assert_called_once_with("fake")
+        mock_validate.assert_called_once_with(config,
+                                              fakes.FakeRunner.CONFIG_SCHEMA)
+
+    @mock.patch("rally.benchmark.runners.base.jsonschema.validate")
+    def test_validate_default_runner(self, mock_validate):
+        config = {"a": 10}
+        base.ScenarioRunner.validate(config)
+        mock_validate.assert_called_once_with(
+            config,
+            continuous.ContinuousScenarioRunner.CONFIG_SCHEMA)
+
     @mock.patch("rally.benchmark.runners.base.rutils")
     @mock.patch("rally.benchmark.runners.base.osclients")
     def test_run_scenario(self, mock_osclients, mock_utils):
@@ -84,7 +103,7 @@ class ScenarioRunnerTestCase(test.TestCase):
         mock_osclients.Clients.return_value = fakes.FakeClients()
         runner = base.ScenarioRunner.get_runner(mock.MagicMock(),
                                                 self.fake_endpoints,
-                                                {"execution": "continuous"})
+                                                "continuous")
         times = 4
         active_users = 2
         results = runner._run_scenario(fakes.FakeScenario,
tests for rally.benchmark.runners.continuous:
@@ -103,7 +103,7 @@ class ContinuousScenarioRunnerTestCase(test.TestCase):

         runner = base.ScenarioRunner.get_runner(mock.MagicMock(),
                                                 self.fake_endpoints,
-                                                {"execution": "continuous"})
+                                                "continuous")

         runner._run_scenario_continuously_for_times = \
             mock.MagicMock(return_value="times")
@@ -131,7 +131,7 @@ class ContinuousScenarioRunnerTestCase(test.TestCase):

         runner = base.ScenarioRunner.get_runner(mock.MagicMock(),
                                                 self.fake_endpoints,
-                                                {"execution": "continuous"})
+                                                "continuous")
         runner._run_scenario_continuously_for_duration = \
             mock.MagicMock(return_value="duration")

tests for rally.benchmark.runners.periodic:
@@ -66,5 +66,5 @@ class PeriodicScenarioRunnerTestCase(test.TestCase):

         runner = base.ScenarioRunner.get_runner(mock.MagicMock(),
                                                 self.fake_endpoints,
-                                                {"execution": "periodic"})
+                                                "periodic")
         self.assertTrue(runner is not None)
tests for rally.benchmark.scenarios.base:
@@ -16,6 +16,8 @@
 import mock

 from rally.benchmark.scenarios import base
+from rally.benchmark import validation
+from rally import consts
 from rally import exceptions
 from tests import fakes
 from tests import test
@@ -48,6 +50,88 @@ class ScenarioTestCase(test.TestCase):
         self.assertRaises(exceptions.NoSuchScenario,
                           base.Scenario.get_by_name, "non existing scenario")

+    def test__validate_helper(self):
+        validators = [
+            mock.MagicMock(return_value=validation.ValidationResult()),
+            mock.MagicMock(return_value=validation.ValidationResult())
+        ]
+        clients = mock.MagicMock()
+        args = {"a": 1, "b": 2}
+        base.Scenario._validate_helper(validators, clients, args)
+        for validator in validators:
+            validator.assert_called_with(clients=clients, **args)
+
+    def test__validate_helper__no_valid(self):
+        validators = [
+            mock.MagicMock(return_value=validation.ValidationResult()),
+            mock.MagicMock(
+                return_value=validation.ValidationResult(is_valid=False)
+            )
+        ]
+        clients = mock.MagicMock()
+        args = {"a": 1, "b": 2}
+        self.assertRaises(exceptions.InvalidScenarioArgument,
+                          base.Scenario._validate_helper,
+                          validators, clients, args)
+
+    @mock.patch("rally.benchmark.scenarios.base.Scenario.get_by_name")
+    def test_validate__no_validators(self, mock_base_get_by_name):
+
+        class FakeScenario(fakes.FakeScenario):
+            pass
+
+        FakeScenario.do_it = mock.MagicMock()
+        FakeScenario.do_it.validators = []
+        mock_base_get_by_name.return_value = FakeScenario
+
+        base.Scenario.validate("FakeScenario.do_it", {"a": 1, "b": 2})
+
+        mock_base_get_by_name.assert_called_once_with("FakeScenario")
+
+    @mock.patch("rally.benchmark.scenarios.base.Scenario._validate_helper")
+    @mock.patch("rally.benchmark.scenarios.base.Scenario.get_by_name")
+    def test_validate__admin_validators(self, mock_base_get_by_name,
+                                        mock_validate_helper):
+
+        class FakeScenario(fakes.FakeScenario):
+            pass
+
+        FakeScenario.do_it = mock.MagicMock()
+        mock_base_get_by_name.return_value = FakeScenario
+
+        validators = [mock.MagicMock(), mock.MagicMock()]
+        for validator in validators:
+            validator.permission = consts.EndpointPermission.ADMIN
+
+        FakeScenario.do_it.validators = validators
+        args = {"a": 1, "b": 2}
+        base.Scenario.validate("FakeScenario.do_it", args, admin="admin")
+        mock_validate_helper.assert_called_once_with(validators, "admin", args)
+
+    @mock.patch("rally.benchmark.scenarios.base.Scenario._validate_helper")
+    @mock.patch("rally.benchmark.scenarios.base.Scenario.get_by_name")
+    def test_validate_user_validators(self, mock_base_get_by_name,
+                                      mock_validate_helper):
+
+        class FakeScenario(fakes.FakeScenario):
+            pass
+
+        FakeScenario.do_it = mock.MagicMock()
+        mock_base_get_by_name.return_value = FakeScenario
+
+        validators = [mock.MagicMock(), mock.MagicMock()]
+        for validator in validators:
+            validator.permission = consts.EndpointPermission.USER
+
+        FakeScenario.do_it.validators = validators
+        args = {"a": 1, "b": 2}
+        base.Scenario.validate("FakeScenario.do_it", args, users=["u1", "u2"])
+
+        mock_validate_helper.assert_has_calls([
+            mock.call(validators, "u1", args),
+            mock.call(validators, "u2", args)
+        ])
+
     @mock.patch("rally.benchmark.scenarios.base.time.sleep")
     @mock.patch("rally.benchmark.scenarios.base.random.uniform")
     def test_sleep_between(self, mock_uniform, mock_sleep):
tests for rally.benchmark.engine:
@@ -15,10 +15,10 @@

 """Tests for the Test engine."""

+import jsonschema
 import mock

 from rally.benchmark import engine
-from rally.benchmark import validation
 from rally import consts
 from rally import exceptions
 from tests import fakes
@ -27,290 +27,249 @@ from tests import test

class BenchmarkEngineTestCase(test.TestCase):

    def setUp(self):
        super(BenchmarkEngineTestCase, self).setUp()
    def test_init(self):
        config = mock.MagicMock()
        task = mock.MagicMock()
        eng = engine.BenchmarkEngine(config, task)
        self.assertEqual(eng.config, config)
        self.assertEqual(eng.task, task)

        self.valid_test_config_continuous_times = {
            'NovaServers.boot_and_delete_server': [
                {'args': {'flavor_id': 1, 'image_id': 'img'},
                 'execution': 'continuous',
                 'config': {'times': 10, 'active_users': 2,
                            'tenants': 3, 'users_per_tenant': 2}}
            ]
        }
        self.valid_test_config_continuous_duration = {
            'NovaServers.boot_and_delete_server': [
                {'args': {'flavor_id': 1, 'image_id': 'img'},
                 'execution': 'continuous',
                 'config': {'duration': 4, 'active_users': 2,
                            'tenants': 3, 'users_per_tenant': 2}}
            ]
        }
        self.invalid_test_config_bad_execution_type = {
            'NovaServers.boot_and_delete_server': [
                {'args': {'flavor_id': 1, 'image_id': 'img'},
                 'execution': 'contitnuous',
                 'config': {'times': 10, 'active_users': 2,
                            'tenants': 3, 'users_per_tenant': 2}}
            ]
        }
        self.invalid_test_config_bad_config_parameter = {
            'NovaServers.boot_and_delete_server': [
                {'args': {'flavor_id': 1, 'image_id': 'img'},
                 'execution': 'continuous',
                 'config': {'times': 10, 'activeusers': 2,
                            'tenants': 3, 'users_per_tenant': 2}}
            ]
        }
        self.invalid_test_config_parameters_conflict = {
            'NovaServers.boot_and_delete_server': [
                {'args': {'flavor_id': 1, 'image_id': 'img'},
                 'execution': 'continuous',
                 'config': {'times': 10, 'duration': 100,
                            'tenants': 3, 'users_per_tenant': 2}}
            ]
        }
        self.invalid_test_config_bad_param_for_periodic = {
            'NovaServers.boot_and_delete_server': [
                {'args': {'flavor_id': 1, 'image_id': 'img'},
                 'execution': 'periodic',
                 'config': {'times': 10, 'active_users': 3,
                            'tenants': 3, 'users_per_tenant': 2}}
            ]
        }
        self.valid_endpoints = [{
            'auth_url': 'http://127.0.0.1:5000/v2.0',
            'username': 'admin',
            'password': 'admin',
            'tenant_name': 'admin',
            'permission': consts.EndpointPermission.ADMIN
        }]

    @mock.patch("rally.benchmark.engine.jsonschema.validate")
    def test_validate(self, mock_json_validate):
        config = mock.MagicMock()
        eng = engine.BenchmarkEngine(config, mock.MagicMock())
        mock_validate = mock.MagicMock()

        self.run_success = {'msg': 'msg', 'status': 0, 'proc_name': 'proc'}
        eng._validate_config_scenarios_name = mock_validate.names
        eng._validate_config_syntax = mock_validate.syntax
        eng._validate_config_semantic = mock_validate.semantic

    def test__validate_config(self):
        try:
            engine.BenchmarkEngine(self.valid_test_config_continuous_times,
                                   mock.MagicMock())
            engine.BenchmarkEngine(self.valid_test_config_continuous_duration,
                                   mock.MagicMock())
        except Exception as e:
            self.fail("Unexpected exception in test config" +
                      "verification: %s" % str(e))
        self.assertRaises(exceptions.InvalidConfigException,
                          engine.BenchmarkEngine,
                          self.invalid_test_config_bad_execution_type,
                          mock.MagicMock())
        self.assertRaises(exceptions.InvalidConfigException,
                          engine.BenchmarkEngine,
                          self.invalid_test_config_bad_config_parameter,
                          mock.MagicMock())
        self.assertRaises(exceptions.InvalidConfigException,
                          engine.BenchmarkEngine,
                          self.invalid_test_config_parameters_conflict,
                          mock.MagicMock())
        self.assertRaises(exceptions.InvalidConfigException,
                          engine.BenchmarkEngine,
                          self.invalid_test_config_bad_param_for_periodic,
                          mock.MagicMock())
        eng.validate()

        expected_calls = [
            mock.call.names(config),
            mock.call.syntax(config),
            mock.call.semantic(config)
        ]
        mock_validate.assert_has_calls(expected_calls)
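The call order asserted here is the heart of the refactoring: cheap checks (scenario names, then static runner/context schemas) run before the expensive semantic step that needs live clients, and any failure marks the task instead of crashing the caller. A toy, stdlib-only model of that control flow (ToyEngine and InvalidTaskError are illustrative names, not Rally's):

class InvalidTaskError(Exception):
    pass


class ToyEngine(object):
    """Toy model of the fail-fast validation ordering asserted above."""

    def __init__(self, config, task):
        self.config = config
        self.task = task

    def validate(self):
        try:
            self._check_names(self.config)     # 1) do the scenario names exist?
            self._check_syntax(self.config)    # 2) static runner/context schemas
            self._check_semantic(self.config)  # 3) scenario args; needs clients
        except Exception as e:
            self.task.set_failed()             # record the failure on the task
            raise InvalidTaskError(str(e))

    def _check_names(self, config):
        pass  # stand-in; the real checks live in the engine

    def _check_syntax(self, config):
        pass

    def _check_semantic(self, config):
        pass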

    def test_validate__wrong_schema(self):
        config = {
            "wrong": True
        }
        task = mock.MagicMock()
        eng = engine.BenchmarkEngine(config, task)
        self.assertRaises(exceptions.InvalidTaskException,
                          eng.validate)
        task.set_failed.assert_called_once()

    @mock.patch("rally.benchmark.engine.jsonschema.validate")
    def test_validate__wrong_scenarios_name(self, mova_validate):
        task = mock.MagicMock()
        eng = engine.BenchmarkEngine(mock.MagicMock(), task)
        eng._validate_config_scenarios_name = \
            mock.MagicMock(side_effect=exceptions.NotFoundScenarios)

        self.assertRaises(exceptions.InvalidTaskException, eng.validate)
        task.set_failed.assert_called_once()

    @mock.patch("rally.benchmark.engine.jsonschema.validate")
    def test_validate__wrong_syntax(self, mova_validate):
        task = mock.MagicMock()
        eng = engine.BenchmarkEngine(mock.MagicMock(), task)
        eng._validate_config_scenarios_name = mock.MagicMock()
        eng._validate_config_syntax = \
            mock.MagicMock(side_effect=exceptions.InvalidBenchmarkConfig)

        self.assertRaises(exceptions.InvalidTaskException, eng.validate)
        task.set_failed.assert_called_once()

    @mock.patch("rally.benchmark.engine.jsonschema.validate")
    def test_validate__wrong_semantic(self, mova_validate):
        task = mock.MagicMock()
        eng = engine.BenchmarkEngine(mock.MagicMock(), task)
        eng._validate_config_scenarios_name = mock.MagicMock()
        eng._validate_config_syntax = mock.MagicMock()
        eng._validate_config_semantic = \
            mock.MagicMock(side_effect=exceptions.InvalidBenchmarkConfig)

        self.assertRaises(exceptions.InvalidTaskException, eng.validate)
        task.set_failed.assert_called_once()

    @mock.patch("rally.benchmark.engine.base_scenario.Scenario")
    def test__validate_config_scenarios_name(self, mock_scenario):
        config = {
            "a": [],
            "b": []
        }
        mock_scenario.list_benchmark_scenarios.return_value = ["e", "b", "a"]
        eng = engine.BenchmarkEngine(config, mock.MagicMock())
        eng._validate_config_scenarios_name(config)

    @mock.patch("rally.benchmark.engine.base_scenario.Scenario")
    def test__validate_config_scenarios_name_non_exsisting(self,
                                                           mock_scenario):
        config = {
            "exist": [],
            "nonexist1": [],
            "nonexist2": []
        }
        mock_scenario.list_benchmark_scenarios.return_value = ["exist", "aaa"]
        eng = engine.BenchmarkEngine(config, mock.MagicMock())

        self.assertRaises(exceptions.NotFoundScenarios,
                          eng._validate_config_scenarios_name, config)

    @mock.patch("rally.benchmark.engine.base_runner.ScenarioRunner.validate")
    @mock.patch("rally.benchmark.engine.base_ctx.Context.validate")
    def test__validate_config_syntax(self, mock_context, mock_runner):
        config = {"sca": [{"context": "a"}], "scb": [{"runner": "b"}]}
        eng = engine.BenchmarkEngine(mock.MagicMock(), mock.MagicMock())
        eng._validate_config_syntax(config)
        mock_runner.assert_has_calls([mock.call({}), mock.call("b")])
        mock_context.assert_has_calls([mock.call("a"), mock.call({})])
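As the mocks above record, syntax validation walks every benchmark entry and hands its "runner" and "context" sections, defaulting to {} when absent, to the corresponding base-class validators, converting any jsonschema.ValidationError into a config error. A self-contained sketch with stand-in schemas (the real per-plugin schemas come from the runner and context classes):

import jsonschema

RUNNER_SCHEMA = {"type": "object"}   # placeholders for the plugin schemas
CONTEXT_SCHEMA = {"type": "object"}


def check_syntax(config):
    # Absent sections default to {}, which is why the assertions above
    # see mock.call({}) alongside the explicit "a"/"b" values.
    for name, benchmarks in config.items():
        for pos, kw in enumerate(benchmarks):
            try:
                jsonschema.validate(kw.get("runner", {}), RUNNER_SCHEMA)
                jsonschema.validate(kw.get("context", {}), CONTEXT_SCHEMA)
            except jsonschema.ValidationError as e:
                raise ValueError("%s[%d]: %s" % (name, pos, e.message))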

    @mock.patch("rally.benchmark.engine.base_runner.ScenarioRunner")
    @mock.patch("rally.benchmark.engine.base_ctx.Context.validate")
    def test__validate_config_syntax__wrong_runner(self, mock_context,
                                                   mock_runner):
        config = {"sca": [{"context": "a"}], "scb": [{"runner": "b"}]}
        eng = engine.BenchmarkEngine(mock.MagicMock(), mock.MagicMock())

        mock_runner.validate = mock.MagicMock(
            side_effect=jsonschema.ValidationError("a"))
        self.assertRaises(exceptions.InvalidBenchmarkConfig,
                          eng._validate_config_syntax, config)

    @mock.patch("rally.benchmark.engine.base_runner.ScenarioRunner.validate")
    @mock.patch("rally.benchmark.engine.base_ctx.Context")
    def test__validate_config_syntax__wrong_context(self, mock_context,
                                                    mock_runner):
        config = {"sca": [{"context": "a"}], "scb": [{"runner": "b"}]}
        eng = engine.BenchmarkEngine(mock.MagicMock(), mock.MagicMock())

        mock_context.validate = mock.MagicMock(
            side_effect=jsonschema.ValidationError("a"))
        self.assertRaises(exceptions.InvalidBenchmarkConfig,
                          eng._validate_config_syntax, config)

    @mock.patch("rally.benchmark.engine.base_scenario.Scenario.validate")
    def test__validate_config_semantic_helper(self, mock_validate):
        eng = engine.BenchmarkEngine(mock.MagicMock(), mock.MagicMock())
        eng._validate_config_sematic_helper("admin", "user", "name", "pos",
                                            {"args": "args"})
        mock_validate.assert_called_once_with("name", "args", admin="admin",
                                              users=["user"])

    @mock.patch("rally.benchmark.engine.base_scenario.Scenario.validate")
    def test__validate_config_semanitc_helper_invalid_arg(self, mock_validate):
        mock_validate.side_effect = exceptions.InvalidScenarioArgument()
        eng = engine.BenchmarkEngine(mock.MagicMock(), mock.MagicMock())

        self.assertRaises(exceptions.InvalidBenchmarkConfig,
                          eng._validate_config_sematic_helper, "a", "u", "n",
                          "p", {})

    @mock.patch("rally.benchmark.scenarios.base.Scenario.get_by_name")
    @mock.patch("rally.benchmark.engine.users_ctx")
    @mock.patch("rally.benchmark.engine.osclients.Clients")
    @mock.patch("rally.benchmark.engine.BenchmarkEngine._validate_config")
    def test__validate_scenario_args(self, mock_validate_config,
                                     mock_clients,
                                     mock_user_ctxt,
                                     mock_scenario_get_by_name):
    @mock.patch("rally.benchmark.engine.users_ctx")
    @mock.patch("rally.benchmark.engine.BenchmarkEngine"
                "._validate_config_sematic_helper")
    def test__validate_config_sematic(self, mock_helper, mock_userctx,
                                      mock_osclients):
        mock_userctx.UserGenerator = fakes.FakeUserContext
        mock_osclients.return_value = mock.MagicMock()
        config = {
            "a": [mock.MagicMock(), mock.MagicMock()],
            "b": [mock.MagicMock()]
        }

        @validation.requires_permission(consts.EndpointPermission.ADMIN)
        def validator_admin(**kwargs):
            return validation.ValidationResult()
        eng = engine.BenchmarkEngine(config, mock.MagicMock())
        eng.admin_endpoint = "admin"

        @validation.requires_permission(consts.EndpointPermission.USER)
        def validator_user(**kwargs):
            return validation.ValidationResult()
        eng._validate_config_semantic(config)

        FakeScenario = mock.MagicMock()
        FakeScenario.do_it.validators = [validator_admin, validator_user]
        mock_scenario_get_by_name.return_value = FakeScenario
        expected_calls = [
            mock.call("admin"),
            mock.call(fakes.FakeUserContext.user["endpoint"])
        ]
        mock_osclients.assert_has_calls(expected_calls)

        mock_user_ctxt.UserGenerator = fakes.FakeUserContext
        admin = user = mock_osclients.return_value
        expected_calls = [
            mock.call(admin, user, "a", 0, config["a"][0]),
            mock.call(admin, user, "a", 1, config["a"][1]),
            mock.call(admin, user, "b", 0, config["b"][0])
        ]
        mock_helper.assert_has_calls(expected_calls)
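This test also captures the client-creation optimization from the change: the admin and user clients are constructed once and then reused for every (name, pos, kw) triple, rather than being recreated per scenario entry. A hedged sketch with the dependencies injected; make_client and helper are placeholders, not Rally's real call signatures:

def check_semantic(config, admin_endpoint, user_endpoint, make_client, helper):
    # Build each client exactly once, then reuse it across all benchmarks,
    # which is what the single pair of mock_osclients calls above records.
    admin = make_client(admin_endpoint)
    user = make_client(user_endpoint)
    for name, benchmarks in config.items():
        for pos, kw in enumerate(benchmarks):
            helper(admin, user, name, pos, kw)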

    def test_run__update_status(self):
        task = mock.MagicMock()
        eng = engine.BenchmarkEngine([], task)
        results = eng.run()
        self.assertEqual(results, {})
        task.update_status.assert_has_calls([
            mock.call(consts.TaskStatus.RUNNING),
            mock.call(consts.TaskStatus.FINISHED)
        ])

    @mock.patch("rally.benchmark.engine.endpoint.Endpoint")
    @mock.patch("rally.benchmark.engine.osclients")
    @mock.patch("rally.benchmark.engine.base_runner.ScenarioRunner")
    def test_run__config_has_args(self, mock_endpoint, mock_osclients,
                                  mock_runner):
        config = {
            "a.args": [{"args": {"a": "a", "b": 1}}],
            "b.args": [{"args": {"a": 1}}]
        }
        task = mock.MagicMock()
        eng = engine.BenchmarkEngine(config, task).bind([{}])
        eng.run()

    @mock.patch("rally.benchmark.engine.endpoint.Endpoint")
    @mock.patch("rally.benchmark.engine.osclients")
    @mock.patch("rally.benchmark.engine.base_runner.ScenarioRunner")
    def test_run__config_has_runner(self, mock_endpoint, mock_osclients,
                                    mock_runner):
        config = {
            "a.args": [{"runner": {"type": "a", "b": 1}}],
            "b.args": [{"runner": {"a": 1}}]
        }
        task = mock.MagicMock()
        eng = engine.BenchmarkEngine(config, task).bind([{}])
        eng.run()

    @mock.patch("rally.benchmark.engine.endpoint.Endpoint")
    @mock.patch("rally.benchmark.engine.osclients")
    @mock.patch("rally.benchmark.engine.base_runner.ScenarioRunner")
    def test_run__config_has_context(self, mock_endpoint, mock_osclients,
                                     mock_runner):
        config = {
            "a.args": [{"context": {"context_a": {"a": 1}}}],
            "b.args": [{"context": {"context_b": {"b": 2}}}]
        }
        task = mock.MagicMock()
        eng = engine.BenchmarkEngine(config, task).bind([{}])
        eng.run()

    @mock.patch("rally.benchmark.engine.osclients")
    @mock.patch("rally.benchmark.engine.endpoint.Endpoint")
    def test_bind(self, mock_endpoint, mock_osclients):
        mock_endpoint.return_value = mock.MagicMock()
        benchmark_engine = engine.BenchmarkEngine(mock.MagicMock(),
                                                  mock.MagicMock())
        benchmark_engine.admin_endpoint = "admin"

        benchmark_engine._validate_scenario_args("FakeScenario.do_it", {})

        expected = [mock.call("admin"),
                    mock.call(fakes.FakeUserContext.user["endpoint"])]
        mock_clients.assert_has_calls(expected, any_order=True)

    @mock.patch("rally.benchmark.scenarios.base.Scenario.get_by_name")
    @mock.patch("rally.benchmark.engine.users_ctx")
    @mock.patch("rally.benchmark.engine.osclients.Clients")
    @mock.patch("rally.benchmark.engine.BenchmarkEngine._validate_config")
    def test__validate_scenario_args_failure(self, mock_validate_config,
                                             mock_create_os_clients,
                                             mock_user_ctxt,
                                             mock_scenario_get_by_name):

        @validation.requires_permission(consts.EndpointPermission.ADMIN)
        def evil_validator_admin(**kwargs):
            return validation.ValidationResult(is_valid=False)

        FakeScenario = mock.MagicMock()
        FakeScenario.do_it.validators = [evil_validator_admin]
        mock_scenario_get_by_name.return_value = FakeScenario

        benchmark_engine = engine.BenchmarkEngine(mock.MagicMock(),
                                                  mock.MagicMock())
        benchmark_engine.admin_endpoint = "admin"

        self.assertRaises(exceptions.InvalidScenarioArgument,
                          benchmark_engine._validate_scenario_args,
                          "FakeScenario.do_it", {})

        mock_user_ctxt.UserGenerator = fakes.FakeUserContext

        @validation.requires_permission(consts.EndpointPermission.USER)
        def evil_validator_user(**kwargs):
            return validation.ValidationResult(is_valid=False)

        FakeScenario.do_it.validators = [evil_validator_user]

        self.assertRaises(exceptions.InvalidScenarioArgument,
                          benchmark_engine._validate_scenario_args,
                          "FakeScenario.do_it", {})

    @mock.patch("rally.benchmark.engine.osclients")
    def test_bind(self, mock_osclients):
        mock_osclients.Clients.return_value = fakes.FakeClients()
        benchmark_engine = engine.BenchmarkEngine(
            self.valid_test_config_continuous_times, mock.MagicMock())
        with benchmark_engine.bind(self.valid_endpoints):
            endpoint_dicts = [endpoint.to_dict(include_permission=True)
                              for endpoint in benchmark_engine.endpoints]
            self.assertEqual(endpoint_dicts, self.valid_endpoints)

    @mock.patch("rally.benchmark.engine.BenchmarkEngine."
                "_validate_scenario_args")
    @mock.patch("rally.benchmark.runners.base.ScenarioRunner.run")
    @mock.patch("rally.benchmark.runners.base.osclients")
    @mock.patch("rally.benchmark.engine.osclients")
    def test_run(self, mock_engine_osclients, mock_utils_osclients, mock_run,
                 mock_validate_scenario_args):
        mock_engine_osclients.Clients.return_value = fakes.FakeClients()
        mock_utils_osclients.Clients.return_value = fakes.FakeClients()
        benchmark_engine = engine.BenchmarkEngine(
            self.valid_test_config_continuous_times, mock.MagicMock())
        with benchmark_engine.bind(self.valid_endpoints):
            benchmark_engine.run()

    @mock.patch("rally.benchmark.engine.BenchmarkEngine."
                "_validate_scenario_args")
    @mock.patch("rally.benchmark.runners.base.ScenarioRunner.run")
    @mock.patch("rally.benchmark.runners.base.osclients")
    @mock.patch("rally.benchmark.engine.osclients")
    def test_task_status_basic_chain(self, mock_engine_osclients,
                                     mock_utils_osclients, mock_scenario_run,
                                     mock_validate_scenario_args):
        fake_task = mock.MagicMock()
        benchmark_engine = engine.BenchmarkEngine(
            self.valid_test_config_continuous_times, fake_task)
        mock_engine_osclients.Clients.return_value = fakes.FakeClients()
        mock_utils_osclients.Clients.return_value = fakes.FakeClients()
        mock_scenario_run.return_value = {}
        with benchmark_engine.bind(self.valid_endpoints):
            benchmark_engine.run()

        benchmark_name = 'NovaServers.boot_and_delete_server'
        benchmark_results = {
            'name': benchmark_name, 'pos': 0,
            'kw': self.valid_test_config_continuous_times[benchmark_name][0],
        endpoint = {
            "auth_url": "http://valid.com",
            "username": "user",
            "password": "pwd",
            "tenant_name": "tenant"
        }

        s = consts.TaskStatus
        expected = [
            mock.call.update_status(s.TEST_TOOL_BENCHMARKING),
            mock.call.append_results(benchmark_results, {'raw': {},
                                     'validation': {'is_valid': True}}),
            mock.call.update_status(s.FINISHED)
        binded_benchmark_engine = benchmark_engine.bind([endpoint])
        self.assertEqual([mock_endpoint.return_value],
                         benchmark_engine.endpoints)
        self.assertEqual(benchmark_engine, binded_benchmark_engine)
        expected_calls = [
            mock.call.Clients(mock_endpoint.return_value),
            mock.call.Clients().verified_keystone()
        ]
        # NOTE(msdubov): Ignore task['uuid'] calls which are used for logging
        mock_calls = filter(lambda call: '__getitem__' not in call[0],
                            fake_task.mock_calls)
        self.assertEqual(mock_calls, expected)

    @mock.patch("rally.benchmark.engine.BenchmarkEngine."
                "_validate_scenario_args")
    @mock.patch("rally.benchmark.runners.base.ScenarioRunner.run")
    @mock.patch("rally.benchmark.runners.base.osclients")
    @mock.patch("rally.benchmark.engine.osclients")
    def test_task_status_basic_chain_validation_fails(self,
                                                      mock_engine_osclients,
                                                      mock_utils_osclients,
                                                      mock_scenario_run,
                                                      mock_validate_sc_args):
        fake_task = mock.MagicMock()
        benchmark_engine = engine.BenchmarkEngine(
            self.valid_test_config_continuous_times, fake_task)
        mock_engine_osclients.Clients.return_value = fakes.FakeClients()
        mock_utils_osclients.Clients.return_value = fakes.FakeClients()
        validation_exc = exceptions.InvalidScenarioArgument()
        mock_validate_sc_args.side_effect = validation_exc

        with benchmark_engine.bind(self.valid_endpoints):
            benchmark_engine.run()

        benchmark_name = 'NovaServers.boot_and_delete_server'
        benchmark_results = {
            'name': benchmark_name, 'pos': 0,
            'kw': self.valid_test_config_continuous_times[benchmark_name][0],
        }

        s = consts.TaskStatus
        expected = [
            mock.call.update_status(s.TEST_TOOL_BENCHMARKING),
            mock.call.append_results(benchmark_results,
                                     {'raw': [],
                                      'validation': {'is_valid': False,
                                                     'exc_msg': validation_exc.message}}),
            mock.call.set_failed(),
            mock.call.update_status(s.FINISHED)
        ]
        # NOTE(msdubov): Ignore task['uuid'] calls which are used for logging
        mock_calls = filter(lambda call: '__getitem__' not in call[0],
                            fake_task.mock_calls)
        self.assertEqual(mock_calls, expected)

    @mock.patch("rally.benchmark.engine.BenchmarkEngine."
                "_validate_scenario_args")
    @mock.patch("rally.benchmark.runners.base.ScenarioRunner.run")
    @mock.patch("rally.benchmark.runners.base.osclients")
    @mock.patch("rally.benchmark.engine.osclients")
    def test_task_status_failed(self, mock_engine_osclients,
                                mock_utils_osclients, mock_scenario_run,
                                mock_validate_scenario_args):
        fake_task = mock.MagicMock()
        benchmark_engine = engine.BenchmarkEngine(
            self.valid_test_config_continuous_times, fake_task)
        mock_engine_osclients.Clients.return_value = fakes.FakeClients()
        mock_utils_osclients.Clients.return_value = fakes.FakeClients()
        mock_scenario_run.side_effect = exceptions.TestException()
        try:
            with benchmark_engine.bind(self.valid_endpoints):
                benchmark_engine.run()
        except exceptions.TestException:
            pass

        s = consts.TaskStatus
        expected = [
            mock.call.update_status(s.TEST_TOOL_BENCHMARKING),
            mock.call.set_failed(),
        ]
        # NOTE(msdubov): Ignore task['uuid'] calls which are used for logging
        mock_calls = filter(lambda call: '__getitem__' not in call[0],
                            fake_task.mock_calls)
        self.assertEqual(mock_calls, expected)
        mock_osclients.assert_has_calls(expected_calls)
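Taken together, the status assertions above reduce to one small pattern: RUNNING then FINISHED on success, set_failed() on any exception. A toy version of that chain (plain status strings stand in for consts.TaskStatus):

def run_with_status(task, run_callable):
    # Success path drives RUNNING -> FINISHED; any exception leaves the
    # task marked failed, matching test_run__update_status and
    # test_task_status_failed above.
    task.update_status("running")
    try:
        results = run_callable()
    except Exception:
        task.set_failed()
        raise
    task.update_status("finished")
    return results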
@ -69,7 +69,9 @@ class TaskCommandsTestCase(test.BaseTestCase):
    @mock.patch('rally.cmd.commands.task.db')
    def test_detailed(self, mock_db):
        test_uuid = str(uuid.uuid4())
        value = {'task_id': "task",
        value = {
            "id": "task",
            "uuid": test_uuid,
            "status": "status",
            "results": [],
            "failed": False
0
tests/doc/__init__.py
Normal file
56
tests/doc/test_task_samples.py
Normal file
@ -0,0 +1,56 @@
# Copyright 2014: Mirantis Inc.
# All Rights Reserved.
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#        http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.

import mock
import os
import traceback

import yaml

from rally.benchmark import engine
from tests import test


class TaskSampleTestCase(test.TestCase):

    @mock.patch("rally.benchmark.engine.BenchmarkEngine"
                "._validate_config_semantic")
    def test_schema_is_valid(self, mock_semantic):
        samples_path = os.path.join(os.path.dirname(__file__), "..", "..",
                                    "doc", "samples", "tasks")

        scenarios = set()

        for dirname, dirnames, filenames in os.walk(samples_path):
            for filename in filenames:
                full_path = os.path.join(dirname, filename)

                with open(full_path) as task_file:
                    try:
                        task_config = yaml.safe_load(task_file.read())
                        eng = engine.BenchmarkEngine(task_config,
                                                     mock.MagicMock())
                        eng.validate()
                    except Exception:
                        print(traceback.format_exc())
                        self.assertTrue(False,
                                        "Wrong task config %s" % full_path)
                    else:
                        scenarios.update(task_config.keys())

        # TODO(boris-42): We should refactor scenarios framework add "_" to
        #                 all non-benchmark methods.. Then this test will pass.
        #missing = set(base.Scenario.list_benchmark_scenarios()) - scenarios
        #self.assertEqual(missing, [],
        #                 "These scenarios don't have samples: %s" % missing)
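The walker above feeds every sample, .json and .yaml alike, through yaml.safe_load; that works because the JSON used in the samples is itself valid YAML, so a single loader covers both trees. For instance (the scenario name here is a dummy, not one of the real samples):

import yaml

text = '{"Dummy.dummy": [{"runner": {"type": "fake"}}]}'
# Plain JSON parses cleanly through the YAML loader.
assert yaml.safe_load(text) == {"Dummy.dummy": [{"runner": {"type": "fake"}}]}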
@ -486,6 +486,29 @@ class FakeClients(object):
        return self._cinder


class FakeRunner(object):

    CONFIG_SCHEMA = {
        "type": "object",
        "$schema": "http://json-schema.org/draft-04/schema#",
        "properties": {
            "type": {
                "type": "string",
                "enum": ["fake"]
            },

            "a": {
                "type": "string"
            },

            "b": {
                "type": "number"
            }
        },
        "required": ["type", "a"]
    }
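To see what this fake schema accepts, it can be fed to jsonschema directly: "type" and "a" are required, "b" is optional. A self-contained check, with the schema repeated inline for brevity:

import jsonschema

schema = {
    "type": "object",
    "properties": {
        "type": {"type": "string", "enum": ["fake"]},
        "a": {"type": "string"},
        "b": {"type": "number"},
    },
    "required": ["type", "a"],
}

# Passes: both required keys present, optional "b" is a number.
jsonschema.validate({"type": "fake", "a": "x", "b": 1.5}, schema)

try:
    jsonschema.validate({"type": "fake", "b": 1.5}, schema)  # "a" missing
except jsonschema.ValidationError as e:
    print(e.message)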


class FakeScenario(base.Scenario):

    def idle_time(self):
@ -509,6 +532,19 @@ class FakeTimer(rally_utils.Timer):


class FakeContext(base_ctx.Context):

    __ctx_name__ = "fake"

    CONFIG_SCHEMA = {
        "type": "object",
        "$schema": "http://json-schema.org/draft-04/schema#",
        "properties": {
            "test": {
                "type": "integer"
            },
        },
        "additionalProperties": False
    }

    def setup(self):
        pass
@ -15,6 +15,7 @@
"""Tests for db.task layer."""

import json
import mock
import uuid

@ -107,10 +108,10 @@ class TaskTestCase(test.TestCase):
    def test_update_verification_log(self, mock_update):
        mock_update.return_value = self.task
        task = objects.Task(task=self.task)
        task.update_verification_log('fake')
        task.update_verification_log({"a": "fake"})
        mock_update.assert_called_once_with(
            self.task['uuid'],
            {'verification_log': 'fake'},
            {'verification_log': json.dumps({"a": "fake"})}
        )
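The updated assertion encodes the new storage convention: update_verification_log JSON-serializes its argument before it reaches the database, so structured validation results survive the round trip through the text column. The default empty value serializes to '""', which is exactly what the set_failed hunk below expects:

import json

payload = {"a": "fake"}
stored = json.dumps(payload)           # what lands in task['verification_log']
assert json.loads(stored) == payload   # recovered intact when read back
assert json.dumps("") == '""'          # the default seen in set_failed below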

    @mock.patch('rally.objects.task.db.task_result_create')
@ -127,5 +128,5 @@ class TaskTestCase(test.TestCase):
        task.set_failed()
        mock_update.assert_called_once_with(
            self.task['uuid'],
            {'failed': True, 'status': 'failed'},
            {'failed': True, 'status': 'failed', 'verification_log': '""'},
        )
@ -40,13 +40,17 @@ FAKE_TASK_CONFIG = {
    'FakeScenario.fake': [
        {
            'args': {},
            'execution': 'continuous',
            'config': {
            'runner': {
                'type': 'continuous',
                'timeout': 10000,
                'times': 1,
                'active_users': 1,
                'tenants': 1,
                'users_per_tenant': 1,
                'times': 3,
                'active_users': 2,
            },
            'context': {
                'users': {
                    'tenants': 5,
                    'users_per_tenant': 6,
                }
            }
        }
    ]
@ -93,8 +97,12 @@ class APITestCase(test.TestCase):
        mock_task.assert_called_once_with(deployment_uuid=deployment_uuid,
                                          tag=tag)

    @mock.patch("rally.benchmark.engine.BenchmarkEngine."
                "_validate_scenario_args")
    @mock.patch("rally.benchmark.engine.BenchmarkEngine"
                "._validate_config_semantic")
    @mock.patch("rally.benchmark.engine.BenchmarkEngine"
                "._validate_config_syntax")
    @mock.patch("rally.benchmark.engine.BenchmarkEngine"
                "._validate_config_scenarios_name")
    @mock.patch('rally.benchmark.engine.osclients')
    @mock.patch('rally.benchmark.engine.base_runner.ScenarioRunner.get_runner')
    @mock.patch('rally.objects.deploy.db.deployment_get')
@ -104,7 +112,8 @@
    def test_start_task(self, mock_task_create, mock_task_update,
                        mock_task_result_create, mock_deploy_get,
                        mock_utils_runner, mock_osclients,
                        mock_validate_scenario_args):
                        mock_validate_names, mock_validate_syntax,
                        mock_validate_semantic):
        mock_task_create.return_value = self.task
        mock_task_update.return_value = self.task
        mock_deploy_get.return_value = self.deployment
@ -121,8 +130,9 @@
            'deployment_uuid': self.deploy_uuid,
        })
        mock_task_update.assert_has_calls([
            mock.call(self.task_uuid,
                      {'status': 'test_tool->benchmarking'})
            mock.call(self.task_uuid, {'status': consts.TaskStatus.VERIFYING}),
            mock.call(self.task_uuid, {'status': consts.TaskStatus.RUNNING}),
            mock.call(self.task_uuid, {'status': consts.TaskStatus.FINISHED})
        ])
        # NOTE(akscram): It looks really awful, but checks degradation.
        mock_task_result_create.assert_called_once_with(
@ -130,22 +140,25 @@
            {
                'kw': {
                    'args': {},
                    'execution': 'continuous',
                    'config': {
                    'runner': {
                        'type': 'continuous',
                        'timeout': 10000,
                        'times': 1,
                        'active_users': 1,
                        'tenants': 1,
                        'users_per_tenant': 1,
                        'times': 3,
                        'active_users': 2,
                    },
                    'context': {
                        'users': {
                            'tenants': 5,
                            'users_per_tenant': 6,
                        }
                    }
                },
                'name': 'FakeScenario.fake',
                'pos': 0,
            },
            {
                'raw': ['fake_result'],
                'validation': {'is_valid': True},
            },
                'raw': ['fake_result']
            }
        )
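The widened mock list and the VERIFYING -> RUNNING -> FINISHED chain trace the refactored orchestrator flow: verify explicitly, return quietly on a validation error (the result already lives in the task's verification_log), and flag the deployment on anything unexpected. A toy model under those assumptions; every name here is illustrative, not the orchestrator's real API:

class InvalidTaskError(Exception):
    pass


def start_task(deployment, engine, task):
    try:
        task.update_status("verifying")
        engine.validate()
    except InvalidTaskError:
        # Validation failure was already recorded on the task, so the
        # API call does not re-raise it to the caller.
        return
    except Exception:
        # Anything unexpected leaves the deployment flagged.
        deployment.update_status("inconsistent")
        raise
    engine.run()  # drives RUNNING -> FINISHED on the task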

    def test_abort_task(self):