7be9c10679
Not long ago we enabled a rally scenario booting VMs in the neutron gate
so we can collect osprofiler reports about it. The rally scenario we
re-used was only triggered by changes in the rally-openstack repo, so I
could not collect data about its failure rate. Now that it is running
frequently in the neutron gate, this scenario actually seems to be quite
unstable (usually timing out while waiting for the VM to become ACTIVE):

http://logstash.openstack.org/#/dashboard/file/logstash.json?query=message:\"rally.exceptions.TimeoutException: Rally tired waiting\" AND build_name:\"neutron-rally-task\" AND voting:1&from=864000s

Since we only want to run this scenario for the osprofiler report, we
can get rid of the gate instability by allowing a 100% failure rate in
the scenario SLA.

Change-Id: Ied354e8242274c8eeb26909e29afbe6d41662bfc
Related-Change: https://review.opendev.org/662804
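In Rally's task format this amounts to a single SLA setting on the
affected workload; a minimal sketch of the block in question (the full
context appears at the end of the task file below):

    sla:
      failure_rate:
        max: 100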
502 lines | 14 KiB | YAML
{% set floating_network = floating_network or "public" %}
{% set image_name = "^(cirros.*-disk|TestVM)$" %}
{% set flavor_name = "m1.tiny" %}
---
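# NOTE: the three Jinja2 variables above are template defaults; Rally
# renders task files through Jinja2, so a deployment can override them
# at run time (for example via "rally task start --task-args").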
  version: 2
  title: Rally Task for OpenStack Neutron CI
  description: >
    The task contains various scenarios to prevent concurrency issues
  subtasks:
    -
      title: Network related workloads.
      workloads:
        -
          description: >
            Check performance of list_networks action and ensure
            network quotas are not exceeded
          scenario:
            NeutronNetworks.create_and_list_networks: {}
          runner:
            constant:
              times: 100
              concurrency: 20
          contexts:
            users:
              tenants: 1
              users_per_tenant: 1
            quotas:
              neutron:
                # worst case is other 19 writers have created
                # resources, but quota reservation hasn't cleared
                # yet on any of them. This value could be 100
                # without concurrency. see bug/1623390
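                # (worst case = times (100) + (concurrency - 1) = 119)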
                network: 119
          sla:
            max_avg_duration_per_atomic:
              neutron.list_networks: 15  # reduce as perf is fixed
            failure_rate:
              max: 0
        -
          description: Check network update action
          scenario:
            NeutronNetworks.create_and_update_networks:
              network_create_args: {}
              network_update_args:
                admin_state_up: False
                name: "_updated"
          runner:
            constant:
              times: 40
              concurrency: 20
          contexts:
            users:
              tenants: 1
              users_per_tenant: 1
            quotas:
              neutron:
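                # NOTE: a quota of -1 means "unlimited", so these
                # workloads cannot fail on quota exhaustion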
                network: -1
        -
          scenario:
            NeutronNetworks.create_and_delete_networks: {}
          runner:
            constant:
              times: 40
              concurrency: 20
          contexts:
            users:
              tenants: 1
              users_per_tenant: 1
            quotas:
              neutron:
                network: -1
                subnet: -1
    -
      title: Subnet related workloads.
      workloads:
        -
          scenario:
            NeutronNetworks.create_and_list_subnets:
              subnets_per_network: 2
          runner:
            constant:
              times: 40
              concurrency: 20
          contexts:
            users:
              tenants: 1
              users_per_tenant: 1
            quotas:
              neutron:
                subnet: -1
                network: -1
        -
          scenario:
            NeutronNetworks.create_and_update_subnets:
              network_create_args: {}
              subnet_create_args: {}
              subnet_cidr_start: "1.4.0.0/16"
              subnets_per_network: 2
              subnet_update_args:
                enable_dhcp: True
                name: "_subnet_updated"
          runner:
            constant:
              times: 100
              concurrency: 20
          contexts:
            users:
              tenants: 1
              users_per_tenant: 5
            quotas:
              neutron:
                network: -1
                subnet: -1
                port: -1
        -
          scenario:
            NeutronNetworks.create_and_delete_subnets:
              network_create_args: {}
              subnet_create_args: {}
              subnet_cidr_start: "1.1.0.0/30"
              subnets_per_network: 2
          runner:
            constant:
              times: 40
              concurrency: 20
          contexts:
            users:
              tenants: 1
              users_per_tenant: 1
            quotas:
              neutron:
                network: -1
                subnet: -1
    -
      title: Routers related workloads.
      workloads:
        -
          scenario:
            NeutronNetworks.create_and_list_routers:
              network_create_args:
              subnet_create_args:
              subnet_cidr_start: "1.1.0.0/30"
              subnets_per_network: 2
              router_create_args:
          runner:
            constant:
              times: 40
              concurrency: 20
          contexts:
            users:
              tenants: 1
              users_per_tenant: 1
            quotas:
              neutron:
                network: -1
                subnet: -1
                router: -1
        -
          scenario:
            NeutronNetworks.create_and_update_routers:
              network_create_args: {}
              subnet_create_args: {}
              subnet_cidr_start: "1.1.0.0/30"
              subnets_per_network: 2
              router_create_args: {}
              router_update_args:
                admin_state_up: False
                name: "_router_updated"
          runner:
            constant:
              times: 40
              concurrency: 20
          contexts:
            users:
              tenants: 1
              users_per_tenant: 1
            quotas:
              neutron:
                network: -1
                subnet: -1
                router: -1
        -
          scenario:
            NeutronNetworks.create_and_delete_routers:
              network_create_args: {}
              subnet_create_args: {}
              subnet_cidr_start: "1.1.0.0/30"
              subnets_per_network: 2
              router_create_args: {}
          runner:
            constant:
              times: 40
              concurrency: 20
          contexts:
            users:
              tenants: 1
              users_per_tenant: 1
            quotas:
              neutron:
                network: -1
                subnet: -1
                router: -1
    -
      title: Ports related workloads.
      workloads:
        -
          description: >
            Check performance of list ports action and ensure
            network quotas are not exceeded
          scenario:
            NeutronNetworks.create_and_list_ports:
              network_create_args:
              port_create_args:
              ports_per_network: 50
          runner:
            constant:
              times: 8
              concurrency: 4
          contexts:
            users:
              tenants: 1
              users_per_tenant: 1
            quotas:
              neutron:
                network: -1
                subnet: -1
                router: -1
                # ((ports per net + 1 dhcp) * times) + (concurrency-1)
                # see bug/1623390 for concurrency explanation
                port: 811
          sla:
            max_avg_duration_per_atomic:
              neutron.list_ports: 15  # reduce as perf is fixed
            failure_rate:
              max: 0
        -
          scenario:
            NeutronNetworks.create_and_update_ports:
              network_create_args: {}
              port_create_args: {}
              ports_per_network: 5
              port_update_args:
                admin_state_up: False
                device_id: "dummy_id"
                device_owner: "dummy_owner"
                name: "_port_updated"
          runner:
            constant:
              times: 40
              concurrency: 20
          contexts:
            users:
              tenants: 1
              users_per_tenant: 1
            quotas:
              neutron:
                network: -1
                port: -1
        -
          scenario:
            NeutronNetworks.create_and_delete_ports:
              network_create_args: {}
              port_create_args: {}
              ports_per_network: 5
          runner:
            constant:
              times: 40
              concurrency: 20
          contexts:
            users:
              tenants: 1
              users_per_tenant: 1
            quotas:
              neutron:
                network: -1
                port: -1
    -
      title: Quotas update check
      scenario:
        Quotas.neutron_update:
          max_quota: 1024
      runner:
        constant:
          times: 40
          concurrency: 20
      contexts:
        users:
          tenants: 20
          users_per_tenant: 1
    -
      title: Trunks related workload
      scenario:
        NeutronTrunks.create_and_list_trunks:
          subport_count: 125
      runner:
        constant:
          times: 4
          concurrency: 4
      contexts:
        users:
          tenants: 1
          users_per_tenant: 1
        quotas:
          neutron:
            network: -1
            port: 1000
    -
      title: Floating IP related workloads
      workloads:
        -
          scenario:
            NeutronNetworks.create_and_delete_floating_ips:
              floating_network: {{ floating_network }}
              floating_ip_args: {}
          runner:
            constant:
              times: 10
              concurrency: 5
          contexts:
            users:
              tenants: 2
              users_per_tenant: 3
            quotas:
              neutron:
                floatingip: -1
        -
          scenario:
            NeutronNetworks.create_and_list_floating_ips:
              floating_network: {{ floating_network }}
              floating_ip_args: {}
          runner:
            constant:
              times: 10
              concurrency: 5
          contexts:
            users:
              tenants: 2
              users_per_tenant: 3
            quotas:
              neutron:
                floatingip: -1
        -
          scenario:
            NeutronNetworks.associate_and_dissociate_floating_ips:
              floating_network: {{ floating_network }}
          runner:
            constant:
              times: 10
              concurrency: 5
          contexts:
            users:
              tenants: 2
              users_per_tenant: 3
            quotas:
              neutron:
                floatingip: -1
    -
      title: Security Group Related Scenarios
      workloads:
        -
          scenario:
            NeutronSecurityGroup.create_and_delete_security_group_rule:
              security_group_args: {}
              security_group_rule_args: {}
          runner:
            constant:
              times: 50
              concurrency: 10
          contexts:
            users:
              tenants: 2
              users_per_tenant: 3
            quotas:
              neutron:
                security_group: -1
                security_group_rule: -1
        -
          scenario:
            NeutronSecurityGroup.create_and_delete_security_groups:
              security_group_create_args: {}
          runner:
            constant:
              times: 50
              concurrency: 10
          contexts:
            users:
              tenants: 2
              users_per_tenant: 3
            quotas:
              neutron:
                security_group: -1
        -
          scenario:
            NeutronSecurityGroup.create_and_list_security_group_rules:
              security_group_args: {}
              security_group_rule_args: {}
              security_group_rules_count: 20
          runner:
            constant:
              times: 50
              concurrency: 10
          contexts:
            users:
              tenants: 2
              users_per_tenant: 3
            quotas:
              neutron:
                security_group: -1
                security_group_rule: -1
        -
          scenario:
            NeutronSecurityGroup.create_and_list_security_groups:
              security_group_create_args: {}
          runner:
            constant:
              times: 50
              concurrency: 10
          contexts:
            users:
              tenants: 2
              users_per_tenant: 3
            quotas:
              neutron:
                security_group: -1
        -
          scenario:
            NeutronSecurityGroup.create_and_show_security_group_rule:
              security_group_args: {}
              security_group_rule_args: {}
          runner:
            constant:
              times: 50
              concurrency: 10
          contexts:
            users:
              tenants: 2
              users_per_tenant: 3
            quotas:
              neutron:
                security_group: -1
                security_group_rule: -1
        -
          scenario:
            NeutronSecurityGroup.create_and_show_security_group:
              security_group_create_args: {}
          runner:
            constant:
              times: 50
              concurrency: 10
          contexts:
            users:
              tenants: 2
              users_per_tenant: 3
            quotas:
              neutron:
                security_group: -1
        -
          scenario:
            NeutronSecurityGroup.create_and_update_security_groups:
              security_group_create_args: {}
              security_group_update_args: {}
          runner:
            constant:
              times: 50
              concurrency: 10
          contexts:
            users:
              tenants: 2
              users_per_tenant: 3
            quotas:
              neutron:
                security_group: -1
    -
      title: VM booting workloads
      workloads:
        -
          scenario:
            NovaServers.boot_and_delete_server:
              flavor:
                name: {{flavor_name}}
              image:
                name: {{image_name}}
              auto_assign_nic: true
          runner:
            constant:
              times: 2
              concurrency: 2
          contexts:
            users:
              tenants: 2
              users_per_tenant: 2
            network: {}
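            # the empty "network" context makes Rally create a tenant
            # network with default settings, which auto_assign_nic above
            # attaches the servers to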
          sla:
            # NovaServers.boot_and_delete_server is unstable and frequently
            # times out while waiting for the VM to become ACTIVE. We run
            # this scenario only for the osprofiler report and ignore the
            # rally scenario outcome. Ideally we should eliminate the cause
            # of the timeouts, but until then we still get usable osprofiler
            # results.
            failure_rate:
              max: 100