# neutron/rally-jobs/task-neutron.yaml
{% set floating_network = floating_network or "public" %}
{% set image_name = "^(cirros.*-disk|TestVM)$" %}
{% set flavor_name = "m1.tiny" %}
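# The floating_network default above can be supplied at run time; e.g. with
# an illustrative external network name:
#   rally task start task-neutron.yaml --task-args '{"floating_network": "ext-net"}'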
---
version: 2
title: Rally Task for OpenStack Neutron CI
description: >
  The task contains various scenarios to prevent concurrency issues
subtasks:
  -
    title: Network related workloads.
    workloads:
      -
        description: >
          Check performance of list_networks action and ensure
          network quotas are not exceeded
        scenario:
          NeutronNetworks.create_and_list_networks: {}
        runner:
          constant:
            times: 100
            concurrency: 20
        contexts:
          users:
            tenants: 1
            users_per_tenant: 1
          quotas:
            neutron:
              # worst case is other 19 writers have created
              # resources, but quota reservation hasn't cleared
              # yet on any of them. This value could be 100
              # without concurrency. see bug/1623390
              network: 119
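              # worked out from the runner above: 100 iterations
              # + (20 - 1) concurrent reservations = 119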
        sla:
          max_avg_duration_per_atomic:
            neutron.list_networks: 15  # reduce as perf is fixed
          failure_rate:
            max: 0
      -
        description: Check network update action
        scenario:
          NeutronNetworks.create_and_update_networks:
            network_create_args: {}
            network_update_args:
              admin_state_up: False
        runner:
          constant:
            times: 40
            concurrency: 20
        contexts:
          users:
            tenants: 1
            users_per_tenant: 1
          quotas:
            neutron:
              network: -1
      -
        scenario:
          NeutronNetworks.create_and_delete_networks: {}
        runner:
          constant:
            times: 40
            concurrency: 20
        contexts:
          users:
            tenants: 1
            users_per_tenant: 1
          quotas:
            neutron:
              network: -1
              subnet: -1
  -
    title: Subnet related workloads.
    workloads:
      -
        scenario:
          NeutronNetworks.create_and_list_subnets:
            subnets_per_network: 2
        runner:
          constant:
            times: 40
            concurrency: 20
        contexts:
          users:
            tenants: 1
            users_per_tenant: 1
          quotas:
            neutron:
              subnet: -1
              network: -1
      -
        scenario:
          NeutronNetworks.create_and_update_subnets:
            network_create_args: {}
            subnet_create_args: {}
            subnet_cidr_start: "1.4.0.0/16"
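            # starting CIDR only; the scenario appears to allocate each
            # subnet from successive ranges beginning at this value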
            subnets_per_network: 2
            subnet_update_args:
              enable_dhcp: True
        runner:
          constant:
            times: 100
            concurrency: 20
        contexts:
          users:
            tenants: 1
            users_per_tenant: 5
          quotas:
            neutron:
              network: -1
              subnet: -1
              port: -1
      -
        scenario:
          NeutronNetworks.create_and_delete_subnets:
            network_create_args: {}
            subnet_create_args: {}
            subnet_cidr_start: "1.1.0.0/30"
            subnets_per_network: 2
        runner:
          constant:
            times: 40
            concurrency: 20
        contexts:
          users:
            tenants: 1
            users_per_tenant: 1
          quotas:
            neutron:
              network: -1
              subnet: -1
  -
    title: Routers related workloads.
    workloads:
      -
        scenario:
          NeutronNetworks.create_and_list_routers:
            network_create_args:
            subnet_create_args:
            subnet_cidr_start: "1.1.0.0/30"
            subnets_per_network: 2
            router_create_args:
        runner:
          constant:
            times: 40
            concurrency: 20
        contexts:
          users:
            tenants: 1
            users_per_tenant: 1
          quotas:
            neutron:
              network: -1
              subnet: -1
              router: -1
      -
        scenario:
          NeutronNetworks.create_and_update_routers:
            network_create_args: {}
            subnet_create_args: {}
            subnet_cidr_start: "1.1.0.0/30"
            subnets_per_network: 2
            router_create_args: {}
            router_update_args:
              admin_state_up: False
        runner:
          constant:
            times: 40
            concurrency: 20
        contexts:
          users:
            tenants: 1
            users_per_tenant: 1
          quotas:
            neutron:
              network: -1
              subnet: -1
              router: -1
      -
        scenario:
          NeutronNetworks.create_and_delete_routers:
            network_create_args: {}
            subnet_create_args: {}
            subnet_cidr_start: "1.1.0.0/30"
            subnets_per_network: 2
            router_create_args: {}
        runner:
          constant:
            times: 40
            concurrency: 20
        contexts:
          users:
            tenants: 1
            users_per_tenant: 1
          quotas:
            neutron:
              network: -1
              subnet: -1
              router: -1
  -
    title: Ports related workloads.
    workloads:
      -
        description: >
          Check performance of list ports action and ensure
          network quotas are not exceeded
        scenario:
          NeutronNetworks.create_and_list_ports:
            network_create_args: {}
            port_create_args: {}
            ports_per_network: 100
        runner:
          constant:
            times: 8
            concurrency: 4
        contexts:
          users:
            tenants: 1
            users_per_tenant: 1
          quotas:
            neutron:
              network: -1
              subnet: -1
              router: -1
              # ((ports per net + 1 dhcp) * times) + (concurrency-1)
              # see bug/1623390 for concurrency explanation
              port: 811
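              # worked out: (100 + 1) * 8 + (4 - 1) = 811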
        sla:
          max_avg_duration_per_atomic:
            neutron.list_ports: 15  # reduce as perf is fixed
          failure_rate:
            max: 0
      -
        scenario:
          NeutronNetworks.create_and_update_ports:
            network_create_args: {}
            port_create_args: {}
            ports_per_network: 5
            port_update_args:
              admin_state_up: False
              device_id: "dummy_id"
              device_owner: "dummy_owner"
        runner:
          constant:
            times: 40
            concurrency: 20
        contexts:
          users:
            tenants: 1
            users_per_tenant: 1
          quotas:
            neutron:
              network: -1
              port: -1
      -
        scenario:
          NeutronNetworks.create_and_bind_ports:
            ports_per_network: 5
        runner:
          constant:
            times: 3
            concurrency: 2
        contexts:
          users:
            tenants: 1
            users_per_tenant: 1
          roles:
            - admin
          quotas:
            neutron:
              network: -1
              subnet: -1
              port: -1
          network: {}
          networking_agents: {}
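          # the admin role and networking_agents context above are presumably
          # required because binding a port (setting binding:host_id) is an
          # admin-only operation that needs knowledge of the agent hosts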
      -
        scenario:
          NeutronNetworks.create_and_delete_ports:
            network_create_args: {}
            port_create_args: {}
            ports_per_network: 5
        runner:
          constant:
            times: 40
            concurrency: 20
        contexts:
          users:
            tenants: 1
            users_per_tenant: 1
          quotas:
            neutron:
              network: -1
              port: -1
  -
    title: Quotas update check
    scenario:
      Quotas.neutron_update:
        max_quota: 1024
    runner:
      constant:
        times: 40
        concurrency: 20
    contexts:
      users:
        tenants: 20
        users_per_tenant: 1
  -
    title: Trunks related workload
    scenario:
      NeutronTrunks.create_and_list_trunks:
        subport_count: 125
    runner:
      constant:
        times: 4
        concurrency: 4
    contexts:
      users:
        tenants: 1
        users_per_tenant: 1
      quotas:
        neutron:
          network: -1
          port: 1000
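          # rough budget from the values above: (125 subports + 1 parent
          # port) * 4 iterations = 504 ports, comfortably under this quota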
  -
    title: Floating IP related workloads
    workloads:
      -
        scenario:
          NeutronNetworks.create_and_delete_floating_ips:
            floating_network: {{ floating_network }}
            floating_ip_args: {}
        runner:
          constant:
            times: 10
            concurrency: 5
        contexts:
          users:
            tenants: 2
            users_per_tenant: 3
          quotas:
            neutron:
              floatingip: -1
      -
        scenario:
          NeutronNetworks.create_and_list_floating_ips:
            floating_network: {{ floating_network }}
            floating_ip_args: {}
        runner:
          constant:
            times: 10
            concurrency: 5
        contexts:
          users:
            tenants: 2
            users_per_tenant: 3
          quotas:
            neutron:
              floatingip: -1
      -
        scenario:
          NeutronNetworks.associate_and_dissociate_floating_ips:
            floating_network: {{ floating_network }}
        runner:
          constant:
            times: 10
            concurrency: 5
        contexts:
          users:
            tenants: 2
            users_per_tenant: 3
          quotas:
            neutron:
              floatingip: -1
  -
    title: Security Group Related Scenarios
    workloads:
      -
        scenario:
          NeutronSecurityGroup.create_and_delete_security_group_rule:
            security_group_args: {}
            security_group_rule_args: {}
        runner:
          constant:
            times: 50
            concurrency: 10
        contexts:
          users:
            tenants: 2
            users_per_tenant: 3
          quotas:
            neutron:
              security_group: -1
              security_group_rule: -1
      -
        scenario:
          NeutronSecurityGroup.create_and_delete_security_groups:
            security_group_create_args: {}
        runner:
          constant:
            times: 50
            concurrency: 10
        contexts:
          users:
            tenants: 2
            users_per_tenant: 3
          quotas:
            neutron:
              security_group: -1
      -
        scenario:
          NeutronSecurityGroup.create_and_list_security_group_rules:
            security_group_args: {}
            security_group_rule_args: {}
            security_group_rules_count: 20
        runner:
          constant:
            times: 50
            concurrency: 10
        contexts:
          users:
            tenants: 2
            users_per_tenant: 3
          quotas:
            neutron:
              security_group: -1
              security_group_rule: -1
      -
        scenario:
          NeutronSecurityGroup.create_and_list_security_groups:
            security_group_create_args: {}
        runner:
          constant:
            times: 50
            concurrency: 10
        contexts:
          users:
            tenants: 2
            users_per_tenant: 3
          quotas:
            neutron:
              security_group: -1
      -
        scenario:
          NeutronSecurityGroup.create_and_show_security_group_rule:
            security_group_args: {}
            security_group_rule_args: {}
        runner:
          constant:
            times: 50
            concurrency: 10
        contexts:
          users:
            tenants: 2
            users_per_tenant: 3
          quotas:
            neutron:
              security_group: -1
              security_group_rule: -1
      -
        scenario:
          NeutronSecurityGroup.create_and_show_security_group:
            security_group_create_args: {}
        runner:
          constant:
            times: 50
            concurrency: 10
        contexts:
          users:
            tenants: 2
            users_per_tenant: 3
          quotas:
            neutron:
              security_group: -1
      -
        scenario:
          NeutronSecurityGroup.create_and_update_security_groups:
            security_group_create_args: {}
            security_group_update_args: {}
        runner:
          constant:
            times: 50
            concurrency: 10
        contexts:
          users:
            tenants: 2
            users_per_tenant: 3
          quotas:
            neutron:
              security_group: -1
  -
    title: VM booting workloads
    workloads:
      -
        scenario:
          NovaServers.boot_and_delete_server:
            flavor:
              name: {{flavor_name}}
            image:
              name: {{image_name}}
            auto_assign_nic: true
        runner:
          constant:
            times: 2
            concurrency: 2
        contexts:
          users:
            tenants: 2
            users_per_tenant: 2
          network: {}
        sla:
          # NovaServers.boot_and_delete_server is unstable and frequently
          # times out while waiting for the VM to become ACTIVE. We run this
          # scenario for the osprofiler report and ignore the rally scenario
          # outcome. Ideally we should eliminate the cause of the timeouts,
          # but until then we still get usable osprofiler results.
          failure_rate:
            max: 100