diff --git a/rally-jobs/extra/instance_dd_test.sh b/rally-jobs/extra/instance_dd_test.sh
index 08872b6521..9fc758b868 100644
--- a/rally-jobs/extra/instance_dd_test.sh
+++ b/rally-jobs/extra/instance_dd_test.sh
@@ -1,12 +1,12 @@
 #!/bin/sh
 time_seconds(){ (time -p $1 ) 2>&1 |awk '/real/{print $2}'; }
 file=/tmp/test.img
-c=1000 #1GB
-write_seq_1gb=$(time_seconds "dd if=/dev/zero of=$file bs=1M count=$c")
-read_seq_1gb=$(time_seconds "dd if=$file of=/dev/null bs=1M")
+c=100 #100M
+write_seq=$(time_seconds "dd if=/dev/zero of=$file bs=1M count=$c")
+read_seq=$(time_seconds "dd if=$file of=/dev/null bs=1M")
 [ -f $file ] && rm $file
 echo "{
-    \"write_seq_1gb\": $write_seq_1gb,
-    \"read_seq_1gb\": $read_seq_1gb
+    \"write_seq\": $write_seq,
+    \"read_seq\": $read_seq
 }"
diff --git a/rally-jobs/rally-mos.yaml b/rally-jobs/rally-mos.yaml
new file mode 100755
index 0000000000..486da243de
--- /dev/null
+++ b/rally-jobs/rally-mos.yaml
@@ -0,0 +1,89 @@
+---
+
+  KeystoneBasic.create_user:
+    -
+      args:
+        name_length: 10
+      runner:
+        type: "constant"
+        times: 8
+        concurrency: 1
+      sla:
+        failure_rate:
+          max: 0
+
+  KeystoneBasic.create_delete_user:
+    -
+      args:
+        name_length: 10
+      runner:
+        type: "constant"
+        times: 8
+        concurrency: 1
+      sla:
+        failure_rate:
+          max: 0
+
+  KeystoneBasic.create_and_list_tenants:
+    -
+      args:
+        name_length: 10
+      runner:
+        type: "constant"
+        times: 1
+        concurrency: 1
+      sla:
+        failure_rate:
+          max: 0
+
+  KeystoneBasic.create_and_list_users:
+    -
+      args:
+        name_length: 10
+      runner:
+        type: "constant"
+        times: 1
+        concurrency: 1
+      sla:
+        failure_rate:
+          max: 0
+
+  KeystoneBasic.create_tenant:
+    -
+      args:
+        name_length: 10
+      runner:
+        type: "constant"
+        times: 1
+        concurrency: 1
+      sla:
+        failure_rate:
+          max: 0
+
+  KeystoneBasic.create_tenant_with_users:
+    -
+      args:
+        name_length: 10
+        users_per_tenant: 10
+      runner:
+        type: "constant"
+        times: 1
+        concurrency: 1
+      context:
+        users:
+          tenants: 3
+      sla:
+        failure_rate:
+          max: 0
+
+  KeystoneBasic.create_delete_user:
+    -
+      args:
+        name_length: 10
+      runner:
+        type: "constant"
+        times: 1
+        concurrency: 1
+      sla:
+        failure_rate:
+          max: 0
diff --git a/rally-jobs/rally-mos_neutron_v3.yaml b/rally-jobs/rally-mos_neutron_v3.yaml
new file mode 100644
index 0000000000..eae6a19801
--- /dev/null
+++ b/rally-jobs/rally-mos_neutron_v3.yaml
@@ -0,0 +1,260 @@
+---
+
+  NeutronNetworks.create_and_list_networks:
+    -
+      args:
+        network_create_args:
+      runner:
+        type: "constant"
+        times: 1
+        concurrency: 1
+      context:
+        users:
+          tenants: 1
+          users_per_tenant: 1
+        quotas:
+          neutron:
+            network: -1
+      sla:
+        failure_rate:
+          max: 0
+
+  NeutronNetworks.create_and_list_subnets:
+    -
+      args:
+        network_create_args:
+        subnet_create_args:
+        subnet_cidr_start: "1.1.0.0/30"
+        subnets_per_network: 2
+      runner:
+        type: "constant"
+        times: 1
+        concurrency: 1
+      context:
+        users:
+          tenants: 1
+          users_per_tenant: 1
+        quotas:
+          neutron:
+            network: -1
+            subnet: -1
+      sla:
+        failure_rate:
+          max: 0
+
+  NeutronNetworks.create_and_list_routers:
+    -
+      args:
+        network_create_args:
+        subnet_create_args:
+        subnet_cidr_start: "1.1.0.0/30"
+        subnets_per_network: 2
+        router_create_args:
+      runner:
+        type: "constant"
+        times: 1
+        concurrency: 1
+      context:
+        users:
+          tenants: 1
+          users_per_tenant: 1
+        quotas:
+          neutron:
+            network: -1
+            subnet: -1
+            router: -1
+      sla:
+        failure_rate:
+          max: 0
+
+  NeutronNetworks.create_and_list_ports:
+    -
+      args:
+        network_create_args:
+        port_create_args:
+        ports_per_network: 4
+      runner:
+        type: "constant"
+        times: 1
+        concurrency: 1
+      context:
+        users:
+          tenants: 1
+          users_per_tenant: 1
+        quotas:
+          neutron:
+            network: -1
+            subnet: -1
+            router: -1
+            port: -1
+      sla:
+        failure_rate:
+          max: 0
+
+  NeutronNetworks.create_and_update_networks:
+    -
+      args:
+        network_create_args: {}
+        network_update_args:
+          admin_state_up: False
+          name: "_updated"
+      runner:
+        type: "constant"
+        times: 1
+        concurrency: 1
+      context:
+        users:
+          tenants: 1
+          users_per_tenant: 1
+        quotas:
+          neutron:
+            network: -1
+      sla:
+        failure_rate:
+          max: 0
+
+  NeutronNetworks.create_and_update_subnets:
+    -
+      args:
+        network_create_args: {}
+        subnet_create_args: {}
+        subnet_cidr_start: "1.4.0.0/16"
+        subnets_per_network: 2
+        subnet_update_args:
+          enable_dhcp: False
+          name: "_subnet_updated"
+      runner:
+        type: "constant"
+        times: 1
+        concurrency: 1
+      context:
+        users:
+          tenants: 5
+          users_per_tenant: 5
+        quotas:
+          neutron:
+            network: -1
+            subnet: -1
+      sla:
+        failure_rate:
+          max: 0
+
+  NeutronNetworks.create_and_update_routers:
+    -
+      args:
+        network_create_args: {}
+        subnet_create_args: {}
+        subnet_cidr_start: "1.1.0.0/30"
+        subnets_per_network: 2
+        router_create_args: {}
+        router_update_args:
+          admin_state_up: False
+          name: "_router_updated"
+      runner:
+        type: "constant"
+        times: 1
+        concurrency: 1
+      context:
+        users:
+          tenants: 1
+          users_per_tenant: 1
+        quotas:
+          neutron:
+            network: -1
+            subnet: -1
+            router: -1
+      sla:
+        failure_rate:
+          max: 0
+
+  NeutronNetworks.create_and_update_ports:
+    -
+      args:
+        network_create_args: {}
+        port_create_args: {}
+        ports_per_network: 5
+        port_update_args:
+          admin_state_up: False
+          device_id: "dummy_id"
+          device_owner: "dummy_owner"
+          name: "_port_updated"
+      runner:
+        type: "constant"
+        times: 1
+        concurrency: 1
+      context:
+        users:
+          tenants: 1
+          users_per_tenant: 1
+        quotas:
+          neutron:
+            network: -1
+            port: -1
+      sla:
+        failure_rate:
+          max: 0
+
+  NeutronNetworks.create_and_delete_networks:
+    -
+      args:
+        network_create_args: {}
+      runner:
+        type: "constant"
+        times: 1
+        concurrency: 1
+      context:
+        users:
+          tenants: 1
+          users_per_tenant: 1
+        quotas:
+          neutron:
+            network: -1
+            subnet: -1
+      sla:
+        failure_rate:
+          max: 0
+
+  NeutronNetworks.create_and_delete_subnets:
+    -
+      args:
+        network_create_args: {}
+        subnet_create_args: {}
+        subnet_cidr_start: "1.1.0.0/30"
+        subnets_per_network: 2
+      runner:
+        type: "constant"
+        times: 1
+        concurrency: 1
+      context:
+        users:
+          tenants: 1
+          users_per_tenant: 1
+        quotas:
+          neutron:
+            network: -1
+            subnet: -1
+      sla:
+        failure_rate:
+          max: 0
+
+  NeutronNetworks.create_and_delete_ports:
+    -
+      args:
+        network_create_args: {}
+        port_create_args: {}
+        ports_per_network: 10
+      runner:
+        type: "constant"
+        times: 1
+        concurrency: 1
+      context:
+        users:
+          tenants: 1
+          users_per_tenant: 1
+        quotas:
+          neutron:
+            network: -1
+            port: -1
+      sla:
+        failure_rate:
+          max: 0
diff --git a/rally-jobs/rally-mos_v3.yaml b/rally-jobs/rally-mos_v3.yaml
new file mode 100755
index 0000000000..829738cf02
--- /dev/null
+++ b/rally-jobs/rally-mos_v3.yaml
@@ -0,0 +1,549 @@
+---
+
+  KeystoneBasic.create_user:
+    -
+      args:
+        name_length: 10
+      runner:
+        type: "constant"
+        times: 1
+        concurrency: 1
+      sla:
+        failure_rate:
+          max: 0
+
+  KeystoneBasic.create_delete_user:
+    -
+      args:
+        name_length: 10
+      runner:
+        type: "constant"
+        times: 1
+        concurrency: 1
+      sla:
+        failure_rate:
+          max: 0
+
+  KeystoneBasic.create_and_list_tenants:
+    -
+      args:
+        name_length: 10
+      runner:
+        type: "constant"
+        times: 1
+        concurrency: 1
+      sla:
+        failure_rate:
+          max: 0
+
+  KeystoneBasic.create_and_list_users:
+    -
+      args:
+        name_length: 10
+      runner:
+        type: "constant"
+        times: 1
+        concurrency: 1
+      sla:
+        failure_rate:
+          max: 0
+
+  KeystoneBasic.create_tenant:
+    -
+      args:
+        name_length: 10
+      runner:
+        type: "constant"
+        times: 1
+        concurrency: 1
+      sla:
+        failure_rate:
+          max: 0
+
+  KeystoneBasic.create_tenant_with_users:
+    -
+      args:
+        name_length: 10
+        users_per_tenant: 10
+      runner:
+        type: "constant"
+        times: 1
+        concurrency: 1
+      context:
+        users:
+          tenants: 1
+      sla:
+        failure_rate:
+          max: 0
+
+  KeystoneBasic.create_delete_user:
+    -
+      args:
+        name_length: 10
+      runner:
+        type: "constant"
+        times: 1
+        concurrency: 1
+      sla:
+        failure_rate:
+          max: 0
+
+  HeatStacks.create_and_list_stack:
+    -
+      runner:
+        type: "constant"
+        times: 1
+        concurrency: 1
+      context:
+        users:
+          tenants: 1
+          users_per_tenant: 1
+      sla:
+        failure_rate:
+          max: 0
+
+  HeatStacks.create_and_delete_stack:
+    -
+      runner:
+        type: "constant"
+        times: 1
+        concurrency: 1
+      context:
+        users:
+          tenants: 1
+          users_per_tenant: 1
+      sla:
+        failure_rate:
+          max: 0
+
+  Authenticate.keystone:
+    -
+      runner:
+        type: "constant"
+        times: 1
+        concurrency: 1
+      context:
+        users:
+          tenants: 1
+          users_per_tenant: 1
+      sla:
+        failure_rate:
+          max: 0
+
+  SaharaNodeGroupTemplates.create_and_list_node_group_templates:
+    -
+      args:
+        flavor:
+          name: "m1.small"
+      runner:
+        type: "constant"
+        times: 1
+        concurrency: 1
+      context:
+        users:
+          tenants: 1
+          users_per_tenant: 1
+      sla:
+        failure_rate:
+          max: 0
+
+  SaharaNodeGroupTemplates.create_delete_node_group_templates:
+    -
+      args:
+        flavor:
+          name: "m1.small"
+      runner:
+        type: "constant"
+        times: 1
+        concurrency: 1
+      context:
+        users:
+          tenants: 1
+          users_per_tenant: 1
+      sla:
+        failure_rate:
+          max: 0
+
+  Authenticate.validate_cinder:
+    -
+      args:
+        repetitions: 2
+      runner:
+        type: "constant"
+        times: 1
+        concurrency: 1
+      context:
+        users:
+          tenants: 1
+          users_per_tenant: 1
+      sla:
+        failure_rate:
+          max: 0
+
+  Authenticate.validate_glance:
+    -
+      args:
+        repetitions: 2
+      runner:
+        type: "constant"
+        times: 1
+        concurrency: 1
+      context:
+        users:
+          tenants: 1
+          users_per_tenant: 1
+      sla:
+        failure_rate:
+          max: 0
+
+  Authenticate.validate_heat:
+    -
+      args:
+        repetitions: 2
+      runner:
+        type: "constant"
+        times: 1
+        concurrency: 1
+      context:
+        users:
+          tenants: 1
+          users_per_tenant: 1
+      sla:
+        failure_rate:
+          max: 0
+
+  Authenticate.validate_nova:
+    -
+      args:
+        repetitions: 2
+      runner:
+        type: "constant"
+        times: 1
+        concurrency: 1
+      context:
+        users:
+          tenants: 1
+          users_per_tenant: 1
+      sla:
+        failure_rate:
+          max: 0
+
+  Quotas.cinder_update_and_delete:
+    -
+      args:
+        max_quota: 1024
+      runner:
+        type: "constant"
+        times: 1
+        concurrency: 1
+      context:
+        users:
+          tenants: 1
+          users_per_tenant: 1
+      sla:
+        failure_rate:
+          max: 0
+
+  Quotas.cinder_update:
+    -
+      args:
+        max_quota: 1024
+      runner:
+        type: "constant"
+        times: 1
+        concurrency: 1
+      context:
+        users:
+          tenants: 1
+          users_per_tenant: 1
+      sla:
+        failure_rate:
+          max: 0
+
+  Quotas.nova_update_and_delete:
+    -
+      args:
+        max_quota: 1024
+      runner:
+        type: "constant"
+        times: 1
+        concurrency: 1
+      context:
+        users:
+          tenants: 1
+          users_per_tenant: 1
+      sla:
+        failure_rate:
+          max: 0
+
+  Quotas.nova_update:
+    -
+      args:
+        max_quota: 1024
+      runner:
+        type: "constant"
+        times: 1
+        concurrency: 1
+      context:
+        users:
+          tenants: 1
+          users_per_tenant: 1
+      sla:
+        failure_rate:
+          max: 0
+
+  VMTasks.boot_runcommand_delete:
+    -
+      args:
+        flavor:
+          name: "m1.tiny"
+        image:
+          name: "TestVM|cirros.*uec"
+        floating_network: "net04_ext"
+        use_floatingip: true
+        script: "/home/rally/.rally/extra/instance_dd_test.sh"
+        interpreter: "/bin/sh"
+        username: "cirros"
+      runner:
+        type: "constant"
+        times: 1
+        concurrency: 1
+      context:
+        users:
+          tenants: 1
+          users_per_tenant: 1
+        network: {}
+      sla:
+        failure_rate:
+          max: 0
+
+
+  NovaServers.boot_and_delete_server:
+    -
+      args:
+        flavor:
+          name: "m1.tiny"
+        image:
+          name: "TestVM|cirros.*uec"
+      runner:
+        type: "constant"
+        times: 1
+        concurrency: 1
+      context:
+        users:
+          tenants: 1
+          users_per_tenant: 1
+      sla:
+        failure_rate:
+          max: 0
+
+    -
+      args:
+        auto_assign_nic: true
+        flavor:
+          name: "m1.tiny"
+        image:
+          name: "TestVM|cirros.*uec"
+      runner:
+        type: "constant"
+        times: 1
+        concurrency: 1
+      context:
+        users:
+          tenants: 1
+          users_per_tenant: 1
+        network:
+          start_cidr: "10.2.0.0/24"
+          networks_per_tenant: 2
+      sla:
+        failure_rate:
+          max: 0
+
+
+  NovaServers.boot_and_list_server:
+    -
+      args:
+        flavor:
+          name: "m1.tiny"
+        image:
+          name: "TestVM|cirros.*uec"
+        detailed: True
+      runner:
+        type: "constant"
+        times: 1
+        concurrency: 1
+      context:
+        users:
+          tenants: 1
+          users_per_tenant: 1
+      sla:
+        failure_rate:
+          max: 0
+
+  NovaServers.list_servers:
+    -
+      args:
+        detailed: True
+      runner:
+        type: "constant"
+        times: 1
+        concurrency: 1
+      context:
+        users:
+          tenants: 1
+          users_per_tenant: 1
+        servers:
+          flavor:
+            name: "m1.tiny"
+          image:
+            name: "TestVM|cirros.*uec"
+          servers_per_tenant: 1
+      sla:
+        failure_rate:
+          max: 0
+
+  NovaServers.boot_and_bounce_server:
+    -
+      args:
+        flavor:
+          name: "m1.tiny"
+        image:
+          name: "TestVM|cirros.*uec"
+        actions:
+          -
+            hard_reboot: 1
+          -
+            stop_start: 1
+          -
+            rescue_unrescue: 1
+      runner:
+        type: "constant"
+        times: 1
+        concurrency: 1
+      context:
+        users:
+          tenants: 1
+          users_per_tenant: 1
+      sla:
+        failure_rate:
+          max: 0
+
+  NovaServers.boot_server:
+    -
+      args:
+        flavor:
+          name: "^ram64$"
+        image:
+          name: "TestVM|cirros.*uec"
+      runner:
+        type: "constant"
+        times: 1
+        concurrency: 1
+      context:
+        users:
+          tenants: 1
+          users_per_tenant: 1
+        flavors:
+          -
+            name: "ram64"
+            ram: 64
+      sla:
+        failure_rate:
+          max: 0
+    -
+      args:
+        flavor:
+          name: "m1.tiny"
+        image:
+          name: "TestVM|cirros.*uec"
+      runner:
+        type: "constant"
+        times: 1
+        concurrency: 1
+      context:
+        users:
+          tenants: 1
+          users_per_tenant: 1
+      sla:
+        failure_rate:
+          max: 0
+
+  Requests.check_response:
+    -
+      args:
+        url: "http://git.openstack.org/cgit"
+        response: 200
+      runner:
+        type: "constant"
+        times: 1
+        concurrency: 1
+      sla:
+        failure_rate:
+          max: 0
+
+  NovaSecGroup.create_and_delete_secgroups:
+    -
+      args:
+        security_group_count: 5
+        rules_per_security_group: 5
+      runner:
+        type: "constant"
+        times: 1
+        concurrency: 1
+      context:
+        users:
+          tenants: 1
+          users_per_tenant: 1
+        quotas:
+          nova:
+            security_groups: -1
+            security_group_rules: -1
+      sla:
+        failure_rate:
+          max: 0
+
+  NovaSecGroup.create_and_list_secgroups:
+    -
+      args:
+        security_group_count: 5
+        rules_per_security_group: 5
+      runner:
+        type: "constant"
+        times: 1
+        concurrency: 1
+      context:
+        users:
+          tenants: 1
+          users_per_tenant: 1
+        quotas:
+          nova:
+            security_groups: -1
+            security_group_rules: -1
+      sla:
+        failure_rate:
+          max: 0
+
+
+  NovaSecGroup.boot_and_delete_server_with_secgroups:
+    -
+      args:
+        flavor:
+          name: "m1.tiny"
+        image:
+          name: "TestVM|cirros.*uec"
+        security_group_count: 5
+        rules_per_security_group: 5
+      runner:
+        type: "constant"
+        times: 1
+        concurrency: 1
+      context:
+        users:
+          tenants: 1
+          users_per_tenant: 1
+        network:
+          start_cidr: "10.2.0.0/24"
+        quotas:
+          nova:
+            security_groups: -1
+            security_group_rules: -1
diff --git a/tests/ci/rally-gate.py b/tests/ci/rally-gate.py
new file mode 100755
index 0000000000..a3c3750a94
--- /dev/null
+++ b/tests/ci/rally-gate.py
@@ -0,0 +1,189 @@
+#!/usr/bin/env python
+#
+# Copyright 2015: Mirantis Inc.
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#        http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import errno
+import json
+import os
+import pwd
+import re
+import shutil
+import subprocess
+import sys
+import tempfile
+
+from six.moves.urllib import parse
+
+
+def use_keystone_v3():
+    """Alter deployment to use keystone v3."""
+    print("Changing deployment to v3")
+    config = json.loads(subprocess.check_output(["rally", "deployment",
+                                                 "config"]))
+    v3_url = parse.urlsplit(config["auth_url"])._replace(path="v3").geturl()
+    config["auth_url"] = v3_url
+    endpoint = config.get("endpoint")
+    if endpoint:
+        v3_endpoint = parse.urlsplit(endpoint)._replace(path="v3").geturl()
+        config["endpoint"] = v3_endpoint
+    config["project_name"] = config["tenant"]
+    config["project_domain_name"] = config["tenant"]
+    cfg_file = tempfile.NamedTemporaryFile()
+    json.dump(config, cfg_file)
+    print("New config for keystone v3:")
+    print(json.dumps(config, indent=2))
+    cfg_file.flush()
+    subprocess.call(["rally", "deployment", "create",
+                     "--name", "V3", "--file", cfg_file.name])
+    print(subprocess.check_output(["rally", "deployment", "check"]))
+
+TAG_HANDLERS = {"v3": use_keystone_v3}
+
+
+def perror(s):
+    sys.stderr.write(s + "\n")
+    sys.stderr.flush()
+
+
+def run(cmd, stdout=None, gzip=True, check=False):
+    """Run shell command.
+
+    Save output to file, and gzip-compress if needed.
+    If exit status is non-zero and check is True then raise exception.
+    Return exit status otherwise.
+    """
+    print("Starting %s" % " ".join(cmd))
+    status = subprocess.call(cmd, stdout=open(stdout, "w") if stdout else None)
+    if stdout and gzip:
+        subprocess.call(["gzip", "-9", stdout])
+    if check and status:
+        raise Exception("Failed with status %d" % status)
+    return status
+
+
+def run_task(task, tags=None):
+    new_home_dir = tempfile.mkdtemp(prefix="rally_gate_")
+    shutil.copytree(os.path.join(pwd.getpwuid(os.getuid()).pw_dir, ".rally"),
+                    os.path.join(new_home_dir, ".rally"))
+    print("Setting $HOME to %s" % new_home_dir)
+    os.environ["HOME"] = new_home_dir
+    for tag in tags or []:
+        if tag == "args":
+            continue
+        if tag not in TAG_HANDLERS:
+            perror("Warning! Unknown tag '%s'" % tag)
+            continue
+        try:
+            TAG_HANDLERS[tag]()
+        except Exception as e:
+            perror("Error processing tag '%s': %s" % (tag, e))
+
+    run(["rally", "task", "validate", "--task", task], check=True)
+    cmd = ["rally", "task", "start", "--task", task]
+    args_file, ext = task.rsplit(".", 1)
+    args_file = args_file + "_args." + ext
+    if os.path.isfile(args_file):
+        cmd += ["--task-args-file", args_file]
+    run(cmd, check=True)
+    task_name = os.path.split(task)[-1]
+    pub_dir = os.environ.get("RCI_PUB_DIR", "rally-plot")
+    try:
+        os.makedirs(os.path.join(pub_dir, "extra"))
+    except Exception as e:
+        if e.errno != errno.EEXIST:
+            raise
+    run(["rally", "task", "report", "--out",
+         "%s/%s.html" % (pub_dir, task_name)])
+    run(["rally", "task", "results"],
+        stdout="%s/results-%s.json" % (pub_dir, task_name))
+    status = run(["rally", "task", "sla_check"],
+                 stdout="%s/%s.sla.txt" % (pub_dir, task_name))
+    run(["rally", "task", "detailed"],
+        stdout="rally-plot/detailed-%s.txt" % task_name)
+    run(["rally", "task", "detailed", "--iterations-data"],
+        stdout="rally-plot/detailed_with_iterations-%s.txt" % task_name)
+
+    return status
+
+
+def get_name_from_git():
+    """Determine org/project name from git."""
+    r = re.compile(".*/(.*?)/(.*?).git$")
+    for l in open(".git/config"):
+        m = r.match(l.strip())
+        if m:
+            return m.groups()
+    raise Exception("Unable to get project name from git")
+
+
+def get_project_name():
+    for var in ("ZUUL_PROJECT", "GERRIT_PROJECT"):
+        if var in os.environ:
+            return os.environ[var].split("/")
+    return get_name_from_git()
+
+
+def main():
+    statuses = []
+    org, project = get_project_name()
+
+    base = os.environ.get("BASE")
+    if base:
+        base_jobs_dir = os.path.join(base, "new", project)
+    else:
+        base_jobs_dir = os.path.realpath(".")
+
+    rally_root = "/home/rally/rally/"
+    if not os.path.exists(rally_root):
+        rally_root = os.environ["BASE"] + "/new/rally/"
+
+    jobs_dir = os.path.join(base_jobs_dir, "rally-jobs")
+    if not os.path.exists(jobs_dir):
+        # fallback to legacy path
+        jobs_dir = os.path.join(base_jobs_dir, "rally-scenarios")
+    if not os.path.exists(jobs_dir):
+        raise Exception("Rally jobs directory does not exist.")
+
+    for directory in ("plugins", "extra"):
+        dst = os.path.expanduser("~/.rally/%s" % directory)
+        try:
+            shutil.copytree(os.path.join(jobs_dir, directory), dst)
+        except OSError as e:
+            if e.errno != errno.EEXIST:
+                raise
+
+    scenario = os.environ.get("RALLY_SCENARIO", project + ".yaml")
+    scenario_name, scenario_ext = scenario.split(".")
+    print("Processing scenario %s" % scenario)
+
+    for fname in os.listdir(jobs_dir):
+        print("Processing %s" % fname)
+        if fname.startswith(scenario_name):
+            tags = fname[len(scenario_name):-len(scenario_ext) - 1].split("_")
+            statuses.append(run_task(os.path.join(jobs_dir, fname), tags))
+        else:
+            print("Ignoring file %s" % fname)
+    print("Exit statuses: %r" % statuses)
+
+    run(["python", rally_root + "/rally/ui/utils.py", "render",
+         "tests/ci/rally-gate/index.mako"],
+        gzip=False, stdout="rally-plot/extra/index.html")
+
+    return any(statuses)
+
+
+if __name__ == "__main__":
+    sys.exit(main())
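
Note on how the new job files above are picked up: rally-gate.py derives a scenario prefix from $RALLY_SCENARIO (falling back to "<project>.yaml") and treats everything between that prefix and the file extension as underscore-separated tags, so a name like rally-mos_neutron_v3.yaml carries the tags "neutron" and "v3". Only "v3" has a handler (it rewrites the deployment for keystone v3); other tags are merely reported as unknown and the task still runs. The snippet below is an illustrative sketch only, not code from this change; it mirrors the slicing done in main()/run_task() and assumes RALLY_SCENARIO is set to "rally-mos.yaml".

    # Sketch of the file-name -> tags convention used by rally-gate.py.
    # The default scenario value here is an assumption for illustration.
    def tags_for(fname, scenario="rally-mos.yaml"):
        scenario_name, scenario_ext = scenario.split(".")
        if not fname.startswith(scenario_name):
            return None  # such files are ignored by the gate
        return fname[len(scenario_name):-len(scenario_ext) - 1].split("_")

    for f in ("rally-mos.yaml", "rally-mos_v3.yaml", "rally-mos_neutron_v3.yaml"):
        print(f, tags_for(f))
    # rally-mos.yaml            -> ['']             (empty tag, reported as unknown; task still runs)
    # rally-mos_v3.yaml         -> ['', 'v3']       (switches the deployment to keystone v3)
    # rally-mos_neutron_v3.yaml -> ['', 'neutron', 'v3']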