From 1cc90a41236b9b5707140e057bbe9eaf41268f3f Mon Sep 17 00:00:00 2001 From: Sergey Galkin Date: Wed, 30 Mar 2016 20:45:56 +0300 Subject: [PATCH] Add scripts for mesos marathon performance testing This is a draft 1. application_managment_helper.py is helper to create or delete apps in marathon 2. marathon-scale-tests.py is scripts for testing marathon and mesos 3. run_full_tests.sh is script for run all tests and generate json with results 4. result-analize.py is script for generate RST report with table and graphs Change-Id: Ic7dc472c90bf5746c3c2b364afc694c83b8509b7 --- .../README.md | 7 + .../application_managment_helper.py | 109 ++++ .../marathon-scale-tests.py | 492 ++++++++++++++++++ .../requirements.txt | 2 + .../result-analize.py | 122 +++++ .../run_full_tests.sh | 26 + 6 files changed, 758 insertions(+) create mode 100644 scripts/mesos-marathon-performance-testing/README.md create mode 100644 scripts/mesos-marathon-performance-testing/application_managment_helper.py create mode 100644 scripts/mesos-marathon-performance-testing/marathon-scale-tests.py create mode 100644 scripts/mesos-marathon-performance-testing/requirements.txt create mode 100644 scripts/mesos-marathon-performance-testing/result-analize.py create mode 100755 scripts/mesos-marathon-performance-testing/run_full_tests.sh diff --git a/scripts/mesos-marathon-performance-testing/README.md b/scripts/mesos-marathon-performance-testing/README.md new file mode 100644 index 0000000..be0300c --- /dev/null +++ b/scripts/mesos-marathon-performance-testing/README.md @@ -0,0 +1,7 @@ +This is draft scripts + +1. application_managment_helper.py is helper to create or delete apps in marathon +2. marathon-scale-tests.py is scripts for testing marathon and mesos +3. run_full_tests.sh is script for run all tests and generate json with results +4. 
result-analize.py is script for generate RST report with table and graphs + diff --git a/scripts/mesos-marathon-performance-testing/application_managment_helper.py b/scripts/mesos-marathon-performance-testing/application_managment_helper.py new file mode 100644 index 0000000..8f08658 --- /dev/null +++ b/scripts/mesos-marathon-performance-testing/application_managment_helper.py @@ -0,0 +1,109 @@ +import argparse +import hashlib +import logging +import random +import signal + + +from marathon import MarathonClient +from marathon.models.constraint import MarathonConstraint +from marathon.models.container import MarathonContainer +from marathon.models.container import MarathonContainerPortMapping +from marathon.models.container import MarathonDockerContainer +from marathon.models import MarathonApp +from marathon.models import MarathonHealthCheck +from multiprocessing import Pool + +MEM = 256 +CPUS = 1 +DISK = 50 + + +def init_worker(): + signal.signal(signal.SIGINT, signal.SIG_IGN) + + +def create_app(app_instances): + port_mapping = MarathonContainerPortMapping(container_port=80, + protocol="tcp") + app_docker = MarathonDockerContainer( + image="nginx", + network="BRIDGE", + port_mappings=[port_mapping]) + app_container = MarathonContainer(docker=app_docker) + http_health_check = MarathonHealthCheck(protocol="HTTP", + path="/", + grace_period_seconds=300, + interval_seconds=30, + timeout_seconds=20, + max_consecutive_failures=3) + + app_name = str(hashlib.md5(str(random.random())).hexdigest()) + logging.debug("Create cluster {}".format(app_name)) + app_constraint = MarathonConstraint(field="hostname", operator="UNIQUE") + new_app = MarathonApp(cpus=CPUS, mem=MEM, disk=DISK, + container=app_container, + health_checks=[http_health_check], + instances=app_instances, + constraints=[app_constraint], + max_launch_delay_seconds=5) + print("Creating {}".format(app_name)) + cluster.create_app(app_id=app_name, + app=new_app) + return None + + +def concur_operations(function, 
arguments, concurrency): + pool = Pool(concurrency, init_worker) + results = [] + try: + results = eval("pool.map({}, {})".format(function, arguments)) + pool.close() + pool.join() + except KeyboardInterrupt: + print("Caught KeyboardInterrupt, terminating workers") + pool.terminate() + pool.join() + return results + + +def concur_create_apps(concurrency, instances): + if not args.silent: + print("======= Creating {1} applications with {0} instances, " + "concurrency is {1} " + "===============".format(instances, concurrency)) + list_instances = [instances] * concurrency + return concur_operations("create_app", str(list_instances), + concurrency) + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + parser.add_argument("-m", "--marathon", + help="Marathon URL, on example " + "http://127.0.0.1:8080/marathon", + required=True) + parser.add_argument("-e", "--execute", help="Operation execute", + choices=['delete', 'create'], required=True) + parser.add_argument("-d", "--delete", + help="Delete all applications", + action="store_true") + parser.add_argument("-c", "--concurrency", + help="Concurrency") + parser.add_argument("-n", "--nodes", + help="Number of tasks per application") + parser.add_argument("-s", "--silent", + help="Print only results", + action="store_true") + args = parser.parse_args() + cluster = MarathonClient(args.marathon, timeout=240) + + if args.execute == "delete": + cluster = MarathonClient(args.marathon) + all_apps = cluster.list_apps() + for app in all_apps: + print("Delete {}".format(app.id)) + cluster.delete_app(app.id, force=True) + if args.execute == "create": + concur = 1 if args.concurrency is None else args.concurrency + nodes = 1 if args.nodes is None else args.nodes + concur_create_apps(int(concur), int(nodes)) diff --git a/scripts/mesos-marathon-performance-testing/marathon-scale-tests.py b/scripts/mesos-marathon-performance-testing/marathon-scale-tests.py new file mode 100644 index 0000000..87736f9 --- /dev/null +++ 
"""Scale/performance tests for Marathon on Mesos.

Creates, restarts, updates and deletes batches of nginx apps and reports
per-app and summary timing statistics as JSON (see print_results()).
"""
import argparse
import ast
import hashlib
import json
import logging
import random
import signal
import statistics
import sys
import time


from marathon import MarathonClient
from marathon.models.container import MarathonContainer
from marathon.models.container import MarathonContainerPortMapping
from marathon.models.container import MarathonDockerContainer
from marathon.models import MarathonApp
from marathon.models import MarathonHealthCheck


from multiprocessing import Pool

# Default resources for every generated test application.
MEM = 256
CPUS = 1
DISK = 50


def percentage(part, whole):
    """Return ``part`` as a percentage of ``whole`` (float)."""
    return 100 * float(part)/float(whole)


def init_worker():
    """Make pool workers ignore SIGINT so Ctrl-C is handled by the parent."""
    signal.signal(signal.SIGINT, signal.SIG_IGN)


def is_timeout(start_time, timeout):
    """True (and a warning is logged) once ``timeout`` seconds have
    elapsed since ``start_time``."""
    logging.debug("[is_timeout] start timeout is {}".format(start_time))
    if time.time() - start_time > timeout:
        logging.warning("Timeout")
        return True
    else:
        return False


def check_instances(check, app_instances, app_name, start_time, timeout,
                    ready_tasks=None):
    """Poll until ``app_instances`` tasks of ``app_name`` satisfy ``check``.

    :param check: "task" (task has started, or -- when ``ready_tasks`` is
        given -- has restarted with a new ``started_at``) or "health"
        (task reports health-check results).
    :param ready_tasks: tasks captured before a restart/update, used to
        detect that a task was actually replaced.
    :returns: number of tasks that satisfied the check (may be fewer than
        ``app_instances`` if ``timeout`` expires).
    """
    in_progress = 0
    while in_progress < app_instances:
        in_progress = 0
        tasks = cluster.list_tasks(app_name)
        for task in tasks:
            if check == "task":
                if ready_tasks is not None:
                    # NOTE(review): this scan breaks on the FIRST id that
                    # does not match, so ``app_index`` may point at the
                    # wrong ready_task when order differs -- confirm intent.
                    app_index = -1
                    for ready_task in ready_tasks:
                        app_index += 1
                        if ready_task.id != task.id:
                            break
                    if (task.started_at is not None and
                            task.started_at !=
                            ready_tasks[app_index].started_at):
                        logging.info("[check_instances] Task started "
                                     "at {}".format(task.started_at))
                        in_progress += 1
                else:
                    if task.started_at is not None:
                        logging.info("[check_instances] Task started "
                                     "at {}".format(task.started_at))
                        in_progress += 1
            elif check == "health":
                if task.health_check_results:
                    in_progress += 1
        if is_timeout(start_time, timeout):
            return in_progress
    return in_progress


def delete_app(app_name, force=False):
    """Delete one app by name (thin wrapper over the Marathon client)."""
    cluster.delete_app(app_id=app_name, force=force)


def calculate_results_per_operation(results):
    """Build the per-operation stats dict.

    ``results`` is [app_name, instances, successful_instances,
    app_full_time, all_starting(list of per-task start seconds)].
    """
    return {"app_name": results[0],
            "instances": results[1],
            "successful_instances": results[2],
            "successful_instance_percent": percentage(results[2], results[1]),
            "app_full_time": round(results[3], 2),
            "instances_mean": round(statistics.mean(results[4]), 2),
            "instances_median": round(statistics.median(results[4]), 2),
            "instances_min": round(min(results[4]), 2),
            "instances_max": round(max(results[4]), 2)}


def check_in_deployment(app_name, timeout):
    """Busy-poll until ``app_name`` has no active deployments or
    ``timeout`` seconds pass."""
    deployments = 1
    start_time = time.time()
    while deployments != 0 and not is_timeout(start_time, timeout):
        for app in cluster.list_apps():
            if app.id == "/{}".format(app_name):
                deployments = len(app.deployments)
    return None


def check_operation_status(start_time, app_name, app_instances,
                           timeout, ready_tasks=None):
    """Wait for an operation on ``app_name`` to finish and collect timings.

    :returns: (successful_instances, per-task start durations in seconds
        -- ``[0]`` when no task started, total wall time of the operation)
    """
    successful_instances = check_instances(
        "task", app_instances, app_name, start_time, timeout, ready_tasks)
    all_starting = []
    tasks = cluster.list_tasks(app_name)
    for task in tasks:
        logging.info("[check_operation_status] Task started at ="
                     " {}".format(task.started_at))
        logging.debug("[check_operation_status] {} - {} ".format(
            task.started_at, task.staged_at))
        if task.started_at is not None:
            starting = task.started_at - task.staged_at
            all_starting.append(starting.total_seconds())
    if len(all_starting) == 0:
        all_starting = [0]
    check_in_deployment(app_name, timeout)
    logging.debug("[check_operation_status] start time is {}".format(
        start_time))
    app_full_time = time.time() - start_time
    return successful_instances, all_starting, app_full_time


def restart_and_wait_app(app_name):
    """Restart an app, wait for its tasks to come back, return stats."""
    timeout = 600
    list_tasks = cluster.list_tasks(app_name)
    app_instances = len(list_tasks)
    start_time = time.time()
    cluster.restart_app(app_id=app_name)
    time.sleep(5)
    successful_instances, all_starting, app_full_time = \
        check_operation_status(start_time, app_name, app_instances,
                               timeout, list_tasks)
    return calculate_results_per_operation([app_name, app_instances,
                                            successful_instances,
                                            app_full_time,
                                            all_starting])


def update_and_wait_cpu(app_name):
    """Double the app's cpus and wait for the update to finish."""
    return update_and_wait_app(app_name, "cpu", "2")


def update_and_wait_mem(app_name):
    """Double the app's mem and wait for the update to finish."""
    return update_and_wait_app(app_name, "mem", "2")


def update_and_wait_disk(app_name):
    """Double the app's disk and wait for the update to finish."""
    return update_and_wait_app(app_name, "disk", "2")


def update_and_wait_instances(app_name):
    """Double the app's instance count and wait for the update."""
    return update_and_wait_app(app_name, "instances", "2")


def update_and_wait_app(app_name, scale_param, scale, scale_type="*"):
    """Scale one resource of ``app_name`` and wait for the deployment.

    ``scale_param`` selects cpu/mem/disk/instances; the new value is
    ``default scale_type scale`` (e.g. ``256 * 2``).
    """
    timeout = 600
    list_tasks = cluster.list_tasks(app_name)
    app_instances = len(list_tasks)
    cpus = CPUS
    mem = MEM
    disk = DISK
    instances = app_instances
    # NOTE(review): eval of a numeric expression built from trusted CLI
    # constants; kept as-is to preserve arbitrary scale_type support.
    if scale_param == "cpu":
        cpus = eval("{} {} {}".format(cpus, scale_type, scale))
    if scale_param == "mem":
        mem = eval("{} {} {}".format(mem, scale_type, scale))
    if scale_param == "disk":
        disk = eval("{} {} {}".format(disk, scale_type, scale))
    if scale_param == "instances":
        instances = eval("{} {} {}".format(instances, scale_type, scale))

    updated_app = MarathonApp(cpus=cpus, mem=mem, disk=disk,
                              instances=instances)
    start_time = time.time()
    cluster.update_app(app_id=app_name, app=updated_app)
    time.sleep(5)
    successful_instances, all_starting, app_full_time = \
        check_operation_status(start_time, app_name, instances,
                               timeout, list_tasks)
    return calculate_results_per_operation([app_name, app_instances,
                                            successful_instances,
                                            app_full_time,
                                            all_starting])


def delete_and_wait_app(app_name):
    """Delete an app and wait until no apps remain; returns timing dict
    with ``delete_time`` None on failure."""
    timeout = 600
    start_time = time.time()
    try:
        cluster.delete_app(app_id=app_name)
        while (len(cluster.list_apps()) > 0 and
               time.time() - start_time < timeout):
            time.sleep(0.01)
        logging.debug("[delete_and_wait_app] start time is {}".format(
            start_time))
        end_time = time.time() - start_time
        return {"app_name": app_name,
                "delete_time": round(end_time, 2)
                }
    except BaseException as ex:
        logging.error(ex)
        return {"app_name": app_name,
                "delete_time": None
                }


def create_and_delete_app(app_instances):
    """Create one app, wait for it, then force-delete it."""
    return create_app(app_instances, delete=True)


def create_several_apps(apps_amount, instances_amount):
    """Sequentially create ``apps_amount`` apps; returns their names."""
    all_apps = []
    if not args.silent:
        print("======= Creating {} applications, with {} instances "
              "===============".format(apps_amount, instances_amount))
        sys.stdout.write('Creating apps: ')
    for count in range(apps_amount):
        if not args.silent:
            # Progress: a dot per app, the count on every 10th.
            if count % 10:
                sys.stdout.write('.')
                sys.stdout.flush()
            else:
                sys.stdout.write(str(count))
                sys.stdout.flush()
        all_apps.append(
            create_app(app_instances=instances_amount,
                       need_statistics=False)["app_name"])
    if not args.silent:
        print(str(apps_amount))
    return all_apps


def create_app(app_instances, delete=False,
               timeout=1200, need_statistics=True):
    """Create one nginx app with ``app_instances`` tasks and wait for it.

    :param delete: force-delete the app after it is up.
    :param need_statistics: when True return the full timing dict,
        otherwise only ``{"app_name": ...}``.
    """
    port_mapping = MarathonContainerPortMapping(container_port=80,
                                                protocol="tcp")
    app_docker = MarathonDockerContainer(
        image="nginx",
        network="BRIDGE",
        port_mappings=[port_mapping])
    app_container = MarathonContainer(docker=app_docker)
    http_health_check = MarathonHealthCheck(protocol="HTTP",
                                            path="/",
                                            grace_period_seconds=300,
                                            interval_seconds=2,
                                            timeout_seconds=20,
                                            max_consecutive_failures=3)

    # md5() requires bytes; .encode() keeps this working on Python 3.
    app_name = hashlib.md5(str(random.random()).encode()).hexdigest()
    logging.debug("Create cluster {}".format(app_name))
    new_app = MarathonApp(cpus=CPUS, mem=MEM, disk=DISK,
                          container=app_container,
                          health_checks=[http_health_check],
                          instances=app_instances,
                          max_launch_delay_seconds=5)
    start_time = time.time()
    cluster.create_app(app_id=app_name,
                       app=new_app)
    logging.debug("Get tasks for cluster {}".format(app_name))
    successful_instances, all_starting, app_full_time = \
        check_operation_status(start_time, app_name, app_instances, timeout)
    if delete:
        logging.debug('Delete {}'.format(app_name))
        delete_app(app_name, force=True)
    if need_statistics:
        return {"app_name": app_name,
                "app_full_time": round(app_full_time, 2),
                "instances": app_instances,
                "successful_instances": successful_instances,
                "instances_mean": round(statistics.mean(all_starting), 2),
                "instances_median": round(statistics.median(all_starting), 2),
                "instances_min": round(min(all_starting), 2),
                "instances_max": round(max(all_starting), 2),
                "id_run": id_run}
    else:
        return {"app_name": app_name}


def concur_operations(function, arguments, concurrency):
    """Map a module-level callable (by name) over a repr'd argument list
    using ``concurrency`` pool workers; see the helper script twin."""
    pool = Pool(concurrency, init_worker)
    results = []
    try:
        # Resolve the function by name and parse the argument list with
        # ast.literal_eval instead of eval()-ing a constructed code string.
        results = pool.map(globals()[function], ast.literal_eval(arguments))
        pool.close()
        pool.join()
    except KeyboardInterrupt:
        print("Caught KeyboardInterrupt, terminating workers")
        pool.terminate()
        pool.join()
    return results


def concur_create_apps(concurrency, instances):
    """Concurrently create-and-delete ``concurrency`` apps."""
    if not args.silent:
        print("======= Creating applications with {} instances, "
              "concurrency is {} "
              "===============".format(instances, concurrency))
    list_instances = [instances] * concurrency
    return concur_operations("create_and_delete_app", str(list_instances),
                             concurrency)


def concur_restart_apps(concurrency, instances):
    """Create apps, restart them concurrently, then clean up."""
    apps = create_several_apps(concurrency, instances)
    if not args.silent:
        print("======= Restart applications with {} instances , "
              "concurrency is {} "
              "===============".format(instances, concurrency))
    results = concur_operations("restart_and_wait_app", str(apps), concurrency)
    for app in apps:
        delete_app(app, True)
    return results


def concur_update_app(update_type, concurrency, instances):
    """Create apps, apply one update type concurrently, then clean up."""
    apps = create_several_apps(concurrency, instances)
    if not args.silent:
        print("======= Update applications, concurrency is {} "
              "===============".format(concurrency))
    results = []
    if update_type == "cpu":
        results = concur_operations("update_and_wait_cpu", str(apps),
                                    concurrency)
    if update_type == "mem":
        results = concur_operations("update_and_wait_mem", str(apps),
                                    concurrency)
    if update_type == "disk":
        results = concur_operations("update_and_wait_disk", str(apps),
                                    concurrency)
    if update_type == "instances":
        results = concur_operations("update_and_wait_instances", str(apps),
                                    concurrency)
    for app in apps:
        delete_app(app, True)
    return results


def concur_delete_apps(concurrency, instances):
    """Create apps, then delete them concurrently."""
    apps = create_several_apps(concurrency, instances)
    if not args.silent:
        print("======= Delete applications with {}, concurrency is {} "
              "===============".format(instances, concurrency))
    results = concur_operations("delete_and_wait_app", str(apps), concurrency)
    return results


def calculate_summary(results):
    """Aggregate per-operation dicts into one summary dict.

    Discriminates by dict size: 10 keys (after print_results adds
    "type") means create/restart/update stats, otherwise delete stats.
    """
    if len(results[0]) == 10:
        max_tmp = []
        min_tmp = []
        successful_tmp = []
        mean_tmp = []
        median_tmp = []
        app_full_time_tmp = []
        for result in results:
            max_tmp.append(result["instances_max"])
            min_tmp.append(result["instances_min"])
            successful_tmp.append(result["successful_instances"])
            mean_tmp.append(result["instances_mean"])
            median_tmp.append(result["instances_median"])
            app_full_time_tmp.append(result["app_full_time"])
        sum_result = {
            "type": "summary",
            "instances_max": max(max_tmp),
            "instances_min": min(min_tmp),
            "instances_mean": round(statistics.mean(mean_tmp), 2),
            "instances_median": statistics.median(median_tmp),
            "app_full_time_max": max(app_full_time_tmp),
            "app_full_time_min": min(app_full_time_tmp),
            "app_full_time_mean": round(statistics.mean(app_full_time_tmp), 2),
            "app_full_time_median": statistics.median(app_full_time_tmp),
            "test": args.tests,
            "concurrency": args.concurrency,
            "nodes": args.nodes,
            "id_run": id_run
        }
    else:
        time_tmp = []
        delete_fails = 0
        for result in results:
            if result["delete_time"] is not None:
                time_tmp.append(result["delete_time"])
            else:
                delete_fails += 1
        sum_result = {
            "type": "summary",
            "delete_time_max": max(time_tmp),
            "delete_time_min": min(time_tmp),
            "delete_time_mean": round(statistics.mean(time_tmp), 2),
            "delete_time_median": round(statistics.median(time_tmp), 2),
            "delete_fails": delete_fails,
            "test": args.tests,
            "concurrency": args.concurrency,
            "nodes": args.nodes,
            "id_run": id_run
        }

    return sum_result


def print_results(results):
    """Tag results as "single", append the summary, print as JSON.

    Honors --only_summary (keep only the summary entry) and
    --pretty_output (indented JSON).
    """
    full_results = []
    for result in results:
        result["type"] = "single"
        full_results.append(result)
    full_results.append(calculate_summary(results))
    if args.only_summary:
        for result in full_results:
            if result["type"] == "summary":
                full_results = [result]
    if args.pretty_output:
        print(json.dumps(full_results, sort_keys=True,
                         indent=4, separators=(',', ': ')))
    else:
        # Trailing comma lets run_full_tests.sh concatenate runs into
        # one JSON array.
        print("{},".format(json.dumps(full_results, sort_keys=True)))


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    # NOTE(review): the 'all' choice is accepted but no dispatch branch
    # below handles it -- confirm whether it should run every test.
    parser.add_argument("-t", "--tests", help="Tests",
                        choices=['all', 'create', 'update_cpu',
                                 'update_mem', 'update_disk',
                                 'update_instances',
                                 'restart', 'delete'], required=True)
    parser.add_argument("-m", "--marathon",
                        help="Marathon URL, on example "
                             "http://172.20.8.34:8080/virt-env-2/marathon",
                        required=True)
    parser.add_argument("-c", "--concurrency",
                        help="Concurrency",
                        required=True)
    parser.add_argument("-n", "--nodes",
                        help="Number of tasks per application",
                        required=True)
    parser.add_argument("-l", "--log_level", help="logging level",
                        choices=['DEBUG', 'INFO', 'WARNING', 'ERROR',
                                 'CRITICAL'])
    parser.add_argument("-s", "--silent",
                        help="Print only results",
                        action="store_true")
    parser.add_argument("-o", "--only_summary",
                        help="Print only summary results",
                        action="store_true")
    parser.add_argument("-p", "--pretty_output",
                        help="Pretty json outpur",
                        action="store_true")
    args = parser.parse_args()

    cluster = MarathonClient(args.marathon)
    # One id per process run, attached to every create_app result.
    id_run = hashlib.md5(str(random.random()).encode()).hexdigest()

    if args.log_level is None:
        log_level = logging.DEBUG
    else:
        # Look the level up by name instead of eval()-ing a code string.
        log_level = getattr(logging, args.log_level)
    logging.basicConfig(
        filename="tests-debug.log",
        level=log_level,
        format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')

    if args.tests == "create":
        print_results(concur_create_apps(int(args.concurrency),
                                         int(args.nodes)))

    if args.tests == "restart":
        print_results(concur_restart_apps(int(args.concurrency),
                                          int(args.nodes)))
    if args.tests == "update_cpu":
        print_results(concur_update_app("cpu",
                                        int(args.concurrency),
                                        int(args.nodes)))

    if args.tests == "update_mem":
        print_results(concur_update_app("mem",
                                        int(args.concurrency),
                                        int(args.nodes)))

    if args.tests == "update_disk":
        print_results(concur_update_app("disk",
                                        int(args.concurrency),
                                        int(args.nodes)))
    if args.tests == "update_instances":
        print_results(concur_update_app("instances",
                                        int(args.concurrency),
                                        int(args.nodes)))

    if args.tests == "delete":
        print_results(concur_delete_apps(int(args.concurrency),
                                         int(args.nodes)))
"""Generate an RST report (table + bar-chart PNGs) from test results.

Reads the JSON file produced by run_full_tests.sh, prints an RST grid
table per test and saves one grouped bar chart per (test, concurrency).
"""
import argparse
import copy
import json

import matplotlib.pyplot as plt
import numpy as np

parser = argparse.ArgumentParser()
parser.add_argument("-r", "--results",
                    help="File with results",
                    required=True)
args = parser.parse_args()

with open(args.results) as data_file:
    data = json.load(data_file)


# Pre-build the empty result skeleton: test -> concurrency -> nodes -> [].
tmp_nodes = {}
for nodes in [50, 100, 500]:
    tmp_nodes[nodes] = []
tmp_concur = {}
for concur in [1, 2, 4, 8, 16]:
    tmp_concur[concur] = copy.deepcopy(tmp_nodes)
results_sum = {}
for tests in ["create", "update_cpu", "update_mem", "update_disk",
              "update_instances", "restart", "delete"]:
    results_sum[tests] = copy.deepcopy(tmp_concur)

# Keep only "summary" entries; store [min, max, mean, median] per cell.
for i in data:
    for j in i:
        if j["type"] == "summary":
            t_test = j["test"]
            t_concur = int(j["concurrency"])
            t_nodes = int(j["nodes"])
            if j["test"] != "delete":
                results_sum[t_test][t_concur][t_nodes] = [
                    j["app_full_time_min"], j["app_full_time_max"],
                    j["app_full_time_mean"], j["app_full_time_median"]]
            else:
                results_sum[t_test][t_concur][t_nodes] = [
                    j["delete_time_min"], j["delete_time_max"],
                    j["delete_time_mean"], j["delete_time_median"]]

for test in sorted(results_sum):
    graph_string = ""
    test_title = "Test {}".format(test)
    print(test_title)
    print("-" * len(test_title))
    print("+-------------+------------------------------+--------+--------"
          "+---------+--------+\n"
          "| CONCURRENCY | NODES_NUMBER_PER_APPLICATION | "
          "APPLICATION_OPERATION |\n"
          "|             |                              +--------+--------+"
          "---------+--------+\n"
          "|             |                              |minima  | maxima | "
          "average | median |\n"
          "+=============+==============================+========+========+="
          "========+========+")
    for concurrency in sorted(results_sum[test]):
        graph_max = ()
        graph_min = ()
        graph_mean = ()
        graph_median = ()
        for nodes in sorted(results_sum[test][concurrency]):
            if len(results_sum[test][concurrency][nodes]) > 0:
                print("|{:<13}|{:<30}|{:<8}|{:<8}|{:<9}|{:<8}|".format(
                    concurrency, nodes,
                    results_sum[test][concurrency][nodes][0],
                    results_sum[test][concurrency][nodes][1],
                    results_sum[test][concurrency][nodes][2],
                    results_sum[test][concurrency][nodes][3],
                ))
                print("+-------------+------------------------------+--------"
                      "+--------+---------+--------+")
                graph_min += (results_sum[test][concurrency][nodes][0],)
                graph_max += (results_sum[test][concurrency][nodes][1],)
                graph_mean += (results_sum[test][concurrency][nodes][2],)
                graph_median += (results_sum[test][concurrency][nodes][3],)

        # Plot only when all three node counts (50/100/500) have data.
        # Fixed: the original checked bare len(graph_mean) instead of == 3.
        if (len(graph_max) == 3 and len(graph_min) == 3 and
                len(graph_mean) == 3 and len(graph_median) == 3):
            fig, ax = plt.subplots()
            n_groups = 3
            plt.subplot()
            index = np.arange(n_groups)
            bar_width = 0.15
            opacity = 0.4
            plt.bar(index, graph_min, bar_width,
                    alpha=opacity,
                    color='g',
                    label='Min')
            # Fixed: 'Mean'/'Median' labels were swapped relative to the
            # data series they plot.
            plt.bar(index + bar_width, graph_mean, bar_width,
                    alpha=opacity,
                    color='y',
                    label='Mean')
            plt.bar(index + bar_width*2, graph_median, bar_width,
                    alpha=opacity,
                    color='b',
                    label='Median')
            plt.bar(index + bar_width*3, graph_max, bar_width,
                    alpha=opacity,
                    color='r',
                    label='Max')

            plt.xlabel('Nodes')
            plt.ylabel('Seconds')
            plt.title('Test {}'.format(test))
            plt.xticks(index + bar_width*2, ('50', '100', '500'))
            plt.legend(loc=0)
            plt.tight_layout()
            pic_file_name = "{}-{}.png".format(test, concurrency)
            plt.savefig(pic_file_name)
            graph_string = ("{0}\nGraph for test {2}, "
                            "concurrency {3}\n"
                            "\n.. image:: {1}\n"
                            "   :alt: Graph for test {2}, "
                            "concurrency {3}\n\n".format(
                                graph_string, pic_file_name,
                                test, concurrency))
            plt.close()
    print(graph_string)
#!/bin/bash
# Run the full Marathon/Mesos scale-test matrix and collect one JSON
# array of results in marathon-mesos-test-results-<timestamp>.json.
DATE=$(date +%Y-%m-%d-%H-%M-%S)
FILE_RESULTS="marathon-mesos-test-results-$DATE"
MARATHON_URL="http://127.0.0.1:8080/marathon"

TOP_DIR=$(cd "$(dirname "$0")" && pwd)
cd "${TOP_DIR}" || exit 1

# Self-contained environment for the python test scripts.
virtualenv .venv
VPYTHON=".venv/bin/python"
.venv/bin/pip install -r requirements.txt

echo "[" > "${FILE_RESULTS}.json"
for test in create update_cpu update_mem update_disk update_instances restart delete; do
    for concur in 1 2 4 8 16; do
        for nodes in 50 100 500; do
            echo "$(date) - Start test $test with concurrency $concur with $nodes nodes"
            # -s makes the script emit only the JSON result line
            # (with a trailing comma, stripped for the last entry below).
            $VPYTHON marathon-scale-tests.py -m "$MARATHON_URL" -t"${test}" -c"${concur}" -n"${nodes}" -s >> "${FILE_RESULTS}.json"
            # If something wrong, clean all
            sleep 30
            $VPYTHON application_managment_helper.py -m "$MARATHON_URL" -edelete
        done
    done
done
# Drop the trailing comma of the last result, then close the JSON array.
sed -i '$ s/.$//' "${FILE_RESULTS}.json"
echo "]" >> "${FILE_RESULTS}.json"