diff --git a/performa/engine/aggregator.py b/performa/engine/aggregator.py index 3992d36..7d74a7c 100644 --- a/performa/engine/aggregator.py +++ b/performa/engine/aggregator.py @@ -60,6 +60,8 @@ def aggregate(scenario, mongo_url, db_name, tag): ] series_pipeline.extend(values_pipeline) + LOG.debug('Running series pipeline: %s', series_pipeline) + point = next(series_collection.aggregate(series_pipeline)) del point['_id'] # to avoid overwriting rec.update(point) diff --git a/performa/engine/player.py b/performa/engine/player.py index 161b494..e935a64 100644 --- a/performa/engine/player.py +++ b/performa/engine/player.py @@ -37,10 +37,12 @@ def _pick_tasks(tasks, matrix): def play_setup(runner, setup_playbook): + LOG.info('Running setup') runner.run(setup_playbook) def play_execution(runner, execution_playbook): + LOG.info('Running execution') records = [] series = [] diff --git a/performa/engine/report.py b/performa/engine/report.py index e78a3ff..0f0769f 100644 --- a/performa/engine/report.py +++ b/performa/engine/report.py @@ -40,6 +40,10 @@ def generate_chart(chart_str, records_collection, doc_folder, tag): fill = chart.get('fill') or False axes = chart.get('axes') or dict(x='x', y='y') + LOG.debug('Title: %s', title) + + pipeline.insert(0, {'$match': {'status': 'OK'}}) + if tag: pipeline.insert(0, {'$match': {'tag': tag}}) @@ -62,7 +66,7 @@ def generate_chart(chart_str, records_collection, doc_folder, tag): for k in y_keys: lines[k].append((chart_rec['x'], chart_rec[k])) table += (' *\n' + - '\n'.join(' - %d' % chart_rec[v] + '\n'.join(' - %.1f' % chart_rec[v] for v in sorted(axes.keys())) + '\n') @@ -72,6 +76,8 @@ def generate_chart(chart_str, records_collection, doc_folder, tag): include_x_axis=True, x_title=axes['x']) + LOG.debug('Lines: %s', lines) + for k in y_keys: xy_chart.add(axes[k], lines[k]) diff --git a/performa/modules/omsimulator.py b/performa/modules/omsimulator.py index b681598..1925eda 100644 --- a/performa/modules/omsimulator.py +++ 
b/performa/modules/omsimulator.py @@ -10,6 +10,7 @@ SERVER_FILE_NAME = os.path.join(tempfile.gettempdir(), 'performa.oms.srv') CLIENT_FILE_NAME = os.path.join(tempfile.gettempdir(), 'performa.oms.cln') UNIQUE_NAME = 'performa_omsimulator' DIR = '/tmp/performa/oslo.messaging/tools/' +PYTHON = '/tmp/performa/oslo.messaging/.venv/bin/python' PATTERNS = [ r'(?P\d+) messages were sent for (?P\d+) sec', @@ -49,7 +50,7 @@ def chdir(module): def start_daemon(module, cmd): - cmd = ('daemon -n %(name)s -D %(dir)s -F %(pid)s -- %(cmd)s' % + cmd = ('daemon -n %(name)s -D %(dir)s -F %(pid)s -U -- %(cmd)s' % dict(name=UNIQUE_NAME, dir=DIR, pid=SERVER_PID, cmd=cmd)) rc, stdout, stderr = module.run_command(cmd) @@ -102,16 +103,17 @@ def run(module): server_tool = 'rpc-server' client_tool = 'rpc-client' + params['python'] = PYTHON params['server_tool'] = server_tool params['client_tool'] = client_tool params['server_file'] = SERVER_FILE_NAME params['client_file'] = CLIENT_FILE_NAME - server = ('python simulator.py ' + server = ('%(python)s simulator.py ' '--url %(url)s ' '--json %(server_file)s ' '%(server_tool)s ') % params - client = ('python simulator.py ' + client = ('%(python)s simulator.py ' '--url=%(url)s ' '--json %(client_file)s ' '-l %(duration)s ' @@ -139,14 +141,22 @@ def run(module): server_data = read_file(SERVER_FILE_NAME) client_summary = client_data['summary']['client'] - client_summary['component'] = 'client' + + record = dict(start=client_summary['start'], + end=client_summary['end'], + client=client_summary) + + if 'round_trip' in client_data['summary']: + round_trip_summary = client_data['summary']['round_trip'] + record['round_trip'] = round_trip_summary + server_summary = server_data['summary'] - server_summary['component'] = 'server' + record['server'] = server_summary series = transform_series(client_data['series']) series.extend(transform_series(server_data['series'])) - result = dict(records=[client_summary, server_summary], series=series) + result = 
dict(records=[record], series=series) module.exit_json(**result) except Exception as e: msg = 'Failed to read omsimulator output: %s' % e diff --git a/performa/scenarios/mq/omsimulator-threading.rst b/performa/scenarios/mq/omsimulator-threading.rst new file mode 100644 index 0000000..3d8cd46 --- /dev/null +++ b/performa/scenarios/mq/omsimulator-threading.rst @@ -0,0 +1,211 @@ +Oslo.messaging simulator report +------------------------------- + +This report shows how many concurrent threads can handle a single +oslo.messaging process. + +Test Case 1: RPC CALL Throughput Test +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +**Message processing** + +Messages are collected at 3 points: ``sent`` - messages sent by the client, +``received`` - messages received by the server, ``round-trip`` - replies +received by the client. Also the number of lost messages is calculated. + +{{''' + title: RPC CALL Message count + axes: + x: threads + y: sent, msg + y2: received, msg + y3: round-trip, msg + y4: lost, msg + chart: line + pipeline: + - { $match: { task: omsimulator, mode: call }} + - { $group: { _id: { threads: { $multiply: [ "$threads", "$host_count" ] } }, + sent: { $avg: "$client.count" }, + received: { $avg: "$server.count" }, + round_trip: { $avg: "$round_trip.count" }, + lost: { $avg: { $subtract: ["$client.count", "$round_trip.count"] }} + }} + - { $project: { x: "$_id.threads", + y: "$sent", + y2: "$received", + y3: "$round_trip", + y4: "$lost" + }} + - { $sort: { x: 1 }} +''' | chart +}} + + +**Message throughput, latency and RabbitMQ CPU utilization depending on thread count** + +The chart shows the throughput, latency and CPU utilization by RabbitMQ server +depending on number of concurrent threads. 
+ +{{''' + title: RPC CALL throughput, latency and RabbitMQ CPU utilization depending on thread count + axes: + x: threads + y: sent, msg/sec + y2: received, msg/sec + y3: round-trip, msg/sec + y4: latency, ms + y5: RabbitMQ CPU consumption, % + chart: line + pipeline: + - { $match: { task: omsimulator, mode: call }} + - { $group: { _id: { threads: { $multiply: [ "$threads", "$host_count" ] } }, + msg_sent_per_sec: { $avg: { $divide: ["$client.count", "$client.duration"] }}, + msg_received_per_sec: { $avg: { $divide: ["$server.count", "$server.duration"] }}, + msg_round_trip_per_sec: { $avg: { $divide: ["$round_trip.count", "$round_trip.duration"] }}, + latency: { $avg: "$round_trip.latency" }, + rabbit_total: { $avg: "$rabbit_total" } + }} + - { $project: { x: "$_id.threads", + y: "$msg_sent_per_sec", + y2: "$msg_received_per_sec", + y3: "$msg_round_trip_per_sec", + y4: { $multiply: [ "$latency", 1000 ] }, + y5: { $multiply: [ "$rabbit_total", 100 ] } + }} + - { $sort: { x: 1 }} +''' | chart +}} + + +Test Case 2: RPC CAST Throughput Test +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +**Message processing** + +Messages are collected at 2 points: ``sent`` - messages sent by the client +and ``received`` - messages received by the server. Also the number of lost +messages is calculated. 
+ +{{''' + title: RPC CAST Message count + axes: + x: threads + y: sent, msg + y2: received, msg + y3: lost, msg + chart: line + pipeline: + - { $match: { task: omsimulator, mode: cast }} + - { $group: { _id: { threads: { $multiply: [ "$threads", "$host_count" ] } }, + sent: { $avg: "$client.count" }, + received: { $avg: "$server.count" }, + lost: { $avg: { $subtract: ["$client.count", "$server.count"] }} + }} + - { $project: { x: "$_id.threads", + y: "$sent", + y2: "$received", + y3: "$lost" + }} + - { $sort: { x: 1 }} +''' | chart +}} + + +**Message throughput, latency and RabbitMQ CPU utilization depending on thread count** + +The chart shows the throughput, latency and CPU utilization by RabbitMQ server +depending on number of concurrent threads. + +{{''' + title: RPC CAST throughput, latency and RabbitMQ CPU utilization depending on thread count + axes: + x: threads + y: sent, msg/sec + y2: received, msg/sec + y3: latency, ms + y4: RabbitMQ CPU consumption, % + chart: line + pipeline: + - { $match: { task: omsimulator, mode: cast }} + - { $group: { _id: { threads: { $multiply: [ "$threads", "$host_count" ] } }, + msg_sent_per_sec: { $avg: { $divide: ["$client.count", "$client.duration"] }}, + msg_received_per_sec: { $avg: { $divide: ["$server.count", "$server.duration"] }}, + latency: { $avg: "$server.latency" }, + rabbit_total: { $avg: "$rabbit_total" } + }} + - { $project: { x: "$_id.threads", + y: "$msg_sent_per_sec", + y2: "$msg_received_per_sec", + y3: { $multiply: [ "$latency", 1000 ] }, + y4: { $multiply: [ "$rabbit_total", 100 ] } + }} + - { $sort: { x: 1 }} +''' | chart +}} + + +Test Case 3: Notification Throughput Test +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +**Message processing** + +Messages are collected at 2 points: ``sent`` - messages sent by the client +and ``received`` - messages received by the server. Also the number of lost +messages is calculated. 
+ +{{''' +    title: NOTIFY Message count +    axes: +      x: threads +      y: sent, msg +      y2: received, msg +      y3: lost, msg +    chart: line +    pipeline: +    - { $match: { task: omsimulator, mode: notify }} +    - { $group: { _id: { threads: { $multiply: [ "$threads", "$host_count" ] } }, +                  sent: { $avg: "$client.count" }, +                  received: { $avg: "$server.count" }, +                  lost: { $avg: { $subtract: ["$client.count", "$server.count"] }} +       }} +    - { $project: { x: "$_id.threads", +                    y: "$sent", +                    y2: "$received", +                    y3: "$lost" +       }} +    - { $sort: { x: 1 }} +''' | chart +}} + + +**Message throughput, latency and RabbitMQ CPU utilization depending on thread count** + +The chart shows the throughput, latency and CPU utilization by RabbitMQ server +depending on number of concurrent threads. + +{{''' +    title: NOTIFY throughput, latency and RabbitMQ CPU utilization depending on thread count +    axes: +      x: threads +      y: sent, msg/sec +      y2: received, msg/sec +      y3: latency, ms +      y4: RabbitMQ CPU consumption, % +    chart: line +    pipeline: +    - { $match: { task: omsimulator, mode: notify }} +    - { $group: { _id: { threads: { $multiply: [ "$threads", "$host_count" ] } }, +                  msg_sent_per_sec: { $avg: { $divide: ["$client.count", "$client.duration"] }}, +                  msg_received_per_sec: { $avg: { $divide: ["$server.count", "$server.duration"] }}, +                  latency: { $avg: "$server.latency" }, +                  rabbit_total: { $avg: "$rabbit_total" } +       }} +    - { $project: { x: "$_id.threads", +                    y: "$msg_sent_per_sec", +                    y2: "$msg_received_per_sec", +                    y3: { $multiply: [ "$latency", 1000 ] }, +                    y4: { $multiply: [ "$rabbit_total", 100 ] } +       }} +    - { $sort: { x: 1 }} +''' | chart +}} diff --git a/performa/scenarios/mq/omsimulator-threading.yaml b/performa/scenarios/mq/omsimulator-threading.yaml new file mode 100644 index 0000000..6032bfd --- /dev/null +++ b/performa/scenarios/mq/omsimulator-threading.yaml @@ -0,0 +1,92 @@ +title: OMSimulator + +description: +    This scenario uses oslo.messaging simulator tool to execute MQ test plan. 
+ +parameters: +  tester_hosts: List of hosts where omsimulator will be executed +  rabbit_hosts: List of hosts where RabbitMQ runs +  rabbit_url: RabbitMQ address + +setup: +  - +    hosts: {{ tester_hosts }} +    tasks: +      - apt: name=git +        become: yes +      - apt: name=daemon +        become: yes +      - name: installing omsimulator +        git: repo=git://git.openstack.org/openstack/oslo.messaging +             dest=/tmp/performa/oslo.messaging +      - apt: name=python-dev +        become: yes +      - apt: name=python-pip +        become: yes +      - pip: name=virtualenv +        become: yes +      - pip: requirements=/tmp/performa/oslo.messaging/requirements.txt virtualenv=/tmp/performa/oslo.messaging/.venv +      - pip: name=eventlet virtualenv=/tmp/performa/oslo.messaging/.venv +      - command: /tmp/performa/oslo.messaging/.venv/bin/python setup.py install +        args: +          chdir: /tmp/performa/oslo.messaging +  - +    hosts: {{ rabbit_hosts }} +    tasks: +      - apt: name=atop +        become: yes +      - apt: name=daemon +        become: yes + + +execution: +  - +    hosts: {{ rabbit_hosts }} +    tasks: +      - atop: command=start +  - +    hosts: {{ tester_hosts }} +    matrix: +      threads: [ 1, 2, 5, 10, 20, 50, 100 ] +    tasks: +      - omsimulator: +          mode: call +          duration: 100 +          url: {{ rabbit_url }} +  - +    hosts: {{ tester_hosts }} +    matrix: +      threads: [ 1, 2, 5, 10, 20, 50, 100 ] +    tasks: +      - omsimulator: +          mode: cast +          duration: 100 +          url: {{ rabbit_url }} +  - +    hosts: {{ tester_hosts }} +    matrix: +      threads: [ 1, 2, 5, 10, 20, 50, 100 ] +    tasks: +      - omsimulator: +          mode: notify +          duration: 100 +          url: {{ rabbit_url }} +  - +    hosts: {{ rabbit_hosts }} +    tasks: +      - atop: +          command: stop +          labels: [ PRC ] + +aggregation: +  - +    update: +      query: +        { task: omsimulator } +      values: +        pipeline: +        - { $match: { task: atop, status: OK, label: PRC, name: { $regex: beam.* } }} +        - { $group: { _id: null, rabbit_sys: { $avg: "$sys" }, rabbit_user: { $avg: "$user" }, rabbit_total: { $avg: { $add: [ "$sys", "$user" ] }} }} + +report: +  template: omsimulator-threading.rst diff --git a/performa/scenarios/mq/omsimulator.rst 
b/performa/scenarios/mq/omsimulator.rst index 51b7678..695a9b2 100644 --- a/performa/scenarios/mq/omsimulator.rst +++ b/performa/scenarios/mq/omsimulator.rst @@ -7,20 +7,52 @@ This is the report of execution test plan Results ^^^^^^^ -Messages per second depending on threads count: +**Message processing** + +The chart and table show the number of messages processed by a single process +depending on number of eventlet threads inside of it. Messages are collected +at 3 points: ``sent`` - messages sent by the client, ``received`` - messages +received by the server, ``round-trip`` - replies received by the client. {{''' - title: Messages per second + title: Throughput axes: x: threads - y: messages per sec - y2: latency + y: sent, msg + y2: received, msg + y3: round-trip, msg chart: line pipeline: - - { $match: { task: omsimulator, status: OK }} + - { $match: { task: omsimulator, component: client }} + - { $group: { _id: { threads: { $multiply: [ "$threads", "$host_count" ] } }, + sent: { $avg: "$client.count" }, + received: { $avg: "$server.count" }, + round_trip: { $avg: "$round_trip.count" } + }} + - { $project: { x: "$_id.threads", + y: "$sent", + y2: "$received", + y3: "$round_trip" + }} + - { $sort: { x: 1 }} +''' | chart +}} + + +**Message throughput, latency depending on thread count** + +{{''' + title: Throughput, latency depending on thread count + axes: + x: threads + y: throughput, msg/sec + y2: latency, ms + chart: line + pipeline: + - { $match: { task: omsimulator, component: client }} - { $group: { _id: { threads: { $multiply: [ "$threads", "$host_count" ] } }, msg_sent_per_sec: { $avg: { $divide: ["$count", "$duration"] }}, - latency: { $avg: "$latency" } + latency: { $avg: "$latency" }, }} - { $project: { x: "$_id.threads", y: "$msg_sent_per_sec", @@ -40,7 +72,7 @@ Messages per second and rabbit CPU consumption depending on threads count: y2: rabbit CPU consumption, % chart: line pipeline: - - { $match: { task: omsimulator, status: OK }} + - { $match: 
{ task: omsimulator }} - { $group: { _id: { threads: { $multiply: [ "$threads", "$host_count" ] } }, msg_sent_per_sec: { $avg: { $divide: ["$count", "$duration"] }}, rabbit_total: { $avg: "$rabbit_total" } @@ -61,7 +93,7 @@ Messages per second and rabbit CPU consumption depending on threads count: y: latency chart: line pipeline: - - { $match: { task: omsimulator, status: OK }} + - { $match: { task: omsimulator }} - { $group: { _id: { threads: { $multiply: [ "$threads", "$host_count" ] } }, msg_sent_per_sec: { $avg: { $divide: ["$count", "$duration"] }}, latency: { $avg: "$latency" } diff --git a/performa/scenarios/mq/omsimulator.yaml b/performa/scenarios/mq/omsimulator.yaml index f9fd79a..7a236db 100644 --- a/performa/scenarios/mq/omsimulator.yaml +++ b/performa/scenarios/mq/omsimulator.yaml @@ -15,6 +15,9 @@ setup: # - name: installing omsimulator # git: repo=git://git.openstack.org/openstack/oslo.messaging # dest=/tmp/performa/oslo.messaging +# - name: installing omsimulator +# git: repo=git://git.openstack.org/openstack/oslo.messaging +# dest=/tmp/performa/oslo.messaging # - command: git fetch https://review.openstack.org/openstack/oslo.messaging refs/changes/91/291191/2 # args: # chdir: /tmp/performa/oslo.messaging @@ -43,12 +46,13 @@ execution: - hosts: {{ target }} matrix: - host_count: [ 1, 2 ] + host_count: [ 1 ] + threads: [ 1, 10, 100 ] tasks: - omsimulator: mode: call duration: 10 - threads: 1 + threads: 10 url: {{ rabbit_url }} - hosts: {{ target }}