
Add scenario MQ/threading

Ilya Shakhat committed 3 years ago · commit 95761686cd

+2 -0  performa/engine/aggregator.py

@@ -60,6 +60,8 @@ def aggregate(scenario, mongo_url, db_name, tag):
                     ]
                     series_pipeline.extend(values_pipeline)
 
+                    LOG.debug('Running series pipeline: %s', series_pipeline)
+
                     point = next(series_collection.aggregate(series_pipeline))
                     del point['_id']  # to avoid overwriting
                     rec.update(point)

+2 -0  performa/engine/player.py

@@ -37,10 +37,12 @@ def _pick_tasks(tasks, matrix):
 
 
 def play_setup(runner, setup_playbook):
+    LOG.info('Running setup')
     runner.run(setup_playbook)
 
 
 def play_execution(runner, execution_playbook):
+    LOG.info('Running execution')
     records = []
     series = []
 

+7 -1  performa/engine/report.py

@@ -40,6 +40,10 @@ def generate_chart(chart_str, records_collection, doc_folder, tag):
     fill = chart.get('fill') or False
     axes = chart.get('axes') or dict(x='x', y='y')
 
+    LOG.debug('Title: %s', title)
+
+    pipeline.insert(0, {'$match': {'status': 'OK'}})
+
     if tag:
         pipeline.insert(0, {'$match': {'tag': tag}})
 
@@ -62,7 +66,7 @@ def generate_chart(chart_str, records_collection, doc_folder, tag):
         for k in y_keys:
             lines[k].append((chart_rec['x'], chart_rec[k]))
         table += ('   *\n' +
-                  '\n'.join('     - %d' % chart_rec[v]
+                  '\n'.join('     - %.1f' % chart_rec[v]
                             for v in sorted(axes.keys())) +
                   '\n')
 
@@ -72,6 +76,8 @@ def generate_chart(chart_str, records_collection, doc_folder, tag):
                         include_x_axis=True,
                         x_title=axes['x'])
 
+    LOG.debug('Lines: %s', lines)
+
     for k in y_keys:
         xy_chart.add(axes[k], lines[k])
 

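The report change above always prepends a ``$match`` on ``status: 'OK'`` and then, when a tag is given, prepends the tag filter in front of it. A minimal sketch of the resulting stage order, using a hypothetical chart pipeline and tag value (only the insert order mirrors the patch):

```python
# Hypothetical chart pipeline; the two inserts mirror generate_chart().
pipeline = [
    {'$group': {'_id': '$threads', 'y': {'$avg': '$count'}}},
]

pipeline.insert(0, {'$match': {'status': 'OK'}})   # always prepended first
tag = 'run-42'                                     # assumed example tag
if tag:
    pipeline.insert(0, {'$match': {'tag': tag}})   # ends up ahead of the status match

# Resulting order: tag match, status match, then the chart's own stages.
assert [list(stage)[0] for stage in pipeline] == ['$match', '$match', '$group']
```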
+16 -6  performa/modules/omsimulator.py

@@ -10,6 +10,7 @@ SERVER_FILE_NAME = os.path.join(tempfile.gettempdir(), 'performa.oms.srv')
 CLIENT_FILE_NAME = os.path.join(tempfile.gettempdir(), 'performa.oms.cln')
 UNIQUE_NAME = 'performa_omsimulator'
 DIR = '/tmp/performa/oslo.messaging/tools/'
+PYTHON = '/tmp/performa/oslo.messaging/.venv/bin/python'
 
 PATTERNS = [
     r'(?P<msg_sent>\d+) messages were sent for (?P<duration>\d+) sec',
@@ -49,7 +50,7 @@ def chdir(module):
 
 
 def start_daemon(module, cmd):
-    cmd = ('daemon -n %(name)s -D %(dir)s -F %(pid)s -- %(cmd)s' %
+    cmd = ('daemon -n %(name)s -D %(dir)s -F %(pid)s -U -- %(cmd)s' %
           dict(name=UNIQUE_NAME, dir=DIR, pid=SERVER_PID, cmd=cmd))
 
     rc, stdout, stderr = module.run_command(cmd)
@@ -102,16 +103,17 @@ def run(module):
         server_tool = 'rpc-server'
         client_tool = 'rpc-client'
 
+    params['python'] = PYTHON
     params['server_tool'] = server_tool
     params['client_tool'] = client_tool
     params['server_file'] = SERVER_FILE_NAME
     params['client_file'] = CLIENT_FILE_NAME
 
-    server = ('python simulator.py '
+    server = ('%(python)s simulator.py '
              '--url %(url)s '
              '--json %(server_file)s '
             '%(server_tool)s ') % params
-    client = ('python simulator.py '
+    client = ('%(python)s simulator.py '
             '--url=%(url)s '
             '--json %(client_file)s '
             '-l %(duration)s '
@@ -139,14 +141,22 @@ def run(module):
         server_data = read_file(SERVER_FILE_NAME)
 
         client_summary = client_data['summary']['client']
-        client_summary['component'] = 'client'
+
+        record = dict(start=client_summary['start'],
+                      end=client_summary['end'],
+                      client=client_summary)
+
+        if 'round_trip' in client_data['summary']:
+            round_trip_summary = client_data['summary']['round_trip']
+            record['round_trip'] = round_trip_summary
+
         server_summary = server_data['summary']
-        server_summary['component'] = 'server'
+        record['server'] = server_summary
 
         series = transform_series(client_data['series'])
         series.extend(transform_series(server_data['series']))
 
-        result = dict(records=[client_summary, server_summary], series=series)
+        result = dict(records=[record], series=series)
         module.exit_json(**result)
     except Exception as e:
         msg = 'Failed to read omsimulator output: %s' % e

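After this change the module emits one record per run that nests the client, server and (for RPC CALL) round-trip summaries, instead of two flat per-component records; the report pipelines below address it as ``$client.count``, ``$server.duration``, ``$round_trip.latency`` and so on. A minimal sketch of the resulting document shape (field values are illustrative, not from a real run):

```python
# Illustrative summaries as read from the simulator JSON output files.
client_summary = {'start': 1458000000.0, 'end': 1458000100.0,
                  'count': 12000, 'duration': 100.0, 'latency': 0.004}
server_summary = {'count': 12000, 'duration': 100.0}
round_trip_summary = {'count': 12000, 'duration': 100.0, 'latency': 0.008}

# Same nesting as the patched run(): one record carrying all three summaries.
record = dict(start=client_summary['start'],
              end=client_summary['end'],
              client=client_summary,
              server=server_summary)
record['round_trip'] = round_trip_summary  # only present for RPC CALL runs

# The report pipelines reference these nested fields, e.g. "$client.count".
print(record['client']['count'], record['round_trip']['latency'])
```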
+211 -0  performa/scenarios/mq/omsimulator-threading.rst

@@ -0,0 +1,211 @@
+Oslo.messaging simulator report
+-------------------------------
+
+This report shows how many concurrent threads a single oslo.messaging
+process can handle.
+
+Test Case 1: RPC CALL Throughput Test
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+**Message processing**
+
+Messages are collected at 3 points: ``sent`` - messages sent by the client,
+``received`` - messages received by the server, ``round-trip`` - replies
+received by the client. The number of lost messages is also calculated.
+
+{{'''
+    title: RPC CALL Message count
+    axes:
+      x: threads
+      y: sent, msg
+      y2: received, msg
+      y3: round-trip, msg
+      y4: lost, msg
+    chart: line
+    pipeline:
+    - { $match: { task: omsimulator, mode: call }}
+    - { $group: { _id: { threads: { $multiply: [ "$threads", "$host_count" ] } },
+                  sent: { $avg: "$client.count" },
+                  received: { $avg: "$server.count" },
+                  round_trip: { $avg: "$round_trip.count" },
+                  lost: { $avg: { $subtract: ["$client.count", "$round_trip.count"] }}
+                }}
+    - { $project: { x: "$_id.threads",
+                    y: "$sent",
+                    y2: "$received",
+                    y3: "$round_trip",
+                    y4: "$lost"
+                  }}
+    - { $sort: { x: 1 }}
+''' | chart
+}}
+
+
+**Message throughput, latency and RabbitMQ CPU utilization depending on thread count**
+
+The chart shows the throughput, latency and CPU utilization of the RabbitMQ
+server depending on the number of concurrent threads.
+
+{{'''
+    title: RPC CALL throughput, latency and RabbitMQ CPU utilization depending on thread count
+    axes:
+      x: threads
+      y: sent, msg/sec
+      y2: received, msg/sec
+      y3: round-trip, msg/sec
+      y4: latency, ms
+      y5: RabbitMQ CPU consumption, %
+    chart: line
+    pipeline:
+    - { $match: { task: omsimulator, mode: call }}
+    - { $group: { _id: { threads: { $multiply: [ "$threads", "$host_count" ] } },
+                  msg_sent_per_sec: { $avg: { $divide: ["$client.count", "$client.duration"] }},
+                  msg_received_per_sec: { $avg: { $divide: ["$server.count", "$server.duration"] }},
+                  msg_round_trip_per_sec: { $avg: { $divide: ["$round_trip.count", "$round_trip.duration"] }},
+                  latency: { $avg: "$round_trip.latency" },
+                  rabbit_total: { $avg: "$rabbit_total" }
+                }}
+    - { $project: { x: "$_id.threads",
+                    y: "$msg_sent_per_sec",
+                    y2: "$msg_received_per_sec",
+                    y3: "$msg_round_trip_per_sec",
+                    y4: { $multiply: [ "$latency", 1000 ] },
+                    y5: { $multiply: [ "$rabbit_total", 100 ] }
+                  }}
+    - { $sort: { x: 1 }}
+''' | chart
+}}
+
+
+Test Case 2: RPC CAST Throughput Test
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+**Message processing**
+
+Messages are collected at 2 points: ``sent`` - messages sent by the client
+and ``received`` - messages received by the server. The number of lost
+messages is also calculated.
+
+{{'''
+    title: RPC CAST Message count
+    axes:
+      x: threads
+      y: sent, msg
+      y2: received, msg
+      y3: lost, msg
+    chart: line
+    pipeline:
+    - { $match: { task: omsimulator, mode: cast }}
+    - { $group: { _id: { threads: { $multiply: [ "$threads", "$host_count" ] } },
+                  sent: { $avg: "$client.count" },
+                  received: { $avg: "$server.count" },
+                  lost: { $avg: { $subtract: ["$client.count", "$server.count"] }}
+                }}
+    - { $project: { x: "$_id.threads",
+                    y: "$sent",
+                    y2: "$received",
+                    y3: "$lost"
+                  }}
+    - { $sort: { x: 1 }}
+''' | chart
+}}
+
+
+**Message throughput, latency and RabbitMQ CPU utilization depending on thread count**
+
+The chart shows the throughput, latency and CPU utilization of the RabbitMQ
+server depending on the number of concurrent threads.
+
+{{'''
+    title: RPC CAST throughput, latency and RabbitMQ CPU utilization depending on thread count
+    axes:
+      x: threads
+      y: sent, msg/sec
+      y2: received, msg/sec
+      y3: latency, ms
+      y4: RabbitMQ CPU consumption, %
+    chart: line
+    pipeline:
+    - { $match: { task: omsimulator, mode: cast }}
+    - { $group: { _id: { threads: { $multiply: [ "$threads", "$host_count" ] } },
+                  msg_sent_per_sec: { $avg: { $divide: ["$client.count", "$client.duration"] }},
+                  msg_received_per_sec: { $avg: { $divide: ["$server.count", "$server.duration"] }},
+                  latency: { $avg: "$server.latency" },
+                  rabbit_total: { $avg: "$rabbit_total" }
+                }}
+    - { $project: { x: "$_id.threads",
+                    y: "$msg_sent_per_sec",
+                    y2: "$msg_received_per_sec",
+                    y3: { $multiply: [ "$latency", 1000 ] },
+                    y4: { $multiply: [ "$rabbit_total", 100 ] }
+                  }}
+    - { $sort: { x: 1 }}
+''' | chart
+}}
+
+
+Test Case 3: Notification Throughput Test
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+**Message processing**
+
+Messages are collected at 2 points: ``sent`` - messages sent by the client
+and ``received`` - messages received by the server. The number of lost
+messages is also calculated.
+
+{{'''
+    title: NOTIFY Message count
+    axes:
+      x: threads
+      y: sent, msg
+      y2: received, msg
+      y3: lost, msg
+    chart: line
+    pipeline:
+    - { $match: { task: omsimulator, mode: notify }}
+    - { $group: { _id: { threads: { $multiply: [ "$threads", "$host_count" ] } },
+                  sent: { $avg: "$client.count" },
+                  received: { $avg: "$server.count" },
+                  lost: { $avg: { $subtract: ["$client.count", "$server.count"] }}
+                }}
+    - { $project: { x: "$_id.threads",
+                    y: "$sent",
+                    y2: "$received",
+                    y3: "$lost"
+                  }}
+    - { $sort: { x: 1 }}
+''' | chart
+}}
+
+
+**Message throughput, latency and RabbitMQ CPU utilization depending on thread count**
+
+The chart shows the throughput, latency and CPU utilization of the RabbitMQ
+server depending on the number of concurrent threads.
+
+{{'''
+    title: NOTIFY throughput, latency and RabbitMQ CPU utilization depending on thread count
+    axes:
+      x: threads
+      y: sent, msg/sec
+      y2: received, msg/sec
+      y3: latency, ms
+      y4: RabbitMQ CPU consumption, %
+    chart: line
+    pipeline:
+    - { $match: { task: omsimulator, mode: notify }}
+    - { $group: { _id: { threads: { $multiply: [ "$threads", "$host_count" ] } },
+                  msg_sent_per_sec: { $avg: { $divide: ["$client.count", "$client.duration"] }},
+                  msg_received_per_sec: { $avg: { $divide: ["$server.count", "$server.duration"] }},
+                  latency: { $avg: "$server.latency" },
+                  rabbit_total: { $avg: "$rabbit_total" }
+                }}
+    - { $project: { x: "$_id.threads",
+                    y: "$msg_sent_per_sec",
+                    y2: "$msg_received_per_sec",
+                    y3: { $multiply: [ "$latency", 1000 ] },
+                    y4: { $multiply: [ "$rabbit_total", 100 ] }
+                  }}
+    - { $sort: { x: 1 }}
+''' | chart
+}}

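The derived chart values follow directly from the grouped fields: x is threads multiplied by host count, throughput is count divided by duration, lost messages are the difference between what the client sent and what came back (or reached the server), and latency is scaled to milliseconds. A worked example in plain Python, using one illustrative averaged record:

```python
# One illustrative averaged record in the shape the pipelines expect.
rec = {
    'threads': 10, 'host_count': 2,
    'client':     {'count': 50000, 'duration': 100.0},
    'server':     {'count': 49800, 'duration': 100.0},
    'round_trip': {'count': 49500, 'duration': 100.0, 'latency': 0.0125},
}

x = rec['threads'] * rec['host_count']                                    # 20 threads in total
sent_per_sec = rec['client']['count'] / rec['client']['duration']        # 500.0 msg/sec
received_per_sec = rec['server']['count'] / rec['server']['duration']    # 498.0 msg/sec
lost = rec['client']['count'] - rec['round_trip']['count']               # 500 messages lost
latency_ms = rec['round_trip']['latency'] * 1000                         # 12.5 ms

print(x, sent_per_sec, received_per_sec, lost, latency_ms)
```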
+92 -0  performa/scenarios/mq/omsimulator-threading.yaml

@@ -0,0 +1,92 @@
+title: OMSimulator
+
+description:
+  This scenario uses the oslo.messaging simulator tool to execute the MQ test plan.
+
+parameters:
+  tester_hosts: List of hosts where omsimulator will be executed
+  rabbit_hosts: List of hosts where RabbitMQ runs
+  rabbit_url: RabbitMQ address
+
+setup:
+  -
+    hosts: {{ tester_hosts }}
+    tasks:
+    - apt: name=git
+      become: yes
+    - apt: name=daemon
+      become: yes
+    - name: installing omsimulator
+      git: repo=git://git.openstack.org/openstack/oslo.messaging
+           dest=/tmp/performa/oslo.messaging
+    - apt: name=python-dev
+      become: yes
+    - apt: name=python-pip
+      become: yes
+    - pip: name=virtualenv
+      become: yes
+    - pip: requirements=/tmp/performa/oslo.messaging/requirements.txt virtualenv=/tmp/performa/oslo.messaging/.venv
+    - pip: name=eventlet virtualenv=/tmp/performa/oslo.messaging/.venv
+    - command: /tmp/performa/oslo.messaging/.venv/bin/python setup.py install
+      args:
+        chdir: /tmp/performa/oslo.messaging
+  -
+    hosts: {{ rabbit_hosts }}
+    tasks:
+    - apt: name=atop
+      become: yes
+    - apt: name=daemon
+      become: yes
+
+
+execution:
+  -
+    hosts: {{ rabbit_hosts }}
+    tasks:
+    - atop: command=start
+  -
+    hosts: {{ tester_hosts }}
+    matrix:
+      threads: [ 1, 2, 5, 10, 20, 50, 100 ]
+    tasks:
+    - omsimulator:
+        mode: call
+        duration: 100
+        url: {{ rabbit_url }}
+  -
+    hosts: {{ tester_hosts }}
+    matrix:
+      threads: [ 1, 2, 5, 10, 20, 50, 100 ]
+    tasks:
+    - omsimulator:
+        mode: cast
+        duration: 100
+        url: {{ rabbit_url }}
+  -
+    hosts: {{ tester_hosts }}
+    matrix:
+      threads: [ 1, 2, 5, 10, 20, 50, 100 ]
+    tasks:
+    - omsimulator:
+        mode: notify
+        duration: 100
+        url: {{ rabbit_url }}
+  -
+    hosts: {{ rabbit_hosts }}
+    tasks:
+    - atop:
+        command: stop
+        labels: [ PRC ]
+
+aggregation:
+  -
+    update:
+      query:
+        { task: omsimulator }
+      values:
+        pipeline:
+        - { $match: { task: atop, status: OK, label: PRC, name: { $regex: beam.* } }}
+        - { $group: { _id: null, rabbit_sys: { $avg: "$sys" }, rabbit_user: { $avg: "$user" }, rabbit_total: { $avg: { $add: [ "$sys", "$user" ] }} }}
+
+report:
+  template: omsimulator-threading.rst

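The aggregation block averages the atop PRC samples of the ``beam.*`` (Erlang VM) processes and attaches the result to every omsimulator record as ``rabbit_total``, which the report then scales to percent. A hedged pymongo sketch of what this step presumably amounts to; the database and collection names and the single-collection layout are assumptions, only the pipeline itself comes from the scenario:

```python
# Sketch only: assumes one MongoDB collection holding both atop and
# omsimulator records; names are placeholders, not the engine's real ones.
import pymongo

db = pymongo.MongoClient('mongodb://localhost')['performa']
records = db['records']

pipeline = [
    {'$match': {'task': 'atop', 'status': 'OK', 'label': 'PRC',
                'name': {'$regex': 'beam.*'}}},
    {'$group': {'_id': None,
                'rabbit_sys': {'$avg': '$sys'},
                'rabbit_user': {'$avg': '$user'},
                'rabbit_total': {'$avg': {'$add': ['$sys', '$user']}}}},
]

values = next(records.aggregate(pipeline), None)
if values:
    values.pop('_id', None)
    # Write the averaged RabbitMQ CPU figures into every omsimulator record.
    records.update_many({'task': 'omsimulator'}, {'$set': values})
```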
+40 -8  performa/scenarios/mq/omsimulator.rst

@@ -7,20 +7,52 @@ This is the report of execution test plan
 Results
 ^^^^^^^
 
-Messages per second depending on threads count:
+**Message processing**
+
+The chart and table show the number of messages processed by a single process
+depending on the number of eventlet threads inside it. Messages are collected
+at 3 points: ``sent`` - messages sent by the client, ``received`` - messages
+received by the server, ``round-trip`` - replies received by the client.
 
 {{'''
-    title: Messages per second
+    title: Throughput
     axes:
       x: threads
-      y: messages per sec
-      y2: latency
+      y: sent, msg
+      y2: received, msg
+      y3: round-trip, msg
     chart: line
     pipeline:
-    - { $match: { task: omsimulator, status: OK }}
+    - { $match: { task: omsimulator, component: client }}
+    - { $group: { _id: { threads: { $multiply: [ "$threads", "$host_count" ] } },
+                  sent: { $avg: "$client.count" },
+                  received: { $avg: "$server.count" },
+                  round_trip: { $avg: "$round_trip.count" }
+                }}
+    - { $project: { x: "$_id.threads",
+                    y: "$sent",
+                    y2: "$received",
+                    y3: "$round_trip"
+                  }}
+    - { $sort: { x: 1 }}
+''' | chart
+}}
+
+
+**Message throughput, latency depending on thread count**
+
+{{'''
+    title: Throughput, latency depending on thread count
+    axes:
+      x: threads
+      y: throughput, msg/sec
+      y2: latency, ms
+    chart: line
+    pipeline:
+    - { $match: { task: omsimulator, component: client }}
     - { $group: { _id: { threads: { $multiply: [ "$threads", "$host_count" ] } },
                   msg_sent_per_sec: { $avg: { $divide: ["$count", "$duration"] }},
-                  latency: { $avg: "$latency" }
+                  latency: { $avg: "$latency" },
                 }}
     - { $project: { x: "$_id.threads",
                     y: "$msg_sent_per_sec",
@@ -40,7 +72,7 @@ Messages per second and rabbit CPU consumption depending on threads count:
       y2: rabbit CPU consumption, %
     chart: line
     pipeline:
-    - { $match: { task: omsimulator, status: OK }}
+    - { $match: { task: omsimulator }}
     - { $group: { _id: { threads: { $multiply: [ "$threads", "$host_count" ] } },
                   msg_sent_per_sec: { $avg: { $divide: ["$count", "$duration"] }},
                   rabbit_total: { $avg: "$rabbit_total" }
@@ -61,7 +93,7 @@ Messages per second and rabbit CPU consumption depending on threads count:
       y: latency
     chart: line
     pipeline:
-    - { $match: { task: omsimulator, status: OK }}
+    - { $match: { task: omsimulator }}
     - { $group: { _id: { threads: { $multiply: [ "$threads", "$host_count" ] } },
                   msg_sent_per_sec: { $avg: { $divide: ["$count", "$duration"] }},
                   latency: { $avg: "$latency" }

+6 -2  performa/scenarios/mq/omsimulator.yaml

@@ -15,6 +15,9 @@ setup:
 #    - name: installing omsimulator
 #      git: repo=git://git.openstack.org/openstack/oslo.messaging
 #           dest=/tmp/performa/oslo.messaging
+#    - name: installing omsimulator
+#      git: repo=git://git.openstack.org/openstack/oslo.messaging
+#           dest=/tmp/performa/oslo.messaging
 #    - command: git fetch https://review.openstack.org/openstack/oslo.messaging refs/changes/91/291191/2
 #      args:
 #        chdir: /tmp/performa/oslo.messaging
@@ -43,12 +46,13 @@ execution:
   -
     hosts: {{ target }}
     matrix:
-      host_count: [ 1, 2 ]
+      host_count: [ 1 ]
+      threads: [ 1, 10, 100 ]
     tasks:
     - omsimulator:
         mode: call
         duration: 10
-        threads: 1
+        threads: 10
         url: {{ rabbit_url }}
   -
     hosts: {{ target }}

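The matrix presumably expands into one omsimulator run per combination of values, so the CALL block above now iterates over thread counts on a single host instead of over host counts. A tiny sketch of the assumed expansion (the cartesian-product semantics is an assumption, not taken from the performa engine):

```python
# Assumed matrix expansion: one task run per combination of matrix values.
import itertools

matrix = {'host_count': [1], 'threads': [1, 10, 100]}

keys = sorted(matrix)
for combo in itertools.product(*(matrix[k] for k in keys)):
    params = dict(zip(keys, combo))
    print('running omsimulator with', params)  # e.g. {'host_count': 1, 'threads': 10}
```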