Browse Source

Create devstack plugin for monasca-events

!Parts of code taken from monasca-api and monasca-log-api!

Also create a dummy implementation for the version endpoint
so that the deployment can be tested.

Story: 2001112
Task: 4808

Change-Id: Ic35f6388c34c4d4c1198401a1c6972b802a54c67
Jakub Wachowski 1 year ago
parent
commit
c3a3b4b765

+ 19
- 0
.zuul.yaml View File

@@ -0,0 +1,19 @@
1
+- job:
2
+    name: monascaevents-tempest-events-base
3
+    parent: legacy-dsvm-base
4
+    run: playbooks/legacy/monasca-tempest-events-base/run
5
+    post-run: playbooks/legacy/monasca-tempest-events-base/post
6
+    timeout: 7800
7
+    required-projects:
8
+      - openstack-infra/devstack-gate
9
+      - openstack/monasca-events-api
10
+      - openstack/monasca-common
11
+      - openstack/monasca-persister
12
+      - openstack/python-monascaclient
13
+      - openstack/tempest
14
+    voting: false
15
+- project:
16
+    name: openstack/monasca-events-api
17
+    check:
18
+      jobs:
19
+        - monascaevents-tempest-events-base

+ 3
- 1
devstack/Vagrantfile View File

@@ -79,7 +79,9 @@ LOGDIR=$DEST/logs
79 79
 LOG_COLOR=False
80 80
 
81 81
 disable_all_services
82
-enable_service zookeeper rabbit mysql key tempest horizon
82
+enable_service rabbit mysql key tempest horizon
83
+
84
+enable_plugin monasca-events-api https://git.openstack.org/openstack/monasca-events-api
83 85
 
84 86
 ' > local.conf
85 87
     ./stack.sh

+ 3
- 0
devstack/files/debs/monasca-events-api View File

@@ -0,0 +1,3 @@
1
+jq # dist:xenial
2
+python-dev # dist:xenial
3
+build-essential # dist:xenial

+ 360
- 0
devstack/files/elasticsearch/elasticsearch.yml View File

@@ -0,0 +1,360 @@
1
+##################### Elasticsearch Configuration Example #####################
2
+
3
+# This file contains an overview of various configuration settings,
4
+# targeted at operations staff. Application developers should
5
+# consult the guide at <http://elasticsearch.org/guide>.
6
+#
7
+# The installation procedure is covered at
8
+# <http://elasticsearch.org/guide/en/elasticsearch/reference/current/setup.html>.
9
+#
10
+# Elasticsearch comes with reasonable defaults for most settings,
11
+# so you can try it out without bothering with configuration.
12
+#
13
+# Most of the time, these defaults are just fine for running a production
14
+# cluster. If you're fine-tuning your cluster, or wondering about the
15
+# effect of a certain configuration option, please _do ask_ on the
16
+# mailing list or IRC channel [http://elasticsearch.org/community].
17
+
18
+# Any element in the configuration can be replaced with environment variables
19
+# by placing them in ${...} notation. For example:
20
+#
21
+#node.rack: ${RACK_ENV_VAR}
22
+
23
+# For information on supported formats and syntax for the config file, see
24
+# <http://elasticsearch.org/guide/en/elasticsearch/reference/current/setup-configuration.html>
25
+
26
+
27
+################################### Cluster ###################################
28
+
29
+# Cluster name identifies your cluster for auto-discovery. If you're running
30
+# multiple clusters on the same network, make sure you're using unique names.
31
+#
32
+cluster.name: monasca_events_elastic
33
+
34
+
35
+#################################### Node #####################################
36
+
37
+# Node names are generated dynamically on startup, so you're relieved
38
+# from configuring them manually. You can tie this node to a specific name:
39
+#
40
+node.name: "devstack"
41
+
42
+# Allow this node to be eligible as a master node (enabled by default):
43
+node.master: true
44
+
45
+# Allow this node to store data (enabled by default)
46
+node.data:  true
47
+
48
+# You can exploit these settings to design advanced cluster topologies.
49
+#
50
+# 1. You want this node to never become a master node, only to hold data.
51
+#    This will be the "workhorse" of your cluster.
52
+#
53
+#node.master: false
54
+#node.data: true
55
+#
56
+# 2. You want this node to only serve as a master: to not store any data and
57
+#    to have free resources. This will be the "coordinator" of your cluster.
58
+#
59
+#node.master: true
60
+#node.data: false
61
+#
62
+# 3. You want this node to be neither master nor data node, but
63
+#    to act as a "search load balancer" (fetching data from nodes,
64
+#    aggregating results, etc.)
65
+#
66
+#node.master: false
67
+#node.data: false
68
+
69
+# Use the Cluster Health API [http://localhost:9200/_cluster/health], the
70
+# Node Info API [http://localhost:9200/_nodes] or GUI tools
71
+# such as <http://www.elasticsearch.org/overview/marvel/>,
72
+# <http://github.com/karmi/elasticsearch-paramedic>,
73
+# <http://github.com/lukas-vlcek/bigdesk> and
74
+# <http://mobz.github.com/elasticsearch-head> to inspect the cluster state.
75
+
76
+# A node can have generic attributes associated with it, which can later be used
77
+# for customized shard allocation filtering, or allocation awareness. An attribute
78
+# is a simple key value pair, similar to node.key: value, here is an example:
79
+#
80
+#node.rack: rack314
81
+
82
+# By default, multiple nodes are allowed to start from the same installation location
83
+# to disable it, set the following:
84
+#node.max_local_storage_nodes: 1
85
+
86
+
87
+#################################### Index ####################################
88
+
89
+# You can set a number of options (such as shard/replica options, mapping
90
+# or analyzer definitions, translog settings, ...) for indices globally,
91
+# in this file.
92
+#
93
+# Note, that it makes more sense to configure index settings specifically for
94
+# a certain index, either when creating it or by using the index templates API.
95
+#
96
+# See <http://elasticsearch.org/guide/en/elasticsearch/reference/current/index-modules.html> and
97
+# <http://elasticsearch.org/guide/en/elasticsearch/reference/current/indices-create-index.html>
98
+# for more information.
99
+
100
+# Set the number of shards (splits) of an index (5 by default):
101
+#
102
+#index.number_of_shards: 5
103
+
104
+# Set the number of replicas (additional copies) of an index (1 by default):
105
+#
106
+#index.number_of_replicas: 1
107
+
108
+# Note, that for development on a local machine, with small indices, it usually
109
+# makes sense to "disable" the distributed features:
110
+#
111
+#index.number_of_shards: 1
112
+#index.number_of_replicas: 0
113
+
114
+# These settings directly affect the performance of index and search operations
115
+# in your cluster. Assuming you have enough machines to hold shards and
116
+# replicas, the rule of thumb is:
117
+#
118
+# 1. Having more *shards* enhances the _indexing_ performance and allows to
119
+#    _distribute_ a big index across machines.
120
+# 2. Having more *replicas* enhances the _search_ performance and improves the
121
+#    cluster _availability_.
122
+#
123
+# The "number_of_shards" is a one-time setting for an index.
124
+#
125
+# The "number_of_replicas" can be increased or decreased anytime,
126
+# by using the Index Update Settings API.
127
+#
128
+# Elasticsearch takes care about load balancing, relocating, gathering the
129
+# results from nodes, etc. Experiment with different settings to fine-tune
130
+# your setup.
131
+
132
+# Use the Index Status API (<http://localhost:9200/A/_status>) to inspect
133
+# the index status.
134
+
135
+
136
+#################################### Paths ####################################
137
+
138
+# Path to directory where to store index data allocated for this node.
139
+path.data: %ELASTICSEARCH_DATA_DIR%
140
+
141
+# Path to log files:
142
+path.logs: %ELASTICSEARCH_LOG_DIR%
143
+
144
+# Path to where plugins are installed:
145
+#path.plugins: /path/to/plugins
146
+
147
+# Path to temporary files
148
+#path.work: /path/to/work
149
+
150
+# Path to directory containing configuration (this file and logging.yml):
151
+#path.conf: /path/to/conf
152
+
153
+
154
+#################################### Plugin ###################################
155
+
156
+# If a plugin listed here is not installed for current node, the node will not start.
157
+#
158
+#plugin.mandatory: mapper-attachments,lang-groovy
159
+
160
+
161
+################################### Memory ####################################
162
+
163
+# Elasticsearch performs poorly when JVM starts swapping: you should ensure that
164
+# it _never_ swaps.
165
+#
166
+# Set this property to true to lock the memory:
167
+#
168
+#bootstrap.mlockall: true
169
+
170
+# Make sure that the ES_MIN_MEM and ES_MAX_MEM environment variables are set
171
+# to the same value, and that the machine has enough memory to allocate
172
+# for Elasticsearch, leaving enough memory for the operating system itself.
173
+#
174
+# You should also make sure that the Elasticsearch process is allowed to lock
175
+# the memory, eg. by using `ulimit -l unlimited`.
176
+
177
+
178
+############################## Network And HTTP ###############################
179
+
180
+# Elasticsearch, by default, binds itself to the 0.0.0.0 address, and listens
181
+# on port [9200-9300] for HTTP traffic and on port [9300-9400] for node-to-node
182
+# communication. (the range means that if the port is busy, it will automatically
183
+# try the next port).
184
+
185
+# Set the bind address specifically (IPv4 or IPv6):
186
+network.bind_host: %ELASTICSEARCH_BIND_HOST%
187
+
188
+# Set the address other nodes will use to communicate with this node. If not
189
+# set, it is automatically derived. It must point to an actual IP address.
190
+network.publish_host: %ELASTICSEARCH_PUBLISH_HOST%
191
+
192
+# Set a custom port for the node to node communication (9300 by default):
193
+transport.tcp.port: %ELASTICSEARCH_PUBLISH_PORT%
194
+
195
+# Enable compression for all communication between nodes (disabled by default):
196
+#
197
+#transport.tcp.compress: true
198
+
199
+# Set a custom port to listen for HTTP traffic:
200
+#
201
+http.port: %ELASTICSEARCH_BIND_PORT%
202
+
203
+# Set a custom allowed content length:
204
+#
205
+#http.max_content_length: 100mb
206
+
207
+# Disable HTTP completely:
208
+#
209
+#http.enabled: false
210
+
211
+
212
+################################### Gateway ###################################
213
+
214
+# The gateway allows for persisting the cluster state between full cluster
215
+# restarts. Every change to the state (such as adding an index) will be stored
216
+# in the gateway, and when the cluster starts up for the first time,
217
+# it will read its state from the gateway.
218
+
219
+# There are several types of gateway implementations. For more information, see
220
+# <http://elasticsearch.org/guide/en/elasticsearch/reference/current/modules-gateway.html>.
221
+
222
+# The default gateway type is the "local" gateway (recommended):
223
+#
224
+#gateway.type: local
225
+
226
+# Settings below control how and when to start the initial recovery process on
227
+# a full cluster restart (to reuse as much local data as possible when using shared
228
+# gateway).
229
+
230
+# Allow recovery process after N nodes in a cluster are up:
231
+#
232
+#gateway.recover_after_nodes: 1
233
+# Set the timeout to initiate the recovery process, once the N nodes
234
+# from previous setting are up (accepts time value):
235
+#
236
+#gateway.recover_after_time: 5m
237
+
238
+# Set how many nodes are expected in this cluster. Once these N nodes
239
+# are up (and recover_after_nodes is met), begin recovery process immediately
240
+# (without waiting for recover_after_time to expire):
241
+#
242
+#gateway.expected_nodes: 2
243
+
244
+
245
+############################# Recovery Throttling #############################
246
+
247
+# These settings allow to control the process of shards allocation between
248
+# nodes during initial recovery, replica allocation, rebalancing,
249
+# or when adding and removing nodes.
250
+
251
+# Set the number of concurrent recoveries happening on a node:
252
+#
253
+# 1. During the initial recovery
254
+#
255
+#cluster.routing.allocation.node_initial_primaries_recoveries: 4
256
+#
257
+# 2. During adding/removing nodes, rebalancing, etc
258
+#
259
+#cluster.routing.allocation.node_concurrent_recoveries: 2
260
+
261
+# Set to throttle throughput when recovering (eg. 100mb, by default 20mb):
262
+#
263
+#indices.recovery.max_bytes_per_sec: 20mb
264
+
265
+# Set to limit the number of open concurrent streams when
266
+# recovering a shard from a peer:
267
+#
268
+#indices.recovery.concurrent_streams: 5
269
+
270
+
271
+################################## Discovery ##################################
272
+
273
+# Discovery infrastructure ensures nodes can be found within a cluster
274
+# and master node is elected. Multicast discovery is the default.
275
+
276
+# Set to ensure a node sees N other master eligible nodes to be considered
277
+# operational within the cluster. This should be set to a quorum/majority of
278
+# the master-eligible nodes in the cluster.
279
+#
280
+discovery.zen.minimum_master_nodes: 1
281
+
282
+# Set the time to wait for ping responses from other nodes when discovering.
283
+# Set this option to a higher value on a slow or congested network
284
+# to minimize discovery failures:
285
+#
286
+#discovery.zen.ping.timeout: 3s
287
+
288
+# For more information, see
289
+# <http://elasticsearch.org/guide/en/elasticsearch/reference/current/modules-discovery-zen.html>
290
+
291
+# Unicast discovery allows to explicitly control which nodes will be used
292
+# to discover the cluster. It can be used when multicast is not present,
293
+# or to restrict the cluster communication-wise.
294
+#
295
+# 1. Disable multicast discovery (enabled by default):
296
+#
297
+discovery.zen.ping.multicast.enabled: false
298
+# 2. Configure an initial list of master nodes in the cluster
299
+#    to perform discovery when new nodes (master or data) are started:
300
+#
301
+# discovery.zen.ping.unicast.hosts: [127.0.0.1]
302
+
303
+# EC2 discovery allows to use AWS EC2 API in order to perform discovery.
304
+#
305
+# You have to install the cloud-aws plugin for enabling the EC2 discovery.
306
+#
307
+# For more information, see
308
+# <http://elasticsearch.org/guide/en/elasticsearch/reference/current/modules-discovery-ec2.html>
309
+#
310
+# See <http://elasticsearch.org/tutorials/elasticsearch-on-ec2/>
311
+# for a step-by-step tutorial.
312
+
313
+# GCE discovery allows to use Google Compute Engine API in order to perform discovery.
314
+#
315
+# You have to install the cloud-gce plugin for enabling the GCE discovery.
316
+#
317
+# For more information, see <https://github.com/elasticsearch/elasticsearch-cloud-gce>.
318
+
319
+# Azure discovery allows to use Azure API in order to perform discovery.
320
+#
321
+# You have to install the cloud-azure plugin for enabling the Azure discovery.
322
+#
323
+# For more information, see <https://github.com/elasticsearch/elasticsearch-cloud-azure>.
324
+
325
+################################## Slow Log ##################################
326
+
327
+# Shard level query and fetch threshold logging.
328
+
329
+#index.search.slowlog.threshold.query.warn: 10s
330
+#index.search.slowlog.threshold.query.info: 5s
331
+#index.search.slowlog.threshold.query.debug: 2s
332
+#index.search.slowlog.threshold.query.trace: 500ms
333
+
334
+#index.search.slowlog.threshold.fetch.warn: 1s
335
+#index.search.slowlog.threshold.fetch.info: 800ms
336
+#index.search.slowlog.threshold.fetch.debug: 500ms
337
+#index.search.slowlog.threshold.fetch.trace: 200ms
338
+
339
+#index.indexing.slowlog.threshold.index.warn: 10s
340
+#index.indexing.slowlog.threshold.index.info: 5s
341
+#index.indexing.slowlog.threshold.index.debug: 2s
342
+#index.indexing.slowlog.threshold.index.trace: 500ms
343
+
344
+################################## GC Logging ################################
345
+
346
+#monitor.jvm.gc.young.warn: 1000ms
347
+#monitor.jvm.gc.young.info: 700ms
348
+#monitor.jvm.gc.young.debug: 400ms
349
+
350
+#monitor.jvm.gc.old.warn: 10s
351
+#monitor.jvm.gc.old.info: 5s
352
+#monitor.jvm.gc.old.debug: 2s
353
+
354
+################################## Security ################################
355
+
356
+# Uncomment if you want to enable JSONP as a valid return transport on the
357
+# http server. With this enabled, it may pose a security risk, so disabling
358
+# it unless you need it is recommended (it is disabled by default).
359
+#
360
+#http.jsonp.enable: true

+ 38
- 0
devstack/files/kafka/kafka-server-start.sh View File

@@ -0,0 +1,38 @@
1
+#!/bin/bash
2
+# Licensed to the Apache Software Foundation (ASF) under one or more
3
+# contributor license agreements.  See the NOTICE file distributed with
4
+# this work for additional information regarding copyright ownership.
5
+# The ASF licenses this file to You under the Apache License, Version 2.0
6
+# (the "License"); you may not use this file except in compliance with
7
+# the License.  You may obtain a copy of the License at
8
+#
9
+#    http://www.apache.org/licenses/LICENSE-2.0
10
+#
11
+# Unless required by applicable law or agreed to in writing, software
12
+# distributed under the License is distributed on an "AS IS" BASIS,
13
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+# See the License for the specific language governing permissions and
15
+# limitations under the License.
16
+
17
+if [ $# -lt 1 ];
18
+then
19
+    echo "USAGE: $0 [-daemon] server.properties"
20
+    exit 1
21
+fi
22
+base_dir=$(dirname $0)
23
+export KAFKA_LOG4J_OPTS="-Dlog4j.configuration=file:$base_dir/../config/log4j.properties"
24
+export KAFKA_HEAP_OPTS="-Xms256m -Xmx256m"
25
+
26
+EXTRA_ARGS="-name kafkaServer -loggc"
27
+
28
+COMMAND=$1
29
+case $COMMAND in
30
+  -daemon)
31
+    EXTRA_ARGS="-daemon "$EXTRA_ARGS
32
+    shift
33
+    ;;
34
+  *)
35
+    ;;
36
+esac
37
+
38
+exec $base_dir/kafka-run-class.sh $EXTRA_ARGS kafka.Kafka $@

+ 72
- 0
devstack/files/kafka/log4j.properties View File

@@ -0,0 +1,72 @@
1
+# Licensed to the Apache Software Foundation (ASF) under one or more
2
+# contributor license agreements.  See the NOTICE file distributed with
3
+# this work for additional information regarding copyright ownership.
4
+# The ASF licenses this file to You under the Apache License, Version 2.0
5
+# (the "License"); you may not use this file except in compliance with
6
+# the License.  You may obtain a copy of the License at
7
+#
8
+#    http://www.apache.org/licenses/LICENSE-2.0
9
+#
10
+# Unless required by applicable law or agreed to in writing, software
11
+# distributed under the License is distributed on an "AS IS" BASIS,
12
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+# See the License for the specific language governing permissions and
14
+# limitations under the License.
15
+
16
+log4j.rootLogger=WARN, stdout
17
+
18
+log4j.appender.stdout=org.apache.log4j.ConsoleAppender
19
+log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
20
+log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c)%n
21
+
22
+log4j.appender.kafkaAppender=org.apache.log4j.RollingFileAppender
23
+log4j.appender.kafkaAppender.MaxFileSize=50MB
24
+log4j.appender.kafkaAppender.MaxBackupIndex=4
25
+log4j.appender.kafkaAppender.File=/var/log/kafka/server.log
26
+log4j.appender.kafkaAppender.layout=org.apache.log4j.PatternLayout
27
+log4j.appender.kafkaAppender.layout.ConversionPattern=[%d] %p %m (%c)%n
28
+
29
+log4j.appender.stateChangeAppender=org.apache.log4j.RollingFileAppender
30
+log4j.appender.stateChangeAppender.MaxFileSize=50MB
31
+log4j.appender.stateChangeAppender.MaxBackupIndex=4
32
+log4j.appender.stateChangeAppender.File=/var/log/kafka/state-change.log
33
+log4j.appender.stateChangeAppender.layout=org.apache.log4j.PatternLayout
34
+log4j.appender.stateChangeAppender.layout.ConversionPattern=[%d] %p %m (%c)%n
35
+
36
+log4j.appender.controllerAppender=org.apache.log4j.RollingFileAppender
37
+log4j.appender.controllerAppender.MaxFileSize=50MB
38
+log4j.appender.controllerAppender.MaxBackupIndex=4
39
+log4j.appender.controllerAppender.File=/var/log/kafka/controller.log
40
+log4j.appender.controllerAppender.layout=org.apache.log4j.PatternLayout
41
+log4j.appender.controllerAppender.layout.ConversionPattern=[%d] %p %m (%c)%n
42
+
43
+# Turn on all our debugging info
44
+#log4j.logger.kafka.producer.async.DefaultEventHandler=DEBUG, kafkaAppender
45
+#log4j.logger.kafka.client.ClientUtils=DEBUG, kafkaAppender
46
+#log4j.logger.kafka.perf=DEBUG, kafkaAppender
47
+#log4j.logger.kafka.perf.ProducerPerformance$ProducerThread=DEBUG, kafkaAppender
48
+#log4j.logger.org.I0Itec.zkclient.ZkClient=DEBUG
49
+log4j.logger.kafka=WARN, kafkaAppender
50
+
51
+# Tracing requests results in large logs
52
+#log4j.appender.requestAppender=org.apache.log4j.RollingFileAppender
53
+#log4j.appender.requestAppender.MaxFileSize=50MB
54
+#log4j.appender.requestAppender.MaxBackupIndex=4
55
+#log4j.appender.requestAppender.File=/var/log/kafka/kafka-request.log
56
+#log4j.appender.requestAppender.layout=org.apache.log4j.PatternLayout
57
+#log4j.appender.requestAppender.layout.ConversionPattern=[%d] %p %m (%c)%n
58
+#
59
+#log4j.logger.kafka.network.RequestChannel$=TRACE, requestAppender
60
+#log4j.additivity.kafka.network.RequestChannel$=false
61
+#
62
+#log4j.logger.kafka.network.Processor=TRACE, requestAppender
63
+#log4j.logger.kafka.server.KafkaApis=TRACE, requestAppender
64
+#log4j.additivity.kafka.server.KafkaApis=false
65
+#log4j.logger.kafka.request.logger=TRACE, requestAppender
66
+#log4j.additivity.kafka.request.logger=false
67
+
68
+log4j.logger.kafka.controller=TRACE, controllerAppender
69
+log4j.additivity.kafka.controller=false
70
+
71
+log4j.logger.state.change.logger=TRACE, stateChangeAppender
72
+log4j.additivity.state.change.logger=false

+ 118
- 0
devstack/files/kafka/server.properties View File

@@ -0,0 +1,118 @@
1
+#
2
+# (C) Copyright 2015 Hewlett Packard Enterprise Development Company LP
3
+#
4
+# Licensed under the Apache License, Version 2.0 (the "License");
5
+# you may not use this file except in compliance with the License.
6
+# You may obtain a copy of the License at
7
+#
8
+#    http://www.apache.org/licenses/LICENSE-2.0
9
+#
10
+# Unless required by applicable law or agreed to in writing, software
11
+# distributed under the License is distributed on an "AS IS" BASIS,
12
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
13
+# implied.
14
+# See the License for the specific language governing permissions and
15
+# limitations under the License.
16
+#
17
+
18
+############################# Server Basics #############################
19
+
20
+# The id of the broker. This must be set to a unique integer for each broker.
21
+broker.id=0
22
+
23
+############################# Socket Server Settings #############################
24
+
25
+# The port the socket server listens on
26
+port=9092
27
+
28
+# Hostname the broker will bind to. If not set, the server will bind to all interfaces
29
+#host.name=127.0.0.1
30
+
31
+# Hostname the broker will advertise to producers and consumers. If not set, it uses the
32
+# value for "host.name" if configured.  Otherwise, it will use the value returned from
33
+# java.net.InetAddress.getCanonicalHostName().
34
+#advertised.host.name=<hostname routable by clients>
35
+
36
+# The port to publish to ZooKeeper for clients to use. If this is not set,
37
+# it will publish the same port that the broker binds to.
38
+#advertised.port=<port accessible by clients>
39
+
40
+# The number of threads handling network requests
41
+num.network.threads=2
42
+
43
+# The number of threads doing disk I/O
44
+num.io.threads=2
45
+
46
+# The send buffer (SO_SNDBUF) used by the socket server
47
+socket.send.buffer.bytes=1048576
48
+
49
+# The receive buffer (SO_RCVBUF) used by the socket server
50
+socket.receive.buffer.bytes=1048576
51
+
52
+# The maximum size of a request that the socket server will accept (protection against OOM)
53
+socket.request.max.bytes=104857600
54
+
55
+
56
+############################# Log Basics #############################
57
+
58
+# A comma separated list of directories under which to store log files
59
+log.dirs=/var/kafka
60
+
61
+auto.create.topics.enable=false
62
+# The number of logical partitions per topic per server. More partitions allow greater parallelism
63
+# for consumption, but also mean more files.
64
+num.partitions=2
65
+
66
+############################# Log Flush Policy #############################
67
+
68
+# Messages are immediately written to the filesystem but by default we only fsync() to sync
69
+# the OS cache lazily. The following configurations control the flush of data to disk.
70
+# There are a few important trade-offs here:
71
+#    1. Durability: Unflushed data may be lost if you are not using replication.
72
+#    2. Latency: Very large flush intervals may lead to latency spikes when the flush does occur as there will be a lot of data to flush.
73
+#    3. Throughput: The flush is generally the most expensive operation, and a small flush interval may lead to excessive seeks.
74
+# The settings below allow one to configure the flush policy to flush data after a period of time or
75
+# every N messages (or both). This can be done globally and overridden on a per-topic basis.
76
+
77
+# The number of messages to accept before forcing a flush of data to disk
78
+log.flush.interval.messages=10000
79
+
80
+# The maximum amount of time a message can sit in a log before we force a flush
81
+log.flush.interval.ms=1000
82
+
83
+############################# Log Retention Policy #############################
84
+
85
+# The following configurations control the disposal of log segments. The policy can
86
+# be set to delete segments after a period of time, or after a given size has accumulated.
87
+# A segment will be deleted whenever *either* of these criteria are met. Deletion always happens
88
+# from the end of the log.
89
+
90
+# The minimum age of a log file to be eligible for deletion
91
+log.retention.hours=24
92
+
93
+# A size-based retention policy for logs. Segments are pruned from the log as long as the remaining
94
+# segments don't drop below log.retention.bytes.
95
+log.retention.bytes=104857600
96
+
97
+# The maximum size of a log segment file. When this size is reached a new log segment will be created.
98
+log.segment.bytes=104857600
99
+
100
+# The interval at which log segments are checked to see if they can be deleted according
101
+# to the retention policies
102
+log.retention.check.interval.ms=60000
103
+
104
+# By default the log cleaner is disabled and the log retention policy will default to just delete segments after their retention expires.
105
+# If log.cleaner.enable=true is set the cleaner will be enabled and individual logs can then be marked for log compaction.
106
+log.cleaner.enable=false
107
+
108
+############################# Zookeeper #############################
109
+
110
+# Zookeeper connection string (see zookeeper docs for details).
111
+# This is a comma separated host:port pairs, each corresponding to a zk
112
+# server. e.g. "127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002".
113
+# You can also append an optional chroot string to the urls to specify the
114
+# root directory for all kafka znodes.
115
+zookeeper.connect=127.0.0.1:2181
116
+
117
+# Timeout in ms for connecting to zookeeper
118
+zookeeper.connection.timeout.ms=1000000

+ 36
- 0
devstack/files/monasca-events-persister/events-persister-logging.conf View File

@@ -0,0 +1,36 @@
1
+[loggers]
2
+keys = root, kafka
3
+
4
+[handlers]
5
+keys = console, file
6
+
7
+[formatters]
8
+keys = generic
9
+
10
+[logger_root]
11
+level = DEBUG
12
+formatter = default
13
+handlers = console, file
14
+
15
+[logger_kafka]
16
+qualname = monasca_common.kafka_lib
17
+level = INFO
18
+formatter = default
19
+handlers = console, file
20
+propagate = 0
21
+
22
+[handler_console]
23
+class = logging.StreamHandler
24
+args = (sys.stderr,)
25
+level = DEBUG
26
+formatter = generic
27
+
28
+[handler_file]
29
+class = logging.handlers.RotatingFileHandler
30
+level = DEBUG
31
+formatter = generic
32
+# store up to 5*100MB of logs
33
+args = ('%MONASCA_EVENTS_LOG_DIR%/events-persister.log', 'a', 104857600, 5)
34
+
35
+[formatter_generic]
36
+format = %(asctime)s %(levelname)s [%(name)s][%(threadName)s] %(message)s

+ 36
- 0
devstack/files/zookeeper/environment View File

@@ -0,0 +1,36 @@
1
+#
2
+# (C) Copyright 2015 Hewlett Packard Enterprise Development Company LP
3
+#
4
+# Licensed under the Apache License, Version 2.0 (the "License");
5
+# you may not use this file except in compliance with the License.
6
+# You may obtain a copy of the License at
7
+#
8
+#    http://www.apache.org/licenses/LICENSE-2.0
9
+#
10
+# Unless required by applicable law or agreed to in writing, software
11
+# distributed under the License is distributed on an "AS IS" BASIS,
12
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
13
+# implied.
14
+# See the License for the specific language governing permissions and
15
+# limitations under the License.
16
+#
17
+
18
+# Modified from http://packages.ubuntu.com/saucy/zookeeperd
19
+NAME=zookeeper
20
+ZOOCFGDIR=/etc/zookeeper/conf
21
+
22
+# seems, that log4j requires the log4j.properties file to be in the classpath
23
+CLASSPATH="$ZOOCFGDIR:/usr/share/java/jline.jar:/usr/share/java/log4j-1.2.jar:/usr/share/java/xercesImpl.jar:/usr/share/java/xmlParserAPIs.jar:/usr/share/java/netty.jar:/usr/share/java/slf4j-api.jar:/usr/share/java/slf4j-log4j12.jar:/usr/share/java/zookeeper.jar"
24
+
25
+ZOOCFG="$ZOOCFGDIR/zoo.cfg"
26
+ZOO_LOG_DIR=/var/log/zookeeper
27
+USER=$NAME
28
+GROUP=$NAME
29
+PIDDIR=/var/run/$NAME
30
+PIDFILE=$PIDDIR/$NAME.pid
31
+SCRIPTNAME=/etc/init.d/$NAME
32
+JAVA=/usr/bin/java
33
+ZOOMAIN="org.apache.zookeeper.server.quorum.QuorumPeerMain"
34
+ZOO_LOG4J_PROP="INFO,ROLLINGFILE"
35
+JMXLOCALONLY=false
36
+JAVA_OPTS=""

+ 69
- 0
devstack/files/zookeeper/log4j.properties View File

@@ -0,0 +1,69 @@
1
+#
2
+# (C) Copyright 2015 Hewlett Packard Enterprise Development Company LP
3
+#
4
+# Licensed under the Apache License, Version 2.0 (the "License");
5
+# you may not use this file except in compliance with the License.
6
+# You may obtain a copy of the License at
7
+#
8
+#    http://www.apache.org/licenses/LICENSE-2.0
9
+#
10
+# Unless required by applicable law or agreed to in writing, software
11
+# distributed under the License is distributed on an "AS IS" BASIS,
12
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
13
+# implied.
14
+# See the License for the specific language governing permissions and
15
+# limitations under the License.
16
+#
17
+
18
+# From http://packages.ubuntu.com/saucy/zookeeperd
19
+
20
+# ZooKeeper Logging Configuration
21
+#
22
+
23
+# Format is "<default threshold> (, <appender>)+
24
+
25
+log4j.rootLogger=${zookeeper.root.logger}
26
+
27
+# Example: console appender only
28
+# log4j.rootLogger=INFO, CONSOLE
29
+
30
+# Example with rolling log file
31
+#log4j.rootLogger=DEBUG, CONSOLE, ROLLINGFILE
32
+
33
+# Example with rolling log file and tracing
34
+#log4j.rootLogger=TRACE, CONSOLE, ROLLINGFILE, TRACEFILE
35
+
36
+#
37
+# Log INFO level and above messages to the console
38
+#
39
+log4j.appender.CONSOLE=org.apache.log4j.ConsoleAppender
40
+log4j.appender.CONSOLE.Threshold=INFO
41
+log4j.appender.CONSOLE.layout=org.apache.log4j.PatternLayout
42
+log4j.appender.CONSOLE.layout.ConversionPattern=%d{ISO8601} - %-5p [%t:%C{1}@%L] - %m%n
43
+
44
+#
45
+# Add ROLLINGFILE to rootLogger to get log file output
46
+#    Log DEBUG level and above messages to a log file
47
+log4j.appender.ROLLINGFILE=org.apache.log4j.RollingFileAppender
48
+log4j.appender.ROLLINGFILE.Threshold=WARN
49
+log4j.appender.ROLLINGFILE.File=${zookeeper.log.dir}/zookeeper.log
50
+
51
+# Max log file size of 10MB
52
+log4j.appender.ROLLINGFILE.MaxFileSize=10MB
53
+# uncomment the next line to limit number of backup files
54
+#log4j.appender.ROLLINGFILE.MaxBackupIndex=10
55
+
56
+log4j.appender.ROLLINGFILE.layout=org.apache.log4j.PatternLayout
57
+log4j.appender.ROLLINGFILE.layout.ConversionPattern=%d{ISO8601} - %-5p [%t:%C{1}@%L] - %m%n
58
+
59
+
60
+#
61
+# Add TRACEFILE to rootLogger to get log file output
62
+#    Log DEBUG level and above messages to a log file
63
+log4j.appender.TRACEFILE=org.apache.log4j.FileAppender
64
+log4j.appender.TRACEFILE.Threshold=TRACE
65
+log4j.appender.TRACEFILE.File=${zookeeper.log.dir}/zookeeper_trace.log
66
+
67
+log4j.appender.TRACEFILE.layout=org.apache.log4j.PatternLayout
68
+### Notice we are including log4j's NDC here (%x)
69
+log4j.appender.TRACEFILE.layout.ConversionPattern=%d{ISO8601} - %-5p [%t:%C{1}@%L][%x] - %m%n

+ 1
- 0
devstack/files/zookeeper/myid View File

@@ -0,0 +1 @@
1
+0

+ 74
- 0
devstack/files/zookeeper/zoo.cfg View File

@@ -0,0 +1,74 @@
1
+#
2
+# (C) Copyright 2015 Hewlett Packard Enterprise Development Company LP
3
+#
4
+# Licensed under the Apache License, Version 2.0 (the "License");
5
+# you may not use this file except in compliance with the License.
6
+# You may obtain a copy of the License at
7
+#
8
+#    http://www.apache.org/licenses/LICENSE-2.0
9
+#
10
+# Unless required by applicable law or agreed to in writing, software
11
+# distributed under the License is distributed on an "AS IS" BASIS,
12
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
13
+# implied.
14
+# See the License for the specific language governing permissions and
15
+# limitations under the License.
16
+#
17
+# http://hadoop.apache.org/zookeeper/docs/current/zookeeperAdmin.html
18
+
19
+# The number of milliseconds of each tick
20
+tickTime=2000
21
+# The number of ticks that the initial
22
+# synchronization phase can take
23
+initLimit=10
24
+# The number of ticks that can pass between
25
+# sending a request and getting an acknowledgement
26
+syncLimit=5
27
+# the directory where the snapshot is stored.
28
+dataDir=/var/lib/zookeeper
29
+# Place the dataLogDir to a separate physical disc for better performance
30
+# dataLogDir=/disk2/zookeeper
31
+
32
+# the port at which the clients will connect
33
+clientPort=2181
34
+
35
+# Maximum number of clients that can connect from one client
36
+maxClientCnxns=60
37
+
38
+# specify all zookeeper servers
39
+# The first port is used by followers to connect to the leader
40
+# The second one is used for leader election
41
+
42
+server.0=127.0.0.1:2888:3888
43
+
44
+# To avoid seeks ZooKeeper allocates space in the transaction log file in
45
+# blocks of preAllocSize kilobytes. The default block size is 64M. One reason
46
+# for changing the size of the blocks is to reduce the block size if snapshots
47
+# are taken more often. (Also, see snapCount).
48
+#preAllocSize=65536
49
+
50
+# Clients can submit requests faster than ZooKeeper can process them,
51
+# especially if there are a lot of clients. To prevent ZooKeeper from running
52
+# out of memory due to queued requests, ZooKeeper will throttle clients so that
53
+# there is no more than globalOutstandingLimit outstanding requests in the
54
+# system. The default limit is 1,000.ZooKeeper logs transactions to a
55
+# transaction log. After snapCount transactions are written to a log file a
56
+# snapshot is started and a new transaction log file is started. The default
57
+# snapCount is 10,000.
58
+#snapCount=1000
59
+
60
+# If this option is defined, requests will be logged to a trace file named
61
+# traceFile.year.month.day.
62
+#traceFile=
63
+
64
+# Leader accepts client connections. Default value is "yes". The leader machine
65
+# coordinates updates. For higher update throughput at the slight expense of
66
+# read throughput the leader can be configured to not accept clients and focus
67
+# on coordination.
68
+#leaderServes=yes
69
+
70
+# Autopurge every hour to avoid using lots of disk in bursts
71
+# Order of the next 2 properties matters.
72
+# autopurge.snapRetainCount must be before autopurge.purgeInterval.
73
+autopurge.snapRetainCount=3
74
+autopurge.purgeInterval=1

+ 98
- 0
devstack/lib/elasticsearch.sh View File

@@ -0,0 +1,98 @@
1
+#!/bin/bash
2
+
3
+# Copyright 2017 FUJITSU LIMITED
4
+#
5
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
6
+# not use this file except in compliance with the License. You may obtain
7
+# a copy of the License at
8
+#
9
+#      http://www.apache.org/licenses/LICENSE-2.0
10
+#
11
+# Unless required by applicable law or agreed to in writing, software
12
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
13
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
14
+# License for the specific language governing permissions and limitations
15
+# under the License.
16
+
17
+
18
+_XTRACE_ELASTICSEARCH=$(set +o | grep xtrace)
19
+set +o xtrace
20
+
21
+function is_elasticsearch_enabled {
22
+    is_service_enabled monasca-elasticsearch && return 0
23
+    return 1
24
+}
25
+
26
+function install_elasticsearch {
27
+    if is_elasticsearch_enabled; then
28
+        echo_summary "Installing ElasticSearch ${ELASTICSEARCH_VERSION}"
29
+
30
+        local es_tarball=elasticsearch-${ELASTICSEARCH_VERSION}.tar.gz
31
+        local es_url=http://download.elasticsearch.org/elasticsearch/elasticsearch/${es_tarball}
32
+        local es_dest=${FILES}/${es_tarball}
33
+
34
+        download_file ${es_url} ${es_dest}
35
+        tar xzf ${es_dest} -C $DEST
36
+
37
+        sudo chown -R $STACK_USER $DEST/elasticsearch-${ELASTICSEARCH_VERSION}
38
+        ln -sf $DEST/elasticsearch-${ELASTICSEARCH_VERSION} $ELASTICSEARCH_DIR
39
+    fi
40
+}
41
+
42
+function configure_elasticsearch {
43
+    if is_elasticsearch_enabled; then
44
+        echo_summary "Configuring ElasticSearch ${ELASTICSEARCH_VERSION}"
45
+
46
+        local templateDir=$ELASTICSEARCH_CFG_DIR/templates
47
+
48
+        for dir in $ELASTICSEARCH_LOG_DIR $templateDir $ELASTICSEARCH_DATA_DIR; do
49
+            sudo install -m 755 -d -o $STACK_USER $dir
50
+        done
51
+
52
+        sudo cp -f "${PLUGIN_FILES}"/elasticsearch/elasticsearch.yml $ELASTICSEARCH_CFG_DIR/elasticsearch.yml
53
+        sudo chown -R $STACK_USER $ELASTICSEARCH_CFG_DIR/elasticsearch.yml
54
+        sudo chmod 0644 $ELASTICSEARCH_CFG_DIR/elasticsearch.yml
55
+
56
+        sudo sed -e "
57
+            s|%ELASTICSEARCH_BIND_HOST%|$ELASTICSEARCH_BIND_HOST|g;
58
+            s|%ELASTICSEARCH_BIND_PORT%|$ELASTICSEARCH_BIND_PORT|g;
59
+            s|%ELASTICSEARCH_PUBLISH_HOST%|$ELASTICSEARCH_PUBLISH_HOST|g;
60
+            s|%ELASTICSEARCH_PUBLISH_PORT%|$ELASTICSEARCH_PUBLISH_PORT|g;
61
+            s|%ELASTICSEARCH_DATA_DIR%|$ELASTICSEARCH_DATA_DIR|g;
62
+            s|%ELASTICSEARCH_LOG_DIR%|$ELASTICSEARCH_LOG_DIR|g;
63
+        " -i $ELASTICSEARCH_CFG_DIR/elasticsearch.yml
64
+    fi
65
+}
66
+
67
+function start_elasticsearch {
68
+    if is_elasticsearch_enabled; then
69
+        echo_summary "Starting ElasticSearch ${ELASTICSEARCH_VERSION}"
70
+        # TODO(jwachowski) find some nicer solution for setting env variable
71
+        local service_file="/etc/systemd/system/devstack@elasticsearch.service"
72
+        local es_java_opts="ES_JAVA_OPTS=-Dmapper.allow_dots_in_name=true"
73
+        iniset -sudo "$service_file" "Service" "Environment" "$es_java_opts"
74
+        run_process "elasticsearch" "$ELASTICSEARCH_DIR/bin/elasticsearch"
75
+    fi
76
+}
77
+
78
+function stop_elasticsearch {
79
+    if is_elasticsearch_enabled; then
80
+        echo_summary "Stopping ElasticSearch ${ELASTICSEARCH_VERSION}"
81
+        stop_process "elasticsearch" || true
82
+    fi
83
+}
84
+
85
+function clean_elasticsearch {
86
+    if is_elasticsearch_enabled; then
87
+        echo_summary "Cleaning Elasticsearch ${ELASTICSEARCH_VERSION}"
88
+
89
+        sudo rm -rf ELASTICSEARCH_DIR || true
90
+        sudo rm -rf ELASTICSEARCH_CFG_DIR || true
91
+        sudo rm -rf ELASTICSEARCH_LOG_DIR || true
92
+        sudo rm -rf ELASTICSEARCH_DATA_DIR || true
93
+        sudo rm -rf $FILES/elasticsearch-${ELASTICSEARCH_VERSION}.tar.gz || true
94
+        sudo rm -rf $DEST/elasticsearch-${ELASTICSEARCH_VERSION} || true
95
+    fi
96
+}
97
+
98
+$_XTRACE_ELASTICSEARCH

+ 61
- 0
devstack/lib/events-agent.sh View File

@@ -0,0 +1,61 @@
1
+#!/bin/bash
2
+
3
+# Copyright 2017 FUJITSU LIMITED
4
+#
5
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
6
+# not use this file except in compliance with the License. You may obtain
7
+# a copy of the License at
8
+#
9
+#      http://www.apache.org/licenses/LICENSE-2.0
10
+#
11
+# Unless required by applicable law or agreed to in writing, software
12
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
13
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
14
+# License for the specific language governing permissions and limitations
15
+# under the License.
16
+
17
+
18
+_XTRACE_EVENTS_AGENT=$(set +o | grep xtrace)
19
+set +o xtrace
20
+
21
+function is_events_agent_enabled {
22
+    is_service_enabled monasca-events-agent && return 0
23
+    return 1
24
+}
25
+
26
+function install_events_agent {
27
+    if is_events_agent_enabled; then
28
+        echo_summary "Installing Events Agent"
29
+        # TODO implement this
30
+    fi
31
+}
32
+
33
+function configure_events_agent {
34
+    if is_events_agent_enabled; then
35
+        echo_summary "Configuring Events Agent"
36
+        # TODO implement this
37
+    fi
38
+}
39
+
40
+function start_events_agent {
41
+    if is_events_agent_enabled; then
42
+        echo_summary "Starting Events Agent"
43
+        # TODO implement this
44
+    fi
45
+}
46
+
47
+function stop_events_agent {
48
+    if is_events_agent_enabled; then
49
+        echo_summary "Stopping Events Agent"
50
+        # TODO implement this
51
+    fi
52
+}
53
+
54
+function clean_events_agent {
55
+    if is_events_agent_enabled; then
56
+        echo_summary "Cleaning Events Agent"
57
+        # TODO implement this
58
+    fi
59
+}
60
+
61
+$_XTRACE_EVENTS_AGENT

+ 97
- 0
devstack/lib/events-api.sh View File

@@ -0,0 +1,97 @@
1
+#!/bin/bash
2
+
3
+# Copyright 2017 FUJITSU LIMITED
4
+#
5
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
6
+# not use this file except in compliance with the License. You may obtain
7
+# a copy of the License at
8
+#
9
+#      http://www.apache.org/licenses/LICENSE-2.0
10
+#
11
+# Unless required by applicable law or agreed to in writing, software
12
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
13
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
14
+# License for the specific language governing permissions and limitations
15
+# under the License.
16
+
17
+
18
+_XTRACE_EVENTS_API=$(set +o | grep xtrace)
19
+set +o xtrace
20
+
21
+function is_events_api_enabled {
22
+    is_service_enabled monasca-events-api && return 0
23
+    return 1
24
+}
25
+
26
+function install_events_api {
27
+    if is_events_api_enabled; then
28
+        echo_summary "Installing Events Api"
29
+        git_clone $MONASCA_EVENTS_API_REPO $MONASCA_EVENTS_API_DIR $MONASCA_EVENTS_API_BRANCH
30
+        setup_develop ${MONASCA_EVENTS_API_DIR}
31
+
32
+        install_keystonemiddleware
33
+        pip_install gunicorn
34
+    fi
35
+}
36
+
37
+function configure_events_api {
38
+    if is_events_api_enabled; then
39
+        echo_summary "Configuring Events Api"
40
+
41
+        # Put config files in ``$MONASCA_EVENTS_API_CONF_DIR`` for everyone to find
42
+        sudo install -d -o $STACK_USER $MONASCA_EVENTS_API_CONF_DIR
43
+
44
+        # ensure fresh installation of configuration files
45
+        rm -rf $MONASCA_EVENTS_API_CONF $MONASCA_EVENTS_API_PASTE $MONASCA_EVENTS_API_LOGGING_CONF
46
+
47
+        if [[ "$MONASCA_EVENTS_API_CONF_DIR" != "$MONASCA_EVENTS_API_DIR/etc/monasca" ]]; then
48
+            install -m 600 $MONASCA_EVENTS_API_DIR/etc/monasca/events-api-paste.ini $MONASCA_EVENTS_API_PASTE
49
+            install -m 600 $MONASCA_EVENTS_API_DIR/etc/monasca/events-api-logging.conf $MONASCA_EVENTS_API_LOGGING_CONF
50
+        fi
51
+
52
+        oslo-config-generator \
53
+            --config-file $MONASCA_EVENTS_API_DIR/config-generator/config.conf \
54
+            --output-file $MONASCA_EVENTS_API_CONF
55
+
56
+        iniset "$MONASCA_EVENTS_API_CONF" DEFAULT log_config_append $MONASCA_EVENTS_API_LOGGING_CONF
57
+
58
+        # configure keystone middleware
59
+        configure_auth_token_middleware "$MONASCA_EVENTS_API_CONF" "admin" $MONASCA_EVENTS_API_CACHE_DIR
60
+        iniset "$MONASCA_EVENTS_API_CONF" keystone_authtoken region_name $REGION_NAME
61
+        iniset "$MONASCA_EVENTS_API_CONF" keystone_authtoken project_name "admin"
62
+        iniset "$MONASCA_EVENTS_API_CONF" keystone_authtoken password $ADMIN_PASSWORD
63
+
64
+        # configure events-api-paste.ini
65
+        iniset "$MONASCA_EVENTS_API_PASTE" server:main bind $MONASCA_EVENTS_API_SERVICE_HOST:$MONASCA_EVENTS_API_SERVICE_PORT
66
+        iniset "$MONASCA_EVENTS_API_PASTE" server:main chdir $MONASCA_EVENTS_API_DIR
67
+        iniset "$MONASCA_EVENTS_API_PASTE" server:main workers $API_WORKERS
68
+    fi
69
+}
70
+
71
+function start_events_api {
72
+    if is_events_api_enabled; then
73
+        echo_summary "Starting Events Api"
74
+        run_process "monasca-events-api" "/usr/local/bin/gunicorn --paste $MONASCA_EVENTS_API_PASTE"
75
+    fi
76
+}
77
+
78
+function stop_events_api {
79
+    if is_events_api_enabled; then
80
+        echo_summary "Stopping Events Api"
81
+        stop_process "monasca-events-api"
82
+    fi
83
+}
84
+
85
+function clean_events_api {
86
+    if is_events_api_enabled; then
87
+        echo_summary "Cleaning Events Api"
88
+        sudo rm -f $MONASCA_EVENTS_API_CONF || true
89
+        sudo rm -f $MONASCA_EVENTS_API_PASTE  || true
90
+        sudo rm -f $MONASCA_EVENTS_API_LOGGING_CONF || true
91
+        sudo rm -rf $MONASCA_EVENTS_API_CACHE_DIR || true
92
+
93
+        sudo rm -rf $MONASCA_EVENTS_API_DIR || true
94
+    fi
95
+}
96
+
97
+$_XTRACE_EVENTS_API

+ 89
- 0
devstack/lib/events-persister.sh View File

@@ -0,0 +1,89 @@
1
+#!/bin/bash
2
+
3
+# Copyright 2017 FUJITSU LIMITED
4
+#
5
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
6
+# not use this file except in compliance with the License. You may obtain
7
+# a copy of the License at
8
+#
9
+#      http://www.apache.org/licenses/LICENSE-2.0
10
+#
11
+# Unless required by applicable law or agreed to in writing, software
12
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
13
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
14
+# License for the specific language governing permissions and limitations
15
+# under the License.
16
+
17
+
18
+_XTRACE_EVENTS_PERSISTER=$(set +o | grep xtrace)
19
+set +o xtrace
20
+
21
+function is_events_persister_enabled {
22
+    is_service_enabled monasca-events-persister && return 0
23
+    return 1
24
+}
25
+
26
+function install_events_persister {
27
+    if is_events_persister_enabled; then
28
+        echo_summary "Installing Events Persister"
29
+        git_clone $MONASCA_EVENTS_PERSISTER_REPO $MONASCA_EVENTS_PERSISTER_DIR $MONASCA_EVENTS_PERSISTER_BRANCH
30
+        setup_develop ${MONASCA_EVENTS_PERSISTER_DIR}
31
+        pip_install "elasticsearch>=2.0.0,<3.0.0"
32
+    fi
33
+}
34
+
35
+function configure_events_persister {
36
+    if is_events_persister_enabled; then
37
+        echo_summary "Configuring Events Persister"
38
+        # Put config files in ``$MONASCA_EVENTS_PERSISTER_CONF_DIR`` for everyone to find
39
+        sudo install -d -o $STACK_USER $MONASCA_EVENTS_PERSISTER_CONF_DIR
40
+
41
+        # ensure fresh installation of configuration files
42
+        rm -rf $MONASCA_EVENTS_PERSISTER_CONF $MONASCA_EVENTS_PERSISTER_LOGGING_CONF
43
+
44
+        oslo-config-generator \
45
+            --config-file $MONASCA_EVENTS_PERSISTER_DIR/config-generator/persister.conf \
46
+            --output-file $MONASCA_EVENTS_PERSISTER_CONF
47
+
48
+        iniset "$MONASCA_EVENTS_PERSISTER_CONF" DEFAULT log_config_append $MONASCA_EVENTS_PERSISTER_LOGGING_CONF
49
+        iniset "$MONASCA_EVENTS_PERSISTER_CONF" zookeeper uri 127.0.0.1:2181
50
+        iniset "$MONASCA_EVENTS_PERSISTER_CONF" zookeeper partition_interval_recheck_seconds 15
51
+        iniset "$MONASCA_EVENTS_PERSISTER_CONF" kafka num_processors 0
52
+        iniset "$MONASCA_EVENTS_PERSISTER_CONF" kafka_events num_processors 1
53
+        iniset "$MONASCA_EVENTS_PERSISTER_CONF" kafka_events uri 127.0.0.1:9092
54
+        iniset "$MONASCA_EVENTS_PERSISTER_CONF" elasticsearch hosts ${ELASTICSEARCH_BIND_HOST}:${ELASTICSEARCH_BIND_PORT}
55
+
56
+        sudo cp -f "${MONASCA_EVENTS_DEVSTACK_DIR}"/files/monasca-events-persister/events-persister-logging.conf \
57
+                    "${MONASCA_EVENTS_PERSISTER_LOGGING_CONF}"
58
+
59
+        sudo sed -e "
60
+            s|%MONASCA_EVENTS_LOG_DIR%|$MONASCA_EVENTS_LOG_DIR|g;
61
+        " -i ${MONASCA_EVENTS_PERSISTER_LOGGING_CONF}
62
+    fi
63
+}
64
+
65
+function start_events_persister {
66
+    if is_events_persister_enabled; then
67
+        echo_summary "Starting Events Persister"
68
+        run_process "monasca-events-persister" "/usr/local/bin/monasca-persister --config-file $MONASCA_EVENTS_PERSISTER_CONF"
69
+    fi
70
+}
71
+
72
+function stop_events_persister {
73
+    if is_events_persister_enabled; then
74
+        echo_summary "Stopping Events Persister"
75
+        stop_process "monasca-events-persister" || true
76
+    fi
77
+}
78
+
79
+function clean_events_persister {
80
+    if is_events_persister_enabled; then
81
+        echo_summary "Cleaning Events Persister"
82
+        sudo rm -f $MONASCA_EVENTS_PERSISTER_CONF || true
83
+        sudo rm -f $MONASCA_EVENTS_PERSISTER_LOGGING_CONF  || true
84
+
85
+        sudo rm -rf $MONASCA_EVENTS_PERSISTER_DIR || true
86
+    fi
87
+}
88
+
89
+$_XTRACE_EVENTS_PERSISTER

+ 98
- 0
devstack/lib/kafka.sh View File

@@ -0,0 +1,98 @@
1
+#!/bin/bash
2
+
3
+# Copyright 2017 FUJITSU LIMITED
4
+#
5
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
6
+# not use this file except in compliance with the License. You may obtain
7
+# a copy of the License at
8
+#
9
+#      http://www.apache.org/licenses/LICENSE-2.0
10
+#
11
+# Unless required by applicable law or agreed to in writing, software
12
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
13
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
14
+# License for the specific language governing permissions and limitations
15
+# under the License.
16
+
17
+
18
+_XTRACE_KAFKA=$(set +o | grep xtrace)
19
+set +o xtrace
20
+
21
+function is_kafka_enabled {
22
+    is_service_enabled monasca-kafka && return 0
23
+    return 1
24
+}
25
+
26
+function install_kafka {
27
+    if is_kafka_enabled; then
28
+        echo_summary "Installing kafka"
29
+
30
+        local kafka_tarball=kafka_${KAFKA_VERSION}.tgz
31
+        local kafka_tarball_url=${APACHE_MIRROR}kafka/${BASE_KAFKA_VERSION}/${kafka_tarball}
32
+        local kafka_tarball_dest=${FILES}/${kafka_tarball}
33
+
34
+        download_file ${kafka_tarball_url} ${kafka_tarball_dest}
35
+
36
+        sudo groupadd --system kafka || true
37
+        sudo useradd --system -g kafka kafka || true
38
+        sudo tar -xzf ${kafka_tarball_dest} -C /opt
39
+        sudo ln -sf /opt/kafka_${KAFKA_VERSION} /opt/kafka
40
+        sudo cp -f "${MONASCA_EVENTS_API_DIR}"/devstack/files/kafka/kafka-server-start.sh /opt/kafka_${KAFKA_VERSION}/bin/kafka-server-start.sh
41
+    fi
42
+}
43
+
44
+function configure_kafka {
45
+    if is_kafka_enabled; then
46
+        echo_summary "Configuring kafka"
47
+        sudo mkdir -p /var/kafka || true
48
+        sudo chown kafka:kafka /var/kafka
49
+        sudo chmod 755 /var/kafka
50
+        sudo rm -rf /var/kafka/lost+found
51
+        sudo mkdir -p /var/log/kafka || true
52
+        sudo chown kafka:kafka /var/log/kafka
53
+        sudo chmod 755 /var/log/kafka
54
+        sudo ln -sf /opt/kafka/config /etc/kafka
55
+        sudo ln -sf /var/log/kafka /opt/kafka/logs
56
+
57
+        sudo cp -f "${MONASCA_EVENTS_DEVSTACK_DIR}"/files/kafka/log4j.properties /etc/kafka/log4j.properties
58
+        sudo cp -f "${MONASCA_EVENTS_DEVSTACK_DIR}"/files/kafka/server.properties /etc/kafka/server.properties
59
+        sudo chown kafka:kafka /etc/kafka/*
60
+        sudo chmod 644 /etc/kafka/*
61
+    fi
62
+}
63
+
64
+function start_kafka {
65
+    if is_kafka_enabled; then
66
+        echo_summary "Starting Monasca Kafka"
67
+        run_process "kafka" "/opt/kafka/bin/kafka-server-start.sh /etc/kafka/server.properties" "kafka" "kafka"
68
+    fi
69
+}
70
+
71
+function stop_kafka {
72
+    if is_kafka_enabled; then
73
+        echo_summary "Stopping Monasca Kafka"
74
+        stop_process "kafka" || true
75
+    fi
76
+}
77
+
78
+function clean_kafka {
79
+    if is_kafka_enabled; then
80
+        echo_summary "Clean Monasca Kafka"
81
+        sudo rm -rf /var/kafka
82
+        sudo rm -rf /var/log/kafka
83
+        sudo rm -rf /etc/kafka
84
+        sudo rm -rf /opt/kafka
85
+        sudo userdel kafka || true
86
+        sudo groupdel kafka || true
87
+        sudo rm -rf /opt/kafka_${KAFKA_VERSION}
88
+        sudo rm -rf ${FILES}/kafka_${KAFKA_VERSION}.tgz
89
+    fi
90
+}
91
+
92
+function create_kafka_topic {
93
+    if is_kafka_enabled; then
94
+        /opt/kafka/bin/kafka-topics.sh --create --zookeeper localhost:2181 --replication-factor 1 --partitions 4 --topic $1
95
+    fi
96
+}
97
+
98
+$_XTRACE_KAFKA

+ 75
- 0
devstack/lib/utils.sh View File

@@ -0,0 +1,75 @@
1
+#!/bin/bash
2
+
3
+# Copyright 2017 FUJITSU LIMITED
4
+#
5
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
6
+# not use this file except in compliance with the License. You may obtain
7
+# a copy of the License at
8
+#
9
+#      http://www.apache.org/licenses/LICENSE-2.0
10
+#
11
+# Unless required by applicable law or agreed to in writing, software
12
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
13
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
14
+# License for the specific language governing permissions and limitations
15
+# under the License.
16
+
17
+
18
+_XTRACE_UTILS=$(set +o | grep xtrace)
19
+set +o xtrace
20
+
21
+function find_nearest_apache_mirror {
22
+    if [ -z $APACHE_MIRROR ]; then
23
+        local mirror;
24
+        mirror=`curl -s 'https://www.apache.org/dyn/closer.cgi?as_json=1' | jq --raw-output '.preferred'`
25
+        APACHE_MIRROR=$mirror
26
+    fi
27
+}
28
+
29
+# download_file
30
+#  $1 - url to download
31
+#  $2 - location where to save url to
32
+#
33
+# Download file only when it not exists or there is newer version of it.
34
+#
35
+#  Uses global variables:
36
+#  - OFFLINE
37
+#  - DOWNLOAD_FILE_TIMEOUT
38
+function download_file {
39
+    local url=$1
40
+    local file=$2
41
+
42
+    # If in OFFLINE mode check if file already exists
43
+    if [[ ${OFFLINE} == "True" ]] && [[ ! -f ${file} ]]; then
44
+        die $LINENO "You are running in OFFLINE mode but
45
+                        the target file \"$file\" was not found"
46
+    fi
47
+
48
+    local curl_z_flag=""
49
+    if [[ -f "${file}" ]]; then
50
+        # If the file exists tell cURL to download only if newer version
51
+        # is available
52
+        curl_z_flag="-z $file"
53
+    fi
54
+
55
+    # yeah...downloading...devstack...hungry..om, om, om
56
+    local timeout=0
57
+
58
+    if [[ -n "${DOWNLOAD_FILE_TIMEOUT}" ]]; then
59
+        timeout=${DOWNLOAD_FILE_TIMEOUT}
60
+    fi
61
+
62
+    time_start "download_file"
63
+    _safe_permission_operation ${CURL_GET} -L $url --connect-timeout $timeout --retry 3 --retry-delay 5 -o $file $curl_z_flag
64
+    time_stop "download_file"
65
+}
66
+
67
+function configure_log_dir {
68
+    local logdir=$1
69
+
70
+    sudo mkdir -p $logdir
71
+    sudo chmod -R 0777 $logdir
72
+
73
+}
74
+
75
+$_XTRACE_UTILS

+ 67
- 0
devstack/lib/zookeeper.sh View File

@@ -0,0 +1,67 @@
1
+#!/bin/bash
2
+
3
+# Copyright 2017 FUJITSU LIMITED
4
+#
5
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
6
+# not use this file except in compliance with the License. You may obtain
7
+# a copy of the License at
8
+#
9
+#      http://www.apache.org/licenses/LICENSE-2.0
10
+#
11
+# Unless required by applicable law or agreed to in writing, software
12
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
13
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
14
+# License for the specific language governing permissions and limitations
15
+# under the License.
16
+
17
+_XTRACE_ZOOKEEPER=$(set +o | grep xtrace)
18
+set +o xtrace
19
+
20
+# Set up default directories
21
+ZOOKEEPER_DATA_DIR=$DEST/data/zookeeper
22
+ZOOKEEPER_CONF_DIR=/etc/zookeeper
23
+
24
+function is_zookeeper_enabled {
25
+    is_service_enabled monasca-zookeeper && return 0
26
+    return 1
27
+}
28
+
29
+function install_zookeeper {
30
+    if is_zookeeper_enabled; then
31
+        if is_ubuntu; then
32
+            install_package zookeeperd
33
+        else
34
+            die $LINENO "Don't know how to install zookeeper on this platform"
35
+        fi
36
+    fi
37
+}
38
+
39
+function configure_zookeeper {
40
+    if is_zookeeper_enabled; then
41
+        sudo cp $MONASCA_EVENTS_API_DIR/devstack/files/zookeeper/* $ZOOKEEPER_CONF_DIR
42
+        sudo sed -i -e 's|.*dataDir.*|dataDir='$ZOOKEEPER_DATA_DIR'|' $ZOOKEEPER_CONF_DIR/zoo.cfg
43
+        sudo rm -rf $ZOOKEEPER_DATA_DIR || true
44
+        sudo mkdir -p $ZOOKEEPER_DATA_DIR || true
45
+    fi
46
+}
47
+
48
+function start_zookeeper {
49
+    if is_zookeeper_enabled; then
50
+        start_service zookeeper
51
+    fi
52
+}
53
+
54
+function stop_zookeeper {
55
+    if is_zookeeper_enabled; then
56
+        stop_service zookeeper
57
+    fi
58
+}
59
+
60
+function clean_zookeeper {
61
+    if is_zookeeper_enabled; then
62
+        sudo rm -rf $ZOOKEEPER_DATA_DIR
63
+        apt_get -y purge zookeeper
64
+    fi
65
+}
66
+
67
+$_XTRACE_ZOOKEEPER

+ 125
- 1
devstack/plugin.sh View File

@@ -1,7 +1,7 @@
1 1
 #!/bin/bash
2 2
 
3 3
 #
4
-# Copyright 2016 FUJITSU LIMITED
4
+# Copyright 2017 FUJITSU LIMITED
5 5
 #
6 6
 # Licensed under the Apache License, Version 2.0 (the "License");
7 7
 # you may not use this file except in compliance with the License.
@@ -15,3 +15,127 @@
15 15
 # implied.
16 16
 # See the License for the specific language governing permissions and
17 17
 # limitations under the License.
18
+
19
+# Save trace setting
20
+_EVENTS_XTRACE=$(set +o | grep xtrace)
21
+set -o xtrace
22
+_EVENTS_ERREXIT=$(set +o | grep errexit)
23
+set -o errexit
24
+
25
+# source lib/*
26
+source ${MONASCA_EVENTS_API_DIR}/devstack/lib/utils.sh
27
+source ${MONASCA_EVENTS_API_DIR}/devstack/lib/zookeeper.sh
28
+source ${MONASCA_EVENTS_API_DIR}/devstack/lib/kafka.sh
29
+source ${MONASCA_EVENTS_API_DIR}/devstack/lib/elasticsearch.sh
30
+source ${MONASCA_EVENTS_API_DIR}/devstack/lib/events-persister.sh
31
+source ${MONASCA_EVENTS_API_DIR}/devstack/lib/events-api.sh
32
+source ${MONASCA_EVENTS_API_DIR}/devstack/lib/events-agent.sh
33
+
34
+function pre_install_monasca_events {
35
+    echo_summary "Pre-Installing Monasca Events Dependency Components"
36
+
37
+    find_nearest_apache_mirror
38
+    install_zookeeper
39
+    install_kafka
40
+    install_elasticsearch
41
+}
42
+
43
+function install_monasca_events {
44
+    echo_summary "Installing Core Monasca Events Components"
45
+    install_events_persister
46
+    install_events_api
47
+    install_events_agent
48
+}
49
+
50
+function configure_monasca_events {
51
+    echo_summary "Configuring Monasca Events Dependency Components"
52
+    configure_zookeeper
53
+    configure_kafka
54
+    configure_elasticsearch
55
+
56
+    echo_summary "Configuring Monasca Events Core Components"
57
+    configure_log_dir ${MONASCA_EVENTS_LOG_DIR}
58
+    configure_events_persister
59
+    configure_events_api
60
+    configure_events_agent
61
+}
62
+
63
+function init_monasca_events {
64
+    echo_summary "Initializing Monasca Events Components"
65
+    start_zookeeper
66
+    start_kafka
67
+    start_elasticsearch
68
+    # wait for all services to start
69
+    sleep 10s
70
+    create_kafka_topic monevents
71
+}
72
+
73
+function start_monasca_events {
74
+    echo_summary "Starting Monasca Events Components"
75
+    start_events_persister
76
+    start_events_api
77
+    start_events_agent
78
+}
79
+
80
+function unstack_monasca_events {
81
+    echo_summary "Unstacking Monasca Events Components"
82
+    stop_events_agent
83
+    stop_events_api
84
+    stop_events_persister
85
+    stop_elasticsearch
86
+    stop_kafka
87
+    stop_zookeeper
88
+}
89
+
90
+function clean_monasca_events {
91
+    echo_summary "Cleaning Monasca Events Components"
92
+    clean_events_agent
93
+    clean_events_api
94
+    clean_events_persister
95
+    clean_elasticsearch
96
+    clean_kafka
97
+    clean_zookeeper
98
+}
99
+
100
+# check for service enabled
101
+if is_service_enabled monasca-events; then
102
+
103
+    if [[ "$1" == "stack" && "$2" == "pre-install" ]]; then
104
+        # Set up system services
105
+        echo_summary "Configuring Monasca Events system services"
106
+        pre_install_monasca_events
107
+
108
+    elif [[ "$1" == "stack" && "$2" == "install" ]]; then
109
+        # Perform installation of service source
110
+        echo_summary "Installing Monasca Events"
111
+        install_monasca_events
112
+
113
+    elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then
114
+        # Configure after the other layer 1 and 2 services have been configured
115
+        echo_summary "Configuring Monasca Events"
116
+        configure_monasca_events
117
+
118
+    elif [[ "$1" == "stack" && "$2" == "extra" ]]; then
119
+        # Initialize and start the Monasca service
120
+        echo_summary "Initializing Monasca Events"
121
+        init_monasca_events
122
+        start_monasca_events
123
+    fi
124
+
125
+    if [[ "$1" == "unstack" ]]; then
126
+        # Shut down Monasca services
127
+        echo_summary "Unstacking Monasca Events"
128
+        unstack_monasca_events
129
+    fi
130
+
131
+    if [[ "$1" == "clean" ]]; then
132
+        # Remove state and transient data
133
+        # Remember clean.sh first calls unstack.sh
134
+        echo_summary "Cleaning Monasca Events"
135
+        clean_monasca_events
136
+    fi
137
+fi
138
+
139
+# Restore errexit & xtrace
140
+${_EVENTS_ERREXIT}
141
+${_EVENTS_XTRACE}

+ 0
- 28
devstack/post_test_hook.sh View File

@@ -1,28 +0,0 @@
1
-#
2
-# (C) Copyright 2015 Hewlett Packard Enterprise Development Company LP
3
-# (C) Copyright 2016-2017 FUJITSU LIMITED
4
-#
5
-# Licensed under the Apache License, Version 2.0 (the "License");
6
-# you may not use this file except in compliance with the License.
7
-# You may obtain a copy of the License at
8
-#
9
-#    http://www.apache.org/licenses/LICENSE-2.0
10
-#
11
-# Unless required by applicable law or agreed to in writing, software
12
-# distributed under the License is distributed on an "AS IS" BASIS,
13
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
14
-# implied.
15
-# See the License for the specific language governing permissions and
16
-# limitations under the License.
17
-
18
-# Sleep some time until all services are starting
19
-sleep 6
20
-
21
-function load_devstack_utilities {
22
-    source $BASE/new/devstack/stackrc
23
-    source $BASE/new/devstack/functions
24
-    source $BASE/new/devstack/openrc admin admin
25
-
26
-    # print OS_ variables
27
-    env | grep OS_
28
-}

+ 72
- 1
devstack/settings View File

@@ -13,4 +13,75 @@
13 13
 # implied.
14 14
 # See the License for the specific language governing permissions and
15 15
 # limitations under the License.
16
-#
16
+#
17
+
18
+# Monasca infrastructure services
19
+enable_service monasca-zookeeper
20
+enable_service monasca-kafka
21
+enable_service monasca-elasticsearch
22
+
23
+
24
+# Monasca Events services
25
+enable_service monasca-events
26
+enable_service monasca-events-api
27
+enable_service monasca-events-persister
28
+enable_service monasca-events-agent
29
+
30
+
31
+# Dependent Software Versions
32
+BASE_KAFKA_VERSION=${BASE_KAFKA_VERSION:-0.9.0.1}
33
+SCALA_VERSION=${SCALA_VERSION:-2.11}
34
+KAFKA_VERSION=${KAFKA_VERSION:-${SCALA_VERSION}-${BASE_KAFKA_VERSION}}
35
+ELASTICSEARCH_VERSION=${ELASTICSEARCH_VERSION:-2.4.2}
36
+
37
+
38
+# Path settings
39
+MONASCA_BASE=${DEST}
40
+
41
+# Repository settings
42
+MONASCA_EVENTS_API_REPO=${MONASCA_EVENTS_API_REPO:-${GIT_BASE}/openstack/monasca-events-api.git}
43
+MONASCA_EVENTS_API_BRANCH=${MONASCA_EVENTS_API_BRANCH:-master}
44
+MONASCA_EVENTS_API_DIR=${MONASCA_BASE}/monasca-events-api
45
+
46
+MONASCA_EVENTS_PERSISTER_REPO=${MONASCA_EVENTS_PERSISTER_REPO:-${GIT_BASE}/openstack/monasca-persister.git}
47
+MONASCA_EVENTS_PERSISTER_BRANCH=${MONASCA_EVENTS_PERSISTER_BRANCH:-master}
48
+MONASCA_EVENTS_PERSISTER_DIR=${MONASCA_BASE}/monasca-persister
49
+
50
+MONASCA_EVENTS_AGENT_REPO=${MONASCA_EVENTS_AGENT_REPO:-${GIT_BASE}/openstack/monasca-events-agent.git}
51
+MONASCA_EVENTS_AGENT_BRANCH=${MONASCA_EVENTS_AGENT_BRANCH:-master}
52
+MONASCA_EVENTS_AGENT_DIR=${MONASCA_BASE}/monasca-events-agent
53
+
54
+# Dependencies settings
55
+ELASTICSEARCH_BIND_HOST=${ELASTICSEARCH_BIND_HOST:-${SERVICE_HOST}}
56
+ELASTICSEARCH_BIND_PORT=${ELASTICSEARCH_BIND_PORT:-9200}
57
+ELASTICSEARCH_PUBLISH_HOST=${ELASTICSEARCH_PUBLISH_HOST:-${SERVICE_HOST}}
58
+ELASTICSEARCH_PUBLISH_PORT=${ELASTICSEARCH_PUBLISH_PORT:-9300}
59
+ELASTICSEARCH_DIR=$DEST/elasticsearch
60
+ELASTICSEARCH_CFG_DIR=$ELASTICSEARCH_DIR/config
61
+ELASTICSEARCH_LOG_DIR=$LOGDIR/elasticsearch
62
+ELASTICSEARCH_DATA_DIR=$DATA_DIR/elasticsearch
63
+
64
+KAFKA_SERVICE_HOST=${KAFKA_SERVICE_HOST:-${SERVICE_HOST}}
65
+KAFKA_SERVICE_PORT=${KAFKA_SERVICE_PORT:-9092}
66
+
67
+# configuration
68
+MONASCA_EVENTS_LOG_DIR=${MONASCA_EVENTS_LOG_DIR:-/var/log/monasca}
69
+MONASCA_EVENTS_DEVSTACK_DIR=${MONASCA_EVENTS_DEVSTACK_DIR:-$MONASCA_EVENTS_API_DIR/devstack}
70
+
71
+MONASCA_EVENTS_API_CONF_DIR=${MONASCA_EVENTS_API_CONF_DIR:-/etc/monasca}
72
+MONASCA_EVENTS_API_CONF=${MONASCA_EVENTS_API_CONF:-$MONASCA_EVENTS_API_CONF_DIR/events-api.conf}
73
+MONASCA_EVENTS_API_PASTE=${MONASCA_EVENTS_API_PASTE:-$MONASCA_EVENTS_API_CONF_DIR/events-api-paste.ini}
74
+MONASCA_EVENTS_API_LOGGING_CONF=${MONASCA_EVENTS_API_LOGGING_CONF:-$MONASCA_EVENTS_API_CONF_DIR/events-api-logging.conf}
75
+MONASCA_EVENTS_API_CACHE_DIR=${MONASCA_EVENTS_API_CACHE_DIR:-/var/cache/monasca-events-api}
76
+MONASCA_EVENTS_API_SERVICE_HOST=${MONASCA_EVENTS_API_SERVICE_HOST:-${SERVICE_HOST}}
77
+MONASCA_EVENTS_API_SERVICE_PORT=${MONASCA_EVENTS_API_SERVICE_PORT:-5670}
78
+MONASCA_EVENTS_API_SERVICE_PROTOCOL=${MONASCA_EVENTS_API_SERVICE_PROTOCOL:-${SERVICE_PROTOCOL}}
79
+
80
+MONASCA_EVENTS_PERSISTER_CONF_DIR=${MONASCA_EVENTS_PERSISTER_CONF_DIR:-/etc/monasca}
81
+MONASCA_EVENTS_PERSISTER_CONF=${MONASCA_EVENTS_PERSISTER_CONF:-${MONASCA_EVENTS_PERSISTER_CONF_DIR}/events-persister.conf}
82
+MONASCA_EVENTS_PERSISTER_LOGGING_CONF=${MONASCA_EVENTS_PERSISTER_LOGGING_CONF:-${MONASCA_EVENTS_PERSISTER_CONF_DIR}/events-persister-logging.conf}
83
+
84
+
85
+# Other settings
86
+PLUGIN_FILES=$MONASCA_EVENTS_API_DIR/devstack/files
87
+DOWNLOAD_FILE_TIMEOUT=${DOWNLOAD_FILE_TIMEOUT:-300}

+ 34
- 0
etc/monasca/events-api-logging.conf View File

@@ -0,0 +1,34 @@
1
+[loggers]
2
+keys = root, kafka
3
+
4
+[handlers]
5
+keys = console, file
6
+
7
+[formatters]
8
+keys = context
9
+
10
+[logger_root]
11
+level = DEBUG
12
+handlers = console, file
13
+
14
+[logger_kafka]
15
+qualname = kafka
16
+level = DEBUG
17
+handlers = console, file
18
+propagate = 0
19
+
20
+[handler_console]
21
+class = logging.StreamHandler
22
+args = (sys.stderr,)
23
+level = DEBUG
24
+formatter = context
25
+
26
+[handler_file]
27
+class = logging.handlers.RotatingFileHandler
28
+level = DEBUG
29
+formatter = context
30
+# store up to 5*100MB of logs
31
+args = ('monasca-events-api.log', 'a', 104857600, 5)
32
+
33
+[formatter_context]
34
+class = oslo_log.formatters.ContextFormatter

+ 30
- 6
etc/monasca/events-api-paste.ini View File

@@ -17,15 +17,39 @@ name = main
17 17
 
18 18
 [composite:main]
19 19
 use = egg:Paste#urlmap
20
-/: ea_version
21
-/v1.0: ea_version_v1
20
+/: events_version
22 21
 
23
-[pipeline:ea_version_v1]
24
-pipeline = request_id auth
22
+[pipeline:events_version]
23
+pipeline = error_trap versionapp
25 24
 
25
+[app:versionapp]
26
+paste.app_factory = monasca_events_api.app.api:create_version_app
27
+
28
+[filter:auth]
29
+paste.filter_factory = keystonemiddleware.auth_token:filter_factory
30
+
31
+[filter:roles]
32
+paste.filter_factory = monasca_events_api.middleware.role_middleware:RoleMiddleware.factory
26 33
 
27 34
 [filter:request_id]
28 35
 paste.filter_factory = oslo_middleware.request_id:RequestId.factory
29 36
 
30
-[filter:auth]
31
-paste.filter_factory = keystonemiddleware.auth_token:filter_factory
37
+# NOTE(trebskit) this is optional
38
+# insert this into either pipeline to get some WSGI environment debug output
39
+[filter:debug]
40
+paste.filter_factory = oslo_middleware.debug:Debug.factory
41
+
42
+[filter:error_trap]
43
+paste.filter_factory = oslo_middleware.catch_errors:CatchErrors.factory
44
+
45
+[server:main]
46
+use = egg:gunicorn#main
47
+bind = 127.0.0.1:5670
48
+workers = 9
49
+worker-connections = 2000
50
+worker-class = eventlet
51
+timeout = 30
52
+backlog = 2048
53
+keepalive = 2
54
+proc_name = monasca-events-api
55
+loglevel = DEBUG

+ 58
- 0
monasca_events_api/app/api.py View File

@@ -0,0 +1,58 @@
1
+# Copyright 2017 FUJITSU LIMITED
2
+#
3
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
4
+# not use this file except in compliance with the License. You may obtain
5
+# a copy of the License at
6
+#
7
+#      http://www.apache.org/licenses/LICENSE-2.0
8
+#
9
+# Unless required by applicable law or agreed to in writing, software
10
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
11
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
12
+# License for the specific language governing permissions and limitations
13
+# under the License.
14
+
15
+"""Module initializes various applications of monasca-events-api."""
16
+
17
+
18
+import falcon
19
+from oslo_config import cfg
20
+from oslo_log import log
21
+
22
+
23
+LOG = log.getLogger(__name__)
24
+CONF = cfg.CONF
25
+
26
+_CONF_LOADED = False
27
+
28
+
29
+class Versions(object):
30
+    """Versions API.
31
+
32
+    Versions returns information about API itself.
33
+
34
+    """
35
+
36
+    def __init__(self):
37
+        """Init the Version App."""
38
+        LOG.info('Initializing VersionsAPI!')
39
+
40
+    def on_get(self, req, res):
41
+        """On get method."""
42
+        res.status = falcon.HTTP_200
43
+        res.body = '{"version": "v1.0"}'
44
+
45
+
46
+def create_version_app(global_conf, **local_conf):
47
+    """Create Version application."""
48
+    ctrl = Versions()
49
+    controllers = {
50
+        '/': ctrl,   # redirect http://host:port/ down to Version app
51
+                     # avoid conflicts with actual pipelines and 404 error
52
+        '/version': ctrl,  # list all the versions
53
+    }
54
+
55
+    wsgi_app = falcon.API()
56
+    for route, ctrl in controllers.items():
57
+        wsgi_app.add_route(route, ctrl)
58
+    return wsgi_app

devstack/pre_test_hook.sh → monasca_events_api/tests/functional/contrib/gate_hook.sh View File


+ 100
- 0
monasca_events_api/tests/functional/contrib/post_test_hook.sh View File

@@ -0,0 +1,100 @@
1
+#
2
+# (C) Copyright 2015 Hewlett Packard Enterprise Development Company LP
3
+# (C) Copyright 2017 FUJITSU LIMITED
4
+#
5
+# Licensed under the Apache License, Version 2.0 (the "License");
6
+# you may not use this file except in compliance with the License.
7
+# You may obtain a copy of the License at
8
+#
9
+#    http://www.apache.org/licenses/LICENSE-2.0
10
+#
11
+# Unless required by applicable law or agreed to in writing, software
12
+# distributed under the License is distributed on an "AS IS" BASIS,
13
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
14
+# implied.
15
+# See the License for the specific language governing permissions and
16
+# limitations under the License.
17
+#
18
+
19
+sleep 6
20
+
21
+function load_devstack_utilities {
22
+    source $BASE/new/devstack/stackrc
23
+    source $BASE/new/devstack/functions
24
+    source $BASE/new/devstack/openrc admin admin
25
+}
26
+
27
+function setup_monasca_events_api {
28
+
29
+    local constraints="-c ${REQUIRMENTS_DIR}/upper-constraints.txt"
30
+
31
+    pushd $TEMPEST_DIR
32
+    sudo -EH pip install $constraints -r requirements.txt -r test-requirements.txt
33
+    popd;
34
+
35
+    pushd $MONASCA_EVENTS_API_DIR
36
+    sudo -EH pip install $constraints -r requirements.txt -r test-requirements.txt
37
+    sudo -EH python setup.py install
38
+    popd;
39
+}
40
+
41
+function set_tempest_conf {
42
+
43
+    local conf_file=$TEMPEST_DIR/etc/tempest.conf
44
+    pushd $TEMPEST_DIR
45
+    oslo-config-generator \
46
+        --config-file tempest/cmd/config-generator.tempest.conf \
47
+        --output-file $conf_file
48
+    popd
49
+
50
+    cp -f $DEST/tempest/etc/logging.conf.sample $DEST/tempest/etc/logging.conf
51
+
52
+    # set identity section
53
+    iniset $conf_file identity admin_domain_scope True
54
+    iniset $conf_file identity user_unique_last_password_count 2
55
+    iniset $conf_file identity user_locakout_duration 5
56
+    iniset $conf_file identity user_lockout_failure_attempts 2
57
+    iniset $conf_file identity uri $OS_AUTH_URL/v2.0
58
+    iniset $conf_file identity uri_v3 $OS_AUTH_URL/v3
59
+    iniset $conf_file identity auth_version v$OS_IDENTITY_API_VERSION
60
+    # set auth section
61
+    iniset $conf_file auth use_dynamic_credentials True
62
+    iniset $conf_file auth admin_username $OS_USERNAME
63
+    iniset $conf_file auth admin_password $OS_PASSWORD
64
+    iniset $conf_file auth admin_domain_name $OS_PROJECT_DOMAIN_ID
65
+    iniset $conf_file auth admin_project_name $OS_PROJECT_NAME
66
+
67
+}
68
+
69
+function function_exists {
70
+    declare -f -F $1 > /dev/null
71
+}
72
+
73
+if ! function_exists echo_summary; then
74
+    function echo_summary {
75
+        echo $@
76
+    }
77
+fi
78
+
79
+XTRACE=$(set +o | grep xtrace)
80
+set -o xtrace
81
+
82
+echo_summary "monasca's events post_test_hook.sh was called..."
83
+(set -o posix; set)
84
+
85
+# save ref to monasca-api dir
86
+export MONASCA_EVENTS_API_DIR="$BASE/new/monasca-events-api"
87
+export TEMPEST_DIR="$BASE/new/tempest"
88
+
89
+sudo chown -R $USER:stack $MONASCA_EVENTS_API_DIR
90
+sudo chown -R $USER:stack $TEMPEST_DIR
91
+
92
+load_devstack_utilities
93
+setup_monasca_events_api
94
+set_tempest_conf
95
+
96
+(cd $TEMPEST_DIR; testr init)
97
+(cd $TEMPEST_DIR; testr list-tests monasca_events_api/tests/functional > monasca_tempest_tests)
98
+(cd $TEMPEST_DIR; cat monasca_tempest_tests)
99
+(cd $TEMPEST_DIR; cat monasca_tempest_tests | grep gate > monasca_tempest_tests_gate)
100
+(cd $TEMPEST_DIR; testr run --subunit --load-list=monasca_tempest_tests_gate | subunit-trace --fails)

+ 15
- 0
playbooks/legacy/monasca-tempest-events-base/post.yaml View File

@@ -0,0 +1,15 @@
1
+- hosts: primary
2
+  tasks:
3
+
4
+    - name: Copy files from {{ ansible_user_dir }}/workspace/ on node
5
+      synchronize:
6
+        src: '{{ ansible_user_dir }}/workspace/'
7
+        dest: '{{ zuul.executor.log_root }}'
8
+        mode: pull
9
+        copy_links: true
10
+        verify_host: true
11
+        rsync_opts:
12
+          - --include=/logs/**
13
+          - --include=*/
14
+          - --exclude=*
15
+          - --prune-empty-dirs

+ 72
- 0
playbooks/legacy/monasca-tempest-events-base/run.yaml View File

@@ -0,0 +1,72 @@
1
+- hosts: all
2
+  name: Autoconverted job legacy-tempest-dsvm-monasca-python-mysql-full from old job
3
+    gate-tempest-dsvm-monasca-python-mysql-full-ubuntu-xenial-nv
4
+  tasks:
5
+
6
+    - name: Ensure legacy workspace directory
7
+      file:
8
+        path: '{{ ansible_user_dir }}/workspace'
9
+        state: directory
10
+
11
+    - shell:
12
+        cmd: |
13
+          set -e
14
+          set -x
15
+          cat > clonemap.yaml << EOF
16
+          clonemap:
17
+            - name: openstack-infra/devstack-gate
18
+              dest: devstack-gate
19
+          EOF
20
+          /usr/zuul-env/bin/zuul-cloner -m clonemap.yaml --cache-dir /opt/git \
21
+              git://git.openstack.org \
22
+              openstack-infra/devstack-gate
23
+        executable: /bin/bash
24
+        chdir: '{{ ansible_user_dir }}/workspace'
25
+      environment: '{{ zuul | zuul_legacy_vars }}'
26
+
27
+    - shell:
28
+        cmd: |
29
+          set -e
30
+          set -x
31
+          cat << 'EOF' >>"/tmp/dg-local.conf"
32
+          [[local|localrc]]
33
+          enable_plugin monasca-events-api git://git.openstack.org/openstack/monasca-events-api
34
+
35
+          EOF
36
+        executable: /bin/bash
37
+        chdir: '{{ ansible_user_dir }}/workspace'
38
+      environment: '{{ zuul | zuul_legacy_vars }}'
39
+
40
+    - shell:
41
+        cmd: |
42
+          set -e
43
+          set -x
44
+          export PYTHONUNBUFFERED=true
45
+
46
+          export ENABLED_SERVICES=tempest
47
+
48
+          export DEVSTACK_GATE_NEUTRON=1
49
+          export DEVSTACK_GATE_EXERCISES=0
50
+          export DEVSTACK_GATE_POSTGRES=0
51
+
52
+          export PROJECTS="openstack/monasca-events-api $PROJECTS"
53
+          export PROJECTS="openstack/monasca-persister $PROJECTS"
54
+          export PROJECTS="openstack/monasca-common $PROJECTS"
55
+          export PROJECTS="openstack/python-monascaclient $PROJECTS"
56
+
57
+          function pre_test_hook {
58
+              source $BASE/new/monasca-events-api/monasca_events_api/tests/functional/contrib/gate_hook.sh
59
+          }
60
+          export -f pre_test_hook
61
+
62
+          function post_test_hook {
63
+              # Configure and run tempest on monasca-api installation
64
+              source $BASE/new/monasca-events-api/monasca_events_api/tests/functional/contrib/post_test_hook.sh
65
+          }
66
+          export -f post_test_hook
67
+
68
+          cp devstack-gate/devstack-vm-gate-wrap.sh ./safe-devstack-vm-gate-wrap.sh
69
+          ./safe-devstack-vm-gate-wrap.sh
70
+        executable: /bin/bash
71
+        chdir: '{{ ansible_user_dir }}/workspace'
72
+      environment: '{{ zuul | zuul_legacy_vars }}'

Loading…
Cancel
Save