Add monasca thresh

Change-Id: I9ce5c21eba5db0a98fe956ceeb9ba34d079bf335
okozachenko 2020-09-09 09:33:17 +03:00
parent 66b2d677a7
commit 1dafd655cf
15 changed files with 1463 additions and 0 deletions

@@ -0,0 +1,118 @@
ARG COMMON_REPO=https://review.opendev.org/openstack/monasca-common
ARG COMMON_VERSION=3.1.0
ARG REPO_VERSION=stable/ussuri
ARG CONSTRAINTS_BRANCH=master
ARG CONSTRAINTS_FILE=https://opendev.org/openstack/requirements/raw/branch/stable/ussuri/upper-constraints.txt
ARG DOCKER_IMAGE=monasca/thresh
ARG APP_REPO=https://review.opendev.org/openstack/monasca-thresh
FROM storm:1.1.1
ENV \
MAVEN_HOME="/usr/share/maven" \
ZOOKEEPER_SERVERS="zookeeper" \
ZOOKEEPER_PORT="2181" \
ZOOKEEPER_WAIT="true" \
SUPERVISOR_SLOTS_PORTS="6701,6702" \
SUPERVISOR_MAX_MB="256" \
WORKER_MAX_MB="784" \
NIMBUS_SEEDS="storm-nimbus" \
NIMBUS_MAX_MB="256" \
UI_MAX_MB="768" \
WORKER_LOGS_TO_STDOUT="false" \
USE_SSL_ENABLED="true"
COPY memory.py settings.xml.j2 /
COPY start.sh mysql_check.py kafka_wait_for_topics.py /
COPY templates /templates
COPY logging /logging
ENV \
KAFKA_URI="kafka:9092" \
KAFKA_WAIT_FOR_TOPICS=alarm-state-transitions,metrics,events \
LOGSTASH_FIELDS="service=monasca-thresh" \
LOG_CONFIG_FILE="/storm/log4j2/cluster.xml" \
MYSQL_HOST=mysql \
MYSQL_PORT=3306 \
MYSQL_USER=thresh \
MYSQL_PASSWORD=password \
MYSQL_DB=mon \
NO_STORM_CLUSTER=false \
STORM_WAIT_DELAY=5 \
STORM_WAIT_RETRIES=24 \
STORM_WAIT_TIMEOUT=20 \
WORKER_MAX_HEAP_MB=256
ARG SKIP_COMMON_TESTS=false
ARG SKIP_THRESH_TESTS=false
ARG CREATION_TIME
ARG DOCKER_IMAGE
ARG APP_REPO
ARG GITHUB_REPO
ARG REPO_VERSION
ARG GIT_COMMIT
ARG CONSTRAINTS_BRANCH
ARG CONSTRAINTS_FILE
ARG EXTRA_DEPS
ARG COMMON_REPO
ARG COMMON_VERSION
ARG COMMON_GIT_COMMIT
SHELL ["/bin/ash", "-eo", "pipefail", "-c"]
RUN \
echo 'http://dl-cdn.alpinelinux.org/alpine/v3.8/main' > /etc/apk/repositories && \
    echo 'http://dl-cdn.alpinelinux.org/alpine/v3.8/community' >> /etc/apk/repositories && \
apk upgrade --purge && \
    apk add --no-cache --virtual build-dep maven git py3-pip python3-dev openjdk8 make g++ && \
apk add --no-cache python3 mysql-client && \
mkdir /root/.m2 && \
pip3 install --no-cache-dir \
jinja2 \
pykafka \
pymysql \
Templer==1.1.4 && \
set -x && mkdir /monasca-common && \
git -C /monasca-common init && \
git -C /monasca-common remote add origin "$COMMON_REPO" && \
echo "Cloning monasca-common in version: $COMMON_VERSION" && \
git -C /monasca-common fetch origin "$COMMON_VERSION" && \
git -C /monasca-common reset --hard FETCH_HEAD && \
cd /monasca-common && \
mvn --quiet -B clean install $([ "$SKIP_COMMON_TESTS" = "true" ] && echo "-DskipTests") && \
cd / && \
mkdir /app && \
git -C /app init && \
git -C /app remote add origin "$APP_REPO" && \
echo "Cloning app in version: $REPO_VERSION" && \
git -C /app fetch origin "$REPO_VERSION" && \
git -C /app reset --hard FETCH_HEAD && \
cd /app/thresh && \
mvn --quiet -B clean package $([ "$SKIP_THRESH_TESTS" = "true" ] && echo "-DskipTests") && \
cp /app/thresh/target/*-SNAPSHOT-shaded.jar /monasca-thresh.jar && \
cd / && \
# Save info about build to `/VERSIONS` file.
printf "App: %s\\n" "$DOCKER_IMAGE" >> /VERSIONS && \
printf "Repository: %s\\n" "$APP_REPO" >> /VERSIONS && \
printf "Version: %s\\n" "$REPO_VERSION" >> /VERSIONS && \
printf "Revision: %s\\n" "$GIT_COMMIT" >> /VERSIONS && \
printf "Build date: %s\\n" "$CREATION_TIME" >> /VERSIONS && \
printf "Monasca-common version: %s\\n" "$COMMON_VERSION" \
>> /VERSIONS && \
printf "Monasca-common revision: %s\\n" \
"$COMMON_GIT_COMMIT" >> /VERSIONS && \
printf "Constraints file: %s\\n" \
"$CONSTRAINTS_FILE"?h="$CONSTRAINTS_BRANCH" >> /VERSIONS && \
apk del build-dep && \
rm -rf \
/app \
/monasca-common \
/root/.cache/ \
/root/.m2/repository \
/tmp/* \
/var/cache/apk/* \
/var/log/*
ENTRYPOINT ["/start.sh"]

@@ -0,0 +1,162 @@
===============================
Docker image for Monasca Thresh
===============================
This image provides a containerized version of the Monasca Threshold Engine. For
more information on the Monasca project, see the wiki_.
Sources: monasca-thresh_, monasca-docker_, Dockerfile_
Usage
=====
The Threshold Engine requires configured instances of MySQL, Kafka,
Zookeeper, and optionally Storm_. In environments resembling the official
docker-compose__ or Kubernetes_ environments, this image requires little to
no configuration and can be minimally run like so::

   docker run monasca/thresh:master
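
If the Kafka, MySQL, or Zookeeper endpoints differ from those defaults, they
can be overridden through the environment variables listed below; a minimal
sketch, with placeholder hostnames::

   docker run \
     -e KAFKA_URI=kafka.example:9092 \
     -e MYSQL_HOST=db.example \
     -e ZOOKEEPER_SERVERS=zk.example \
     monasca/thresh:master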
Environment variables
~~~~~~~~~~~~~~~~~~~~~
============================= ======================== ============================================
Variable Default Description
============================= ======================== ============================================
KAFKA_URI kafka:9092 URI to Apache Kafka
KAFKA_WAIT_FOR_TOPICS alarm-state-transitions, Comma-separated list of topic names to check
metrics,events
KAFKA_WAIT_RETRIES 24 Number of Kafka connection attempts
KAFKA_WAIT_DELAY 5 Seconds to wait between attempts
MYSQL_HOST mysql MySQL hostname
MYSQL_PORT 3306 MySQL port
MYSQL_USER thresh MySQL username
MYSQL_PASSWORD password MySQL password
MYSQL_DB                              mon                      MySQL database name
MYSQL_WAIT_RETRIES 24 Number of MySQL connection attempts
MYSQL_WAIT_DELAY 5 Seconds to wait between attempts
ZOOKEEPER_URL zookeeper:2181 Zookeeper URL
NO_STORM_CLUSTER unset If ``true``, run without Storm daemons
STORM_WAIT_RETRIES            24                       Number of tries to verify Storm availability
STORM_WAIT_DELAY              5                        Seconds between retry attempts
WORKER_MAX_MB                 unset                    If set and ``NO_STORM_CLUSTER`` is ``true``,
                                                       use as the JVM MaxRAM size
METRIC_SPOUT_THREADS 2 Metric Spout threads
METRIC_SPOUT_TASKS 2 Metric Spout tasks
EVENT_SPOUT_THREADS 2 Event Spout Threads
EVENT_SPOUT_TASKS 2 Event Spout Tasks
EVENT_BOLT_THREADS 2 Event Bolt Threads
EVENT_BOLT_TASKS 2 Event Bolt Tasks
FILTERING_BOLT_THREADS 2 Filtering Bolt Threads
FILTERING_BOLT_TASKS 2 Filtering Bolt Tasks
ALARM_CREATION_BOLT_THREADS 2 Alarm Creation Bolt Threads
ALARM_CREATION_BOLT_TASKS 2 Alarm Creation Bolt Tasks
AGGREGATION_BOLT_THREADS 2 Aggregation Bolt Threads
AGGREGATION_BOLT_TASKS 2 Aggregation Bolt Tasks
THRESHOLDING_BOLT_THREADS 2 Thresholding Bolt Threads
THRESHOLDING_BOLT_TASKS 2 Thresholding Bolt Tasks
THRESH_STACK_SIZE 1024k JVM stack size
============================= ======================== ============================================
Wait scripts environment variables
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
======================== ================================ =========================================
Variable Default Description
======================== ================================ =========================================
KAFKA_URI kafka:9092 URI to Apache Kafka
KAFKA_WAIT_FOR_TOPICS alarm-state-transitions,metrics, Comma-separated list of topic names
events to check
KAFKA_WAIT_RETRIES       24                               Number of Kafka connection attempts
KAFKA_WAIT_DELAY 5 Seconds to wait between attempts
MYSQL_HOST mysql The host for MySQL
MYSQL_PORT 3306 The port for MySQL
MYSQL_USER monapi The MySQL username
MYSQL_PASSWORD password The MySQL password
MYSQL_DB mon The MySQL database name
MYSQL_WAIT_RETRIES 24 Number of MySQL connection attempts
MYSQL_WAIT_DELAY 5 Seconds to wait between attempts
======================== ================================ =========================================
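
As their docstrings show, the wait scripts can also be chained manually before
any service start; a sketch, assuming the in-image paths from the
``Dockerfile``::

   python3 /kafka_wait_for_topics.py && \
   python3 /mysql_check.py && \
   echo "dependencies are ready"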
Building Monasca Thresh image
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Example::

   $ ./build_image.sh <repository_version> <upper_constraints_branch> <common_version>

Everything after ``./build_image.sh`` is optional; by default the versions
are taken from the ``Dockerfile``. ``build_image.sh`` also contains a more
detailed description of the build process.
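
For instance, a hypothetical invocation that builds from the stable/ussuri
branch against the matching constraints branch would be::

   $ ./build_image.sh stable/ussuri stable/ussuri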
Scripts
~~~~~~~
start.sh
  This startup script performs all of the steps needed to bring the service
  up properly, including running the wait scripts and templating the
  configuration files. It can also optionally keep the container running
  after the service dies, to make debugging easier.
health_check.py
  This script is used for checking the status of the application.
# TODO: Test how it's working or if it's working
Running with and without Storm
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The Threshold Engine can be run in two different modes, with Storm Daemons
or without Storm Daemons. If run with the Storm Daemons, multiple
Storm Supervisor containers can be used with more than one worker process
in each. With no Storm Daemons, only a single Threshold Engine container
can be run with a single worker process.
The default docker-compose.yml file is configured to run without Storm.
To change docker-compose.yml to run with Storm, delete the ``thresh`` service
entry and replace it with the below::
nimbus:
image: storm:1.1.1
environment:
LOGSTASH_FIELDS: "service=nimbus"
command: storm nimbus
depends_on:
- zookeeper
links:
- zookeeper
restart: unless-stopped
supervisor:
image: storm:1.1.1
environment:
LOGSTASH_FIELDS: "service=supervisor"
command: storm supervisor
depends_on:
- nimbus
- zookeeper
links:
- nimbus
- zookeeper
restart: unless-stopped
thresh-init:
image: monasca/thresh:master
environment:
NIMBUS_SEEDS: "nimbus"
WORKER_MAX_HEAP_MB: "256"
LOGSTASH_FIELDS: "service=monasca-thresh"
depends_on:
- zookeeper
- kafka
- nimbus
- supervisor
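
Conversely, the single-container mode needs no Storm services at all; a
minimal sketch using the variables from the table above::

   docker run \
     -e NO_STORM_CLUSTER=true \
     -e WORKER_MAX_MB=784 \
     monasca/thresh:master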
.. _wiki: https://wiki.openstack.org/wiki/Monasca
.. _monasca-thresh: https://opendev.org/openstack/monasca-thresh
.. _monasca-docker: https://github.com/monasca/monasca-docker/
.. _Dockerfile: https://opendev.org/openstack/monasca-thresh/src/branch/master/docker/Dockerfile
.. _Storm: https://github.com/monasca/monasca-docker/blob/master/storm/Dockerfile
.. _Kubernetes: https://github.com/monasca/monasca-helm
__ monasca-docker_

@@ -0,0 +1,150 @@
#!/bin/bash
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# TODO(Dobroslaw): move this script to monasca-common/docker folder
# and leave here small script to download it and execute using env variables
# to minimize code duplication.
set -x # Print each script step.
set -eo pipefail # Exit the script if any statement returns error.
# This script is used for building Docker image with proper labels
# and proper version of monasca-common.
#
# Example usage:
# $ ./build_image.sh <repository_version> <upper_constraints_branch> <common_version>
#
# Everything after `./build_image.sh` is optional and by default configured
# to get versions from `Dockerfile`.
#
# To build from the master branch (default):
# $ ./build_image.sh
# To build a specific version, run this script in the following way:
# $ ./build_image.sh stable/queens
# Building from a specific commit:
# $ ./build_image.sh cb7f226
# When building from a tag, monasca-common will be used in the version
# pinned in the upper-constraints file:
# $ ./build_image.sh 2.5.0
# To build an image from a Gerrit patch set targeting branch stable/queens:
# $ ./build_image.sh refs/changes/51/558751/1 stable/queens
#
# If you want to build the image with a custom monasca-common version, you
# need to provide it as in the following example:
# $ ./build_image.sh master master refs/changes/19/595719/3
# Go to folder with Docker files.
REAL_PATH=$(python -c "import os,sys; print(os.path.realpath('$0'))")
cd "$(dirname "$REAL_PATH")/../docker/"
[ -z "$DOCKER_IMAGE" ] && \
DOCKER_IMAGE=$(\grep "ARG DOCKER_IMAGE=" Dockerfile | cut -f2 -d"=")
: "${REPO_VERSION:=$1}"
[ -z "$REPO_VERSION" ] && \
REPO_VERSION=$(\grep "ARG REPO_VERSION=" Dockerfile | cut -f2 -d"=")
# Let's stick to more readable version and disable SC2001 here.
# shellcheck disable=SC2001
REPO_VERSION_CLEAN=$(echo "$REPO_VERSION" | sed 's|/|-|g')
[ -z "$APP_REPO" ] && APP_REPO=$(\grep "ARG APP_REPO=" Dockerfile | cut -f2 -d"=")
GITHUB_REPO=$(echo "$APP_REPO" | sed 's/review.opendev.org/github.com/' | \
sed 's/ssh:/https:/')
if [ -z "$CONSTRAINTS_FILE" ]; then
CONSTRAINTS_FILE=$(\grep "ARG CONSTRAINTS_FILE=" Dockerfile | cut -f2 -d"=") || true
: "${CONSTRAINTS_FILE:=https://opendev.org/openstack/requirements/raw/branch/master/upper-constraints.txt}"
fi
: "${CONSTRAINTS_BRANCH:=$2}"
[ -z "$CONSTRAINTS_BRANCH" ] && \
CONSTRAINTS_BRANCH=$(\grep "ARG CONSTRAINTS_BRANCH=" Dockerfile | cut -f2 -d"=")
# When using a stable version of the repository, use the matching stable constraints file.
case "$REPO_VERSION" in
*stable*)
CONSTRAINTS_BRANCH_CLEAN="$REPO_VERSION"
CONSTRAINTS_FILE=${CONSTRAINTS_FILE/master/$CONSTRAINTS_BRANCH_CLEAN}
# Get monasca-common version from stable upper constraints file.
CONSTRAINTS_TMP_FILE=$(mktemp)
wget --output-document "$CONSTRAINTS_TMP_FILE" \
$CONSTRAINTS_FILE
UPPER_COMMON=$(\grep 'monasca-common' "$CONSTRAINTS_TMP_FILE")
# Get only version part from monasca-common.
UPPER_COMMON_VERSION="${UPPER_COMMON##*===}"
rm -rf "$CONSTRAINTS_TMP_FILE"
;;
*)
CONSTRAINTS_BRANCH_CLEAN="$CONSTRAINTS_BRANCH"
;;
esac
# Monasca-common variables.
if [ -z "$COMMON_REPO" ]; then
COMMON_REPO=$(\grep "ARG COMMON_REPO=" Dockerfile | cut -f2 -d"=") || true
: "${COMMON_REPO:=https://review.opendev.org/openstack/monasca-common}"
fi
: "${COMMON_VERSION:=$3}"
if [ -z "$COMMON_VERSION" ]; then
COMMON_VERSION=$(\grep "ARG COMMON_VERSION=" Dockerfile | cut -f2 -d"=") || true
if [ "$UPPER_COMMON_VERSION" ]; then
# Common from upper constraints file.
COMMON_VERSION="$UPPER_COMMON_VERSION"
fi
fi
# Clone the project to a temporary directory to get the proper commit hash
# from branches and tags. We need this for setting proper image labels.
# Docker does not allow retrieving data from the host system while building
# an image.
TMP_DIR=$(mktemp -d)
(
cd "$TMP_DIR"
    # This many steps are needed to support Gerrit patch sets.
git init
git remote add origin "$APP_REPO"
git fetch origin "$REPO_VERSION"
git reset --hard FETCH_HEAD
)
GIT_COMMIT=$(git -C "$TMP_DIR" rev-parse HEAD)
[ -z "${GIT_COMMIT}" ] && echo "No git commit hash found" && exit 1
rm -rf "$TMP_DIR"
# Do the same for monasca-common.
COMMON_TMP_DIR=$(mktemp -d)
(
cd "$COMMON_TMP_DIR"
    # This many steps are needed to support Gerrit patch sets.
git init
git remote add origin "$COMMON_REPO"
git fetch origin "$COMMON_VERSION"
git reset --hard FETCH_HEAD
)
COMMON_GIT_COMMIT=$(git -C "$COMMON_TMP_DIR" rev-parse HEAD)
[ -z "${COMMON_GIT_COMMIT}" ] && echo "No git commit hash found" && exit 1
rm -rf "$COMMON_TMP_DIR"
CREATION_TIME=$(date -u +"%Y-%m-%dT%H:%M:%SZ")
docker build --no-cache \
--build-arg CREATION_TIME="$CREATION_TIME" \
--build-arg GITHUB_REPO="$GITHUB_REPO" \
--build-arg APP_REPO="$APP_REPO" \
--build-arg REPO_VERSION="$REPO_VERSION" \
--build-arg GIT_COMMIT="$GIT_COMMIT" \
--build-arg CONSTRAINTS_FILE="$CONSTRAINTS_FILE" \
--build-arg COMMON_REPO="$COMMON_REPO" \
--build-arg COMMON_VERSION="$COMMON_VERSION" \
--build-arg COMMON_GIT_COMMIT="$COMMON_GIT_COMMIT" \
--tag "$DOCKER_IMAGE":"$REPO_VERSION_CLEAN" .

@@ -0,0 +1,145 @@
#!/usr/bin/env python
# coding=utf-8
# (C) Copyright 2017 Hewlett Packard Enterprise Development LP
# (C) Copyright 2018 FUJITSU LIMITED
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Wait for specific Kafka topics.
To use this script you need to set two environment variables:
* `KAFKA_URI` for the connection string to Kafka, including the port.
  Example: `kafka:9092`, `192.168.10.6:9092`.
* `KAFKA_WAIT_FOR_TOPICS` with the topics that should exist in Kafka for it
  to be considered working. Multiple topics should be separated with commas.
  Example: `retry-notifications,alarm-state-transitions`.
After making sure that these environment variables are set you can simply
execute this script in the following way:
`python3 kafka_wait_for_topics.py && ./start_service.sh`
`python3 kafka_wait_for_topics.py || exit 1`
Additional environment variables available are:
* `LOG_LEVEL` - defaults to `INFO`
* `KAFKA_WAIT_RETRIES` - number of retries, defaults to `24`
* `KAFKA_WAIT_INTERVAL` - in seconds, defaults to `5`
"""
import logging
import os
import sys
import time
from pykafka import KafkaClient
from pykafka.exceptions import NoBrokersAvailableError
# Run this script only with Python 3
if sys.version_info.major != 3:
sys.stdout.write("Sorry, requires Python 3.x\n")
sys.exit(1)
LOG_LEVEL = logging.getLevelName(os.environ.get('LOG_LEVEL', 'INFO'))
logging.basicConfig(level=LOG_LEVEL)
logger = logging.getLogger(__name__)
KAFKA_HOSTS = os.environ.get('KAFKA_URI', 'kafka:9092')
REQUIRED_TOPICS = os.environ.get('KAFKA_WAIT_FOR_TOPICS', '') \
.encode('utf-8').split(b',')
KAFKA_WAIT_RETRIES = int(os.environ.get('KAFKA_WAIT_RETRIES', '24'))
KAFKA_WAIT_INTERVAL = int(os.environ.get('KAFKA_WAIT_INTERVAL', '5'))
class TopicNoPartition(Exception):
"""Raise when topic has no partitions."""
class TopicNotFound(Exception):
"""Raise when topic was not found."""
def retry(retries=KAFKA_WAIT_RETRIES, delay=KAFKA_WAIT_INTERVAL,
check_exceptions=()):
"""Retry decorator."""
def decorator(func):
"""Decorator."""
def f_retry(*args, **kwargs):
"""Retry running function on exception after delay."""
for i in range(1, retries + 1):
try:
return func(*args, **kwargs)
# pylint: disable=W0703
# We want to catch all exceptions here to retry.
except check_exceptions + (Exception,) as exc:
if i < retries:
logger.info('Connection attempt %d of %d failed',
i, retries)
if isinstance(exc, check_exceptions):
logger.debug('Caught known exception, retrying...',
exc_info=True)
else:
                            logger.warning(
'Caught unknown exception, retrying...',
exc_info=True)
else:
logger.exception('Failed after %d attempts', retries)
raise
                # The exception was not re-raised, so wait before the next retry
time.sleep(delay)
return f_retry
return decorator
@retry(check_exceptions=(TopicNoPartition, TopicNotFound))
def check_topics(client, req_topics):
"""Check for existence of provided topics in Kafka."""
client.update_cluster()
logger.debug('Found topics: %r', client.topics.keys())
for req_topic in req_topics:
if req_topic not in client.topics.keys():
err_topic_not_found = 'Topic not found: {}'.format(req_topic)
logger.warning(err_topic_not_found)
raise TopicNotFound(err_topic_not_found)
topic = client.topics[req_topic]
if not topic.partitions:
err_topic_no_part = 'Topic has no partitions: {}'.format(req_topic)
logger.warning(err_topic_no_part)
raise TopicNoPartition(err_topic_no_part)
logger.info('Topic is ready: %s', req_topic)
@retry(check_exceptions=(NoBrokersAvailableError,))
def connect_kafka(hosts):
"""Connect to Kafka with retries."""
return KafkaClient(hosts=hosts)
def main():
"""Start main part of the wait script."""
    logger.info('Checking for available topics: %r', REQUIRED_TOPICS)
client = connect_kafka(hosts=KAFKA_HOSTS)
check_topics(client, REQUIRED_TOPICS)
if __name__ == '__main__':
main()

@@ -0,0 +1,37 @@
<?xml version="1.0" encoding="UTF-8"?>
<!--
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to You under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
{% set level = LOG_LEVEL | default('warn') %}
<configuration monitorInterval="60" shutdownHook="disable">
<properties>
<property name="pattern">%d{yyyy-MM-dd HH:mm:ss.SSS} %t %c{1.} [%p] %msg%n</property>
</properties>
<appenders>
<Console name="console" target="SYSTEM_OUT">
<PatternLayout>
<pattern>${pattern}</pattern>
</PatternLayout>
</Console>
</appenders>
<loggers>
<root level="{{ level | lower }}"> <!-- We log everything -->
<appender-ref ref="console"/>
</root>
</loggers>
</configuration>

@@ -0,0 +1,49 @@
<?xml version="1.0" encoding="UTF-8"?>
<!--
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to You under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
{% set level = LOG_LEVEL | default('warn') %}
<configuration monitorInterval="60" shutdownHook="disable">
<properties>
<property name="pattern">%d{yyyy-MM-dd HH:mm:ss.SSS} %c{1.} %t [%p] %msg%n</property>
<property name="patternNoTime">%msg%n</property>
<property name="patternMetrics">%d %-8r %m%n</property>
</properties>
<appenders>
<File name="A1"
fileName="${sys:workers.artifacts}/thresh/${sys:worker.port}/${sys:logfile.name}">
<PatternLayout>
<pattern>logfile=${sys:worker.port}/${sys:logfile.name} ${pattern}</pattern>
</PatternLayout>
</File>
<File name="METRICS"
fileName="${sys:workers.artifacts}/thresh/${sys:worker.port}/${sys:logfile.name}.metrics">
<PatternLayout>
<pattern>logfile=${sys:worker.port}/${sys:logfile.name}.metrics ${pattern}</pattern>
</PatternLayout>
</File>
</appenders>
<loggers>
<root level="{{ level }}">
<appender-ref ref="A1"/>
</root>
<Logger name="org.apache.storm.metric.LoggingMetricsConsumer" level="{{ level }}" additivity="false">
<appender-ref ref="METRICS"/>
</Logger>
</loggers>
</configuration>

@@ -0,0 +1,78 @@
#!/usr/bin/env python
# coding=utf-8
# (C) Copyright 2017 Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
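"""Print a JVM memory limit (e.g. '512m') suitable for -XX:MaxRAM.
The printed value is the smallest of: JVM_MAX_RATIO times the effective
memory limit (the lower of MemTotal from /proc/meminfo and the cgroup
limit), JVM_MAX_MB if set, and an optional command-line argument in MB.
MAX_OVERRIDE_MB, if set, short-circuits everything else.
"""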
import os
import sys
JVM_MAX_RATIO = os.environ.get('JVM_MAX_RATIO', '0.75')
JVM_MAX_MB = os.environ.get('JVM_MAX_MB', None)
MAX_OVERRIDE_MB = os.environ.get('MAX_OVERRIDE_MB', None)
def get_system_memory_mb():
with open('/proc/meminfo', 'r') as f:
for line in f.readlines():
tokens = line.split()
if tokens[0] != 'MemTotal:':
continue
assert tokens[2] == 'kB'
total_kb = int(tokens[1])
return total_kb / 1024
return None
def get_cgroup_memory_mb():
with open('/sys/fs/cgroup/memory/memory.limit_in_bytes', 'r') as f:
limit = int(f.read().strip())
return limit / 1024 / 1024
def get_effective_memory_limit_mb():
return min(get_system_memory_mb(), get_cgroup_memory_mb())
def main():
if MAX_OVERRIDE_MB:
print('{}m'.format(MAX_OVERRIDE_MB))
return
system_max = get_system_memory_mb()
cgroup_max = get_cgroup_memory_mb()
effective_max_ratio = float(JVM_MAX_RATIO)
effective_max = int(min(system_max, cgroup_max) * effective_max_ratio)
if JVM_MAX_MB:
env_max = int(JVM_MAX_MB)
else:
env_max = effective_max
if len(sys.argv) == 2:
arg_max = int(sys.argv[1])
else:
arg_max = effective_max
print('{:d}m'.format(min([
effective_max,
env_max,
arg_max
])))
if __name__ == '__main__':
main()

@@ -0,0 +1,132 @@
#!/usr/bin/env python
# coding=utf-8
# (C) Copyright 2018 FUJITSU LIMITED
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Health check for MySQL returns 0 when all checks works properly.
It's checking if requested database already exists.
For using this script you need to set some environment variables:
* `MYSQL_HOST` for connection string to MySQL.
Example: `mysql`, `192.168.10.6`.
Default: `mysql`.
* `MYSQL_PORT` for connection string to MySQL port.
Default: `3306`.
* `MYSQL_USER` for user that is cappable to connect to MySQL.
Default: `monapi`.
* `MYSQL_PASSWORD` for user password.
Default: `password`.
* `MYSQL_DB` for database that you need to have before starting service.
Default: `mon`.
After making sure that this environment variables are set you can simply
execute this script in the following way:
`python3 mysql_check.py && ./start_service.sh`
`python3 mysql_check.py || exit 1`
Additional environment variables available are:
* `LOG_LEVEL` - default to `INFO`
* `MYSQL_WAIT_RETRIES` - number of retries, default to `24`
* `MYSQL_WAIT_INTERVAL` - in seconds, default to `5`
"""
import logging
import os
import sys
import time
import pymysql
# Run this script only with Python 3
if sys.version_info.major != 3:
sys.stdout.write("Sorry, requires Python 3.x\n")
sys.exit(1)
LOG_LEVEL = logging.getLevelName(os.environ.get('LOG_LEVEL', 'INFO'))
logging.basicConfig(level=LOG_LEVEL)
logger = logging.getLogger(__name__)
MYSQL_HOST = os.environ.get('MYSQL_HOST', 'mysql')
MYSQL_PORT = int(os.environ.get('MYSQL_PORT', 3306))
MYSQL_USER = os.environ.get('MYSQL_USER', 'monapi')
MYSQL_PASSWORD = os.environ.get('MYSQL_PASSWORD', 'password')
MYSQL_DB = os.environ.get('MYSQL_DB', 'mon')
MYSQL_WAIT_RETRIES = int(os.environ.get('MYSQL_WAIT_RETRIES', '24'))
MYSQL_WAIT_INTERVAL = int(os.environ.get('MYSQL_WAIT_INTERVAL', '5'))
def retry(retries=MYSQL_WAIT_RETRIES, delay=MYSQL_WAIT_INTERVAL,
check_exceptions=()):
"""Retry decorator."""
def decorator(func):
"""Decorator."""
def f_retry(*args, **kwargs):
"""Retry running function on exception after delay."""
for i in range(1, retries + 1):
try:
return func(*args, **kwargs)
# pylint: disable=W0703
# We want to catch all exceptions here to retry.
except check_exceptions + (Exception,) as exc:
if i < retries:
logger.info('Connection attempt %d of %d failed',
i, retries)
if isinstance(exc, check_exceptions):
logger.debug('Caught known exception, retrying...',
exc_info=True)
else:
                            logger.warning(
'Caught unknown exception, retrying...',
exc_info=True)
else:
logger.exception('Failed after %d attempts', retries)
raise
                # The exception was not re-raised, so wait before the next retry
time.sleep(delay)
return f_retry
return decorator
@retry(check_exceptions=(pymysql.err.OperationalError,))
def connect_mysql(host, port, user, password, database):
"""Connect to MySQL with retries."""
return pymysql.connect(
host=host, port=port,
user=user, passwd=password,
db=database
)
def main():
"""Start main part of the wait script."""
logger.info('Waiting for database: `%s`', MYSQL_DB)
connect_mysql(
host=MYSQL_HOST, port=MYSQL_PORT,
user=MYSQL_USER, password=MYSQL_PASSWORD,
database=MYSQL_DB
)
logger.info('Database `%s` found', MYSQL_DB)
if __name__ == '__main__':
main()

@@ -0,0 +1,28 @@
<settings>
<localRepository>/repo</localRepository>
<proxies>
{% if HTTP_PROXY %}
{% set proxy = HTTP_PROXY %}
{% elif http_proxy %}
{% set proxy = http_proxy %}
{% endif %}
{% if proxy %}
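{# Strip the scheme and any path, leaving just "host:port" (or a bare host). #}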
{% set hostport = proxy.rpartition('//')[-1].partition('/')[0] %}
{% if ':' in hostport %}
{% set host, port = hostport.split(':', 1) %}
{% else %}
{% set host = hostport %}
{% set port = '80' %}
{% endif %}
<proxy>
<id>proxy</id>
<active>true</active>
<protocol>http</protocol>
<host>{{ host }}</host>
<port>{{ port }}</port>
</proxy>
{% endif %}
</proxies>
</settings>

@@ -0,0 +1,235 @@
#!/bin/ash
# shellcheck shell=dash
if [ -n "$DEBUG" ]; then
set -x
fi
CONFIG_TEMPLATES="/templates"
CONFIG_DEST="/etc/monasca"
LOG_TEMPLATES="/logging"
LOG_DEST="/storm/log4j2"
APACHE_STORM_DIR="/apache-storm-1.1.1"
ZOOKEEPER_WAIT=${ZOOKEEPER_WAIT:-"true"}
ZOOKEEPER_WAIT_TIMEOUT=${ZOOKEEPER_WAIT_TIMEOUT:-"3"}
ZOOKEEPER_WAIT_DELAY=${ZOOKEEPER_WAIT_DELAY:-"10"}
ZOOKEEPER_WAIT_RETRIES=${ZOOKEEPER_WAIT_RETRIES:-"20"}
SUPERVISOR_STACK_SIZE=${SUPERVISOR_STACK_SIZE:-"1024k"}
WORKER_STACK_SIZE=${WORKER_STACK_SIZE:-"1024k"}
NIMBUS_STACK_SIZE=${NIMBUS_STACK_SIZE:-"1024k"}
UI_STACK_SIZE=${UI_STACK_SIZE:-"1024k"}
TOPOLOGY_NAME="thresh-cluster"
MYSQL_WAIT_RETRIES=${MYSQL_WAIT_RETRIES:-"24"}
MYSQL_WAIT_DELAY=${MYSQL_WAIT_DELAY:-"5"}
KAFKA_WAIT_RETRIES=${KAFKA_WAIT_RETRIES:-"24"}
KAFKA_WAIT_DELAY=${KAFKA_WAIT_DELAY:-"5"}
THRESH_STACK_SIZE=${THRESH_STACK_SIZE:-"1024k"}
if [ -n "$ZOOKEEPER_SERVERS" ]; then
if [ -z "$STORM_ZOOKEEPER_SERVERS" ]; then
export STORM_ZOOKEEPER_SERVERS="$ZOOKEEPER_SERVERS"
fi
if [ -z "$TRANSACTIONAL_ZOOKEEPER_SERVERS" ]; then
export TRANSACTIONAL_ZOOKEEPER_SERVERS="$ZOOKEEPER_SERVERS"
fi
fi
if [ -n "$ZOOKEEPER_PORT" ]; then
if [ -z "$STORM_ZOOKEEPER_PORT" ]; then
export STORM_ZOOKEEPER_PORT="$ZOOKEEPER_PORT"
fi
if [ -z "$TRANSACTIONAL_ZOOKEEPER_PORT" ]; then
export TRANSACTIONAL_ZOOKEEPER_PORT="$ZOOKEEPER_PORT"
fi
fi
first_zk=$(echo "$STORM_ZOOKEEPER_SERVERS" | cut -d, -f1)
# wait for zookeeper to become available
if [ "$ZOOKEEPER_WAIT" = "true" ]; then
success="false"
for i in $(seq "$ZOOKEEPER_WAIT_RETRIES"); do
if ok=$(echo ruok | nc "$first_zk" "$STORM_ZOOKEEPER_PORT" -w "$ZOOKEEPER_WAIT_TIMEOUT") && [ "$ok" = "imok" ]; then
success="true"
break
else
echo "Connect attempt $i of $ZOOKEEPER_WAIT_RETRIES failed, retrying..."
sleep "$ZOOKEEPER_WAIT_DELAY"
fi
done
if [ "$success" != "true" ]; then
echo "Could not connect to $first_zk after $i attempts, exiting..."
sleep 1
exit 1
fi
fi
if [ -z "$STORM_LOCAL_HOSTNAME" ]; then
# see also: http://stackoverflow.com/a/21336679
ip=$(ip route get 8.8.8.8 | awk 'NR==1 {print $NF}')
echo "Using autodetected IP as advertised hostname: $ip"
export STORM_LOCAL_HOSTNAME=$ip
fi
if [ -z "$SUPERVISOR_CHILDOPTS" ]; then
SUPERVISOR_CHILDOPTS="-XX:MaxRAM=$(python /memory.py "$SUPERVISOR_MAX_MB") -XX:+UseSerialGC -Xss$SUPERVISOR_STACK_SIZE"
export SUPERVISOR_CHILDOPTS
fi
if [ -z "$WORKER_CHILDOPTS" ]; then
WORKER_CHILDOPTS="-XX:MaxRAM=$(python /memory.py "$WORKER_MAX_MB") -Xss$WORKER_STACK_SIZE"
WORKER_CHILDOPTS="$WORKER_CHILDOPTS -XX:+UseConcMarkSweepGC"
if [ "$WORKER_REMOTE_JMX" = "true" ]; then
WORKER_CHILDOPTS="$WORKER_CHILDOPTS -Dcom.sun.management.jmxremote"
fi
export WORKER_CHILDOPTS
fi
if [ -z "$NIMBUS_CHILDOPTS" ]; then
NIMBUS_CHILDOPTS="-XX:MaxRAM=$(python /memory.py "$NIMBUS_MAX_MB") -XX:+UseSerialGC -Xss$NIMBUS_STACK_SIZE"
export NIMBUS_CHILDOPTS
fi
if [ -z "$UI_CHILDOPTS" ]; then
UI_CHILDOPTS="-XX:MaxRAM=$(python /memory.py "$UI_MAX_MB") -XX:+UseSerialGC -Xss$UI_STACK_SIZE"
export UI_CHILDOPTS
fi
template_dir() {
src_dir=$1
dest_dir=$2
for f in "$src_dir"/*; do
# Skip directories, links, etc
if [ ! -f "$f" ]; then
continue
fi
name=$(basename "$f")
dest=$(basename "$f" .j2)
if [ "$dest" = "$name" ]; then
# file does not end in .j2
cp "$f" "$dest_dir/$dest"
else
# file ends in .j2, apply template
templer --verbose --force "$f" "$dest_dir/$dest"
fi
done
}
templer --verbose --force "$CONFIG_TEMPLATES/storm.yaml.j2" "$STORM_CONF_DIR/storm.yaml"
template_dir "$CONFIG_TEMPLATES" "$CONFIG_DEST"
template_dir "$LOG_TEMPLATES" "$LOG_DEST"
if [ "$WORKER_LOGS_TO_STDOUT" = "true" ]; then
    for PORT in $(echo "$SUPERVISOR_SLOTS_PORTS" | sed -e "s/,/ /g"); do
LOGDIR="/storm/logs/workers-artifacts/thresh/$PORT"
mkdir -p "$LOGDIR"
WORKER_LOG="$LOGDIR/worker.log"
RECREATE="true"
if [ -e "$WORKER_LOG" ]; then
if [ -L "$WORKER_LOG" ]; then
RECREATE="false"
else
rm -f "$WORKER_LOG"
fi
fi
        if [ "$RECREATE" = "true" ]; then
ln -s /proc/1/fd/1 "$WORKER_LOG"
fi
done
fi
# Test services we need before starting our service.
echo "Start script: waiting for needed services"
python3 /kafka_wait_for_topics.py
python3 /mysql_check.py
if [ "${NO_STORM_CLUSTER}" = "true" ]; then
echo "Using Thresh Config file /etc/monasca/thresh-config.yml. Contents:"
grep -vi password /etc/monasca/thresh-config.yml
# shellcheck disable=SC2086
JAVAOPTS="-XX:MaxRAM=$(python /memory.py $WORKER_MAX_MB) -XX:+UseSerialGC -Xss$THRESH_STACK_SIZE"
if [ "$LOCAL_JMX" = "true" ]; then
JAVAOPTS="$JAVAOPTS -Dcom.sun.management.jmxremote=true"
port="${LOCAL_JMX_PORT:-9090}"
JAVAOPTS="$JAVAOPTS -Dcom.sun.management.jmxremote.port=$port"
JAVAOPTS="$JAVAOPTS -Dcom.sun.management.jmxremote.rmi.port=$port"
JAVAOPTS="$JAVAOPTS -Dcom.sun.management.jmxremote.ssl=false"
JAVAOPTS="$JAVAOPTS -Dcom.sun.management.jmxremote.authenticate=false"
JAVAOPTS="$JAVAOPTS -Dcom.sun.management.jmxremote.local.only=false"
fi
if [ -n "$LOG_CONFIG_FILE" ]; then
JAVAOPTS="$JAVAOPTS -Dlog4j.configurationFile=$LOG_CONFIG_FILE"
fi
echo "Submitting storm topology as local cluster using JAVAOPTS of $JAVAOPTS"
# shellcheck disable=SC2086
java $JAVAOPTS -classpath "/monasca-thresh.jar:$APACHE_STORM_DIR/lib/*" monasca.thresh.ThresholdingEngine /etc/monasca/thresh-config.yml thresh-cluster local
exit $?
fi
echo "Waiting for storm to become available..."
success="false"
for i in $(seq "$STORM_WAIT_RETRIES"); do
if timeout -t "$STORM_WAIT_TIMEOUT" storm list; then
echo "Storm is available, continuing..."
success="true"
break
else
echo "Connection attempt $i of $STORM_WAIT_RETRIES failed"
sleep "$STORM_WAIT_DELAY"
fi
done
if [ "$success" != "true" ]; then
echo "Unable to connect to Storm! Exiting..."
sleep 1
exit 1
fi
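# Parse topology names from `storm list` output: take the first field of
# every line after the "-----" separator.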
topologies=$(storm list | awk '/-----/,0{if (!/-----/)print $1}')
found="false"
for topology in $topologies; do
if [ "$topology" = "$TOPOLOGY_NAME" ]; then
found="true"
echo "Found existing storm topology with name: $topology"
break
fi
done
if [ "$found" = "true" ]; then
echo "Storm topology already exists, will not submit again"
# TODO handle upgrades
else
echo "Using Thresh Config file /etc/monasca/thresh-config.yml. Contents:"
grep -vi password /etc/monasca/thresh-config.yml
echo "Submitting storm topology..."
storm jar /monasca-thresh.jar \
monasca.thresh.ThresholdingEngine \
/etc/monasca/thresh-config.yml \
"$TOPOLOGY_NAME"
fi
# Template the maven settings file; it will use env variables.
# Read usage examples: https://pypi.org/project/Templer/
echo "Start script: creating maven settings.xml from template"
# Add proxy configuration for maven
mkdir -p /root/.m2
templer --ignore-undefined-variables --verbose --force \
/settings.xml.j2 /root/.m2/settings.xml

@@ -0,0 +1,60 @@
# (C) Copyright 2017 Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
storm.local.dir: {{ STORM_LOCAL_DIR | default('/data') }}
storm.local.hostname: "{{ STORM_LOCAL_HOSTNAME }}"
storm.zookeeper.servers:
{% for server in STORM_ZOOKEEPER_SERVERS.split(',') %}
{% set server = server.strip() %}
- "{{ server }}"
{% endfor %}
storm.zookeeper.port: {{ STORM_ZOOKEEPER_PORT | default('2181') }}
storm.zookeeper.retry.interval: {{ STORM_ZOOKEEPER_RETRY_INTERVAL | default('5000') }}
storm.zookeeper.retry.times: {{ STORM_ZOOKEEPER_RETRY_TIMES | default('60') }}
storm.zookeeper.root: {{ STORM_ZOOKEEPER_ROOT | default ('/storm') }}
storm.zookeeper.session.timeout: {{ STORM_ZOOKEEPER_SESSION_TIMEOUT | default('3000') }}
supervisor.slots.ports:
{% for port in SUPERVISOR_SLOTS_PORTS.split(',') %}
{% set port = port.strip() %}
- {{ port }}
{% endfor %}
supervisor.childopts: "{{ SUPERVISOR_CHILDOPTS }}"
worker.childopts: "{{ WORKER_CHILDOPTS }}"
nimbus.seeds:
{% for seed in NIMBUS_SEEDS.split(',') %}
{% set seed = seed.strip() %}
- "{{ seed }}"
{% endfor %}
nimbus.thrift.port: {{ NIMBUS_THRIFT_PORT | default('6627') }}
nimbus.childopts: "{{ NIMBUS_CHILDOPTS }}"
ui.host: "{{ STORM_UI_HOST | default ('0.0.0.0') }}"
ui.port: {{ STORM_UI_PORT | default('8088') }}
ui.childopts: "{{ STORM_UI_CHILDOPTS | default('') }}"
transactional.zookeeper.servers:
{% for server in TRANSACTIONAL_ZOOKEEPER_SERVERS.split(',') %}
{% set server = server.strip() %}
- "{{ server }}"
{% endfor %}
transactional.zookeeper.port: {{ TRANSACTIONAL_ZOOKEEPER_PORT | default('2181') }}
transactional.zookeeper.root: {{ TRANSACTIONAL_ZOOKEEPER_ROOT | default('/storm-transactional') }}
topology.acker.executors: {{ TOPOLOGY_ACKER_EXECUTORS | default('1') }}
topology.debug: {{ TOPOLOGY_DEBUG | default('false') }}
topology.max.spout.pending: {{ TOPOLOGY_MAX_SPOUT_PENDING | default('500') }}

@@ -0,0 +1,139 @@
# (C) Copyright 2015, 2017 Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
{% set zookeeper = ZOOKEEPER_SERVERS.split(',')[0] %}
metricSpoutThreads: {{ METRIC_SPOUT_THREADS | default(2) }}
metricSpoutTasks: {{ METRIC_SPOUT_TASKS | default(2) }}
eventSpoutThreads: {{ EVENT_SPOUT_THREADS | default(2) }}
eventSpoutTasks: {{ EVENT_SPOUT_TASKS | default(2) }}
eventBoltThreads: {{ EVENT_BOLT_THREADS | default(2) }}
eventBoltTasks: {{ EVENT_BOLT_TASKS | default(2) }}
filteringBoltThreads: {{ FILTERING_BOLT_THREADS | default(2) }}
filteringBoltTasks: {{ FILTERING_BOLT_TASKS | default(2) }}
alarmCreationBoltThreads: {{ ALARM_CREATION_BOLT_THREADS | default(2) }}
alarmCreationBoltTasks: {{ ALARM_CREATION_BOLT_TASKS | default(2) }}
aggregationBoltThreads: {{ AGGREGATION_BOLT_THREADS | default(2) }}
aggregationBoltTasks: {{ AGGREGATION_BOLT_TASKS | default(2) }}
thresholdingBoltThreads: {{ THRESHOLDING_BOLT_THREADS | default(2) }}
thresholdingBoltTasks: {{ THRESHOLDING_BOLT_TASKS | default(2) }}
statsdConfig:
host: "127.0.0.1"
port: 8125
prefix: monasca.storm.
dimensions: !!map
service: monitoring
component: storm
metricSpoutConfig:
kafkaConsumerConfiguration:
# See http://kafka.apache.org/documentation.html#api for semantics and defaults.
topic: "metrics"
numThreads: 1
groupId: "thresh-metric"
zookeeperConnect: "{{ zookeeper }}:{{ ZOOKEEPER_PORT | default('2181') }}"
consumerId: 1
socketTimeoutMs: 30000
socketReceiveBufferBytes: 65536
fetchMessageMaxBytes: 1048576
autoCommitEnable: true
autoCommitIntervalMs: 60000
queuedMaxMessageChunks: 10
rebalanceMaxRetries: 4
fetchMinBytes: 1
fetchWaitMaxMs: 100
rebalanceBackoffMs: 2000
refreshLeaderBackoffMs: 200
autoOffsetReset: largest
consumerTimeoutMs: -1
clientId: 1
zookeeperSessionTimeoutMs: 60000
zookeeperConnectionTimeoutMs: 60000
zookeeperSyncTimeMs: 2000
eventSpoutConfig:
kafkaConsumerConfiguration:
# See http://kafka.apache.org/documentation.html#api for semantics and defaults.
topic: "events"
numThreads: 1
groupId: "thresh-event"
    zookeeperConnect: "{{ zookeeper }}:{{ ZOOKEEPER_PORT | default('2181') }}"
consumerId: 1
socketTimeoutMs: 30000
socketReceiveBufferBytes: 65536
fetchMessageMaxBytes: 1048576
autoCommitEnable: true
autoCommitIntervalMs: 60000
queuedMaxMessageChunks: 10
rebalanceMaxRetries: 4
fetchMinBytes: 1
fetchWaitMaxMs: 100
rebalanceBackoffMs: 2000
refreshLeaderBackoffMs: 200
autoOffsetReset: largest
consumerTimeoutMs: -1
clientId: 1
zookeeperSessionTimeoutMs: 60000
zookeeperConnectionTimeoutMs: 60000
zookeeperSyncTimeMs: 2000
kafkaProducerConfig:
# See http://kafka.apache.org/documentation.html#api for semantics and defaults.
topic: "alarm-state-transitions"
metadataBrokerList: "{{ KAFKA_URI | default('kafka:9092') }}"
serializerClass: kafka.serializer.StringEncoder
partitionerClass:
requestRequiredAcks: 1
requestTimeoutMs: 10000
producerType: sync
keySerializerClass:
compressionCodec: none
compressedTopics:
messageSendMaxRetries: 3
retryBackoffMs: 100
topicMetadataRefreshIntervalMs: 600000
queueBufferingMaxMs: 5000
queueBufferingMaxMessages: 10000
queueEnqueueTimeoutMs: -1
batchNumMessages: 200
sendBufferBytes: 102400
  clientId: Threshold_Engine
sporadicMetricNamespaces:
- foo
database:
driverClass: org.drizzle.jdbc.DrizzleDriver
url: "jdbc:drizzle://{{ MYSQL_DB_HOST | default('mysql') }}:{{ MYSQL_DB_PORT | default('3306') }}/{{ MYSQL_DB_DATABASE | default('mon') }}"
user: "{{ MYSQL_DB_USERNAME | default('thresh') }}"
password: "{{ MYSQL_DB_PASSWORD | default('password') }}"
properties:
ssl: {{ USE_SSL_ENABLED }}
# the maximum amount of time to wait on an empty pool before throwing an exception
maxWaitForConnection: 1s
# the SQL query to run when validating a connection's liveness
validationQuery: "/* MyService Health Check */ SELECT 1"
# the minimum number of connections to keep open
minSize: 8
# the maximum number of connections to keep open
maxSize: 41

@@ -0,0 +1,85 @@
---
# Copyright 2020 VEXXHOST, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: monasca-thresh
namespace: openstack
labels:
{{ labels("monasca", component="thresh") | indent(4) }}
spec:
updateStrategy:
rollingUpdate:
maxUnavailable: 1
type: RollingUpdate
selector:
matchLabels:
{{ labels("monasca", component="thresh") | indent(6) }}
template:
metadata:
labels:
{{ labels("monasca", component="thresh") | indent(8) }}
annotations:
checksum/config: "{{ config_hash }}"
spec:
automountServiceAccountToken: false
containers:
- name: monasca-thresh
image: vexxhost/monasca-thresh:latest
imagePullPolicy: Always
{% if env is defined and env|length %}
env:
{% if 'sentryDSN' in spec %}
- name: SENTRY_DSN
value: {{ spec.sentryDSN }}
{% endif %}
{% for v in env %}
- name: "{{ v.name }}"
value: "{{ v.value }}"
{% endfor %}
{% endif %}
lifecycle:
preStop:
exec:
command: ["/bin/sleep", "5"]
securityContext:
runAsUser: 65534
runAsGroup: 65534
volumeMounts:
- mountPath: /etc/monasca
name: config
- name: uwsgi-config
mountPath: /etc/uwsgi
volumes:
- name: config
secret:
secretName: monasca-config
- name: uwsgi-config
configMap:
defaultMode: 420
name: uwsgi-default
{% if 'nodeSelector' in spec %}
nodeSelector:
{{ spec.nodeSelector | to_yaml | indent(8) }}
{% endif %}
{% if 'tolerations' in spec %}
tolerations:
{{ spec.tolerations | to_yaml | indent(8) }}
{% endif %}
{% if 'hostAliases' in spec %}
hostAliases:
{{ spec.hostAliases | to_yaml | indent(8) }}
{% endif %}

@@ -71,6 +71,8 @@
jobs:
- openstack-operator:functional:
dependencies:
- name: openstack-operator:images:build:monasca-thresh
soft: true
- name: openstack-operator:images:build:mcrouter-exporter
soft: true
- name: openstack-operator:images:build:horizon
@@ -114,6 +116,8 @@
jobs:
- openstack-operator:functional:
dependencies:
- name: openstack-operator:images:upload:monasca-thresh
soft: true
- name: openstack-operator:images:upload:mcrouter-exporter
soft: true
- name: openstack-operator:images:upload:horizon

@@ -0,0 +1,41 @@
- job:
name: openstack-operator:images:build:monasca-thresh
parent: vexxhost-build-docker-image
provides: openstack-operator:image:monasca-thresh
nodeset: &id001
nodes:
- name: ubuntu-bionic
label: ubuntu-bionic-vexxhost
vars: &id002
docker_images:
- context: images/monasca-thresh
repository: vexxhost/monasca-thresh
dependencies:
- openstack-operator:images:build:openstack-operator
files: &id003
- ^images/monasca-thresh/.*
- job:
name: openstack-operator:images:upload:monasca-thresh
parent: vexxhost-upload-docker-image
provides: openstack-operator:image:monasca-thresh
nodeset: *id001
vars: *id002
dependencies:
- openstack-operator:images:upload:openstack-operator
files: *id003
- job:
name: openstack-operator:images:promote:monasca-thresh
parent: vexxhost-promote-docker-image
nodeset: *id001
vars: *id002
files: *id003
- project:
check:
jobs:
- openstack-operator:images:build:monasca-thresh
gate:
jobs:
- openstack-operator:images:upload:monasca-thresh
promote:
jobs:
- openstack-operator:images:promote:monasca-thresh