Update devstack plugin to support the new API

Merge log-api and api

Story: 2003881
Task: 36195

Depends-On: https://review.opendev.org/681419/
Change-Id: Idb2361e5ab701cd3dee4eabb4b43116f6e1205c4
This commit is contained in:
Adrian Czarnecki 2019-07-23 13:06:58 +02:00
parent 3ee8332e44
commit 1e3e1173ad
24 changed files with 4342 additions and 123 deletions

View File

@ -43,6 +43,7 @@
c-vol: false
cinder: false
horizon: false
monasca-log: false
tox_envlist: all
tempest_test_regex: monasca_tempest_tests.tests.api
devstack_plugins:
@ -60,6 +61,70 @@
- ^doc/.*$
- ^releasenotes/.*$
- job:
name: monasca-log-tempest-base
parent: devstack-tempest
description: |
Base job for running tempest tests with monasca-log-api devstack plugin.
timeout: 7800
required-projects:
- openstack/devstack-gate
- openstack/monasca-agent
- openstack/monasca-api
- openstack/monasca-common
- openstack/monasca-grafana-datasource
- openstack/monasca-notification
- openstack/monasca-persister
- openstack/monasca-statsd
- openstack/monasca-thresh
- openstack/monasca-ui
- openstack/python-monascaclient
- openstack/tempest
- openstack/monasca-kibana-plugin
- openstack/monasca-tempest-plugin
vars:
devstack_services:
monasca-log: true
# Disable unneeded services.
etcd3: false
g-api: false
g-reg: false
n-api: false
n-api-meta: false
n-cauth: false
n-cond: false
n-cpu: false
n-novnc: false
n-obj: false
n-sch: false
placement-api: false
s-account: false
s-container: false
s-object: false
s-proxy: false
c-api: false
c-bak: false
c-sch: false
c-vol: false
cinder: false
horizon: false
tox_envlist: all
tempest_test_regex: monasca_tempest_tests.tests.log_api
devstack_plugins:
monasca-api: https://opendev.org/openstack/monasca-api
zuul_copy_output:
/var/log/kafka: logs
/var/log/monasca/notification: logs
/etc/kafka/server.properties: logs
/etc/kafka/producer.properties: logs
/etc/kafka/consumer.properties: logs
/etc/monasca/monasca-notification.conf: logs
irrelevant-files:
- ^.*\.rst$
- ^.*\.md$
- ^doc/.*$
- ^releasenotes/.*$
- job:
name: monasca-tempest-python2-influxdb
parent: monasca-tempest-base
@ -144,6 +209,41 @@
TEMPEST_PLUGINS: /opt/stack/monasca-tempest-plugin
tempest_test_regex: (?!.*\[.*\btimerange\b.*\])(^monasca_tempest_tests.tests.api)
- job:
name: monasca-log-tempest-python3-influxdb
parent: monasca-log-tempest-base
vars:
devstack_localrc:
USE_PYTHON3: true
USE_OLD_LOG_API: false
MONASCA_API_IMPLEMENTATION_LANG: python
MONASCA_PERSISTER_IMPLEMENTATION_LANG: python
MONASCA_METRICS_DB: influxdb
TEMPEST_PLUGINS: /opt/stack/monasca-tempest-plugin
- job:
name: monasca-log-tempest-python2-influxdb
parent: monasca-log-tempest-base
vars:
devstack_localrc:
USE_PYTHON3: false
USE_OLD_LOG_API: false
MONASCA_API_IMPLEMENTATION_LANG: python
MONASCA_PERSISTER_IMPLEMENTATION_LANG: python
MONASCA_METRICS_DB: influxdb
TEMPEST_PLUGINS: /opt/stack/monasca-tempest-plugin
- job:
    name: monasca-log-tempest-oldapi-python3-influxdb
    parent: monasca-log-tempest-base
    vars:
      devstack_localrc:
        USE_PYTHON3: true
        # This job is meant to exercise the deprecated standalone
        # monasca-log-api. It must enable it here; with "false" the job
        # is an exact duplicate of monasca-log-tempest-python3-influxdb
        # and the old API code path is never tested.
        USE_OLD_LOG_API: true
        MONASCA_API_IMPLEMENTATION_LANG: python
        MONASCA_PERSISTER_IMPLEMENTATION_LANG: python
        MONASCA_METRICS_DB: influxdb
        TEMPEST_PLUGINS: /opt/stack/monasca-tempest-plugin
- project:
templates:
@ -156,19 +256,19 @@
- release-notes-jobs-python3
check:
jobs:
- monasca-log-tempest-python3-influxdb
- monasca-tempest-python2-influxdb
- monasca-tempest-python3-influxdb
- monasca-tempest-python2-cassandra
- monasca-tempest-python2-java-cassandra
- monascalog-python3-tempest
- build-monasca-docker-image
gate:
queue: monasca
jobs:
- monasca-log-tempest-python3-influxdb
- monasca-tempest-python2-influxdb
- monasca-tempest-python3-influxdb
- monasca-tempest-python2-cassandra
- monascalog-python3-tempest
post:
jobs:
- publish-monasca-api-docker-image

14
devstack/Vagrantfile vendored
View File

@ -145,6 +145,11 @@ enable_service monasca-storm-nimbus
enable_service monasca-storm-supervisor
enable_service monasca-storm-ui
enable_service monasca-storm-logviewer
enable_service monasca-log
# Uncomment this line to disable log part
# disable_service monasca-log
# Enable/Disable ORM support for mysql/postgresql
# HINT: If postgresql service is enabled, ORM is enforced
@ -169,15 +174,20 @@ MONASCA_METRICS_DB=${MONASCA_METRICS_DB:-influxdb}
MONASCA_API_USE_MOD_WSGI=${MONASCA_API_USE_MOD_WSGI:-True}
# Uncomment to stack devstack with old log-api
# USE_OLD_LOG_API=true
# enable_service monasca-log-api
# Start devstack with services running under Python 3
USE_PYTHON3=True
# Uncomment one of the following lines and modify accordingly to enable the Monasca DevStack Plugin
enable_plugin monasca-api https://git.openstack.org/openstack/monasca-api
enable_plugin monasca-api https://opendev.org/openstack/monasca-api
# enable_plugin monasca-api file:///vagrant_home/Documents/repos/openstack/monasca-api.vertica
# Uncomment to install tempest tests
# enable_plugin monasca-tempest-plugin https://git.openstack.org/openstack/monasca-tempest-plugin
enable_plugin monasca-tempest-plugin https://opendev.org/openstack/monasca-tempest-plugin
' > local.conf
./stack.sh

View File

@ -0,0 +1,360 @@
##################### Elasticsearch Configuration Example #####################
# This file contains an overview of various configuration settings,
# targeted at operations staff. Application developers should
# consult the guide at <http://elasticsearch.org/guide>.
#
# The installation procedure is covered at
# <http://elasticsearch.org/guide/en/elasticsearch/reference/current/setup.html>.
#
# Elasticsearch comes with reasonable defaults for most settings,
# so you can try it out without bothering with configuration.
#
# Most of the time, these defaults are just fine for running a production
# cluster. If you're fine-tuning your cluster, or wondering about the
# effect of certain configuration option, please _do ask_ on the
# mailing list or IRC channel [http://elasticsearch.org/community].
# Any element in the configuration can be replaced with environment variables
# by placing them in ${...} notation. For example:
#
#node.rack: ${RACK_ENV_VAR}
# For information on supported formats and syntax for the config file, see
# <http://elasticsearch.org/guide/en/elasticsearch/reference/current/setup-configuration.html>
################################### Cluster ###################################
# Cluster name identifies your cluster for auto-discovery. If you're running
# multiple clusters on the same network, make sure you're using unique names.
#
cluster.name: monasca_elastic
#################################### Node #####################################
# Node names are generated dynamically on startup, so you're relieved
# from configuring them manually. You can tie this node to a specific name:
#
node.name: "devstack"
# Allow this node to be eligible as a master node (enabled by default):
node.master: true
# Allow this node to store data (enabled by default)
node.data: true
# You can exploit these settings to design advanced cluster topologies.
#
# 1. You want this node to never become a master node, only to hold data.
# This will be the "workhorse" of your cluster.
#
#node.master: false
#node.data: true
#
# 2. You want this node to only serve as a master: to not store any data and
# to have free resources. This will be the "coordinator" of your cluster.
#
#node.master: true
#node.data: false
#
# 3. You want this node to be neither master nor data node, but
# to act as a "search load balancer" (fetching data from nodes,
# aggregating results, etc.)
#
#node.master: false
#node.data: false
# Use the Cluster Health API [http://localhost:9200/_cluster/health], the
# Node Info API [http://localhost:9200/_nodes] or GUI tools
# such as <http://www.elasticsearch.org/overview/marvel/>,
# <http://github.com/karmi/elasticsearch-paramedic>,
# <http://github.com/lukas-vlcek/bigdesk> and
# <http://mobz.github.com/elasticsearch-head> to inspect the cluster state.
# A node can have generic attributes associated with it, which can later be used
# for customized shard allocation filtering, or allocation awareness. An attribute
# is a simple key value pair, similar to node.key: value, here is an example:
#
#node.rack: rack314
# By default, multiple nodes are allowed to start from the same installation location
# to disable it, set the following:
#node.max_local_storage_nodes: 1
#################################### Index ####################################
# You can set a number of options (such as shard/replica options, mapping
# or analyzer definitions, translog settings, ...) for indices globally,
# in this file.
#
# Note, that it makes more sense to configure index settings specifically for
# a certain index, either when creating it or by using the index templates API.
#
# See <http://elasticsearch.org/guide/en/elasticsearch/reference/current/index-modules.html> and
# <http://elasticsearch.org/guide/en/elasticsearch/reference/current/indices-create-index.html>
# for more information.
# Set the number of shards (splits) of an index (5 by default):
#
#index.number_of_shards: 5
# Set the number of replicas (additional copies) of an index (1 by default):
#
#index.number_of_replicas: 1
# Note, that for development on a local machine, with small indices, it usually
# makes sense to "disable" the distributed features:
#
#index.number_of_shards: 1
#index.number_of_replicas: 0
# These settings directly affect the performance of index and search operations
# in your cluster. Assuming you have enough machines to hold shards and
# replicas, the rule of thumb is:
#
# 1. Having more *shards* enhances the _indexing_ performance and allows to
# _distribute_ a big index across machines.
# 2. Having more *replicas* enhances the _search_ performance and improves the
# cluster _availability_.
#
# The "number_of_shards" is a one-time setting for an index.
#
# The "number_of_replicas" can be increased or decreased anytime,
# by using the Index Update Settings API.
#
# Elasticsearch takes care about load balancing, relocating, gathering the
# results from nodes, etc. Experiment with different settings to fine-tune
# your setup.
# Use the Index Status API (<http://localhost:9200/A/_status>) to inspect
# the index status.
#################################### Paths ####################################
# Path to directory where to store index data allocated for this node.
path.data: %ES_DATA_DIR%
# Path to log files:
path.logs: %ES_LOG_DIR%
# Path to where plugins are installed:
#path.plugins: /path/to/plugins
# Path to temporary files
#path.work: /path/to/work
# Path to directory containing configuration (this file and logging.yml):
#path.conf: /path/to/conf
#################################### Plugin ###################################
# If a plugin listed here is not installed for current node, the node will not start.
#
#plugin.mandatory: mapper-attachments,lang-groovy
################################### Memory ####################################
# Elasticsearch performs poorly when JVM starts swapping: you should ensure that
# it _never_ swaps.
#
# Set this property to true to lock the memory:
#
#bootstrap.mlockall: true
# Make sure that the ES_MIN_MEM and ES_MAX_MEM environment variables are set
# to the same value, and that the machine has enough memory to allocate
# for Elasticsearch, leaving enough memory for the operating system itself.
#
# You should also make sure that the Elasticsearch process is allowed to lock
# the memory, eg. by using `ulimit -l unlimited`.
############################## Network And HTTP ###############################
# Elasticsearch, by default, binds itself to the 0.0.0.0 address, and listens
# on port [9200-9300] for HTTP traffic and on port [9300-9400] for node-to-node
# communication. (the range means that if the port is busy, it will automatically
# try the next port).
# Set the bind address specifically (IPv4 or IPv6):
network.bind_host: %ES_SERVICE_BIND_HOST%
# Set the address other nodes will use to communicate with this node. If not
# set, it is automatically derived. It must point to an actual IP address.
network.publish_host: %ES_SERVICE_PUBLISH_HOST%
# Set a custom port for the node to node communication (9300 by default):
transport.tcp.port: %ES_SERVICE_PUBLISH_PORT%
# Enable compression for all communication between nodes (disabled by default):
#
#transport.tcp.compress: true
# Set a custom port to listen for HTTP traffic:
#
http.port: %ES_SERVICE_BIND_PORT%
# Set a custom allowed content length:
#
#http.max_content_length: 100mb
# Disable HTTP completely:
#
#http.enabled: false
################################### Gateway ###################################
# The gateway allows for persisting the cluster state between full cluster
# restarts. Every change to the state (such as adding an index) will be stored
# in the gateway, and when the cluster starts up for the first time,
# it will read its state from the gateway.
# There are several types of gateway implementations. For more information, see
# <http://elasticsearch.org/guide/en/elasticsearch/reference/current/modules-gateway.html>.
# The default gateway type is the "local" gateway (recommended):
#
#gateway.type: local
# Settings below control how and when to start the initial recovery process on
# a full cluster restart (to reuse as much local data as possible when using shared
# gateway).
# Allow recovery process after N nodes in a cluster are up:
#
#gateway.recover_after_nodes: 1
# Set the timeout to initiate the recovery process, once the N nodes
# from previous setting are up (accepts time value):
#
#gateway.recover_after_time: 5m
# Set how many nodes are expected in this cluster. Once these N nodes
# are up (and recover_after_nodes is met), begin recovery process immediately
# (without waiting for recover_after_time to expire):
#
#gateway.expected_nodes: 2
############################# Recovery Throttling #############################
# These settings allow to control the process of shards allocation between
# nodes during initial recovery, replica allocation, rebalancing,
# or when adding and removing nodes.
# Set the number of concurrent recoveries happening on a node:
#
# 1. During the initial recovery
#
#cluster.routing.allocation.node_initial_primaries_recoveries: 4
#
# 2. During adding/removing nodes, rebalancing, etc
#
#cluster.routing.allocation.node_concurrent_recoveries: 2
# Set to throttle throughput when recovering (eg. 100mb, by default 20mb):
#
#indices.recovery.max_bytes_per_sec: 20mb
# Set to limit the number of open concurrent streams when
# recovering a shard from a peer:
#
#indices.recovery.concurrent_streams: 5
################################## Discovery ##################################
# Discovery infrastructure ensures nodes can be found within a cluster
# and master node is elected. Multicast discovery is the default.
# Set to ensure a node sees N other master eligible nodes to be considered
# operational within the cluster. This should be set to a quorum/majority of
# the master-eligible nodes in the cluster.
#
discovery.zen.minimum_master_nodes: 1
# Set the time to wait for ping responses from other nodes when discovering.
# Set this option to a higher value on a slow or congested network
# to minimize discovery failures:
#
#discovery.zen.ping.timeout: 3s
# For more information, see
# <http://elasticsearch.org/guide/en/elasticsearch/reference/current/modules-discovery-zen.html>
# Unicast discovery allows to explicitly control which nodes will be used
# to discover the cluster. It can be used when multicast is not present,
# or to restrict the cluster communication-wise.
#
# 1. Disable multicast discovery (enabled by default):
#
discovery.zen.ping.multicast.enabled: false
# 2. Configure an initial list of master nodes in the cluster
# to perform discovery when new nodes (master or data) are started:
#
# discovery.zen.ping.unicast.hosts: [127.0.0.1]
# EC2 discovery allows to use AWS EC2 API in order to perform discovery.
#
# You have to install the cloud-aws plugin for enabling the EC2 discovery.
#
# For more information, see
# <http://elasticsearch.org/guide/en/elasticsearch/reference/current/modules-discovery-ec2.html>
#
# See <http://elasticsearch.org/tutorials/elasticsearch-on-ec2/>
# for a step-by-step tutorial.
# GCE discovery allows to use Google Compute Engine API in order to perform discovery.
#
# You have to install the cloud-gce plugin for enabling the GCE discovery.
#
# For more information, see <https://github.com/elasticsearch/elasticsearch-cloud-gce>.
# Azure discovery allows to use Azure API in order to perform discovery.
#
# You have to install the cloud-azure plugin for enabling the Azure discovery.
#
# For more information, see <https://github.com/elasticsearch/elasticsearch-cloud-azure>.
################################## Slow Log ##################################
# Shard level query and fetch threshold logging.
#index.search.slowlog.threshold.query.warn: 10s
#index.search.slowlog.threshold.query.info: 5s
#index.search.slowlog.threshold.query.debug: 2s
#index.search.slowlog.threshold.query.trace: 500ms
#index.search.slowlog.threshold.fetch.warn: 1s
#index.search.slowlog.threshold.fetch.info: 800ms
#index.search.slowlog.threshold.fetch.debug: 500ms
#index.search.slowlog.threshold.fetch.trace: 200ms
#index.indexing.slowlog.threshold.index.warn: 10s
#index.indexing.slowlog.threshold.index.info: 5s
#index.indexing.slowlog.threshold.index.debug: 2s
#index.indexing.slowlog.threshold.index.trace: 500ms
################################## GC Logging ################################
#monitor.jvm.gc.young.warn: 1000ms
#monitor.jvm.gc.young.info: 700ms
#monitor.jvm.gc.young.debug: 400ms
#monitor.jvm.gc.old.warn: 10s
#monitor.jvm.gc.old.info: 5s
#monitor.jvm.gc.old.debug: 2s
################################## Security ################################
# Uncomment if you want to enable JSONP as a valid return transport on the
# http server. With this enabled, it may pose a security risk, so disabling
# it unless you need it is recommended (it is disabled by default).
#
#http.jsonp.enable: true

View File

@ -0,0 +1,624 @@
{
"id": null,
"title": "Kibana",
"tags": [
"logs"
],
"style": "dark",
"timezone": "browser",
"editable": true,
"sharedCrosshair": false,
"hideControls": false,
"time": {
"from": "now-1h",
"to": "now"
},
"timepicker": {
"refresh_intervals": [
"5s",
"10s",
"30s",
"1m",
"5m",
"15m",
"30m",
"1h",
"2h",
"1d"
],
"time_options": [
"5m",
"15m",
"1h",
"6h",
"12h",
"24h",
"2d",
"7d",
"30d"
]
},
"templating": {
"list": []
},
"annotations": {
"list": []
},
"schemaVersion": 13,
"version": 7,
"links": [],
"gnetId": null,
"rows": [
{
"title": "Dashboard Row",
"panels": [
{
"cacheTimeout": null,
"colorBackground": true,
"colorValue": false,
"colors": [
"rgba(225, 40, 40, 0.59)",
"rgba(245, 150, 40, 0.73)",
"rgba(71, 212, 59, 0.4)"
],
"format": "none",
"gauge": {
"maxValue": 100,
"minValue": 0,
"show": false,
"thresholdLabels": false,
"thresholdMarkers": true
},
"id": 1,
"interval": null,
"links": [],
"mappingType": 1,
"mappingTypes": [
],
"maxDataPoints": 100,
"nullPointMode": "connected",
"nullText": null,
"postfix": "",
"postfixFontSize": "50%",
"prefix": "",
"prefixFontSize": "50%",
"span": 4,
"sparkline": {
"fillColor": "rgba(31, 118, 189, 0.18)",
"full": false,
"lineColor": "rgb(31, 120, 193)",
"show": false
},
"targets": [
{
"aggregator": "none",
"dimensions": [
{
"key": "service",
"value": "kibana"
}
],
"error": "",
"metric": "process.pid_count",
"period": "300",
"refId": "A"
}
],
"thresholds": "0.2,0.8",
"title": "Kibana",
"type": "singlestat",
"valueFontSize": "80%",
"valueMaps": [
{
"value": "0",
"op": "=",
"text": "DOWN"
},
{
"value": "1",
"op": "=",
"text": "UP"
},
{
"value": "2",
"op": "=",
"text": "UP"
},
{
"value": "3",
"op": "=",
"text": "UP"
},
{
"value": "4",
"op": "=",
"text": "UP"
},
{
"value": "5",
"op": "=",
"text": "UP"
},
{
"value": "6",
"op": "=",
"text": "UP"
},
{
"value": "7",
"op": "=",
"text": "UP"
},
{
"value": "8",
"op": "=",
"text": "UP"
},
{
"value": "9",
"op": "=",
"text": "UP"
},
{
"value": "10",
"op": "=",
"text": "UP"
},
{
"value": "11",
"op": "=",
"text": "UP"
},
{
"value": "12",
"op": "=",
"text": "UP"
}
],
"valueName": "current"
},
{
"aliasColors": {},
"bars": false,
"datasource": null,
"fill": 1,
"id": 2,
"legend": {
"avg": false,
"current": false,
"max": false,
"min": false,
"show": true,
"total": false,
"values": false
},
"lines": true,
"linewidth": 1,
"links": [],
"nullPointMode": "null",
"percentage": false,
"pointradius": 5,
"points": false,
"renderer": "flot",
"seriesOverrides": [],
"span": 4,
"stack": false,
"steppedLine": false,
"targets": [
{
"aggregator": "avg",
"dimensions": [
{
"key": "process_name",
"value": "kibana"
}
],
"error": "",
"metric": "process.cpu_perc",
"period": "300",
"refId": "A"
}
],
"thresholds": [],
"timeFrom": null,
"timeShift": null,
"title": "CPU",
"tooltip": {
"shared": true,
"sort": 0,
"value_type": "individual"
},
"type": "graph",
"xaxis": {
"mode": "time",
"name": null,
"show": true,
"values": []
},
"yaxes": [
{
"format": "percent",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
},
{
"format": "short",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
}
]
},
{
"aliasColors": {},
"bars": false,
"datasource": null,
"fill": 1,
"id": 3,
"legend": {
"avg": false,
"current": false,
"max": false,
"min": false,
"show": true,
"total": false,
"values": false
},
"lines": true,
"linewidth": 1,
"links": [],
"nullPointMode": "null",
"percentage": false,
"pointradius": 5,
"points": false,
"renderer": "flot",
"seriesOverrides": [],
"span": 4,
"stack": false,
"steppedLine": false,
"targets": [
{
"aggregator": "avg",
"dimensions": [
{
"key": "process_name",
"value": "kibana"
}
],
"error": "",
"metric": "process.mem.rss_mbytes",
"period": "300",
"refId": "A"
}
],
"thresholds": [],
"timeFrom": null,
"timeShift": null,
"title": "Memory",
"tooltip": {
"shared": true,
"sort": 0,
"value_type": "individual"
},
"type": "graph",
"xaxis": {
"mode": "time",
"name": null,
"show": true,
"values": []
},
"yaxes": [
{
"format": "bytes",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
},
{
"format": "short",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
}
]
}
],
"showTitle": false,
"titleSize": "h6",
"height": "250px",
"repeat": null,
"repeatRowId": null,
"repeatIteration": null,
"collapse": false
},
{
"title": "Dashboard Row",
"panels": [
{
"aliasColors": {},
"bars": false,
"datasource": null,
"fill": 1,
"id": 4,
"legend": {
"avg": false,
"current": false,
"max": false,
"min": false,
"show": true,
"total": false,
"values": false
},
"lines": true,
"linewidth": 1,
"links": [],
"nullPointMode": "null",
"percentage": false,
"pointradius": 5,
"points": false,
"renderer": "flot",
"seriesOverrides": [],
"span": 6,
"stack": false,
"steppedLine": false,
"targets": [
{
"aggregator": "avg",
"dimensions": [
{
"key": "process_name",
"value": "kibana"
}
],
"error": "",
"metric": "process.io.read_count",
"period": "300",
"refId": "A"
},
{
"aggregator": "avg",
"dimensions": [
{
"key": "process_name",
"value": "kibana"
}
],
"error": "",
"metric": "process.io.write_count",
"period": "300",
"refId": "B"
}
],
"thresholds": [],
"timeFrom": null,
"timeShift": null,
"title": "IO Count",
"tooltip": {
"shared": true,
"sort": 0,
"value_type": "individual"
},
"type": "graph",
"xaxis": {
"mode": "time",
"name": null,
"show": true,
"values": []
},
"yaxes": [
{
"format": "short",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
},
{
"format": "short",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
}
]
},
{
"aliasColors": {},
"bars": false,
"datasource": null,
"fill": 1,
"id": 5,
"legend": {
"avg": false,
"current": false,
"max": false,
"min": false,
"show": true,
"total": false,
"values": false
},
"lines": true,
"linewidth": 1,
"links": [],
"nullPointMode": "null",
"percentage": false,
"pointradius": 5,
"points": false,
"renderer": "flot",
"seriesOverrides": [],
"span": 6,
"stack": false,
"steppedLine": false,
"targets": [
{
"aggregator": "avg",
"dimensions": [
{
"key": "process_name",
"value": "kibana"
}
],
"error": "",
"metric": "process.io.read_kbytes",
"period": "300",
"refId": "A"
},
{
"aggregator": "avg",
"dimensions": [
{
"key": "process_name",
"value": "kibana"
}
],
"error": "",
"metric": "process.io.write_kbytes",
"period": "300",
"refId": "B"
}
],
"thresholds": [],
"timeFrom": null,
"timeShift": null,
"title": "IO Read/Write [kB]",
"tooltip": {
"shared": true,
"sort": 0,
"value_type": "individual"
},
"type": "graph",
"xaxis": {
"mode": "time",
"name": null,
"show": true,
"values": []
},
"yaxes": [
{
"format": "kbytes",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
},
{
"format": "short",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
}
]
}
],
"showTitle": false,
"titleSize": "h6",
"height": 250,
"repeat": null,
"repeatRowId": null,
"repeatIteration": null,
"collapse": false
},
{
"title": "Dashboard Row",
"panels": [
{
"aliasColors": {},
"bars": false,
"datasource": null,
"fill": 1,
"id": 6,
"legend": {
"avg": false,
"current": false,
"max": false,
"min": false,
"show": true,
"total": false,
"values": false
},
"lines": true,
"linewidth": 1,
"nullPointMode": "null",
"percentage": false,
"pointradius": 5,
"points": false,
"renderer": "flot",
"seriesOverrides": [],
"span": 12,
"stack": false,
"steppedLine": false,
"targets": [
{
"aggregator": "avg",
"dimensions": [
{
"key": "process_name",
"value": "kibana"
}
],
"error": "",
"metric": "process.open_file_descriptors",
"period": "300",
"refId": "A"
}
],
"thresholds": [],
"timeFrom": null,
"timeShift": null,
"title": "Open File Descriptors",
"tooltip": {
"shared": true,
"sort": 0,
"value_type": "individual"
},
"type": "graph",
"xaxis": {
"mode": "time",
"name": null,
"show": true,
"values": []
},
"yaxes": [
{
"format": "short",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
},
{
"format": "short",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
}
]
}
],
"showTitle": false,
"titleSize": "h6",
"height": 250,
"repeat": null,
"repeatRowId": null,
"repeatIteration": null,
"collapse": false
}
]
}

View File

@ -0,0 +1,624 @@
{
"id": null,
"title": "Log Transformer",
"tags": [
"logs"
],
"style": "dark",
"timezone": "browser",
"editable": true,
"sharedCrosshair": false,
"hideControls": false,
"time": {
"from": "now-1h",
"to": "now"
},
"timepicker": {
"refresh_intervals": [
"5s",
"10s",
"30s",
"1m",
"5m",
"15m",
"30m",
"1h",
"2h",
"1d"
],
"time_options": [
"5m",
"15m",
"1h",
"6h",
"12h",
"24h",
"2d",
"7d",
"30d"
]
},
"templating": {
"list": []
},
"annotations": {
"list": []
},
"schemaVersion": 13,
"version": 7,
"links": [],
"gnetId": null,
"rows": [
{
"title": "Dashboard Row",
"panels": [
{
"cacheTimeout": null,
"colorBackground": true,
"colorValue": false,
"colors": [
"rgba(225, 40, 40, 0.59)",
"rgba(245, 150, 40, 0.73)",
"rgba(71, 212, 59, 0.4)"
],
"format": "none",
"gauge": {
"maxValue": 100,
"minValue": 0,
"show": false,
"thresholdLabels": false,
"thresholdMarkers": true
},
"id": 1,
"interval": null,
"links": [],
"mappingType": 1,
"mappingTypes": [
],
"maxDataPoints": 100,
"nullPointMode": "connected",
"nullText": null,
"postfix": "",
"postfixFontSize": "50%",
"prefix": "",
"prefixFontSize": "50%",
"span": 4,
"sparkline": {
"fillColor": "rgba(31, 118, 189, 0.18)",
"full": false,
"lineColor": "rgb(31, 120, 193)",
"show": false
},
"targets": [
{
"aggregator": "none",
"dimensions": [
{
"key": "service",
"value": "log-transformer"
}
],
"error": "",
"metric": "process.pid_count",
"period": "300",
"refId": "A"
}
],
"thresholds": "0.2,0.8",
"title": "Log Transformer",
"type": "singlestat",
"valueFontSize": "80%",
"valueMaps": [
{
"value": "0",
"op": "=",
"text": "DOWN"
},
{
"value": "1",
"op": "=",
"text": "UP"
},
{
"value": "2",
"op": "=",
"text": "UP"
},
{
"value": "3",
"op": "=",
"text": "UP"
},
{
"value": "4",
"op": "=",
"text": "UP"
},
{
"value": "5",
"op": "=",
"text": "UP"
},
{
"value": "6",
"op": "=",
"text": "UP"
},
{
"value": "7",
"op": "=",
"text": "UP"
},
{
"value": "8",
"op": "=",
"text": "UP"
},
{
"value": "9",
"op": "=",
"text": "UP"
},
{
"value": "10",
"op": "=",
"text": "UP"
},
{
"value": "11",
"op": "=",
"text": "UP"
},
{
"value": "12",
"op": "=",
"text": "UP"
}
],
"valueName": "current"
},
{
"aliasColors": {},
"bars": false,
"datasource": null,
"fill": 1,
"id": 2,
"legend": {
"avg": false,
"current": false,
"max": false,
"min": false,
"show": true,
"total": false,
"values": false
},
"lines": true,
"linewidth": 1,
"links": [],
"nullPointMode": "null",
"percentage": false,
"pointradius": 5,
"points": false,
"renderer": "flot",
"seriesOverrides": [],
"span": 4,
"stack": false,
"steppedLine": false,
"targets": [
{
"aggregator": "avg",
"dimensions": [
{
"key": "process_name",
"value": "log-transformer"
}
],
"error": "",
"metric": "process.cpu_perc",
"period": "300",
"refId": "A"
}
],
"thresholds": [],
"timeFrom": null,
"timeShift": null,
"title": "CPU",
"tooltip": {
"shared": true,
"sort": 0,
"value_type": "individual"
},
"type": "graph",
"xaxis": {
"mode": "time",
"name": null,
"show": true,
"values": []
},
"yaxes": [
{
"format": "percent",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
},
{
"format": "short",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
}
]
},
{
"aliasColors": {},
"bars": false,
"datasource": null,
"fill": 1,
"id": 3,
"legend": {
"avg": false,
"current": false,
"max": false,
"min": false,
"show": true,
"total": false,
"values": false
},
"lines": true,
"linewidth": 1,
"links": [],
"nullPointMode": "null",
"percentage": false,
"pointradius": 5,
"points": false,
"renderer": "flot",
"seriesOverrides": [],
"span": 4,
"stack": false,
"steppedLine": false,
"targets": [
{
"aggregator": "avg",
"dimensions": [
{
"key": "process_name",
"value": "log-transformer"
}
],
"error": "",
"metric": "process.mem.rss_mbytes",
"period": "300",
"refId": "A"
}
],
"thresholds": [],
"timeFrom": null,
"timeShift": null,
"title": "Memory",
"tooltip": {
"shared": true,
"sort": 0,
"value_type": "individual"
},
"type": "graph",
"xaxis": {
"mode": "time",
"name": null,
"show": true,
"values": []
},
"yaxes": [
{
"format": "bytes",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
},
{
"format": "short",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
}
]
}
],
"showTitle": false,
"titleSize": "h6",
"height": "250px",
"repeat": null,
"repeatRowId": null,
"repeatIteration": null,
"collapse": false
},
{
"title": "Dashboard Row",
"panels": [
{
"aliasColors": {},
"bars": false,
"datasource": null,
"fill": 1,
"id": 4,
"legend": {
"avg": false,
"current": false,
"max": false,
"min": false,
"show": true,
"total": false,
"values": false
},
"lines": true,
"linewidth": 1,
"links": [],
"nullPointMode": "null",
"percentage": false,
"pointradius": 5,
"points": false,
"renderer": "flot",
"seriesOverrides": [],
"span": 6,
"stack": false,
"steppedLine": false,
"targets": [
{
"aggregator": "avg",
"dimensions": [
{
"key": "process_name",
"value": "log-transformer"
}
],
"error": "",
"metric": "process.io.read_count",
"period": "300",
"refId": "A"
},
{
"aggregator": "avg",
"dimensions": [
{
"key": "process_name",
"value": "log-transformer"
}
],
"error": "",
"metric": "process.io.write_count",
"period": "300",
"refId": "B"
}
],
"thresholds": [],
"timeFrom": null,
"timeShift": null,
"title": "IO Count",
"tooltip": {
"shared": true,
"sort": 0,
"value_type": "individual"
},
"type": "graph",
"xaxis": {
"mode": "time",
"name": null,
"show": true,
"values": []
},
"yaxes": [
{
"format": "short",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
},
{
"format": "short",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
}
]
},
{
"aliasColors": {},
"bars": false,
"datasource": null,
"fill": 1,
"id": 5,
"legend": {
"avg": false,
"current": false,
"max": false,
"min": false,
"show": true,
"total": false,
"values": false
},
"lines": true,
"linewidth": 1,
"links": [],
"nullPointMode": "null",
"percentage": false,
"pointradius": 5,
"points": false,
"renderer": "flot",
"seriesOverrides": [],
"span": 6,
"stack": false,
"steppedLine": false,
"targets": [
{
"aggregator": "avg",
"dimensions": [
{
"key": "process_name",
"value": "log-transformer"
}
],
"error": "",
"metric": "process.io.read_kbytes",
"period": "300",
"refId": "A"
},
{
"aggregator": "avg",
"dimensions": [
{
"key": "process_name",
"value": "log-transformer"
}
],
"error": "",
"metric": "process.io.write_kbytes",
"period": "300",
"refId": "B"
}
],
"thresholds": [],
"timeFrom": null,
"timeShift": null,
"title": "IO Read/Write [kB]",
"tooltip": {
"shared": true,
"sort": 0,
"value_type": "individual"
},
"type": "graph",
"xaxis": {
"mode": "time",
"name": null,
"show": true,
"values": []
},
"yaxes": [
{
"format": "kbytes",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
},
{
"format": "short",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
}
]
}
],
"showTitle": false,
"titleSize": "h6",
"height": 250,
"repeat": null,
"repeatRowId": null,
"repeatIteration": null,
"collapse": false
},
{
"title": "Dashboard Row",
"panels": [
{
"aliasColors": {},
"bars": false,
"datasource": null,
"fill": 1,
"id": 6,
"legend": {
"avg": false,
"current": false,
"max": false,
"min": false,
"show": true,
"total": false,
"values": false
},
"lines": true,
"linewidth": 1,
"nullPointMode": "null",
"percentage": false,
"pointradius": 5,
"points": false,
"renderer": "flot",
"seriesOverrides": [],
"span": 12,
"stack": false,
"steppedLine": false,
"targets": [
{
"aggregator": "avg",
"dimensions": [
{
"key": "process_name",
"value": "log-transformer"
}
],
"error": "",
"metric": "process.open_file_descriptors",
"period": "300",
"refId": "A"
}
],
"thresholds": [],
"timeFrom": null,
"timeShift": null,
"title": "Open File Descriptors",
"tooltip": {
"shared": true,
"sort": 0,
"value_type": "individual"
},
"type": "graph",
"xaxis": {
"mode": "time",
"name": null,
"show": true,
"values": []
},
"yaxes": [
{
"format": "short",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
},
{
"format": "short",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
}
]
}
],
"showTitle": false,
"titleSize": "h6",
"height": 250,
"repeat": null,
"repeatRowId": null,
"repeatIteration": null,
"collapse": false
}
]
}

View File

@ -0,0 +1,624 @@
{
"id": null,
"title": "Log Metrics",
"tags": [
"logs"
],
"style": "dark",
"timezone": "browser",
"editable": true,
"sharedCrosshair": false,
"hideControls": false,
"time": {
"from": "now-1h",
"to": "now"
},
"timepicker": {
"refresh_intervals": [
"5s",
"10s",
"30s",
"1m",
"5m",
"15m",
"30m",
"1h",
"2h",
"1d"
],
"time_options": [
"5m",
"15m",
"1h",
"6h",
"12h",
"24h",
"2d",
"7d",
"30d"
]
},
"templating": {
"list": []
},
"annotations": {
"list": []
},
"schemaVersion": 13,
"version": 7,
"links": [],
"gnetId": null,
"rows": [
{
"title": "Dashboard Row",
"panels": [
{
"cacheTimeout": null,
"colorBackground": true,
"colorValue": false,
"colors": [
"rgba(225, 40, 40, 0.59)",
"rgba(245, 150, 40, 0.73)",
"rgba(71, 212, 59, 0.4)"
],
"format": "none",
"gauge": {
"maxValue": 100,
"minValue": 0,
"show": false,
"thresholdLabels": false,
"thresholdMarkers": true
},
"id": 1,
"interval": null,
"links": [],
"mappingType": 1,
"mappingTypes": [
],
"maxDataPoints": 100,
"nullPointMode": "connected",
"nullText": null,
"postfix": "",
"postfixFontSize": "50%",
"prefix": "",
"prefixFontSize": "50%",
"span": 4,
"sparkline": {
"fillColor": "rgba(31, 118, 189, 0.18)",
"full": false,
"lineColor": "rgb(31, 120, 193)",
"show": false
},
"targets": [
{
"aggregator": "none",
"dimensions": [
{
"key": "service",
"value": "log-metrics"
}
],
"error": "",
"metric": "process.pid_count",
"period": "300",
"refId": "A"
}
],
"thresholds": "0.2,0.8",
"title": "Log Metrics",
"type": "singlestat",
"valueFontSize": "80%",
"valueMaps": [
{
"value": "0",
"op": "=",
"text": "DOWN"
},
{
"value": "1",
"op": "=",
"text": "UP"
},
{
"value": "2",
"op": "=",
"text": "UP"
},
{
"value": "3",
"op": "=",
"text": "UP"
},
{
"value": "4",
"op": "=",
"text": "UP"
},
{
"value": "5",
"op": "=",
"text": "UP"
},
{
"value": "6",
"op": "=",
"text": "UP"
},
{
"value": "7",
"op": "=",
"text": "UP"
},
{
"value": "8",
"op": "=",
"text": "UP"
},
{
"value": "9",
"op": "=",
"text": "UP"
},
{
"value": "10",
"op": "=",
"text": "UP"
},
{
"value": "11",
"op": "=",
"text": "UP"
},
{
"value": "12",
"op": "=",
"text": "UP"
}
],
"valueName": "current"
},
{
"aliasColors": {},
"bars": false,
"datasource": null,
"fill": 1,
"id": 2,
"legend": {
"avg": false,
"current": false,
"max": false,
"min": false,
"show": true,
"total": false,
"values": false
},
"lines": true,
"linewidth": 1,
"links": [],
"nullPointMode": "null",
"percentage": false,
"pointradius": 5,
"points": false,
"renderer": "flot",
"seriesOverrides": [],
"span": 4,
"stack": false,
"steppedLine": false,
"targets": [
{
"aggregator": "avg",
"dimensions": [
{
"key": "process_name",
"value": "log-metrics"
}
],
"error": "",
"metric": "process.cpu_perc",
"period": "300",
"refId": "A"
}
],
"thresholds": [],
"timeFrom": null,
"timeShift": null,
"title": "CPU",
"tooltip": {
"shared": true,
"sort": 0,
"value_type": "individual"
},
"type": "graph",
"xaxis": {
"mode": "time",
"name": null,
"show": true,
"values": []
},
"yaxes": [
{
"format": "percent",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
},
{
"format": "short",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
}
]
},
{
"aliasColors": {},
"bars": false,
"datasource": null,
"fill": 1,
"id": 3,
"legend": {
"avg": false,
"current": false,
"max": false,
"min": false,
"show": true,
"total": false,
"values": false
},
"lines": true,
"linewidth": 1,
"links": [],
"nullPointMode": "null",
"percentage": false,
"pointradius": 5,
"points": false,
"renderer": "flot",
"seriesOverrides": [],
"span": 4,
"stack": false,
"steppedLine": false,
"targets": [
{
"aggregator": "avg",
"dimensions": [
{
"key": "process_name",
"value": "log-metrics"
}
],
"error": "",
"metric": "process.mem.rss_mbytes",
"period": "300",
"refId": "A"
}
],
"thresholds": [],
"timeFrom": null,
"timeShift": null,
"title": "Memory",
"tooltip": {
"shared": true,
"sort": 0,
"value_type": "individual"
},
"type": "graph",
"xaxis": {
"mode": "time",
"name": null,
"show": true,
"values": []
},
"yaxes": [
{
"format": "bytes",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
},
{
"format": "short",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
}
]
}
],
"showTitle": false,
"titleSize": "h6",
"height": "250px",
"repeat": null,
"repeatRowId": null,
"repeatIteration": null,
"collapse": false
},
{
"title": "Dashboard Row",
"panels": [
{
"aliasColors": {},
"bars": false,
"datasource": null,
"fill": 1,
"id": 4,
"legend": {
"avg": false,
"current": false,
"max": false,
"min": false,
"show": true,
"total": false,
"values": false
},
"lines": true,
"linewidth": 1,
"links": [],
"nullPointMode": "null",
"percentage": false,
"pointradius": 5,
"points": false,
"renderer": "flot",
"seriesOverrides": [],
"span": 6,
"stack": false,
"steppedLine": false,
"targets": [
{
"aggregator": "avg",
"dimensions": [
{
"key": "process_name",
"value": "log-metrics"
}
],
"error": "",
"metric": "process.io.read_count",
"period": "300",
"refId": "A"
},
{
"aggregator": "avg",
"dimensions": [
{
"key": "process_name",
"value": "log-metrics"
}
],
"error": "",
"metric": "process.io.write_count",
"period": "300",
"refId": "B"
}
],
"thresholds": [],
"timeFrom": null,
"timeShift": null,
"title": "IO Count",
"tooltip": {
"shared": true,
"sort": 0,
"value_type": "individual"
},
"type": "graph",
"xaxis": {
"mode": "time",
"name": null,
"show": true,
"values": []
},
"yaxes": [
{
"format": "short",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
},
{
"format": "short",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
}
]
},
{
"aliasColors": {},
"bars": false,
"datasource": null,
"fill": 1,
"id": 5,
"legend": {
"avg": false,
"current": false,
"max": false,
"min": false,
"show": true,
"total": false,
"values": false
},
"lines": true,
"linewidth": 1,
"links": [],
"nullPointMode": "null",
"percentage": false,
"pointradius": 5,
"points": false,
"renderer": "flot",
"seriesOverrides": [],
"span": 6,
"stack": false,
"steppedLine": false,
"targets": [
{
"aggregator": "avg",
"dimensions": [
{
"key": "process_name",
"value": "log-metrics"
}
],
"error": "",
"metric": "process.io.read_kbytes",
"period": "300",
"refId": "A"
},
{
"aggregator": "avg",
"dimensions": [
{
"key": "process_name",
"value": "log-metrics"
}
],
"error": "",
"metric": "process.io.write_kbytes",
"period": "300",
"refId": "B"
}
],
"thresholds": [],
"timeFrom": null,
"timeShift": null,
"title": "IO Read/Write [kB]",
"tooltip": {
"shared": true,
"sort": 0,
"value_type": "individual"
},
"type": "graph",
"xaxis": {
"mode": "time",
"name": null,
"show": true,
"values": []
},
"yaxes": [
{
"format": "kbytes",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
},
{
"format": "short",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
}
]
}
],
"showTitle": false,
"titleSize": "h6",
"height": 250,
"repeat": null,
"repeatRowId": null,
"repeatIteration": null,
"collapse": false
},
{
"title": "Dashboard Row",
"panels": [
{
"aliasColors": {},
"bars": false,
"datasource": null,
"fill": 1,
"id": 6,
"legend": {
"avg": false,
"current": false,
"max": false,
"min": false,
"show": true,
"total": false,
"values": false
},
"lines": true,
"linewidth": 1,
"nullPointMode": "null",
"percentage": false,
"pointradius": 5,
"points": false,
"renderer": "flot",
"seriesOverrides": [],
"span": 12,
"stack": false,
"steppedLine": false,
"targets": [
{
"aggregator": "avg",
"dimensions": [
{
"key": "process_name",
"value": "log-metrics"
}
],
"error": "",
"metric": "process.open_file_descriptors",
"period": "300",
"refId": "A"
}
],
"thresholds": [],
"timeFrom": null,
"timeShift": null,
"title": "Open File Descriptors",
"tooltip": {
"shared": true,
"sort": 0,
"value_type": "individual"
},
"type": "graph",
"xaxis": {
"mode": "time",
"name": null,
"show": true,
"values": []
},
"yaxes": [
{
"format": "short",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
},
{
"format": "short",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
}
]
}
],
"showTitle": false,
"titleSize": "h6",
"height": 250,
"repeat": null,
"repeatRowId": null,
"repeatIteration": null,
"collapse": false
}
]
}

View File

@ -0,0 +1,624 @@
{
"id": null,
"title": "Log Persister",
"tags": [
"logs"
],
"style": "dark",
"timezone": "browser",
"editable": true,
"sharedCrosshair": false,
"hideControls": false,
"time": {
"from": "now-1h",
"to": "now"
},
"timepicker": {
"refresh_intervals": [
"5s",
"10s",
"30s",
"1m",
"5m",
"15m",
"30m",
"1h",
"2h",
"1d"
],
"time_options": [
"5m",
"15m",
"1h",
"6h",
"12h",
"24h",
"2d",
"7d",
"30d"
]
},
"templating": {
"list": []
},
"annotations": {
"list": []
},
"schemaVersion": 13,
"version": 7,
"links": [],
"gnetId": null,
"rows": [
{
"title": "Dashboard Row",
"panels": [
{
"cacheTimeout": null,
"colorBackground": true,
"colorValue": false,
"colors": [
"rgba(225, 40, 40, 0.59)",
"rgba(245, 150, 40, 0.73)",
"rgba(71, 212, 59, 0.4)"
],
"format": "none",
"gauge": {
"maxValue": 100,
"minValue": 0,
"show": false,
"thresholdLabels": false,
"thresholdMarkers": true
},
"id": 1,
"interval": null,
"links": [],
"mappingType": 1,
"mappingTypes": [
],
"maxDataPoints": 100,
"nullPointMode": "connected",
"nullText": null,
"postfix": "",
"postfixFontSize": "50%",
"prefix": "",
"prefixFontSize": "50%",
"span": 4,
"sparkline": {
"fillColor": "rgba(31, 118, 189, 0.18)",
"full": false,
"lineColor": "rgb(31, 120, 193)",
"show": false
},
"targets": [
{
"aggregator": "none",
"dimensions": [
{
"key": "service",
"value": "log-persister"
}
],
"error": "",
"metric": "process.pid_count",
"period": "300",
"refId": "A"
}
],
"thresholds": "0.2,0.8",
"title": "Log Persister",
"type": "singlestat",
"valueFontSize": "80%",
"valueMaps": [
{
"value": "0",
"op": "=",
"text": "DOWN"
},
{
"value": "1",
"op": "=",
"text": "UP"
},
{
"value": "2",
"op": "=",
"text": "UP"
},
{
"value": "3",
"op": "=",
"text": "UP"
},
{
"value": "4",
"op": "=",
"text": "UP"
},
{
"value": "5",
"op": "=",
"text": "UP"
},
{
"value": "6",
"op": "=",
"text": "UP"
},
{
"value": "7",
"op": "=",
"text": "UP"
},
{
"value": "8",
"op": "=",
"text": "UP"
},
{
"value": "9",
"op": "=",
"text": "UP"
},
{
"value": "10",
"op": "=",
"text": "UP"
},
{
"value": "11",
"op": "=",
"text": "UP"
},
{
"value": "12",
"op": "=",
"text": "UP"
}
],
"valueName": "current"
},
{
"aliasColors": {},
"bars": false,
"datasource": null,
"fill": 1,
"id": 2,
"legend": {
"avg": false,
"current": false,
"max": false,
"min": false,
"show": true,
"total": false,
"values": false
},
"lines": true,
"linewidth": 1,
"links": [],
"nullPointMode": "null",
"percentage": false,
"pointradius": 5,
"points": false,
"renderer": "flot",
"seriesOverrides": [],
"span": 4,
"stack": false,
"steppedLine": false,
"targets": [
{
"aggregator": "avg",
"dimensions": [
{
"key": "process_name",
"value": "log-persister"
}
],
"error": "",
"metric": "process.cpu_perc",
"period": "300",
"refId": "A"
}
],
"thresholds": [],
"timeFrom": null,
"timeShift": null,
"title": "CPU",
"tooltip": {
"shared": true,
"sort": 0,
"value_type": "individual"
},
"type": "graph",
"xaxis": {
"mode": "time",
"name": null,
"show": true,
"values": []
},
"yaxes": [
{
"format": "percent",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
},
{
"format": "short",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
}
]
},
{
"aliasColors": {},
"bars": false,
"datasource": null,
"fill": 1,
"id": 3,
"legend": {
"avg": false,
"current": false,
"max": false,
"min": false,
"show": true,
"total": false,
"values": false
},
"lines": true,
"linewidth": 1,
"links": [],
"nullPointMode": "null",
"percentage": false,
"pointradius": 5,
"points": false,
"renderer": "flot",
"seriesOverrides": [],
"span": 4,
"stack": false,
"steppedLine": false,
"targets": [
{
"aggregator": "avg",
"dimensions": [
{
"key": "process_name",
"value": "log-persister"
}
],
"error": "",
"metric": "process.mem.rss_mbytes",
"period": "300",
"refId": "A"
}
],
"thresholds": [],
"timeFrom": null,
"timeShift": null,
"title": "Memory",
"tooltip": {
"shared": true,
"sort": 0,
"value_type": "individual"
},
"type": "graph",
"xaxis": {
"mode": "time",
"name": null,
"show": true,
"values": []
},
"yaxes": [
{
"format": "bytes",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
},
{
"format": "short",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
}
]
}
],
"showTitle": false,
"titleSize": "h6",
"height": "250px",
"repeat": null,
"repeatRowId": null,
"repeatIteration": null,
"collapse": false
},
{
"title": "Dashboard Row",
"panels": [
{
"aliasColors": {},
"bars": false,
"datasource": null,
"fill": 1,
"id": 4,
"legend": {
"avg": false,
"current": false,
"max": false,
"min": false,
"show": true,
"total": false,
"values": false
},
"lines": true,
"linewidth": 1,
"links": [],
"nullPointMode": "null",
"percentage": false,
"pointradius": 5,
"points": false,
"renderer": "flot",
"seriesOverrides": [],
"span": 6,
"stack": false,
"steppedLine": false,
"targets": [
{
"aggregator": "avg",
"dimensions": [
{
"key": "process_name",
"value": "log-persister"
}
],
"error": "",
"metric": "process.io.read_count",
"period": "300",
"refId": "A"
},
{
"aggregator": "avg",
"dimensions": [
{
"key": "process_name",
"value": "log-persister"
}
],
"error": "",
"metric": "process.io.write_count",
"period": "300",
"refId": "B"
}
],
"thresholds": [],
"timeFrom": null,
"timeShift": null,
"title": "IO Count",
"tooltip": {
"shared": true,
"sort": 0,
"value_type": "individual"
},
"type": "graph",
"xaxis": {
"mode": "time",
"name": null,
"show": true,
"values": []
},
"yaxes": [
{
"format": "short",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
},
{
"format": "short",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
}
]
},
{
"aliasColors": {},
"bars": false,
"datasource": null,
"fill": 1,
"id": 5,
"legend": {
"avg": false,
"current": false,
"max": false,
"min": false,
"show": true,
"total": false,
"values": false
},
"lines": true,
"linewidth": 1,
"links": [],
"nullPointMode": "null",
"percentage": false,
"pointradius": 5,
"points": false,
"renderer": "flot",
"seriesOverrides": [],
"span": 6,
"stack": false,
"steppedLine": false,
"targets": [
{
"aggregator": "avg",
"dimensions": [
{
"key": "process_name",
"value": "log-persister"
}
],
"error": "",
"metric": "process.io.read_kbytes",
"period": "300",
"refId": "A"
},
{
"aggregator": "avg",
"dimensions": [
{
"key": "process_name",
"value": "log-persister"
}
],
"error": "",
"metric": "process.io.write_kbytes",
"period": "300",
"refId": "B"
}
],
"thresholds": [],
"timeFrom": null,
"timeShift": null,
"title": "IO Read/Write [kB]",
"tooltip": {
"shared": true,
"sort": 0,
"value_type": "individual"
},
"type": "graph",
"xaxis": {
"mode": "time",
"name": null,
"show": true,
"values": []
},
"yaxes": [
{
"format": "kbytes",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
},
{
"format": "short",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
}
]
}
],
"showTitle": false,
"titleSize": "h6",
"height": 250,
"repeat": null,
"repeatRowId": null,
"repeatIteration": null,
"collapse": false
},
{
"title": "Dashboard Row",
"panels": [
{
"aliasColors": {},
"bars": false,
"datasource": null,
"fill": 1,
"id": 6,
"legend": {
"avg": false,
"current": false,
"max": false,
"min": false,
"show": true,
"total": false,
"values": false
},
"lines": true,
"linewidth": 1,
"nullPointMode": "null",
"percentage": false,
"pointradius": 5,
"points": false,
"renderer": "flot",
"seriesOverrides": [],
"span": 12,
"stack": false,
"steppedLine": false,
"targets": [
{
"aggregator": "avg",
"dimensions": [
{
"key": "process_name",
"value": "log-persister"
}
],
"error": "",
"metric": "process.open_file_descriptors",
"period": "300",
"refId": "A"
}
],
"thresholds": [],
"timeFrom": null,
"timeShift": null,
"title": "Open File Descriptors",
"tooltip": {
"shared": true,
"sort": 0,
"value_type": "individual"
},
"type": "graph",
"xaxis": {
"mode": "time",
"name": null,
"show": true,
"values": []
},
"yaxes": [
{
"format": "short",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
},
{
"format": "short",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
}
]
}
],
"showTitle": false,
"titleSize": "h6",
"height": 250,
"repeat": null,
"repeatRowId": null,
"repeatIteration": null,
"collapse": false
}
]
}

View File

@ -0,0 +1,78 @@
# Kibana is served by a back end server. This controls which port to use.
server.port: %KIBANA_SERVICE_PORT%
# The host to bind the server to.
server.host: %KIBANA_SERVICE_HOST%
# If you are running kibana behind a proxy, and want to mount it at a path,
# specify that path here. The basePath can't end in a slash.
server.basePath: /dashboard/monitoring/logs_proxy
# The Elasticsearch instance to use for all your queries.
elasticsearch.url: http://%ES_SERVICE_BIND_HOST%:%ES_SERVICE_BIND_PORT%
# preserve_elasticsearch_host true will send the hostname specified in `elasticsearch`. If you set it to false,
# then the host you use to connect to *this* Kibana instance will be sent.
elasticsearch.preserveHost: true
# Kibana uses an index in Elasticsearch to store saved searches, visualizations
# and dashboards. It will create a new index if it doesn't already exist.
kibana.index: ".kibana"
# The default application to load.
kibana.defaultAppId: "discover"
# If your Elasticsearch is protected with basic auth, these are the user credentials
# used by the Kibana server to perform maintenance on the kibana_index at startup. Your Kibana
# users will still need to authenticate with Elasticsearch (which is proxied through
# the Kibana server)
# elasticsearch.username: "user"
# elasticsearch.password: "pass"
# SSL for outgoing requests from the Kibana Server to the browser (PEM formatted)
# server.ssl.cert: /path/to/your/server.crt
# server.ssl.key: /path/to/your/server.key
# Optional setting to validate that your Elasticsearch backend uses the same key files (PEM formatted)
# elasticsearch.ssl.cert: /path/to/your/client.crt
# elasticsearch.ssl.key: /path/to/your/client.key
# If you need to provide a CA certificate for your Elasticsearch instance, put
# the path of the pem file here.
# elasticsearch.ssl.ca: /path/to/your/CA.pem
# Set to false to have a complete disregard for the validity of the SSL
# certificate.
# elasticsearch.ssl.verify: true
# Time in milliseconds to wait for elasticsearch to respond to pings, defaults to
# request_timeout setting
elasticsearch.pingTimeout: 1500
# Time in milliseconds to wait for responses from the back end or elasticsearch.
# This must be > 0
elasticsearch.requestTimeout: 300000
# Time in milliseconds for Elasticsearch to wait for responses from shards.
# Set to 0 to disable.
elasticsearch.shardTimeout: 0
# Time in milliseconds to wait for Elasticsearch at Kibana startup before retrying
elasticsearch.startupTimeout: 5000
# Set the path to where you would like the process id file to be created.
# pid.file: /var/run/kibana.pid
# Set this to true to suppress all logging output.
logging.silent: false
# Set this to true to suppress all logging output except for error messages.
logging.quiet: false
# Set this to true to log all events, including system usage information and all requests.
logging.verbose: true
# monasca-kibana-plugin configuration
monasca-kibana-plugin.auth_uri: %KEYSTONE_AUTH_URI%
monasca-kibana-plugin.enabled: true
monasca-kibana-plugin.cookie.isSecure: false
optimize.useBundleCache: false

View File

@ -0,0 +1,3 @@
init_config:
instances:
- url: http://{{IP}}:9200

View File

@ -3,6 +3,6 @@ init_config:
ssh_port: 22
ssh_timeout: 0.5
instances:
- alive_test: ssh
- alive_test: ssh
host_name: 127.0.0.1
name: 127.0.0.1

View File

@ -1,17 +1,17 @@
init_config: null
instances:
- name: keystone
dimensions:
service: keystone
timeout: 3
url: http://127.0.0.1/identity
- name: mysql
dimensions:
service: mysql
timeout: 3
url: http://127.0.0.1:3306
- name: influxdb
dimensions:
service: influxdb
timeout: 3
url: http://127.0.0.1:8086/ping
- name: keystone
dimensions:
service: keystone
timeout: 3
url: http://127.0.0.1/identity
- name: mysql
dimensions:
service: mysql
timeout: 3
url: http://127.0.0.1:3306
- name: influxdb
dimensions:
service: influxdb
timeout: 3
url: http://127.0.0.1:8086/ping

View File

@ -1,14 +1,14 @@
init_config:
instances:
- built_by: Kafka
consumer_groups:
1_metrics:
metrics: []
- built_by: Kafka
consumer_groups:
1_metrics:
metrics: []
thresh-event:
events: []
thresh-metric:
metrics: []
kafka_connect_str: 127.0.0.1:9092
name: 127.0.0.1:9092
per_partition: false
thresh-metric:
metrics: []
kafka_connect_str: 127.0.0.1:9092
name: 127.0.0.1:9092
per_partition: false

View File

@ -1,8 +1,8 @@
init_config:
instances:
- built_by: MySQL
name: mysql
server: 127.0.0.1
port: 3306
user: root
pass: secretdatabase
- built_by: MySQL
name: mysql
server: 127.0.0.1
port: 3306
user: root
pass: secretdatabase

View File

@ -1,87 +1,87 @@
init_config:
instances:
- name: influxd
detailed: true
dimensions:
service: influxd
exact_match: false
search_string:
- influxd
- name: monasca-statsd
detailed: true
dimensions:
service: monasca-statsd
exact_match: false
search_string:
- monasca-statsd
- name: monasca-notification
detailed: true
dimensions:
service: monasca-notification
exact_match: false
search_string:
- monasca-notification
- name: persister
detailed: true
dimensions:
service: persister
exact_match: false
search_string:
- persister
- name: storm
detailed: true
dimensions:
service: storm
exact_match: false
search_string:
- storm
- name: monasca-api
detailed: true
dimensions:
service: uwsgi
exact_match: false
search_string:
- uwsgi
- name: monasca-collector
detailed: true
dimensions:
service: monasca-collector
exact_match: false
search_string:
- monasca-collector
- name: memcached
detailed: true
dimensions:
service: memcached
exact_match: false
search_string:
- memcached
- name: monasca-forwarder
detailed: true
dimensions:
service: monasca-forwarder
exact_match: false
search_string:
- monasca-forwarder
- name: zookeeper
detailed: true
dimensions:
service: zookeeper
exact_match: false
search_string:
- zookeeper
- name: kafka
detailed: true
dimensions:
service: kafka
exact_match: false
search_string:
- kafka
- name: mysqld
detailed: true
dimensions:
service: mysqld
exact_match: false
search_string:
- mysqld
- name: influxd
detailed: true
dimensions:
service: influxd
exact_match: false
search_string:
- influxd
- name: monasca-statsd
detailed: true
dimensions:
service: monasca-statsd
exact_match: false
search_string:
- monasca-statsd
- name: monasca-notification
detailed: true
dimensions:
service: monasca-notification
exact_match: false
search_string:
- monasca-notification
- name: persister
detailed: true
dimensions:
service: persister
exact_match: false
search_string:
- persister
- name: storm
detailed: true
dimensions:
service: storm
exact_match: false
search_string:
- storm
- name: monasca-api
detailed: true
dimensions:
service: uwsgi
exact_match: false
search_string:
- uwsgi
- name: monasca-collector
detailed: true
dimensions:
service: monasca-collector
exact_match: false
search_string:
- monasca-collector
- name: memcached
detailed: true
dimensions:
service: memcached
exact_match: false
search_string:
- memcached
- name: monasca-forwarder
detailed: true
dimensions:
service: monasca-forwarder
exact_match: false
search_string:
- monasca-forwarder
- name: zookeeper
detailed: true
dimensions:
service: zookeeper
exact_match: false
search_string:
- zookeeper
- name: kafka
detailed: true
dimensions:
service: kafka
exact_match: false
search_string:
- kafka
- name: mysqld
detailed: true
dimensions:
service: mysqld
exact_match: false
search_string:
- mysqld

View File

@ -0,0 +1,46 @@
#
# Copyright 2016 FUJITSU LIMITED
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Tails the local syslog file and ships each entry to the Monasca Log API,
# tagging everything with service=system and hostname=devstack.
input {
file {
# Every event gets a "dimensions" field so downstream consumers can
# group logs by originating service.
add_field => { "dimensions" => { "service" => "system" }}
path => "/var/log/syslog"
tags => ["syslog"]
}
}
filter {
if "syslog" in [tags] {
# Join continuation lines: anything not starting with a syslog
# timestamp is folded into the previous event.
multiline {
negate => "true"
pattern => "^%{SYSLOGTIMESTAMP}"
what => "previous"
}
}
}
output {
# %...% placeholders are substituted by the devstack plugin at deploy time.
monasca_log_api {
monasca_log_api_url => "%MONASCA_API_URI_V2%"
keystone_api_url => "%KEYSTONE_AUTH_URI%"
project_name => "mini-mon"
username => "monasca-agent"
password => "password"
user_domain_name => "default"
project_domain_name => "default"
dimensions => [ "hostname:devstack" ]
}
}

View File

@ -0,0 +1,81 @@
# Copyright 2016 FUJITSU LIMITED
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Consumes transformed log events from Kafka and emits a "log.<level>"
# counter metric (value 1) for each warning/error log, publishing the
# metrics back to the Kafka "metrics" topic.
input {
kafka {
zk_connect => "127.0.0.1:2181"
topic_id => "transformed-log"
group_id => "log-metric"
consumer_id => "monasca_log_metrics"
consumer_threads => "4"
}
}
filter {
# drop logs that have not set log level
if "level" not in [log] {
drop { periodic_flush => true }
} else {
# Normalize the level to lowercase for the comparison below.
ruby {
code => "
log_level = event['log']['level'].downcase
event['log']['level'] = log_level
"
}
}
# drop logs with log level not in warning,error
# NOTE(review): bareword array elements — Logstash conditionals normally
# require quoted strings, i.e. ["warning", "error"]; confirm this parses
# as intended on the targeted Logstash version.
if [log][level] not in [warning,error] {
drop { periodic_flush => true }
}
# Build a Monasca metric envelope from the surviving log event.
# NOTE(review): the downcase here repeats the normalization already done
# in the else-branch above — presumably harmless but redundant.
ruby {
code => "
log_level = event['log']['level'].downcase
log_ts = Time.now.to_f * 1000.0
# metric name
metric_name = 'log.%s' % log_level
# build metric
metric = {}
metric['name'] = metric_name
metric['timestamp'] = log_ts
metric['value'] = 1
metric['dimensions'] = event['log']['dimensions']
metric['value_meta'] = {}
event['metric'] = metric.to_hash
"
}
# Strip everything except the metric itself before publishing.
mutate {
remove_field => ["log", "@version", "@timestamp", "log_level_original", "tags"]
}
}
output {
kafka {
bootstrap_servers => "127.0.0.1:9092"
topic_id => "metrics"
client_id => "monasca_log_metrics"
compression_type => "none"
}
}

View File

@ -0,0 +1,71 @@
#
# Copyright 2016 FUJITSU LIMITED
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Reads transformed log events from Kafka, flattens the Monasca log
# envelope and persists each event into a per-tenant, per-day
# Elasticsearch index ("logs-<tenant>-<YYYY-MM-DD>").
input {
kafka {
zk_connect => "127.0.0.1:2181"
topic_id => "transformed-log"
group_id => "logstash-persister"
}
}
filter {
# Use the log's own UNIX timestamp as the event timestamp.
date {
match => ["[log][timestamp]", "UNIX"]
target => "@timestamp"
}
date {
match => ["creation_time", "UNIX"]
target => "creation_time"
}
# Extract the date portion of @timestamp to build the daily index name.
grok {
match => {
"[@timestamp]" => "^(?<index_date>\d{4}-\d{2}-\d{2})"
}
}
# Promote each log dimension to a top-level field on the event.
if "dimensions" in [log] {
ruby {
code => "
fieldHash = event['log']['dimensions']
fieldHash.each do |key, value|
event[key] = value
end
"
}
}
mutate {
add_field => {
message => "%{[log][message]}"
log_level => "%{[log][level]}"
tenant => "%{[meta][tenantId]}"
region => "%{[meta][region]}"
}
# NOTE(review): removes "_index_date" (leading underscore) while the
# grok capture above is named "index_date" — verify which spelling is
# actually present on the event; "index_date" is still needed below.
remove_field => ["@version", "host", "type", "tags" ,"_index_date", "meta", "log"]
}
}
output {
# %ES_SERVICE_BIND_HOST% is substituted by the devstack plugin.
elasticsearch {
index => "logs-%{tenant}-%{index_date}"
document_type => "log"
hosts => ["%ES_SERVICE_BIND_HOST%"]
flush_size => 500
}
}

View File

@ -0,0 +1,87 @@
#
# Copyright 2016 FUJITSU LIMITED
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Reads raw log events from the Kafka "log" topic, derives a normalized
# log level (from text markers or Bunyan numeric codes) and republishes
# the enriched events to the "transformed-log" topic.
input {
kafka {
zk_connect => "127.0.0.1:2181"
topic_id => "log"
group_id => "transformer-logstash-consumer"
}
}
filter {
# Cheap first pass: only scan the first 50 characters of the message.
ruby {
code => "event['message_tmp'] = event['log']['message'][0..49]"
}
# Match either a textual level keyword or a Bunyan-style "level": NN.
grok {
match => {
"[message_tmp]" => "(?i)(?<log_level>AUDIT|CRITICAL|DEBUG|INFO|TRACE|ERR(OR)?|WARN(ING)?)|\"level\":\s?(?<log_level>\d{2})"
}
}
# Fall back to scanning the full message if the prefix had no match.
if ! [log_level] {
grok {
match => {
"[log][message]" => "(?i)(?<log_level>AUDIT|CRITICAL|DEBUG|INFO|TRACE|ERR(OR)?|WARN(ING)?)|\"level\":\s?(?<log_level>\d{2})"
}
}
}
# Map syslog abbreviations and Bunyan numeric codes onto canonical
# capitalized level names; anything unrecognized is capitalized as-is,
# and missing levels become 'Unknown'.
ruby {
init => "
LOG_LEVELS_MAP = {
# SYSLOG
'warn' => :Warning,
'err' => :Error,
# Bunyan errcodes
'10' => :Trace,
'20' => :Debug,
'30' => :Info,
'40' => :Warning,
'50' => :Error,
'60' => :Fatal
}
"
code => "
if event['log_level']
# keep original value
log_level = event['log_level'].downcase
if LOG_LEVELS_MAP.has_key?(log_level)
event['log_level_original'] = event['log_level']
event['log_level'] = LOG_LEVELS_MAP[log_level]
else
event['log_level'] = log_level.capitalize
end
else
event['log_level'] = 'Unknown'
end
"
}
mutate {
add_field => {
"[log][level]" => "%{log_level}"
}
# remove temporary fields
remove_field => ["log_level", "message_tmp"]
}
}
output {
# %...% placeholders are substituted by the devstack plugin.
kafka {
bootstrap_servers => "%KAFKA_SERVICE_HOST%:%KAFKA_SERVICE_PORT%"
topic_id => "transformed-log"
}
}

747
devstack/lib/monasca-log.sh Normal file
View File

@ -0,0 +1,747 @@
#!/bin/bash
#
# Copyright 2016-2017 FUJITSU LIMITED
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Save the caller's xtrace/errexit settings so they can be restored later,
# then run this library with tracing off and errexit on.
_XTRACE_MON_LOG=$(set +o | grep xtrace)
set +o xtrace
_ERREXIT_MON_LOG=$(set +o | grep errexit)
set -o errexit
# configuration bits of various services
LOG_PERSISTER_DIR=$DEST/monasca-log-persister
LOG_TRANSFORMER_DIR=$DEST/monasca-log-transformer
LOG_METRICS_DIR=$DEST/monasca-log-metrics
LOG_AGENT_DIR=$DEST/monasca-log-agent
ELASTICSEARCH_DIR=$DEST/elasticsearch
ELASTICSEARCH_CFG_DIR=$ELASTICSEARCH_DIR/config
ELASTICSEARCH_LOG_DIR=$LOGDIR/elasticsearch
ELASTICSEARCH_DATA_DIR=$DATA_DIR/elasticsearch
KIBANA_DIR=$DEST/kibana
KIBANA_CFG_DIR=$KIBANA_DIR/config
LOGSTASH_DIR=$DEST/logstash
# Bind/publish endpoints default to the devstack SERVICE_HOST.
ES_SERVICE_BIND_HOST=${ES_SERVICE_BIND_HOST:-${SERVICE_HOST}}
ES_SERVICE_BIND_PORT=${ES_SERVICE_BIND_PORT:-9200}
ES_SERVICE_PUBLISH_HOST=${ES_SERVICE_PUBLISH_HOST:-${SERVICE_HOST}}
ES_SERVICE_PUBLISH_PORT=${ES_SERVICE_PUBLISH_PORT:-9300}
KIBANA_SERVICE_HOST=${KIBANA_SERVICE_HOST:-${SERVICE_HOST}}
KIBANA_SERVICE_PORT=${KIBANA_SERVICE_PORT:-5601}
KIBANA_SERVER_BASE_PATH=${KIBANA_SERVER_BASE_PATH:-"/dashboard/monitoring/logs_proxy"}
# NOTE(review): if MONASCA_API_BASE_URI already includes a scheme this
# produces "https://https://..." — confirm against the monasca-api plugin.
MONASCA_LOG_API_BASE_URI=https://${MONASCA_API_BASE_URI}/logs
# Start a service through devstack's run_process and pause briefly so it
# has a chance to come up before the caller proceeds.
# $1 - service name handed to run_process
# $2 - command line for the service
# $3 - seconds to sleep after starting (default: 1)
run_process_sleep() {
    local name=$1
    local cmd=$2
    local sleepTime=${3:-1}
    run_process "$name" "$cmd"
    # Quote to prevent word splitting if a caller passes e.g. "2 3".
    sleep "${sleepTime}"
}
# Return 0 when any logstash-based service is enabled.
# NOTE(review): "&&" and "||" have equal precedence and associate left,
# so "return 0" fires if ANY is_service_enabled call succeeds; when none
# are enabled the function returns the non-zero status of the last call.
is_logstash_required() {
is_service_enabled monasca-log-persister \
|| is_service_enabled monasca-log-transformer \
|| is_service_enabled monasca-log-metrics \
|| is_service_enabled monasca-log-agent \
&& return 0
}
# TOP_LEVEL functions called from devstack coordinator
###############################################################################
# Install base dependencies (ELK stack, Node.js, gate config dir) before the
# log services themselves are installed.
function pre_install_logs_services {
    install_elk
    install_nodejs
    install_gate_config_holder
}
# Install the log-pipeline components. The deprecated standalone
# monasca-log-api is only installed when USE_OLD_LOG_API is "true".
function install_monasca_log {
    build_kibana_plugin
    install_log_agent
    # Bug fix: `if $USE_OLD_LOG_API = true` executed the variable's value as
    # a command with arguments "=" and "true"; use an explicit string test.
    if [[ "$USE_OLD_LOG_API" == "true" ]]; then
        install_old_log_api
    fi
}
# Install the ELK stack pieces (Logstash, Elasticsearch, Kibana).
function install_elk {
    install_logstash
    install_elasticsearch
    install_kibana
}
# Create the directory whose contents are collected as gate job logs.
function install_gate_config_holder {
    sudo install -d -o $STACK_USER $GATE_CONFIGURATION_DIR
}
# Install monasca-statsd from git when it is listed in LIBS_FROM_GIT.
function install_monasca_statsd {
    if use_library_from_git "monasca-statsd"; then
        git_clone_by_name "monasca-statsd"
        setup_dev_lib "monasca-statsd"
    fi
}
# Configure every enabled piece of the log pipeline (Kafka topics, ELK,
# the API, and the logstash-based workers).
function configure_monasca_log {
    configure_kafka
    configure_elasticsearch
    configure_kibana
    install_kibana_plugin

    # Bug fix: `if $USE_OLD_LOG_API = true` executed the variable's value as
    # a command; use an explicit string test.
    if [[ "$USE_OLD_LOG_API" == "true" ]]; then
        configure_old_monasca_log_api
    fi
    configure_monasca_log_api

    configure_monasca_log_transformer
    configure_monasca_log_metrics
    configure_monasca_log_persister
    configure_monasca_log_agent
}
# Initialize log management (Keystone accounts/endpoints).
function init_monasca_log {
    enable_log_management
}
# Load the bundled Grafana dashboard definitions (only when horizon runs).
function init_monasca_grafana_dashboards {
    if is_service_enabled horizon; then
        echo_summary "Init Grafana dashboards"

        sudo python "${PLUGIN_FILES}"/grafana/grafana.py "${PLUGIN_FILES}"/grafana/dashboards.d
    fi
}
# Install the deprecated standalone monasca-log-api from source and the
# server it will be deployed with (mod_wsgi, uwsgi or gunicorn, selected
# via MONASCA_LOG_API_DEPLOY).
function install_old_log_api {

    if python3_enabled; then
        enable_python3_package monasca-log-api
    fi

    echo_summary "Installing monasca-log-api"

    git_clone $MONASCA_LOG_API_REPO $MONASCA_LOG_API_DIR $MONASCA_LOG_API_BRANCH
    setup_develop $MONASCA_LOG_API_DIR

    install_keystonemiddleware
    install_monasca_statsd

    if [ "$MONASCA_LOG_API_DEPLOY" == "mod_wsgi" ]; then
        install_apache_wsgi
    elif [ "$MONASCA_LOG_API_DEPLOY" == "uwsgi" ]; then
        pip_install uwsgi
    else
        pip_install gunicorn
    fi

    # mod_wsgi and uwsgi deployments may terminate TLS in Apache
    if [ "$MONASCA_LOG_API_DEPLOY" != "gunicorn" ]; then
        if is_ssl_enabled_service "monasca-log-api"; then
            enable_mod_ssl
        fi
    fi
}
# Write the uwsgi configuration for the deprecated standalone
# monasca-log-api, mounting it under the /logs path.
function configure_old_monasca_log_api {
    MONASCA_LOG_API_BIN_DIR=$(get_python_exec_prefix)
    MONASCA_LOG_API_WSGI=$MONASCA_LOG_API_BIN_DIR/monasca-log-api-wsgi

    if is_service_enabled monasca-log-api; then
        echo_summary "Configuring monasca-log-api"

        # ensure a fresh uwsgi config on every stack run
        rm -rf $MONASCA_LOG_API_UWSGI_CONF
        install -m 600 $MONASCA_LOG_API_DIR/etc/monasca/log-api-uwsgi.ini $MONASCA_LOG_API_UWSGI_CONF

        write_uwsgi_config "$MONASCA_LOG_API_UWSGI_CONF" "$MONASCA_LOG_API_WSGI" "/logs"
    fi
}
# Generate and install the config files for the deprecated standalone
# monasca-log-api and wire it to Kafka and Keystone.
function configure_old_monasca_log_api_core {
    # Put config files in ``$MONASCA_LOG_API_CONF_DIR`` for everyone to find
    sudo install -d -o $STACK_USER $MONASCA_LOG_API_CONF_DIR
    sudo install -m 700 -d -o $STACK_USER $MONASCA_LOG_API_CACHE_DIR
    sudo install -d -o $STACK_USER $MONASCA_LOG_API_LOG_DIR

    # ensure fresh installation of configuration files
    rm -rf $MONASCA_LOG_API_CONF $MONASCA_LOG_API_PASTE $MONASCA_LOG_API_LOGGING_CONF

    # render the sample config via oslo-config-generator, then install it
    $MONASCA_LOG_API_BIN_DIR/oslo-config-generator \
        --config-file $MONASCA_LOG_API_DIR/config-generator/monasca-log-api.conf \
        --output-file /tmp/monasca-log-api.conf

    install -m 600 /tmp/monasca-log-api.conf $MONASCA_LOG_API_CONF && rm -rf /tmp/monasca-log-api.conf
    install -m 600 $MONASCA_LOG_API_DIR/etc/monasca/log-api-paste.ini $MONASCA_LOG_API_PASTE
    install -m 600 $MONASCA_LOG_API_DIR/etc/monasca/log-api-logging.conf $MONASCA_LOG_API_LOGGING_CONF

    # configure monasca-log-api.conf
    iniset "$MONASCA_LOG_API_CONF" DEFAULT log_config_append $MONASCA_LOG_API_LOGGING_CONF
    iniset "$MONASCA_LOG_API_CONF" service region $REGION_NAME

    iniset "$MONASCA_LOG_API_CONF" log_publisher kafka_url $KAFKA_SERVICE_HOST:$KAFKA_SERVICE_PORT
    iniset "$MONASCA_LOG_API_CONF" log_publisher topics log

    iniset "$MONASCA_LOG_API_CONF" kafka_healthcheck kafka_url $KAFKA_SERVICE_HOST:$KAFKA_SERVICE_PORT
    iniset "$MONASCA_LOG_API_CONF" kafka_healthcheck kafka_topics log

    iniset "$MONASCA_LOG_API_CONF" roles_middleware path "/v2.0/log"
    iniset "$MONASCA_LOG_API_CONF" roles_middleware default_roles monasca-user
    iniset "$MONASCA_LOG_API_CONF" roles_middleware agent_roles monasca-agent
    iniset "$MONASCA_LOG_API_CONF" roles_middleware delegate_roles admin

    # configure keystone middleware
    configure_auth_token_middleware "$MONASCA_LOG_API_CONF" "admin" $MONASCA_LOG_API_CACHE_DIR
    iniset "$MONASCA_LOG_API_CONF" keystone_authtoken region_name $REGION_NAME
    iniset "$MONASCA_LOG_API_CONF" keystone_authtoken project_name "admin"
    iniset "$MONASCA_LOG_API_CONF" keystone_authtoken password $ADMIN_PASSWORD

    # insecure
    if is_service_enabled tls-proxy; then
        iniset "$MONASCA_LOG_API_CONF" keystone_authtoken insecure False
    fi

    # configure log-api-paste.ini
    iniset "$MONASCA_LOG_API_PASTE" server:main bind $MONASCA_LOG_API_SERVICE_HOST:$MONASCA_LOG_API_SERVICE_PORT
    iniset "$MONASCA_LOG_API_PASTE" server:main chdir $MONASCA_LOG_API_DIR
    iniset "$MONASCA_LOG_API_PASTE" server:main workers $API_WORKERS
}
# Drop monasca-agent check plugin configs in place, substitute the local
# address/hostname into them, and restart the collector.
function init_agent {
    echo_summary "Init Monasca agent"

    sudo cp -f "${PLUGIN_FILES}"/monasca-agent/http_check.yaml /etc/monasca/agent/conf.d/http_check.yaml
    sudo cp -f "${PLUGIN_FILES}"/monasca-agent/process.yaml /etc/monasca/agent/conf.d/process.yaml
    sudo cp -f "${PLUGIN_FILES}"/monasca-agent/elastic.yaml /etc/monasca/agent/conf.d/elastic.yaml

    # replace the {{IP}} placeholder with the first IPv4 address on eth1
    sudo sed -i "s/{{IP}}/$(ip -o -4 addr list eth1 | awk '{print $4}' | cut -d/ -f1 | head -1)/" /etc/monasca/agent/conf.d/*.yaml
    sudo sed -i "s/127\.0\.0\.1/$(hostname)/" /etc/monasca/agent/conf.d/*.yaml

    sudo systemctl restart monasca-collector
}
# Stop the log pipeline, agent first and storage backends last; each stop is
# best-effort so unstack continues past already-dead processes.
function stop_monasca_log {
    stop_process "monasca-log-agent" || true
    stop_monasca_log_api
    stop_process "monasca-log-metrics" || true
    stop_process "monasca-log-persister" || true
    stop_process "monasca-log-transformer" || true
    stop_process "kibana" || true
    stop_process "elasticsearch" || true
}
# Start the log pipeline, storage backends first and agent last. The
# deprecated standalone log-api is started only when USE_OLD_LOG_API=true.
function start_monasca_log {
    start_elasticsearch
    start_kibana
    start_monasca_log_transformer
    start_monasca_log_metrics
    start_monasca_log_persister
    # Bug fix: `if $USE_OLD_LOG_API = true` executed the variable's value as
    # a command; use an explicit string test.
    if [[ "$USE_OLD_LOG_API" == "true" ]]; then
        start_monasca_log_api
    fi
    start_monasca_log_agent
}
# Remove everything the log pipeline installed, mirroring the install order
# in reverse. Also cleans monasca-log-metrics, which was previously never
# cleaned even though clean_monasca_log_metrics exists and the service is
# stopped in stop_monasca_log.
function clean_monasca_log {
    clean_monasca_log_agent
    clean_monasca_log_api
    clean_monasca_log_metrics
    clean_monasca_log_persister
    clean_monasca_log_transformer
    clean_kibana
    clean_elasticsearch
    clean_logstash
    clean_nodejs
    clean_gate_config_holder
}
###############################################################################
# Enable the merged logs endpoint inside monasca-api (the new home of the
# log ingestion API) and register the Keystone accounts.
function configure_monasca_log_api {
    if is_service_enabled monasca-log; then
        echo_summary "Configuring monasca-api"
        iniset "$MONASCA_API_CONF" DEFAULT enable_logs_api "true"
        iniset "$MONASCA_API_CONF" kafka logs_topics "log"

        create_log_management_accounts
    fi
}
# Download and unpack Logstash when any logstash-based worker is enabled.
function install_logstash {
    if is_logstash_required; then
        echo_summary "Installing Logstash ${LOGSTASH_VERSION}"

        local tarball=logstash-${LOGSTASH_VERSION}.tar.gz
        local url=http://download.elastic.co/logstash/logstash/${tarball}

        local tarball_path
        tarball_path=$(get_extra_file ${url})

        tar xzf ${tarball_path} -C $DEST
        sudo chown -R $STACK_USER $DEST/logstash-${LOGSTASH_VERSION}
        ln -sf $DEST/logstash-${LOGSTASH_VERSION} $LOGSTASH_DIR
    fi
}
# Remove the Logstash installation, its symlink and the cached tarball.
function clean_logstash {
    if ! is_logstash_required; then
        return 0
    fi
    echo_summary "Cleaning Logstash ${LOGSTASH_VERSION}"
    sudo rm -rf $LOGSTASH_DIR || true
    sudo rm -rf $FILES/logstash-${LOGSTASH_VERSION}.tar.gz || true
    sudo rm -rf $DEST/logstash-${LOGSTASH_VERSION} || true
}
# Download and unpack Elasticsearch when the service is enabled.
function install_elasticsearch {
    if is_service_enabled elasticsearch; then
        echo_summary "Installing ElasticSearch ${ELASTICSEARCH_VERSION}"

        local tarball=elasticsearch-${ELASTICSEARCH_VERSION}.tar.gz
        local url=https://download.elastic.co/elasticsearch/release/org/elasticsearch/distribution/tar/elasticsearch/${ELASTICSEARCH_VERSION}/${tarball}

        local tarball_path
        tarball_path=$(get_extra_file ${url})

        tar xzf ${tarball_path} -C $DEST
        sudo chown -R $STACK_USER $DEST/elasticsearch-${ELASTICSEARCH_VERSION}
        ln -sf $DEST/elasticsearch-${ELASTICSEARCH_VERSION} $ELASTICSEARCH_DIR
    fi
}
# Create the Elasticsearch data/log/template dirs, install the config file
# and substitute the %...% placeholders with the DevStack settings.
function configure_elasticsearch {
    if is_service_enabled elasticsearch; then
        echo_summary "Configuring ElasticSearch ${ELASTICSEARCH_VERSION}"

        local templateDir=$ELASTICSEARCH_CFG_DIR/templates

        for dir in $ELASTICSEARCH_LOG_DIR $templateDir $ELASTICSEARCH_DATA_DIR; do
            sudo install -m 755 -d -o $STACK_USER $dir
        done

        sudo cp -f "${PLUGIN_FILES}"/elasticsearch/elasticsearch.yml $ELASTICSEARCH_CFG_DIR/elasticsearch.yml
        sudo chown -R $STACK_USER $ELASTICSEARCH_CFG_DIR/elasticsearch.yml
        sudo chmod 0644 $ELASTICSEARCH_CFG_DIR/elasticsearch.yml

        # fill in bind/publish addresses and data/log paths
        sudo sed -e "
            s|%ES_SERVICE_BIND_HOST%|$ES_SERVICE_BIND_HOST|g;
            s|%ES_SERVICE_BIND_PORT%|$ES_SERVICE_BIND_PORT|g;
            s|%ES_SERVICE_PUBLISH_HOST%|$ES_SERVICE_PUBLISH_HOST|g;
            s|%ES_SERVICE_PUBLISH_PORT%|$ES_SERVICE_PUBLISH_PORT|g;
            s|%ES_DATA_DIR%|$ELASTICSEARCH_DATA_DIR|g;
            s|%ES_LOG_DIR%|$ELASTICSEARCH_LOG_DIR|g;
        " -i $ELASTICSEARCH_CFG_DIR/elasticsearch.yml

        # expose the rendered config in the gate logs
        ln -sf $ELASTICSEARCH_CFG_DIR/elasticsearch.yml $GATE_CONFIGURATION_DIR/elasticsearch.yml
    fi
}
# Remove the Elasticsearch installation, config, logs and data.
function clean_elasticsearch {
    if is_service_enabled elasticsearch; then
        echo_summary "Cleaning Elasticsearch ${ELASTICSEARCH_VERSION}"
        # Bug fix: these four variables were missing the "$" sigil, so the
        # command removed literal names like "ELASTICSEARCH_DIR" and leaked
        # the real directories.
        sudo rm -rf $ELASTICSEARCH_DIR || true
        sudo rm -rf $ELASTICSEARCH_CFG_DIR || true
        sudo rm -rf $ELASTICSEARCH_LOG_DIR || true
        sudo rm -rf $ELASTICSEARCH_DATA_DIR || true
        sudo rm -rf $FILES/elasticsearch-${ELASTICSEARCH_VERSION}.tar.gz || true
        sudo rm -rf $DEST/elasticsearch-${ELASTICSEARCH_VERSION} || true
    fi
}
# Start Elasticsearch and wait a short grace period for it to come up.
function start_elasticsearch {
    if is_service_enabled elasticsearch; then
        echo_summary "Starting ElasticSearch ${ELASTICSEARCH_VERSION}"
        # 5 extra seconds to ensure that ES started properly
        local esSleepTime=${ELASTICSEARCH_SLEEP_TIME:-5}
        run_process_sleep "elasticsearch" "$ELASTICSEARCH_DIR/bin/elasticsearch" $esSleepTime
    fi
}
# Download and unpack Kibana when the service is enabled.
function install_kibana {
    if is_service_enabled kibana; then
        echo_summary "Installing Kibana ${KIBANA_VERSION}"

        local tarball=kibana-${KIBANA_VERSION}.tar.gz
        local url=http://download.elastic.co/kibana/kibana/${tarball}

        local tarball_path
        tarball_path=$(get_extra_file ${url})

        tar xzf ${tarball_path} -C $DEST
        sudo chown -R $STACK_USER $DEST/kibana-${KIBANA_VERSION}
        ln -sf $DEST/kibana-${KIBANA_VERSION} $KIBANA_DIR
    fi
}
# Install the Kibana config file and substitute the %...% placeholders with
# the DevStack host/port/Keystone settings.
function configure_kibana {
    if is_service_enabled kibana; then
        echo_summary "Configuring Kibana ${KIBANA_VERSION}"

        sudo install -m 755 -d -o $STACK_USER $KIBANA_CFG_DIR

        sudo cp -f "${PLUGIN_FILES}"/kibana/kibana.yml $KIBANA_CFG_DIR/kibana.yml
        sudo chown -R $STACK_USER $KIBANA_CFG_DIR/kibana.yml
        sudo chmod 0644 $KIBANA_CFG_DIR/kibana.yml

        sudo sed -e "
            s|%KIBANA_SERVICE_HOST%|$KIBANA_SERVICE_HOST|g;
            s|%KIBANA_SERVICE_PORT%|$KIBANA_SERVICE_PORT|g;
            s|%KIBANA_SERVER_BASE_PATH%|$KIBANA_SERVER_BASE_PATH|g;
            s|%ES_SERVICE_BIND_HOST%|$ES_SERVICE_BIND_HOST|g;
            s|%ES_SERVICE_BIND_PORT%|$ES_SERVICE_BIND_PORT|g;
            s|%KEYSTONE_AUTH_URI%|$KEYSTONE_AUTH_URI|g;
        " -i $KIBANA_CFG_DIR/kibana.yml

        # expose the rendered config in the gate logs
        ln -sf $KIBANA_CFG_DIR/kibana.yml $GATE_CONFIGURATION_DIR/kibana.yml
    fi
}
# Install the locally-built monasca-kibana-plugin tarball into Kibana,
# removing any previous copy first.
function install_kibana_plugin {
    if is_service_enabled kibana; then
        echo_summary "Install Kibana plugin"

        # note(trebskit) that needs to happen after kibana received
        # its configuration otherwise the plugin fails to be installed

        local pkg=file://$DEST/monasca-kibana-plugin.tar.gz
        $KIBANA_DIR/bin/kibana plugin -r monasca-kibana-plugin
        $KIBANA_DIR/bin/kibana plugin -i monasca-kibana-plugin -u $pkg
    fi
}
# Remove the Kibana installation, its cached tarball and configuration.
function clean_kibana {
    if ! is_service_enabled kibana; then
        return 0
    fi
    echo_summary "Cleaning Kibana ${KIBANA_VERSION}"
    sudo rm -rf $KIBANA_DIR || true
    sudo rm -rf $FILES/kibana-${KIBANA_VERSION}.tar.gz || true
    sudo rm -rf $KIBANA_CFG_DIR || true
}
# Start Kibana and wait for it to load (it is slow to come up).
function start_kibana {
    if is_service_enabled kibana; then
        echo_summary "Starting Kibana ${KIBANA_VERSION}"
        local kibanaSleepTime=${KIBANA_SLEEP_TIME:-90}     # kibana takes some time to load up
        local kibanaCFG="$KIBANA_CFG_DIR/kibana.yml"
        run_process_sleep "kibana" "$KIBANA_DIR/bin/kibana --config $kibanaCFG" $kibanaSleepTime
    fi
}
# Install the logstash config for the log persister (Kafka -> Elasticsearch)
# and substitute the Elasticsearch address placeholder.
function configure_monasca_log_persister {
    if is_service_enabled monasca-log-persister; then
        echo_summary "Configuring monasca-log-persister"

        sudo install -m 755 -d -o $STACK_USER $LOG_PERSISTER_DIR

        sudo cp -f "${PLUGIN_FILES}"/monasca-log-persister/persister.conf $LOG_PERSISTER_DIR/persister.conf
        sudo chown $STACK_USER $LOG_PERSISTER_DIR/persister.conf
        sudo chmod 0640 $LOG_PERSISTER_DIR/persister.conf

        sudo sed -e "
            s|%ES_SERVICE_BIND_HOST%|$ES_SERVICE_BIND_HOST|g;
        " -i $LOG_PERSISTER_DIR/persister.conf

        # expose the rendered config in the gate logs
        ln -sf $LOG_PERSISTER_DIR/persister.conf $GATE_CONFIGURATION_DIR/log-persister.conf
    fi
}
# Remove the log persister's configuration directory.
function clean_monasca_log_persister {
    if ! is_service_enabled monasca-log-persister; then
        return 0
    fi
    echo_summary "Cleaning monasca-log-persister"
    sudo rm -rf $LOG_PERSISTER_DIR || true
}
# Start the log persister as a logstash process.
function start_monasca_log_persister {
    if is_service_enabled monasca-log-persister; then
        echo_summary "Starting monasca-log-persister"
        local logstash="$LOGSTASH_DIR/bin/logstash"
        run_process "monasca-log-persister" "$logstash -f $LOG_PERSISTER_DIR/persister.conf"
    fi
}
# Install the logstash config for the log transformer (log -> transformed-log
# Kafka topics) and substitute the Kafka address placeholders.
function configure_monasca_log_transformer {
    if is_service_enabled monasca-log-transformer; then
        echo_summary "Configuring monasca-log-transformer"

        sudo install -m 755 -d -o $STACK_USER $LOG_TRANSFORMER_DIR

        sudo cp -f "${PLUGIN_FILES}"/monasca-log-transformer/transformer.conf $LOG_TRANSFORMER_DIR/transformer.conf
        sudo chown $STACK_USER $LOG_TRANSFORMER_DIR/transformer.conf
        sudo chmod 0640 $LOG_TRANSFORMER_DIR/transformer.conf

        sudo sed -e "
            s|%KAFKA_SERVICE_HOST%|$KAFKA_SERVICE_HOST|g;
            s|%KAFKA_SERVICE_PORT%|$KAFKA_SERVICE_PORT|g;
        " -i $LOG_TRANSFORMER_DIR/transformer.conf

        # expose the rendered config in the gate logs
        ln -sf $LOG_TRANSFORMER_DIR/transformer.conf $GATE_CONFIGURATION_DIR/log-transformer.conf
    fi
}
# Remove the log transformer's configuration directory.
function clean_monasca_log_transformer {
    if ! is_service_enabled monasca-log-transformer; then
        return 0
    fi
    echo_summary "Cleaning monasca-log-transformer"
    sudo rm -rf $LOG_TRANSFORMER_DIR || true
}
# Start the log transformer as a logstash process.
function start_monasca_log_transformer {
    if is_service_enabled monasca-log-transformer; then
        echo_summary "Starting monasca-log-transformer"
        local logstash="$LOGSTASH_DIR/bin/logstash"
        run_process "monasca-log-transformer" "$logstash -f $LOG_TRANSFORMER_DIR/transformer.conf"
    fi
}
# Install the logstash config for the log-metrics job (derives metrics from
# log events) and substitute the Kafka address placeholders.
function configure_monasca_log_metrics {
    if is_service_enabled monasca-log-metrics; then
        echo_summary "Configuring monasca-log-metrics"

        sudo install -m 755 -d -o $STACK_USER $LOG_METRICS_DIR

        sudo cp -f "${PLUGIN_FILES}"/monasca-log-metrics/log-metrics.conf $LOG_METRICS_DIR/log-metrics.conf
        sudo chown $STACK_USER $LOG_METRICS_DIR/log-metrics.conf
        sudo chmod 0640 $LOG_METRICS_DIR/log-metrics.conf

        sudo sed -e "
            s|%KAFKA_SERVICE_HOST%|$KAFKA_SERVICE_HOST|g;
            s|%KAFKA_SERVICE_PORT%|$KAFKA_SERVICE_PORT|g;
        " -i $LOG_METRICS_DIR/log-metrics.conf

        # expose the rendered config in the gate logs
        ln -sf $LOG_METRICS_DIR/log-metrics.conf $GATE_CONFIGURATION_DIR/log-metrics.conf
    fi
}
# Remove the log-metrics configuration directory.
function clean_monasca_log_metrics {
    if ! is_service_enabled monasca-log-metrics; then
        return 0
    fi
    echo_summary "Cleaning monasca-log-metrics"
    sudo rm -rf $LOG_METRICS_DIR || true
}
# Start the log-metrics job as a logstash process.
function start_monasca_log_metrics {
    if is_service_enabled monasca-log-metrics; then
        echo_summary "Starting monasca-log-metrics"
        local logstash="$LOGSTASH_DIR/bin/logstash"
        run_process "monasca-log-metrics" "$logstash -f $LOG_METRICS_DIR/log-metrics.conf"
    fi
}
# Install the logstash output plugin the log agent uses to ship logs to the
# monasca log API.
function install_log_agent {
    if is_service_enabled monasca-log-agent; then
        echo_summary "Installing monasca-log-agent [monasca-output-plugin]"

        $LOGSTASH_DIR/bin/plugin install --version \
            "${LOGSTASH_OUTPUT_MONASCA_VERSION}" logstash-output-monasca_log_api
    fi
}
# Install the log agent's logstash config and substitute the API and
# Keystone endpoint placeholders.
function configure_monasca_log_agent {
    if is_service_enabled monasca-log-agent; then
        echo_summary "Configuring monasca-log-agent"

        sudo install -m 755 -d -o $STACK_USER $LOG_AGENT_DIR

        sudo cp -f "${PLUGIN_FILES}"/monasca-log-agent/agent.conf $LOG_AGENT_DIR/agent.conf
        sudo chown $STACK_USER $LOG_AGENT_DIR/agent.conf
        sudo chmod 0640 $LOG_AGENT_DIR/agent.conf

        # note the agent authenticates against Keystone v3
        sudo sed -e "
            s|%MONASCA_API_URI_V2%|$MONASCA_API_URI_V2|g;
            s|%KEYSTONE_AUTH_URI%|$KEYSTONE_AUTH_URI_V3|g;
        " -i $LOG_AGENT_DIR/agent.conf

        # expose the rendered config in the gate logs
        ln -sf $LOG_AGENT_DIR/agent.conf $GATE_CONFIGURATION_DIR/log-agent.conf
    fi
}
# Remove the log agent's configuration directory.
function clean_monasca_log_agent {
    if ! is_service_enabled monasca-log-agent; then
        return 0
    fi
    echo_summary "Cleaning monasca-log-agent"
    sudo rm -rf $LOG_AGENT_DIR || true
}
# Start the deprecated standalone monasca-log-api under the deployment
# model selected by MONASCA_LOG_API_DEPLOY (mod_wsgi, uwsgi or gunicorn),
# wait until it answers, and optionally front it with a TLS proxy.
function start_monasca_log_api {
    if is_service_enabled monasca-log-api; then
        echo_summary "Starting monasca-log-api"

        local service_port=$MONASCA_LOG_API_SERVICE_PORT
        local service_protocol=$MONASCA_LOG_API_SERVICE_PROTOCOL
        # with tls-proxy the backend itself listens on the internal port
        # over plain http; the proxy terminates TLS on the public port
        if is_service_enabled tls-proxy; then
            service_port=$MONASCA_LOG_API_SERVICE_PORT_INT
            service_protocol="http"
        fi
        local service_uri

        if [ "$MONASCA_LOG_API_DEPLOY" == "mod_wsgi" ]; then
            local enabled_site_file
            enabled_site_file=$(apache_site_config_for monasca-log-api)
            service_uri=$service_protocol://$MONASCA_LOG_API_SERVICE_HOST/logs/v3.0
            if [ -f ${enabled_site_file} ]; then
                enable_apache_site monasca-log-api
                restart_apache_server
                tail_log monasca-log-api /var/log/$APACHE_NAME/monasca-log-api.log
            fi
        elif [ "$MONASCA_LOG_API_DEPLOY" == "uwsgi" ]; then
            service_uri=$service_protocol://$MONASCA_LOG_API_SERVICE_HOST/logs/v3.0
            run_process "monasca-log-api" "$MONASCA_LOG_API_BIN_DIR/uwsgi --ini $MONASCA_LOG_API_UWSGI_CONF" ""
        else
            service_uri=$service_protocol://$MONASCA_LOG_API_SERVICE_HOST:$service_port
            run_process "monasca-log-api" "$MONASCA_LOG_API_BIN_DIR/gunicorn --paste $MONASCA_LOG_API_PASTE" ""
        fi

        echo "Waiting for monasca-log-api to start..."
        if ! wait_for_service $SERVICE_TIMEOUT $service_uri; then
            die $LINENO "monasca-log-api did not start"
        fi

        if is_service_enabled tls-proxy; then
            start_tls_proxy monasca-log-api '*' $MONASCA_LOG_API_SERVICE_PORT $MONASCA_LOG_API_SERVICE_HOST $MONASCA_LOG_API_SERVICE_PORT_INT
        fi

        restart_service memcached
    fi
}
# Start the log agent as a logstash process; runs as root so it can read
# the system log files it ships.
function start_monasca_log_agent {
    if is_service_enabled monasca-log-agent; then
        echo_summary "Starting monasca-log-agent"
        local logstash="$LOGSTASH_DIR/bin/logstash"
        run_process "monasca-log-agent" "$logstash -f $LOG_AGENT_DIR/agent.conf" "root" "root"
    fi
}
# Install Node.js and npm (needed to build the Kibana plugin) and tune npm
# for use behind a proxy.
function install_nodejs {
    if is_service_enabled kibana; then
        # refresh installation
        apt_get install nodejs npm
        (
            # security: fetch packages from the registry over https; the
            # previous plain-http URL allowed tampering in transit
            npm config set registry "https://registry.npmjs.org/"; \
            npm config set proxy "${HTTP_PROXY}"; \
            npm set strict-ssl false;
        )
    fi
}
# Remove Node.js and npm again when tearing the stack down.
function clean_nodejs {
    if is_service_enabled kibana; then
        echo_summary "Cleaning Node.js"
        apt_get purge nodejs npm
    fi
}
# Remove the directory that exposed rendered configs to the gate logs.
function clean_gate_config_holder {
    sudo rm -rf $GATE_CONFIGURATION_DIR || true
}
# Clone and build the monasca-kibana-plugin, leaving the resulting tarball
# symlinked at $DEST/monasca-kibana-plugin.tar.gz for install_kibana_plugin.
function build_kibana_plugin {
    if is_service_enabled kibana; then
        echo "Building Kibana plugin"

        git_clone $MONASCA_KIBANA_PLUGIN_REPO $MONASCA_KIBANA_PLUGIN_DIR \
            $MONASCA_KIBANA_PLUGIN_BRANCH

        pushd $MONASCA_KIBANA_PLUGIN_DIR

        local monasca_kibana_plugin_version
        # Bug fix: `print obj["version"]` is Python-2-only syntax and is a
        # SyntaxError under Python 3 (the default here); print(...) works on
        # both interpreters.
        monasca_kibana_plugin_version="$(python -c 'import json; \
            obj = json.load(open("package.json")); print(obj["version"])')"

        npm install
        npm run package

        local pkg=$MONASCA_KIBANA_PLUGIN_DIR/target/monasca-kibana-plugin-${monasca_kibana_plugin_version}.tar.gz
        local easyPkg=$DEST/monasca-kibana-plugin.tar.gz

        ln -sf $pkg $easyPkg

        popd
    fi
}
# Create the Kafka topics the log pipeline reads from and writes to.
function configure_kafka {
    echo_summary "Configuring Kafka topics"

    /opt/kafka/bin/kafka-topics.sh --create --zookeeper localhost:2181 \
        --replication-factor 1 --partitions 4 --topic log

    /opt/kafka/bin/kafka-topics.sh --create --zookeeper localhost:2181 \
        --replication-factor 1 --partitions 4 --topic transformed-log
}
# Delete the log pipeline's Kafka topics; best-effort during unstack.
function delete_kafka_topics {
    echo_summary "Deleting Kafka topics"

    /opt/kafka/bin/kafka-topics.sh --delete --zookeeper localhost:2181 \
        --topic log || true
    /opt/kafka/bin/kafka-topics.sh --delete --zookeeper localhost:2181 \
        --topic transformed-log || true
}
# Register the Keystone services and endpoints for log ingestion ("logs",
# pointing either at the old standalone log-api or at the merged monasca-api)
# and log search ("logs-search", currently pointing at Kibana).
function create_log_management_accounts {
    if is_service_enabled monasca-log; then
        echo_summary "Enable Log Management in Keystone"

        # note(trebskit) following points to Kibana which is bad,
        # but we do not have search-api in monasca-log-api now
        # this code will be removed in future
        local log_search_url="http://$KIBANA_SERVICE_HOST:$KIBANA_SERVICE_PORT/"

        get_or_create_service "logs" "logs" "Monasca Log service"
        # Bug fix: `if $USE_OLD_LOG_API = true` executed the variable's
        # value as a command; use an explicit string test.
        if [[ "$USE_OLD_LOG_API" == "true" ]]; then
            get_or_create_endpoint \
                "logs" \
                "$REGION_NAME" \
                "$MONASCA_LOG_API_BASE_URI" \
                "$MONASCA_LOG_API_BASE_URI" \
                "$MONASCA_LOG_API_BASE_URI"
        else
            get_or_create_endpoint \
                "logs" \
                "$REGION_NAME" \
                "$MONASCA_API_URI_V2" \
                "$MONASCA_API_URI_V2" \
                "$MONASCA_API_URI_V2"
        fi

        get_or_create_service "logs-search" "logs-search" "Monasca Log search service"
        get_or_create_endpoint \
            "logs-search" \
            "$REGION_NAME" \
            "$log_search_url" \
            "$log_search_url" \
            "$log_search_url"
    fi
}
# Restore errexit
${_ERREXIT_MON_LOG}

# Restore xtrace
${_XTRACE_MON_LOG}

View File

@ -36,9 +36,23 @@ function configure_ui {
cp $MONASCA_UI_DIR/monitoring/config/local_settings.py \
$HORIZON_DIR/openstack_dashboard/local/local_settings.d/_50_monasca_ui_settings.py
local localSettings=$HORIZON_DIR/openstack_dashboard/local/local_settings.d/_50_monasca_ui_settings.py
sed -e "
s#getattr(settings, 'GRAFANA_URL', None)#{'RegionOne': \"http:\/\/${SERVICE_HOST}:3000\", }#g;
" -i $HORIZON_DIR/openstack_dashboard/local/local_settings.d/_50_monasca_ui_settings.py
" -i ${localSettings}
if is_service_enabled horizon && is_service_enabled kibana && is_service_enabled monasca-log; then
echo_summary "Configure Horizon with Kibana access"
sudo sed -e "
s|KIBANA_HOST = getattr(settings, 'KIBANA_HOST', 'http://192.168.10.6:5601/')|KIBANA_HOST = getattr(settings, 'KIBANA_HOST', 'http://${KIBANA_SERVICE_HOST}:${KIBANA_SERVICE_PORT}/')|g;
" -i ${localSettings}
sudo sed -e "
s|'ENABLE_LOG_MANAGEMENT_BUTTON', False|'ENABLE_LOG_MANAGEMENT_BUTTON', True|g;
" -i ${localSettings}
fi
if python3_enabled; then
DJANGO_SETTINGS_MODULE=openstack_dashboard.settings python3 "${MONASCA_BASE}"/horizon/manage.py collectstatic --noinput
DJANGO_SETTINGS_MODULE=openstack_dashboard.settings python3 "${MONASCA_BASE}"/horizon/manage.py compress --force

View File

@ -54,6 +54,7 @@ source ${MONASCA_API_DIR}/devstack/lib/profile.sh
source ${MONASCA_API_DIR}/devstack/lib/client.sh
source ${MONASCA_API_DIR}/devstack/lib/persister.sh
source ${MONASCA_API_DIR}/devstack/lib/storm.sh
source ${MONASCA_API_DIR}/devstack/lib/monasca-log.sh
# source lib/*
# Set default implementations to python
@ -99,6 +100,8 @@ if [[ "${MONASCA_API_USE_MOD_WSGI}" == 'True' && "${MONASCA_API_IMPLEMENTATION_L
else
MONASCA_API_BASE_URI=${MONASCA_API_SERVICE_PROTOCOL}://${MONASCA_API_SERVICE_HOST}:${MONASCA_API_SERVICE_PORT}
fi
MONASCA_API_URI_V2=${MONASCA_API_BASE_URI}/v2.0
# Files inside this directory will be visible in gates log
@ -168,7 +171,7 @@ function configure_system_encoding_format {
function extra_monasca {
echo_summary "Installing additional monasca components"
create_metric_accounts
create_accounts
install_monasca_agent
install_monascaclient
install_monasca_profile
@ -829,6 +832,15 @@ function configure_monasca_api_python {
# default settings
iniset "$MONASCA_API_CONF" DEFAULT region $REGION_NAME
iniset "$MONASCA_API_CONF" DEFAULT log_config_append $MONASCA_API_LOGGING_CONF
if $USE_OLD_LOG_API = true; then
iniset "$MONASCA_API_CONF" DEFAULT enable_logs_api false
else
if is_service_enabled monasca-log; then
iniset "$MONASCA_API_CONF" DEFAULT enable_logs_api true
else
iniset "$MONASCA_API_CONF" DEFAULT enable_logs_api false
fi
fi
# logging
iniset "$MONASCA_API_LOGGING_CONF" handler_file args "('$MONASCA_API_LOG_DIR/monasca-api.log', 'a', 104857600, 5)"
@ -1057,7 +1069,7 @@ function clean_monasca_thresh {
}
function create_metric_accounts {
function create_accounts {
local projects=("mini-mon" "admin" "demo")
declare -A users=(
@ -1103,6 +1115,27 @@ function create_metric_accounts {
"${MONASCA_API_URI_V2}" \
"${MONASCA_API_URI_V2}" \
"${MONASCA_API_URI_V2}"
if is_service_enabled monasca-log; then
local log_search_url="http://$KIBANA_SERVICE_HOST:$KIBANA_SERVICE_PORT/"
get_or_create_service "logs" "logs" "Monasca Log service"
get_or_create_endpoint \
"logs" \
"$REGION_NAME" \
"$MONASCA_API_URI_V2" \
"$MONASCA_API_URI_V2" \
"$MONASCA_API_URI_V2"
get_or_create_service "logs-search" "logs-search" "Monasca Log search service"
get_or_create_endpoint \
"logs-search" \
"$REGION_NAME" \
"$log_search_url" \
"$log_search_url" \
"$log_search_url"
fi
}
function install_keystone_client {
@ -1447,6 +1480,50 @@ if is_service_enabled monasca; then
fi
fi
# check for service enabled
if is_service_enabled monasca-log; then
if [[ "$1" == "stack" && "$2" == "pre-install" ]]; then
# Set up system services
echo_summary "Configuring Monasca Log Management system services"
pre_install_logs_services
elif [[ "$1" == "stack" && "$2" == "install" ]]; then
# Perform installation of service source
echo_summary "Installing Monasca Log Management"
install_monasca_log
elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then
# Configure after the other layer 1 and 2 services have been configured
echo_summary "Configuring Monasca Log Management"
configure_monasca_log
elif [[ "$1" == "stack" && "$2" == "extra" ]]; then
# Initialize and start the Monasca service
echo_summary "Initializing Monasca Log Management"
init_monasca_log
init_monasca_grafana_dashboards
if is_service_enabled monasca-agent; then
init_agent
fi
start_monasca_log
fi
if [[ "$1" == "unstack" ]]; then
# Shut down Monasca services
echo_summary "Unstacking Monasca Log Management"
stop_monasca_log
delete_kafka_topics
fi
if [[ "$1" == "clean" ]]; then
# Remove state and transient data
# Remember clean.sh first calls unstack.sh
echo_summary "Cleaning Monasca Log Management"
clean_monasca_log
fi
fi
#Restore errexit
$ERREXIT

View File

@ -82,6 +82,18 @@ enable_service monasca-agent
# monasca-cli depends on monasca-api
enable_service monasca-cli
#
# Monasca logs services
#
enable_service kibana
enable_service elasticsearch
enable_service monasca-log
enable_service monasca-log-persister
enable_service monasca-log-transformer
enable_service monasca-log-metrics
enable_service monasca-log-agent
#
# Dependent Software Versions
#
@ -110,6 +122,10 @@ STORM_VERSION=${STORM_VERSION:-1.2.2}
GO_VERSION=${GO_VERSION:-"1.7.1"}
NODE_JS_VERSION=${NODE_JS_VERSION:-"4.0.0"}
NVM_VERSION=${NVM_VERSION:-"0.32.1"}
KIBANA_VERSION=${KIBANA_VERSION:-4.6.3-linux-x86_64}
LOGSTASH_VERSION=${LOGSTASH_VERSION:-2.4.1}
ELASTICSEARCH_VERSION=${ELASTICSEARCH_VERSION:-2.4.6}
LOGSTASH_OUTPUT_MONASCA_VERSION=${LOGSTASH_OUTPUT_MONASCA_VERSION:-1.0.4}
# Path settings
MONASCA_BASE=${DEST}
@ -202,8 +218,34 @@ MONASCA_API_LOG_DIR=${MONASCA_API_LOG_DIR:-/var/log/monasca/api}
MONASCA_API_USE_MOD_WSGI=${MONASCA_API_USE_MOD_WSGI:-$ENABLE_HTTPD_MOD_WSGI_SERVICES}
MONASCA_API_UWSGI_CONF=${MONASCA_API_UWSGI_CONF:-$MONASCA_API_CONF_DIR/api-uwsgi.ini}
MONASCA_KIBANA_PLUGIN_REPO=${MONASCA_KIBANA_PLUGIN_REPO:-${GIT_BASE}/openstack/monasca-kibana-plugin.git}
MONASCA_KIBANA_PLUGIN_BRANCH=${MONASCA_KIBANA_PLUGIN_BRANCH:-master}
MONASCA_KIBANA_PLUGIN_DIR=${DEST}/monasca-kibana-plugin
# OLD LOG-API CONFIGURATION
MONASCA_LOG_API_SERVICE_HOST=${MONASCA_LOG_API_SERVICE_HOST:-${SERVICE_HOST}}
MONASCA_LOG_API_SERVICE_PORT=${MONASCA_LOG_API_SERVICE_PORT:-5607}
MONASCA_LOG_API_REPO=${MONASCA_LOG_API_REPO:-${GIT_BASE}/openstack/monasca-log-api.git}
MONASCA_LOG_API_BRANCH=${MONASCA_LOG_API_BRANCH:-master}
MONASCA_LOG_API_DIR=${DEST}/monasca-log-api
MONASCA_LOG_API_DEPLOY=uwsgi
MONASCA_LOG_API_CONF_DIR=${MONASCA_LOG_API_CONF_DIR:-/etc/monasca}
MONASCA_LOG_API_LOG_DIR=${MONASCA_LOG_API_LOG_DIR:-/var/log/monasca}
MONASCA_LOG_API_CACHE_DIR=${MONASCA_LOG_API_CACHE_DIR:-/var/cache/monasca-log-api}
MONASCA_LOG_API_WSGI_DIR=${MONASCA_LOG_API_WSGI_DIR:-/var/www/monasca-log-api}
MONASCA_LOG_API_CONF=${MONASCA_LOG_API_CONF:-$MONASCA_LOG_API_CONF_DIR/monasca-log-api.conf}
MONASCA_LOG_API_PASTE=${MONASCA_LOG_API_PASTE:-$MONASCA_LOG_API_CONF_DIR/log-api-paste.ini}
MONASCA_LOG_API_LOGGING_CONF=${MONASCA_LOG_API_LOGGING_CONF:-$MONASCA_LOG_API_CONF_DIR/log-api-logging.conf}
MONASCA_LOG_API_UWSGI_CONF=${MONASCA_LOG_API_UWSGI_CONF:-$MONASCA_LOG_API_CONF_DIR/log-api-uwsgi.ini}
USE_PYTHON3=${USE_PYTHON3:-true}
USE_OLD_LOG_API=${USE_OLD_LOG_API:-false}
## storm settings
STORM_UI_HOST=${STORM_UI_HOST:-${SERVICE_HOST}}
STORM_UI_PORT=${STORM_UI_PORT:-8089}
STORM_LOGVIEWER_PORT=${STORM_LOGVIEWER_PORT:-8090}
KAFKA_SERVICE_HOST=${KAFKA_SERVICE_HOST:-${SERVICE_HOST}}
KAFKA_SERVICE_PORT=${KAFKA_SERVICE_PORT:-9092}

View File

@ -3693,6 +3693,7 @@ ___
# License
(C) Copyright 2014-2016 Hewlett Packard Enterprise Development LP
(C) Copyright 2019 FUJITSU LIMITED
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

View File

@ -1,5 +1,5 @@
[loggers]
keys = root, sqlalchemy, kafka
keys = root, sqlalchemy, kafka, kafkalib
[handlers]
keys = console, file
@ -26,6 +26,12 @@ level = DEBUG
handlers = console, file
propagate = 0
[logger_kafkalib]
qualname = monasca_common.kafka_lib
level = INFO
handlers = console, file
propagate = 0
[handler_console]
class = logging.StreamHandler
args = (sys.stderr,)