monasca-persister/docker/monasca-persister.conf.j2
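
# Jinja2 template for the monasca-persister configuration file. The Jinja
# placeholders below are presumably substituted from environment variables
# when the Docker image renders this template at container start-up. Any
# concrete values shown in the example comments further down are hypothetical
# illustrations, not defaults shipped with the image.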

[DEFAULT]
# Provide logging configuration
log_config_append = /etc/monasca/persister-logging.conf
# Show debugging output in logs (sets DEBUG log level output)
debug = {{ DEBUG }}
# Show more verbose log output (sets INFO log level output) if debug is False
verbose = {{ VERBOSE }}

[repositories]
{% if DATABASE_BACKEND | lower == 'cassandra' %}
# The cassandra driver to use for the metrics repository
metrics_driver = monasca_persister.repositories.cassandra.metrics_repository:MetricCassandraRepository
# The cassandra driver to use for the alarm state history repository
alarm_state_history_driver = monasca_persister.repositories.cassandra.alarm_state_history_repository:AlarmStateHistCassandraRepository
{% else %}
# The influxdb driver to use for the metrics repository
metrics_driver = monasca_persister.repositories.influxdb.metrics_repository:MetricInfluxdbRepository
# The influxdb driver to use for the alarm state history repository
alarm_state_history_driver = monasca_persister.repositories.influxdb.alarm_state_history_repository:AlarmStateHistInfluxdbRepository
# Don't exit on InfluxDB parse point errors
ignore_parse_point_error = {{ INFLUX_IGNORE_PARSE_POINT_ERROR }}
{% endif %}
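# The driver pair above is chosen once at render time from DATABASE_BACKEND;
# any value other than 'cassandra' falls through to the InfluxDB drivers.
# Hypothetical example environment setting: DATABASE_BACKEND=influxdb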

[zookeeper]
# Comma separated list of host:port
uri = {{ ZOOKEEPER_URI }}
partition_interval_recheck_seconds = 15
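# Hypothetical rendered example: uri = zookeeper:2181 for a single node, or
# zookeeper-0:2181,zookeeper-1:2181 for a multi-node ensemble.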

[kafka_alarm_history]
# Comma separated list of Kafka broker host:port.
uri = {{ KAFKA_URI }}
group_id = {{ KAFKA_ALARM_HISTORY_GROUP_ID }}
topic = alarm-state-transitions
consumer_id = 1
client_id = 1
batch_size = {{ KAFKA_ALARM_HISTORY_BATCH_SIZE }}
max_wait_time_seconds = {{ KAFKA_ALARM_HISTORY_WAIT_TIME }}
# The following 3 values are set to the kafka-python defaults
fetch_size_bytes = 4096
buffer_size = 4096
# 8 times buffer size
max_buffer_size = 32768
# Path in zookeeper for kafka consumer group partitioning algo
zookeeper_path = /persister_partitions/alarm-state-transitions
num_processors = {{ KAFKA_ALARM_HISTORY_PROCESSORS | default(1) }}
legacy_kafka_client_enabled = {{ KAFKA_LEGACY_CLIENT_ENABLED | default(false) }}
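# A batch is written to the database once batch_size messages have been read
# or max_wait_time_seconds has elapsed, whichever comes first.
# Hypothetical rendered example: batch_size = 1000, max_wait_time_seconds = 15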

[kafka_events]
# Comma separated list of Kafka broker host:port.
uri = {{ KAFKA_URI }}
enabled = {{ KAFKA_EVENTS_ENABLE | default(false) }}
group_id = 1_events
topic = monevents
batch_size = 1
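# Event persistence stays disabled unless KAFKA_EVENTS_ENABLE is set to a
# true value in the environment (the template defaults it to false).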

[kafka_metrics]
# Comma separated list of Kafka broker host:port
uri = {{ KAFKA_URI }}
group_id = {{ KAFKA_METRICS_GROUP_ID }}
topic = metrics
consumer_id = 1
client_id = 1
batch_size = {{ KAFKA_METRICS_BATCH_SIZE }}
max_wait_time_seconds = {{ KAFKA_METRICS_WAIT_TIME }}
# The following 3 values are set to the kafka-python defaults
fetch_size_bytes = 4096
buffer_size = 4096
# 8 times buffer size
max_buffer_size = 32768
# Path in zookeeper for kafka consumer group partitioning algo
zookeeper_path = /persister_partitions/metrics
num_processors = {{ KAFKA_METRICS_PROCESSORS | default(1) }}
legacy_kafka_client_enabled = {{ KAFKA_LEGACY_CLIENT_ENABLED | default(false) }}
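# Hypothetical rendered example for a small deployment:
#   group_id = 1_metrics, batch_size = 1000, max_wait_time_seconds = 15,
#   num_processors = 1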

{% if DATABASE_BACKEND | lower == 'cassandra' %}
[cassandra]
contact_points = {{ CASSANDRA_HOSTS }}
port = {{ CASSANDRA_PORT }}
keyspace = {{ CASSANDRA_KEY_SPACE }}
user = {{ CASSANDRA_USER }}
password = {{ CASSANDRA_PASSWORD }}
connection_timeout = {{ CASSANDRA_CONNECTION_TIMEOUT }}
max_definition_cache_size = {{ CASSANDRA_MAX_CACHE_SIZE }}
retention_policy = {{ CASSANDRA_RETENTION_POLICY }}
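# Hypothetical rendered example (9042 is the standard Cassandra CQL port):
#   contact_points = cassandra-0,cassandra-1,cassandra-2
#   port = 9042
#   keyspace = monasca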
{% else %}
[influxdb]
database_name = {{ INFLUX_DB }}
ip_address = {{ INFLUX_HOST }}
port = {{ INFLUX_PORT }}
user = {{ INFLUX_USER }}
password = {{ INFLUX_PASSWORD }}
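# Hypothetical rendered example (8086 is the default InfluxDB HTTP port):
#   database_name = mon
#   ip_address = influxdb
#   port = 8086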
{% endif %}