# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Default values for monasca.
# This is a YAML-formatted file.
# Declare name/value pairs to be passed into your templates.
# name: value
---
images:
  tags:
    ks_user: docker.io/openstackhelm/heat:stein-ubuntu_bionic
    ks_service: docker.io/openstackhelm/heat:stein-ubuntu_bionic
    ks_endpoints: docker.io/openstackhelm/heat:stein-ubuntu_bionic
    influxdb_init: radial/busyboxplus:curl
    db_init: docker.io/openstackhelm/heat:stein-ubuntu_bionic
    monasca_db_sync: docker.io/openstackhelm/monasca:stein-ubuntu_bionic
    db_drop: docker.io/openstackhelm/heat:stein-ubuntu_bionic
    monasca_api: docker.io/openstackhelm/monasca_api:stein-ubuntu_bionic
    monasca_collector: docker.io/openstackhelm/monasca_agent:stein-ubuntu_bionic
    monasca_forwarder: docker.io/openstackhelm/monasca_forwarder:stein-ubuntu_bionic
    monasca_statsd: docker.io/openstackhelm/monasca_statsd:stein-ubuntu_bionic
    monasca_notification: docker.io/openstackhelm/monasca_notification:stein-ubuntu_bionic
    monasca_persister: docker.io/openstackhelm/monasca_persister:stein-ubuntu_bionic
    monasca_thresh: docker.io/openstackhelm/monasca_thresh:stein-ubuntu_bionic
    test: docker.io/openstackhelm/osh-selenium:latest-ubuntu_bionic
    dep_check: quay.io/airshipit/kubernetes-entrypoint:v1.0.0
    image_repo_sync: docker.io/docker:17.07.0
  pull_policy: "IfNotPresent"
  local_registry:
    active: false
    exclude:
      - dep_check
      - image_repo_sync
release_group: null
labels:
  agent:
    node_selector_key: openstack-compute-node
    node_selector_value: enabled
  api:
    node_selector_key: openstack-control-plane
    node_selector_value: enabled
  notification:
    node_selector_key: openstack-control-plane
    node_selector_value: enabled
  persister:
    node_selector_key: openstack-control-plane
    node_selector_value: enabled
  thresh:
    node_selector_key: openstack-control-plane
    node_selector_value: enabled
  job:
    node_selector_key: openstack-control-plane
    node_selector_value: enabled
network:
  api:
    ingress:
      public: true
      classes:
        namespace: "nginx"
        cluster: "nginx-cluster"
      annotations:
        nginx.ingress.kubernetes.io/rewrite-target: /
    external_policy_local: false
    node_port:
      enabled: false
      port: 31000
conf:
  agent:
    Api:
      amplifier: 0
      backlog_send_rate: 1000
      ca_file: null
      insecure: false
      max_batch_size: 0
      max_buffer_size: 1000
      max_measurement_buffer_size: -1
      service_type: monitoring
    Logging:
      disable_file_logging: false
      collector_log_file: /var/log/monasca/agent/collector.log
      forwarder_log_file: /var/log/monasca/agent/forwarder.log
      statsd_log_file: /var/log/monasca/agent/statsd.log
      enable_logrotate: true
      log_level: WARN
      log_to_event_viewer: false
      log_to_syslog: false
      syslog_host: None
      syslog_port: None
    Main:
      check_freq: 15
      autorestart: false
      collector_restart_interval: 24
      dimensions:
        service: monitoring
      # %AGENT_HOSTNAME% is replaced with the correct value in the init container.
      hostname: "%AGENT_HOSTNAME%"
      # %FORWARDER_IP% is replaced with the correct value in the init container.
      forwarder_url: "http://%FORWARDER_IP%:17123"
      num_collector_threads: 1
      pool_full_max_retries: 4
      sub_collection_warn: 6
      non_local_traffic: true
    Statsd:
      monasca_statsd_interval: 20
      monasca_statsd_port: 8125
      # monasca_statsd_forward_host: None
      # monasca_statsd_forward_port: 8125
  agent_plugins:
    # Each plugin config has two fields:
    #   auto_detect: <boolean> whether the plugin is auto-configured by monasca-setup.
    #   config: plugin-specific configuration.
    #     When auto_detect is false, config is rendered as the plugin's config YAML file.
    #     When auto_detect is true, config is passed as arguments to monasca-setup.
    libvirt:
      auto_detect: false
      config:
        init_config: {}
      # auto_detect: true
      # config:
      #   nova_refresh:
      #   vm_probation:
    # Custom check plugins. Refer to values_overrides/nvidia.yaml.
    check_scripts: {}
    # Custom detection plugins. Refer to values_overrides/nvidia.yaml.
    detection_scripts: {}
  monasca_api:
    DEFAULT:
      enable_logs_api: true
      region: RegionOne
      log_config_append: /etc/monasca/api-logging.conf
    messaging:
      driver: monasca_api.common.messaging.kafka_publisher:KafkaPublisher
    kafka:
      logs_topics: log
      uri: 127.0.0.1:9092
    repositories:
      metrics_driver: monasca_api.common.repositories.influxdb.metrics_repository:MetricsRepository
    cassandra:
      contact_points: 127.0.0.1
    influxdb:
      db_per_tenant: false
      database_name: monasca
    database:
      max_retries: -1
    keystone_authtoken:
      auth_type: password
      auth_version: v3
      memcache_security_strategy: ENCRYPT
    security:
      default_authorized_roles: monasca-user
      agent_authorized_roles: monasca-agent
      read_only_authorized_roles: monasca-read-only-user
      delegate_authorized_roles: monasca-agent
  monasca_api_paste:
    DEFAULT:
      name: monasca_api
    pipeline:main:
      pipeline: request_id auth api
    app:api:
      paste.app_factory: monasca_api.api.server:launch
    filter:auth:
      paste.filter_factory: monasca_api.healthcheck.keystone_protocol:filter_factory
    filter:request_id:
      paste.filter_factory: oslo_middleware.request_id:RequestId.factory
    server:main:
      use: egg:gunicorn#main
      host: 0.0.0.0
      port: 8070
      workers: 8
      worker-connections: 2000
      worker-class: eventlet
      timeout: 30
      backlog: 2048
      keepalive: 2
      proc_name: monasca-api
      loglevel: DEBUG
  logging:
    loggers:
      keys:
        - root
        - sqlalchemy
        - kafka
        - kafkalib
    handlers:
      keys:
        - stdout
        - stderr
        - "null"
    formatters:
      keys:
        - context
        - default
    logger_root:
      level: INFO
      handlers: 'stdout'
    logger_sqlalchemy:
      qualname: sqlalchemy.engine
      level: DEBUG
      handlers: 'stdout'
      propagate: 0
    logger_kafka:
      qualname: kafka
      level: DEBUG
      handlers: 'stdout'
      propagate: 0
    logger_kafkalib:
      qualname: monasca_common.kafka_lib
      level: INFO
      handlers: 'stdout'
      propagate: 0
    handler_null:
      class: logging.NullHandler
      formatter: default
      args: ()
    handler_stdout:
      class: StreamHandler
      args: (sys.stdout,)
      formatter: context
    handler_stderr:
      class: StreamHandler
      args: (sys.stderr,)
      formatter: context
    formatter_context:
      class: oslo_log.formatters.ContextFormatter
      datefmt: "%Y-%m-%d %H:%M:%S"
    formatter_default:
      format: "%(message)s"
      datefmt: "%Y-%m-%d %H:%M:%S"
  wsgi_monasca_api: |
    {{- $portInt := tuple "monitoring" "internal" "api" $ | include "helm-toolkit.endpoints.endpoint_port_lookup" }}
    Listen 0.0.0.0:{{ $portInt }}
    LogFormat "%h %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-Agent}i\"" combined
    LogFormat "%{X-Forwarded-For}i %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-Agent}i\"" proxy
    SetEnvIf X-Forwarded-For "^.*\..*\..*\..*" forwarded
    CustomLog /dev/stdout combined env=!forwarded
    CustomLog /dev/stdout proxy env=forwarded
    <VirtualHost *:{{ $portInt }}>
        WSGIDaemonProcess monasca-api processes=1 threads=1 user=monasca-api group=monasca-api display-name=%{GROUP}
        WSGIProcessGroup monasca-api
        WSGIScriptAlias / /var/www/cgi-bin/monasca/monasca-api-wsgi
        WSGIApplicationGroup %{GLOBAL}
        WSGIPassAuthorization On
        <IfVersion >= 2.4>
          ErrorLogFormat "%{cu}t %M"
        </IfVersion>
        ErrorLog /dev/stdout
        SetEnvIf X-Forwarded-For "^.*\..*\..*\..*" forwarded
        CustomLog /dev/stdout combined env=!forwarded
        CustomLog /dev/stdout proxy env=forwarded
    </VirtualHost>
  thresh_config:
    metricSpoutThreads: 2
    metricSpoutTasks: 2
    statsdConfig:
      host: "127.0.0.1"
      port: 8125
      prefix: monasca.storm.
      dimensions: !!map
        service: monitoring
        component: storm
    metricSpoutConfig:
      kafkaConsumerConfiguration:
        # See http://kafka.apache.org/documentation.html#api for semantics and defaults.
        topic: "metrics"
        numThreads: 1
        groupId: "thresh-metric"
        zookeeperConnect: "127.0.0.1:2181"
        consumerId: 1
        socketTimeoutMs: 30000
        socketReceiveBufferBytes: 65536
        fetchMessageMaxBytes: 1048576
        autoCommitEnable: true
        autoCommitIntervalMs: 60000
        queuedMaxMessageChunks: 10
        rebalanceMaxRetries: 4
        fetchMinBytes: 1
        fetchWaitMaxMs: 100
        rebalanceBackoffMs: 2000
        refreshLeaderBackoffMs: 200
        autoOffsetReset: largest
        consumerTimeoutMs: -1
        clientId: 1
        zookeeperSessionTimeoutMs: 60000
        zookeeperConnectionTimeoutMs: 60000
        zookeeperSyncTimeMs: 2000
    eventSpoutConfig:
      kafkaConsumerConfiguration:
        # See http://kafka.apache.org/documentation.html#api for semantics and defaults.
        topic: "events"
        numThreads: 1
        groupId: "thresh-event"
        zookeeperConnect: "127.0.0.1:2181"
        consumerId: 1
        socketTimeoutMs: 30000
        socketReceiveBufferBytes: 65536
        fetchMessageMaxBytes: 1048576
        autoCommitEnable: true
        autoCommitIntervalMs: 60000
        queuedMaxMessageChunks: 10
        rebalanceMaxRetries: 4
        fetchMinBytes: 1
        fetchWaitMaxMs: 100
        rebalanceBackoffMs: 2000
        refreshLeaderBackoffMs: 200
        autoOffsetReset: largest
        consumerTimeoutMs: -1
        clientId: 1
        zookeeperSessionTimeoutMs: 60000
        zookeeperConnectionTimeoutMs: 60000
        zookeeperSyncTimeMs: 2000
    kafkaProducerConfig:
      # See http://kafka.apache.org/documentation.html#api for semantics and defaults.
      topic: "alarm-state-transitions"
      metadataBrokerList: "kafka:9092"
      serializerClass: kafka.serializer.StringEncoder
      partitionerClass: ""
      requestRequiredAcks: 1
      requestTimeoutMs: 10000
      producerType: sync
      keySerializerClass: ""
      compressionCodec: none
      compressedTopics: ""
      messageSendMaxRetries: 3
      retryBackoffMs: 100
      topicMetadataRefreshIntervalMs: 600000
      queueBufferingMaxMs: 5000
      queueBufferingMaxMessages: 10000
      queueEnqueueTimeoutMs: -1
      batchNumMessages: 200
      sendBufferBytes: 102400
      clientId: Threshold_Engine
    sporadicMetricNamespaces:
      - foo
    database:
      driverClass: org.mariadb.jdbc.Driver
      url: "jdbc:mariadb://%THRESH_DB_URL%"
      user: "%THRESH_DB_USER%"
      password: "%THRESH_DB_PASSWORD%"
      properties:
        ssl: false
      # the maximum amount of time to wait on an empty pool before throwing an exception
      maxWaitForConnection: 1s
      # the SQL query to run when validating a connection's liveness
      validationQuery: "/* mysql Health Check */ SELECT 1"
      # the minimum number of connections to keep open
      minSize: 8
      # the maximum number of connections to keep open
      maxSize: 41
  notification_config:
    kafka:
      url: kafka:9092
    database:
      repo_driver: monasca_notification.common.repositories.mysql.mysql_repo:MysqlRepo
    email_notifier:
      grafana_url: grafana:3000
      # from_addr: string
      # server: email_server
      # port: email_port
      # timeout: 5 # min value is 1
      # user: email_user
      # password: email_password
    jira_notifier: {}
      # timeout: 5 # min value is 1
      # user:
      # password:
      # custom_formatter:
      # proxy:
    pagerduty_notifier: {}
      # timeout:
      # url:
    slack_notifier: {}
      # timeout:
      # insecure:
      # ca_certs:
      # proxy:
      # message_template:
    webhook_notifier: {}
      # timeout: 5
    keystone: {}
      # auth_url: 199.204.45.231/identity/v3
    notification_types:
      enabled:
        - email
        - pagerduty
        - webhook
        - jira
        - slack
    mysql: {}
    zookeeper:
      url: ["127.0.0.1:2181"]
  persister_config:
    DEFAULT:
      debug: "True"
      default_log_levels: "monasca_common.kafka_lib.client=INFO"
      logging_exception_prefix: "ERROR %(name)s %(instance)s"
      logging_default_format_string: "%(color)s%(levelname)s %(name)s [-%(color)s] %(instance)s%(color)s%(message)s"
      logging_context_format_string: "%(color)s%(levelname)s %(name)s [%(global_request_id)s %(request_id)s %(project_name)s %(user_name)s%(color)s] %(instance)s%(color)s%(message)s"
      logging_debug_format_suffix: "{{(pid=%(process)d) %(funcName)s %(pathname)s:%(lineno)d}}"
      use_syslog: "False"
    influxdb:
      database_name: monasca
      db_per_tenant: false
    kafka:
      num_processors: 1
    kafka_alarm_history:
      batch_size: 1
      topic: alarm-state-transitions
      group_id: 1_alarm-state-transitions
      uri: kafka:9092
    kafka_events: {}
    kafka_metrics:
      batch_size: 30
      topic: metrics
      group_id: 1_metrics
      uri: kafka:9092
    repositories:
      alarm_state_history_driver: monasca_persister.repositories.influxdb.alarm_state_history_repository:AlarmStateHistInfluxdbRepository
      metrics_driver: monasca_persister.repositories.influxdb.metrics_repository:MetricInfluxdbRepository
    zookeeper:
      uri: zookeeper:2181
  storm:
    java.library.path: "/usr/local/lib:/opt/local/lib:/usr/lib"
    storm.local.dir: "/var/storm"
    storm.zookeeper.servers:
      - "127.0.0.1"
    storm.zookeeper.port: 2181
    storm.zookeeper.retry.interval: 5000
    storm.zookeeper.retry.times: 60
    storm.zookeeper.root: /storm
    storm.zookeeper.session.timeout: 3000
    supervisor.slots.ports:
      - 6701
      - 6702
    supervisor.childopts: -Xmx256m
    worker.childopts: -Xmx1280m -XX:+UseConcMarkSweepGC -Dcom.sun.management.jmxremote
    nimbus.seeds: ["127.0.0.1"]
    nimbus.thrift.port: 6627
    nimbus.childopts: -Xmx256m
    ui.host: localhost
    ui.port: 8089
    ui.childopts: -Xmx768m
    logviewer.port: 8090
    logviewer.childopts: -Xmx128m
    transactional.zookeeper.servers:
      - "127.0.0.1"
    transactional.zookeeper.port: 2181
    transactional.zookeeper.root: /storm-transactional
    topology.acker.executors: 1
    topology.debug: False
  software:
    apache2:
      binary: apache2
      start_parameters: -DFOREGROUND
      conf_dir: /etc/apache2/conf-enabled
      site_dir: /etc/apache2/sites-enable
      mods_dir: /etc/apache2/mods-available
      a2enmod: null
      a2dismod: null
pod:
  probes:
    rpc_timeout: 60
    rpc_retries: 2
    api:
      default:
        liveness:
          enabled: true
          params:
            initialDelaySeconds: 120
            periodSeconds: 90
            timeoutSeconds: 70
        readiness:
          enabled: true
          params:
            initialDelaySeconds: 80
            periodSeconds: 90
            timeoutSeconds: 70
    forwarder:
      default:
        liveness:
          enabled: true
          params:
            initialDelaySeconds: 120
            periodSeconds: 90
            timeoutSeconds: 70
        readiness:
          enabled: true
          params:
            initialDelaySeconds: 80
            periodSeconds: 90
            timeoutSeconds: 70
    statsd:
      default: {}
  security_context:
    agent:
      pod:
        runAsUser: 42424
      container:
        agent_init:
          readOnlyRootFilesystem: false
          allowPrivilegeEscalation: false
          runAsUser: 0
        monasca_collector:
          readOnlyRootFilesystem: false
          allowPrivilegeEscalation: false
          runAsUser: 0
        monasca_forwarder:
          readOnlyRootFilesystem: false
          allowPrivilegeEscalation: false
          runAsUser: 0
        monasca_statsd:
          readOnlyRootFilesystem: false
          allowPrivilegeEscalation: false
          runAsUser: 0
    api:
      pod:
        runAsUser: 42424
      container:
        monasca-api:
          readOnlyRootFilesystem: false
          allowPrivilegeEscalation: false
          runAsUser: 0
    notification:
      pod:
        runAsUser: 42424
      container:
        monasca-notification:
          readOnlyRootFilesystem: false
          allowPrivilegeEscalation: false
          runAsUser: 0
    persister:
      pod:
        runAsUser: 42424
      container:
        monasca-persister:
          readOnlyRootFilesystem: false
          allowPrivilegeEscalation: false
          runAsUser: 0
    thresh:
      pod:
        runAsUser: 42424
      container:
        monasca-thresh:
          readOnlyRootFilesystem: false
          allowPrivilegeEscalation: false
          runAsUser: 0
    db_sync:
      pod:
        runAsUser: 42424
      container:
        monasca_db_sync:
          readOnlyRootFilesystem: false
          allowPrivilegeEscalation: false
          runAsUser: 0
    influxdb_init:
      pod:
        runAsUser: 42424
      container:
        influxdb_init:
          readOnlyRootFilesystem: false
          allowPrivilegeEscalation: false
          runAsUser: 0
    test:
      pod:
        runAsUser: 42424
      container:
        monasca_test:
          readOnlyRootFilesystem: true
          allowPrivilegeEscalation: false
  affinity:
    anti:
      type:
        default: preferredDuringSchedulingIgnoredDuringExecution
      topologyKey:
        default: kubernetes.io/hostname
      weight:
        default: 10
  dns_policy: "ClusterFirstWithHostNet"
  mounts:
    monasca_agent:
      init_container: null
      monasca_collector:
        volumeMounts:
          - name: varliblibvirt
            mountPath: /var/lib/libvirt
            readOnly: true
          - mountPath: /lib/modules
            name: libmodules
            readOnly: true
          - name: varlibnova
            mountPath: /var/lib/nova
        volumes:
          - name: libmodules
            hostPath:
              path: /lib/modules
          - name: varliblibvirt
            hostPath:
              path: /var/lib/libvirt
          - name: varlibnova
            hostPath:
              path: /var/lib/nova
      monasca_forwarder:
        volumeMounts:
        volumes:
      monasca_statsd:
        volumeMounts:
        volumes:
    monasca_db_init:
      init_container: null
      monasca_db_init:
        volumeMounts:
        volumes:
    monasca_db_sync:
      init_container: null
      monasca_db_sync:
        volumeMounts:
        volumes:
    monasca_api:
      init_container: null
      monasca_api:
        volumeMounts:
        volumes:
    monasca_notification:
      init_container: null
      monasca_notification:
        volumeMounts:
        volumes:
    monasca_persister:
      init_container: null
      monasca_persister:
        volumeMounts:
        volumes:
    monasca_thresh:
      init_container: null
      monasca_thresh:
        volumeMounts:
        volumes:
    monasca_tests:
      init_container: null
      monasca_tests:
        volumeMounts:
        volumes:
  replicas:
    agent: 1
    api: 1
    notification: 1
    persister: 1
    thresh: 1
  lifecycle:
    upgrades:
      deployments:
        revision_history: 3
        pod_replacement_strategy: RollingUpdate
        rolling_update:
          max_unavailable: 1
          max_surge: 3
      daemonsets:
        pod_replacement_strategy: RollingUpdate
        agent:
          enabled: true
          min_ready_seconds: 0
          max_unavailable: 1
    disruption_budget:
      monasca:
        min_available: 0
    termination_grace_period:
      monasca:
        timeout: 30
  resources:
    enabled: false
    agent_init:
      requests:
        memory: "128Mi"
        cpu: "100m"
      limits:
        memory: "512Mi"
        cpu: "500m"
    api:
      requests:
        memory: "128Mi"
        cpu: "100m"
      limits:
        memory: "1024Mi"
        cpu: "2000m"
    collector:
      requests:
        memory: "128Mi"
        cpu: "100m"
      limits:
        memory: "512Mi"
        cpu: "500m"
    forwarder:
      requests:
        memory: "128Mi"
        cpu: "100m"
      limits:
        memory: "512Mi"
        cpu: "500m"
    statsd:
      requests:
        memory: "128Mi"
        cpu: "100m"
      limits:
        memory: "512Mi"
        cpu: "500m"
    notification:
      requests:
        memory: "128Mi"
        cpu: "100m"
      limits:
        memory: "512Mi"
        cpu: "500m"
    persister:
      requests:
        memory: "128Mi"
        cpu: "100m"
      limits:
        memory: "512Mi"
        cpu: "500m"
    thresh:
      requests:
        memory: "128Mi"
        cpu: "100m"
      limits:
        memory: "512Mi"
        cpu: "500m"
    jobs:
      db_init:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      db_sync:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      db_drop:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      image_repo_sync:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      influxdb_init:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      tests:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
bootstrap:
  enabled: false
  ks_user: monasca
  script: null
# Names of secrets used by bootstrap and environmental checks
secrets:
  identity:
    admin: monasca-keystone-admin
    monasca: monasca-keystone-user
    monasca_agent: monasca-keystone-agent
    monasca_read_only_user: monasca-keystone-ro
    test: monasca-keystone-test
  metrics_db: monasca-influxdb
  oslo_db:
    admin: monasca-db-admin
    monasca: monasca-db-user
  oslo_messaging:
    admin: monasca-rabbitmq-admin
    monasca: monasca-rabbitmq-user
  tls:
    monitoring:
      api:
        public: monasca-tls-public
        internal: monasca-tls-api
      statsd:
        public: monasca-statsd-tls-public
        internal: monasca-tls-statsd
    events_api:
      api:
        public: monasca-events-tls-public
        internal: monasca-events-tls-proxy
# typically overridden by environmental
# values, but should include all endpoints
# required by this chart
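# As an illustration only (the hostname below is made up, not a chart default),
# a public FQDN for the API is usually supplied through an override such as:
#
#   endpoints:
#     monitoring:
#       host_fqdn_override:
#         public:
#           host: monasca.example.com
#
# assuming the standard helm-toolkit endpoint lookups used by this chart.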
endpoints:
  cluster_domain_suffix: cluster.local
  monitoring:
    name: monasca
    hosts:
      default: monasca-api
      public: monasca-api
    host_fqdn_override:
      default: null
    path:
      default: /v2.0
    scheme:
      default: 'http'
    port:
      api:
        default: 8070
        public: 80
      statsd:
        default: 8125
        public: 8125
      forwarder:
        default: 17123
        public: 17123
  logs_search:
    name: monasca
    hosts:
      default: kibana
      public: kibana
    host_fqdn_override:
      default: null
    path:
      default: /
    scheme:
      default: 'http'
    port:
      api:
        default: 5601
        public: 80
  logs:
    name: monasca
    hosts:
      default: monasca-api
      public: monasca-api
    host_fqdn_override:
      default: null
    path:
      default: /v2.0
    scheme:
      default: 'http'
    port:
      api:
        default: 8070
        public: 80
  local_image_registry:
    name: docker-registry
    namespace: docker-registry
    hosts:
      default: localhost
      internal: docker-registry
      node: localhost
    host_fqdn_override:
      default: null
    port:
      registry:
        node: 5000
  identity:
    name: keystone
    auth:
      admin:
        region_name: RegionOne
        username: admin
        password: password
        project_name: admin
        user_domain_name: default
        project_domain_name: default
      monasca:
        role:
          - admin
          - monasca-user
        region_name: RegionOne
        username: monasca
        password: password
        project_name: admin
        user_domain_name: default
        project_domain_name: default
      monasca_agent:
        role: monasca-agent
        region_name: RegionOne
        username: monasca-agent
        password: password
        project_name: service
        user_domain_name: service
        project_domain_name: service
        interface: private
      monasca_read_only_user:
        role: monasca-read-only-user
        region_name: RegionOne
        username: monasca-read-only-user
        password: password
        project_name: service
        user_domain_name: service
        project_domain_name: service
    hosts:
      default: keystone
      internal: keystone-api
    host_fqdn_override:
      default: null
    path:
      default: /v3
    scheme:
      default: http
    port:
      api:
        default: 80
        internal: 5000
  oslo_cache:
    auth:
      # NOTE(portdirect): this is used to define the value for keystone
      # authtoken cache encryption key, if not set it will be populated
      # automatically with a random value, but to take advantage of
      # this feature all services should be set to use the same key,
      # and memcache service.
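      # For example, an override shared by every chart that uses the same
      # memcached might look like (illustrative value only):
      #   memcache_secret_key: a-long-random-shared-string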
      memcache_secret_key: null
    hosts:
      default: memcached
    host_fqdn_override:
      default: null
    port:
      memcache:
        default: 11211
  oslo_db:
    auth:
      admin:
        username: root
        password: password
        secret:
          tls:
            internal: mariadb-tls-direct
      monasca:
        username: monasca
        password: password
    hosts:
      default: mariadb
    host_fqdn_override:
      default: null
    path: /monasca
    scheme: mysql+pymysql
    port:
      mysql:
        default: 3306
  metrics_db:
    auth:
      admin:
        username: admin
        password: password
      monasca_api:
        username: monasca-api
        password: password
      monasca_persister:
        username: monasca-persister
        password: password
    hosts:
      default: influxdb
    host_fqdn_override:
      default: null
    path: /
    scheme: http
    port:
      influxdb:
        default: 8086
  # NOTE(tp6510): these endpoints allow for things like DNS lookups and ingress.
  # They are used to enable the Egress K8s network policy.
  kube_dns:
    namespace: kube-system
    name: kubernetes-dns
    hosts:
      default: kube-dns
    host_fqdn_override:
      default: null
    path:
      default: null
    scheme: http
    port:
      dns:
        default: 53
        protocol: UDP
  ingress:
    namespace: null
    name: ingress
    hosts:
      default: ingress
    port:
      ingress:
        default: 80
network_policy:
  monasca:
    ingress:
      - {}
    egress:
      - {}
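  # The empty rules above allow all traffic once the policy manifest is
  # enabled. A minimal sketch of a tighter egress override, assuming the
  # helm-toolkit network-policy template renders these lists verbatim into a
  # Kubernetes NetworkPolicy when manifests.network_policy is true:
  #   network_policy:
  #     monasca:
  #       egress:
  #         - to:
  #             - namespaceSelector:
  #                 matchLabels:
  #                   name: kube-system
  #           ports:
  #             - protocol: UDP
  #               port: 53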
dependencies:
  dynamic:
    common:
      local_image_registry:
        jobs:
          - image-repo-sync
        services:
          - endpoint: node
            service: local_image_registry
  static:
    agent:
      services:
        - endpoint: internal
          service: monitoring
    api:
      jobs:
        - monasca-db-sync
        - monasca-ks-service
        - monasca-ks-user
        - monasca-ks-endpoints
      services:
        - endpoint: internal
          service: oslo_db
        - endpoint: internal
          service: identity
    notification:
      jobs:
        - monasca-db-sync
      services:
        - endpoint: internal
          service: oslo_db
        # - kafka
    persister:
      jobs:
        - monasca-influxdb-init
      services:
        - endpoint: internal
          service: metrics_db
    thresh:
      services:
        - endpoint: internal
          service: oslo_db
        # - storm
    ks_endpoints:
      jobs:
        - monasca-ks-service
      services:
        - endpoint: internal
          service: identity
    ks_service:
      services:
        - endpoint: internal
          service: identity
    ks_user:
      services:
        - endpoint: internal
          service: identity
    db_drop:
      services:
        - endpoint: internal
          service: oslo_db
    db_init:
      services:
        - endpoint: internal
          service: oslo_db
    db_sync:
      jobs:
        - monasca-db-init
      services:
        - endpoint: internal
          service: oslo_db
    influxdb_init:
      services:
        - endpoint: internal
          service: metrics_db
manifests:
  certificates: false
  configmap_bin: true
  configmap_etc: true
  daemonset_agent: true
  deployment_agent: false
  deployment_api: true
  deployment_notification: true
  deployment_persister: true
  ingress_api: true
  job_db_init: true
  job_db_sync: true
  job_db_drop: false
  job_influxdb_init: true
  job_thresh: true
  job_image_repo_sync: true
  job_rabbit_init: false
  job_ks_endpoints: true
  job_ks_service: true
  job_ks_user: true
  network_policy: false
  secret_db: true
  secret_ingress_tls: true
  secret_influxdb: true
  secret_keystone: true
  service_ingress: true
  service: true
...