Merge "Merge Helm OSH-I values into static overrides"

Zuul, 2025-11-17 12:35:09 +00:00
committed by Gerrit Code Review
5 changed files with 1798 additions and 119 deletions


@@ -6,19 +6,109 @@
---
release_group: osh-openstack-libvirt
labels:
isApplication: false
agent:
libvirt:
node_selector_key: openstack-compute-node
node_selector_value: enabled
images:
tags:
libvirt: docker.io/starlingx/stx-libvirt:master-debian-stable-latest
libvirt_exporter: null
ceph_config_helper: 'docker.io/openstackhelm/ceph-config-helper:ubuntu_bionic-20201223'
dep_check: quay.io/airshipit/kubernetes-entrypoint:9ff5d2e488ad18187bccc48e9595f197d27110c4-ubuntu_jammy
image_repo_sync: null
kubectl: null
pre_apply_cleanup: docker.io/starlingx/stx-vault-manager:master-debian-stable-latest
pull_policy: "IfNotPresent"
local_registry:
active: false
exclude:
- dep_check
- image_repo_sync
network:
# provide what type of network wiring will be used
# possible options: openvswitch, linuxbridge, sriov
backend:
- openvswitch
endpoints:
cluster_domain_suffix: cluster.local
local_image_registry:
name: docker-registry
namespace: docker-registry
hosts:
default: localhost
internal: docker-registry
node: localhost
host_fqdn_override:
default: null
port:
registry:
node: 5000
oci_image_registry:
name: oci-image-registry
namespace: oci-image-registry
auth:
enabled: false
libvirt:
username: libvirt
password: password
hosts:
default: localhost
host_fqdn_override:
default: null
port:
registry:
default: null
libvirt_exporter:
port:
metrics:
default: 9474
network_policy:
libvirt:
ingress:
- {}
egress:
- {}
ceph_client:
configmap: ceph-etc
user_secret_name: cinder-volume-rbd-keyring
conf:
ceph:
enabled: true
admin_keyring: null
cinder:
user: "cinder"
keyring: null
secret_uuid: 457eb676-33da-42ec-9a8c-9293d545c337
# Cinder Ceph backend that is not configured by the k8s cluster
external_ceph:
enabled: false
user: null
secret_uuid: null
user_secret_name: null
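# A minimal sketch of what enabling an external backend could look like; the
# names below are placeholders, not values shipped with this chart:
#   external_ceph:
#     enabled: true
#     user: cinder-external                                  # hypothetical
#     secret_uuid: <uuid of the libvirt secret>              # site-specific
#     user_secret_name: cinder-volume-external-rbd-keyring   # hypothetical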
libvirt:
listen_tcp: "1"
listen_tls: "0"
auth_tcp: "none"
ca_file: "/etc/pki/CA/cacert.pem"
cert_file: "/etc/pki/libvirt/servercert.pem"
key_file: "/etc/pki/libvirt/private/serverkey.pem"
auth_unix_rw: "none"
listen_addr: 127.0.0.1
log_level: "3"
log_outputs: "1:file:/var/log/libvirt/libvirtd.log"
qemu:
vnc_tls: "0"
vnc_tls_x509_verify: "0"
stdio_handler: "file"
user: "nova"
group: "kvm"
kubernetes:
cgroup: "k8s-infra"
# List of cgroup controllers we want to use when breaking out of
# Kubernetes-defined groups
cgroup_controllers:
- blkio
- cpu
@@ -31,33 +121,213 @@ conf:
- rdma
- misc
- pids
libvirt:
listen_addr: "::"
vencrypt:
# Issuer to use for the vencrypt certs.
issuer:
kind: ClusterIssuer
name: ca-clusterissuer
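# For example, a namespaced cert-manager Issuer could be referenced instead
# (the issuer name below is hypothetical):
#   issuer:
#     kind: Issuer
#     name: vencrypt-issuer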
# Script is included here (vs in bin/) to allow overriding, in the case that
# communication happens over an IP other than the pod IP for some reason.
cert_init_sh: |
#!/bin/bash
set -x
HOSTNAME_FQDN=$(hostname --fqdn)
# Script to create certs for each libvirt pod based on pod IP (by default).
cat <<EOF | kubectl apply -f -
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: ${POD_NAME}-${TYPE}
namespace: ${POD_NAMESPACE}
ownerReferences:
- apiVersion: v1
kind: Pod
name: ${POD_NAME}
uid: ${POD_UID}
spec:
secretName: ${POD_NAME}-${TYPE}
commonName: ${POD_IP}
usages:
- client auth
- server auth
dnsNames:
- ${HOSTNAME}
- ${HOSTNAME_FQDN}
ipAddresses:
- ${POD_IP}
issuerRef:
kind: ${ISSUER_KIND}
name: ${ISSUER_NAME}
EOF
kubectl -n ${POD_NAMESPACE} wait --for=condition=Ready --timeout=300s \
certificate/${POD_NAME}-${TYPE}
# NOTE(mnaser): cert-manager does not clean-up the secrets when the certificate
# is deleted, so we should add an owner reference to the secret
# to ensure that it is cleaned up when the pod is deleted.
kubectl -n ${POD_NAMESPACE} patch secret ${POD_NAME}-${TYPE} \
--type=json -p='[{"op": "add", "path": "/metadata/ownerReferences", "value": [{"apiVersion": "v1", "kind": "Pod", "name": "'${POD_NAME}'", "uid": "'${POD_UID}'"}]}]'
kubectl -n ${POD_NAMESPACE} get secret ${POD_NAME}-${TYPE} -o jsonpath='{.data.tls\.crt}' | base64 -d > /tmp/${TYPE}.crt
kubectl -n ${POD_NAMESPACE} get secret ${POD_NAME}-${TYPE} -o jsonpath='{.data.tls\.key}' | base64 -d > /tmp/${TYPE}.key
kubectl -n ${POD_NAMESPACE} get secret ${POD_NAME}-${TYPE} -o jsonpath='{.data.ca\.crt}' | base64 -d > /tmp/${TYPE}-ca.crt
pod:
probes:
libvirt:
libvirt:
liveness:
enabled: true
params:
initialDelaySeconds: 30
periodSeconds: 60
timeoutSeconds: 5
readiness:
enabled: true
params:
initialDelaySeconds: 15
periodSeconds: 60
timeoutSeconds: 5
security_context:
libvirt:
pod:
runAsUser: 0
container:
ceph_admin_keyring_placement:
readOnlyRootFilesystem: false
ceph_keyring_placement:
readOnlyRootFilesystem: false
libvirt:
privileged: true
readOnlyRootFilesystem: false
libvirt_exporter:
privileged: true
sidecars:
libvirt_exporter: false
affinity:
anti:
type:
default: preferredDuringSchedulingIgnoredDuringExecution
topologyKey:
default: kubernetes.io/hostname
weight:
default: 10
tolerations:
libvirt:
enabled: true
tolerations:
- key: node-role.kubernetes.io/master
operator: Exists
effect: NoSchedule
- key: node-role.kubernetes.io/control-plane
operator: Exists
effect: NoSchedule
- key: openstack-compute-node
operator: Exists
effect: NoSchedule
dns_policy: "ClusterFirstWithHostNet"
mounts:
libvirt:
init_container: null
libvirt:
lifecycle:
upgrades:
daemonsets:
pod_replacement_strategy: RollingUpdate
libvirt:
enabled: true
min_ready_seconds: 0
max_unavailable: 1
resources:
enabled: false
libvirt:
requests:
memory: "128Mi"
cpu: "100m"
limits:
memory: "1024Mi"
cpu: "2000m"
jobs:
image_repo_sync:
requests:
memory: "128Mi"
cpu: "100m"
limits:
memory: "1024Mi"
cpu: "2000m"
libvirt_exporter:
requests:
memory: "128Mi"
cpu: "100m"
limits:
memory: "256Mi"
cpu: "500m"
dependencies:
dynamic:
common:
local_image_registry:
jobs:
- libvirt-image-repo-sync
services:
- endpoint: node
service: local_image_registry
targeted:
ovn:
libvirt:
pod:
- requireSameNode: true
labels:
application: ovn
component: ovn-controller
openvswitch:
libvirt:
pod:
- requireSameNode: true
labels:
application: neutron
component: neutron-ovs-agent
linuxbridge:
libvirt:
pod:
- requireSameNode: true
labels:
application: neutron
component: neutron-lb-agent
sriov:
libvirt:
pod:
- requireSameNode: true
labels:
application: neutron
component: neutron-sriov-agent
static:
libvirt:
services: null
image_repo_sync:
services:
- endpoint: internal
service: local_image_registry
manifests:
configmap_bin: true
configmap_etc: true
daemonset_libvirt: true
job_pre_apply_cleanup: true
job_image_repo_sync: true
network_policy: false
role_cert_manager: false
secret_registry: true
secrets:
oci_image_registry:
libvirt: libvirt-oci-image-registry-key
tls:
server: libvirt-tls-server
client: libvirt-tls-client
annotations:
pod:
libvirt:
starlingx.io/core_pattern: "/dev/null"
...


@@ -6,70 +6,514 @@
---
release_group: osh-openstack-mariadb
images:
tags:
mariadb: docker.io/openstackhelm/mariadb:ubuntu_focal-20250809
ingress: null
error_pages: null
prometheus_create_mysql_user: null
prometheus_mysql_exporter: null
prometheus_mysql_exporter_helm_tests: null
dep_check: quay.io/airshipit/kubernetes-entrypoint:9ff5d2e488ad18187bccc48e9595f197d27110c4-ubuntu_jammy
image_repo_sync: null
mariadb_backup: null
ks_user: docker.io/starlingx/stx-heat:master-debian-stable-latest
scripted_test: null
pre_apply_cleanup: docker.io/starlingx/stx-vault-manager:master-debian-stable-latest
mariadb_controller: docker.io/openstackhelm/mariadb:ubuntu_focal-20250809
pull_policy: "IfNotPresent"
local_registry:
active: false
exclude:
- dep_check
- image_repo_sync
labels:
isApplication: false
server:
node_selector_key: openstack-control-plane
node_selector_value: enabled
ingress:
node_selector_key: openstack-control-plane
node_selector_value: enabled
prometheus_mysql_exporter:
node_selector_key: openstack-control-plane
node_selector_value: enabled
error_server:
node_selector_key: openstack-control-plane
node_selector_value: enabled
job:
node_selector_key: openstack-control-plane
node_selector_value: enabled
test:
node_selector_key: openstack-control-plane
node_selector_value: enabled
controller:
node_selector_key: openstack-control-plane
node_selector_value: enabled
pod:
env:
mariadb_controller:
MARIADB_CONTROLLER_DEBUG: 0
MARIADB_CONTROLLER_CHECK_PODS_DELAY: 10
MARIADB_CONTROLLER_PYKUBE_REQUEST_TIMEOUT: 60
probes:
server:
mariadb:
readiness:
enabled: true
startup:
enabled: true
params:
initialDelaySeconds: 30
periodSeconds: 30
timeoutSeconds: 15
failureThreshold: 3
liveness:
enabled: true
params:
initialDelaySeconds: 120
periodSeconds: 30
timeoutSeconds: 15
failureThreshold: 5
startup:
enabled: true
params:
initialDelaySeconds: 60
periodSeconds: 60
failureThreshold: 10
security_context:
server:
pod:
runAsUser: 999
container:
perms:
runAsUser: 0
readOnlyRootFilesystem: true
mariadb:
runAsUser: 999
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
ingress:
pod:
runAsUser: 65534
container:
server:
runAsUser: 0
readOnlyRootFilesystem: false
error_pages:
pod:
runAsUser: 65534
container:
server:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
prometheus_mysql_exporter:
pod:
runAsUser: 99
container:
exporter:
runAsUser: 99
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
prometheus_create_mysql_user:
pod:
runAsUser: 0
container:
main:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
mariadb_backup:
pod:
runAsUser: 65534
container:
backup_perms:
runAsUser: 0
readOnlyRootFilesystem: true
verify_perms:
runAsUser: 0
readOnlyRootFilesystem: true
mariadb_backup:
runAsUser: 65534
readOnlyRootFilesystem: true
allowPrivilegeEscalation: false
tests:
pod:
runAsUser: 999
container:
test:
runAsUser: 999
readOnlyRootFilesystem: true
controller:
pod:
runAsUser: 65534
container:
controller:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
affinity:
anti:
type:
default: requiredDuringSchedulingIgnoredDuringExecution
topologyKey:
default: kubernetes.io/hostname
weight:
default: 10
tolerations:
mariadb:
enabled: true
tolerations:
- key: node-role.kubernetes.io/master
operator: Exists
effect: NoSchedule
- key: node-role.kubernetes.io/control-plane
operator: Exists
effect: NoSchedule
- key: openstack-compute-node
operator: Exists
effect: NoSchedule
replicas:
server: 3
ingress: 2
error_page: 1
prometheus_mysql_exporter: 1
controller: 1
lifecycle:
upgrades:
deployments:
revision_history: 3
pod_replacement_strategy: RollingUpdate
rolling_update:
max_unavailable: 1
max_surge: 3
termination_grace_period:
prometheus_mysql_exporter:
timeout: 30
error_pages:
timeout: 10
disruption_budget:
mariadb:
min_available: 0
resources:
enabled: true
prometheus_mysql_exporter:
limits:
memory: "1024Mi"
cpu: "2000m"
requests:
memory: "128Mi"
cpu: "500m"
server:
requests:
memory: "128Mi"
cpu: "100m"
limits:
memory: "1024Mi"
cpu: "2000m"
ingress:
requests:
memory: "128Mi"
cpu: "100m"
ephemeral-storage: "500Ki"
limits:
memory: "1024Mi"
cpu: "2000m"
jobs:
tests:
limits:
memory: "1024Mi"
cpu: "2000m"
requests:
memory: "128Mi"
cpu: "100m"
prometheus_create_mysql_user:
limits:
memory: "1024Mi"
cpu: "2000m"
requests:
memory: "128Mi"
cpu: "100m"
image_repo_sync:
requests:
memory: "128Mi"
cpu: "100m"
limits:
memory: "1024Mi"
cpu: "2000m"
mariadb_backup:
requests:
memory: "128Mi"
cpu: "100m"
limits:
memory: "1024Mi"
cpu: "2000m"
ks_user:
requests:
memory: "128Mi"
cpu: "100m"
limits:
memory: "1024Mi"
cpu: "2000m"
dependencies:
dynamic:
common:
local_image_registry:
jobs:
- mariadb-image-repo-sync
services:
- endpoint: node
service: local_image_registry
static:
error_pages:
jobs: null
ingress:
jobs: null
services:
- endpoint: error_pages
service: oslo_db
mariadb_backup:
jobs:
- mariadb-ks-user
services:
- endpoint: internal
service: oslo_db
prometheus_create_mysql_user:
services:
- endpoint: internal
service: oslo_db
prometheus_mysql_exporter:
jobs:
- exporter-create-sql-user
services:
- endpoint: internal
service: oslo_db
prometheus_mysql_exporter_tests:
services:
- endpoint: internal
service: prometheus_mysql_exporter
- endpoint: internal
service: monitoring
image_repo_sync:
services:
- endpoint: internal
service: local_image_registry
tests:
services:
- endpoint: internal
service: oslo_db
controller:
services: null
volume:
# this value is used for single pod deployments of mariadb to prevent losing all data
# if the pod is restarted
use_local_path_for_single_pod_cluster:
enabled: false
host_path: "/tmp/mysql-data"
chown_on_start: true
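# For an all-in-one deployment this could be switched on, e.g. (sketch using
# the default host_path shown above):
#   use_local_path_for_single_pod_cluster:
#     enabled: true
#     host_path: /tmp/mysql-data
#     chown_on_start: true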
enabled: true
class_name: general
size: 5Gi
backup:
enabled: true
class_name: general
size: 5Gi
jobs:
exporter_create_sql_user:
backoffLimit: 87600
activeDeadlineSeconds: 3600
mariadb_backup:
# activeDeadlineSeconds == 0 means no deadline
activeDeadlineSeconds: 0
backoffLimit: 6
cron: "0 0 * * *"
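# (The schedule above uses standard cron syntax: "0 0 * * *" runs the backup
# once a day at midnight.)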
history:
success: 3
failed: 1
ks_user:
# activeDeadlineSeconds == 0 means no deadline
activeDeadlineSeconds: 0
backoffLimit: 6
conf:
tests:
# This may either be:
# * internal: which will hit the endpoint exposed by the ingress controller
# * direct: which will hit the backends directly via a k8s service ip
# Note, deadlocks and failure are to be expected with concurrency if
# hitting the `direct` endpoint.
endpoint: internal
# This is a list of tuning params passed to mysqlslap (an example invocation
# is shown below the list):
params:
- --auto-generate-sql
- --concurrency=100
- --number-of-queries=1000
- --number-char-cols=1
- --number-int-cols=1
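# Assuming the test job simply passes these flags through, the resulting call
# is roughly:
#   mysqlslap --auto-generate-sql --concurrency=100 --number-of-queries=1000 \
#     --number-char-cols=1 --number-int-cols=1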
ingress: null
ingress_conf:
worker-processes: "4"
log-format-stream: "\"$remote_addr [$time_local] $protocol $status $bytes_received $bytes_sent $upstream_addr $upstream_connect_time $upstream_first_byte_time $upstream_session_time $session_time\""
mariadb_server:
setup_wait:
iteration: 30
duration: 5
backup:
enabled: false
base_path: /var/backup
validateData:
ageOffset: 120
mysqldump_options: >
--single-transaction --quick --add-drop-database --add-drop-table --add-locks --databases
days_to_keep: 3
remote_backup:
enabled: false
container_name: mariadb
days_to_keep: 14
storage_policy: default-placement
number_of_retries: 5
delay_range:
min: 30
max: 60
throttle_backups:
enabled: false
sessions_limit: 480
lock_expire_after: 7200
retry_after: 3600
container_name: throttle-backups-manager
galera:
cluster_leader_ttl: 60
database:
mysql_histfile: "/dev/null"
my: |
[mysqld]
datadir=/var/lib/mysql
basedir=/usr
ignore-db-dirs=lost+found
[client-server]
!includedir /etc/mysql/conf.d/
00_base: |
[mysqld]
# Charset
character_set_server=utf8
collation_server=utf8_general_ci
skip-character-set-client-handshake
# Logging
slow_query_log=off
slow_query_log_file=/var/log/mysql/mariadb-slow.log
log_warnings=2
# General logging has a huge performance penalty and is therefore disabled by default
general_log=off
general_log_file=/var/log/mysql/mariadb-error.log
long_query_time=3
log_queries_not_using_indexes=on
# Networking
bind_address=0.0.0.0
port={{ tuple "oslo_db" "direct" "mysql" . | include "helm-toolkit.endpoints.endpoint_port_lookup" }}
# When a client connects, the server will perform hostname resolution,
# and when DNS is slow, establishing the connection will become slow as well.
# It is therefore recommended to start the server with skip-name-resolve to
# disable all DNS lookups. The only limitation is that the GRANT statements
# must then use IP addresses only.
skip_name_resolve
# Tuning
user=mysql
max_allowed_packet=256M
open_files_limit=10240
max_connections=8192
max-connect-errors=1000000
# General security settings
# Reference: https://dev.mysql.com/doc/mysql-security-excerpt/8.0/en/general-security-issues.html
# secure_file_priv is set to '/home' because it is read-only, which will
# disable this feature completely.
secure_file_priv=/home
local_infile=0
symbolic_links=0
sql_mode="STRICT_ALL_TABLES,STRICT_TRANS_TABLES,ERROR_FOR_DIVISION_BY_ZERO,NO_AUTO_CREATE_USER,NO_ENGINE_SUBSTITUTION"
## Generally, it is unwise to set the query cache to be larger than 64-128M
## as the costs associated with maintaining the cache outweigh the performance
## gains.
## The query cache is a well known bottleneck that can be seen even when
## concurrency is moderate. The best option is to disable it from day 1
## by setting query_cache_size=0 (now the default on MySQL 5.6)
## and to use other ways to speed up read queries: good indexing, adding
## replicas to spread the read load or using an external cache.
query_cache_size=0
query_cache_type=0
sync_binlog=0
thread_cache_size=16
table_open_cache=2048
table_definition_cache=1024
#
# InnoDB
#
# The buffer pool is where data and indexes are cached: having it as large as possible
# will ensure you use memory and not disks for most read operations.
# Typical values are 50..75% of available RAM.
# TODO(tomasz.paszkowski): This needs to by dynamic based on available RAM.
innodb_buffer_pool_size=1024M
innodb_doublewrite=0
innodb_file_format=Barracuda
innodb_file_per_table=1
innodb_flush_method=O_DIRECT
innodb_io_capacity=500
innodb_locks_unsafe_for_binlog=1
innodb_log_file_size=128M
innodb_old_blocks_time=1000
innodb_read_io_threads=8
innodb_write_io_threads=8
# Clustering
binlog_format=ROW
default-storage-engine=InnoDB
innodb_autoinc_lock_mode=2
innodb_flush_log_at_trx_commit=2
wsrep_cluster_name={{ tuple "oslo_db" "direct" . | include "helm-toolkit.endpoints.hostname_namespaced_endpoint_lookup" | replace "." "_" }}
wsrep_on=1
wsrep_provider=/usr/lib/galera/libgalera_smm.so
wsrep_slave_threads=12
wsrep_sst_auth={{ .Values.endpoints.oslo_db.auth.sst.username }}:{{ .Values.endpoints.oslo_db.auth.sst.password }}
wsrep_sst_method=mariabackup
{{ if .Values.manifests.certificates }}
wsrep_provider_options="socket.ssl_ca=/etc/mysql/certs/ca.crt; socket.ssl_cert=/etc/mysql/certs/tls.crt; socket.ssl_key=/etc/mysql/certs/tls.key; evs.suspect_timeout=PT30S; gmcast.peer_timeout=PT15S; gmcast.listen_addr=tcp://0.0.0.0:{{ tuple "oslo_db" "direct" "wsrep" . | include "helm-toolkit.endpoints.endpoint_port_lookup" }}"
# TLS
ssl_ca=/etc/mysql/certs/ca.crt
ssl_key=/etc/mysql/certs/tls.key
ssl_cert=/etc/mysql/certs/tls.crt
# tls_version = TLSv1.2,TLSv1.3
[sst]
encrypt = 3
tca=/etc/mysql/certs/ca.crt
tkey=/etc/mysql/certs/tls.key
tcert=/etc/mysql/certs/tls.crt
{{ else }}
wsrep_provider_options="evs.suspect_timeout=PT30S; gmcast.peer_timeout=PT15S; gmcast.listen_addr=tcp://0.0.0.0:{{ tuple "oslo_db" "direct" "wsrep" . | include "helm-toolkit.endpoints.endpoint_port_lookup" }}"
{{ end }}
[mysqldump]
max-allowed-packet=16M
[client]
default_character_set=utf8
protocol=tcp
port={{ tuple "oslo_db" "direct" "mysql" . | include "helm-toolkit.endpoints.endpoint_port_lookup" }}
{{ if .Values.manifests.certificates }}
# TLS
ssl_ca=/etc/mysql/certs/ca.crt
ssl_key=/etc/mysql/certs/tls.key
ssl_cert=/etc/mysql/certs/tls.crt
# tls_version = TLSv1.2,TLSv1.3
ssl-verify-server-cert
{{ end }}
config_override: |
[mysqld]
wait_timeout=3600
@@ -80,6 +524,234 @@ conf:
wsrep_provider_options="gmcast.listen_addr=tcp://[::]:{{ tuple "oslo_db" "direct" "wsrep" . | include "helm-toolkit.endpoints.endpoint_port_lookup" }}"
bind_address=::
wsrep_sst_method=rsync
99_force: |
[mysqld]
datadir=/var/lib/mysql
tmpdir=/tmp
monitoring:
prometheus:
enabled: false
mysqld_exporter:
scrape: true
secrets:
identity:
admin: keystone-admin-user
mariadb: mariadb-backup-user
mariadb:
backup_restore: mariadb-backup-restore
oci_image_registry:
mariadb: mariadb-oci-image-registry-key
tls:
oslo_db:
server:
public: mariadb-tls-server
internal: mariadb-tls-direct
# typically overridden by environmental
# values, but should include all endpoints
# required by this chart
endpoints:
cluster_domain_suffix: cluster.local
local_image_registry:
name: docker-registry
namespace: docker-registry
hosts:
default: localhost
internal: docker-registry
node: localhost
host_fqdn_override:
default: null
port:
registry:
node: 5000
oci_image_registry:
name: oci-image-registry
namespace: oci-image-registry
auth:
enabled: false
mariadb:
username: mariadb
password: password
hosts:
default: localhost
host_fqdn_override:
default: null
port:
registry:
default: null
monitoring:
name: prometheus
namespace: null
hosts:
default: prom-metrics
public: prometheus
host_fqdn_override:
default: null
path:
default: null
scheme:
default: 'http'
port:
api:
default: 9090
public: 80
prometheus_mysql_exporter:
namespace: null
hosts:
default: mysql-exporter
host_fqdn_override:
default: null
path:
default: /metrics
scheme:
default: 'http'
port:
metrics:
default: 9104
oslo_db:
namespace: null
auth:
admin:
username: root
password: password
sst:
username: sst
password: password
audit:
username: audit
password: password
exporter:
username: exporter
password: password
hosts:
default: mariadb
direct: mariadb-server
discovery: mariadb-discovery
error_pages: mariadb-ingress-error-pages
host_fqdn_override:
default: null
path: null
scheme: mysql+pymysql
port:
mysql:
default: 3306
wsrep:
default: 4567
ist:
default: 4568
sst:
default: 4444
kube_dns:
namespace: kube-system
name: kubernetes-dns
hosts:
default: kube-dns
host_fqdn_override:
default: null
path:
default: null
scheme: http
port:
dns_tcp:
default: 53
dns:
default: 53
protocol: UDP
identity:
name: backup-storage-auth
namespace: openstack
auth:
admin:
# Auth URL of null indicates local authentication
# HTK will form the URL unless specified here
auth_url: null
region_name: RegionOne
username: admin
password: password
project_name: admin
user_domain_name: default
project_domain_name: default
mariadb:
# Auth URL of null indicates local authentication
# HTK will form the URL unless specified here
auth_url: null
role: admin
region_name: RegionOne
username: mariadb-backup-user
password: password
project_name: service
user_domain_name: service
project_domain_name: service
hosts:
default: keystone
internal: keystone-api
host_fqdn_override:
default: null
path:
default: /v3
scheme:
default: 'http'
port:
api:
default: 80
internal: 5000
network:
mariadb: {}
mariadb_discovery: {}
mariadb_ingress: {}
mariadb_ingress_error_pages: {}
mariadb_master: {}
ip_family_policy: PreferDualStack
network_policy:
mariadb:
ingress:
- {}
egress:
- {}
prometheus-mysql-exporter:
ingress:
- {}
egress:
- {}
# Helm hook breaks for helm2.
# Set helm3_hook: false in case helm2 is used.
helm3_hook: true
manifests:
certificates: false
configmap_bin: true
configmap_etc: true
configmap_ingress_conf: false
configmap_ingress_etc: false
configmap_services_tcp: true
deployment_error: false
deployment_ingress: false
job_pre_apply_cleanup: true
job_image_repo_sync: true
cron_job_mariadb_backup: false
job_ks_user: false
pvc_backup: false
monitoring:
prometheus:
configmap_bin: true
deployment_exporter: true
job_user_create: true
secret_etc: true
service_exporter: true
network_policy_exporter: false
pdb_server: true
network_policy: false
pod_test: false
secret_dbadmin_password: true
secret_sst_password: true
secret_dbaudit_password: true
secret_backup_restore: false
secret_etc: true
secret_registry: true
service_discovery: true
service_ingress: false
service_error: false
service: true
statefulset: true
config_ipv6: false
deployment_controller: true
service_master: true
...


@@ -6,6 +6,113 @@
---
release_group: osh-openstack-memcached
conf:
memcached:
max_connections: 8192
# NOTE(portdirect): this should match the value in
# `pod.resources.memcached.memory`
memory: 1024
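# To illustrate the pairing, the two values meant to line up in this file are:
#   conf.memcached.memory: 1024                        # MB cap passed to memcached (-m)
#   pod.resources.memcached.limits.memory: "1024Mi"    # set further down in this file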
stats_cachedump:
enabled: true
dependencies:
dynamic:
common:
local_image_registry:
jobs:
- memcached-image-repo-sync
services:
- endpoint: node
service: local_image_registry
static:
memcached:
jobs: null
image_repo_sync:
services:
- endpoint: internal
service: local_image_registry
secrets:
oci_image_registry:
memcached: memcached-oci-image-registry-key
endpoints:
cluster_domain_suffix: cluster.local
local_image_registry:
name: docker-registry
namespace: docker-registry
hosts:
default: localhost
internal: docker-registry
node: localhost
host_fqdn_override:
default: null
port:
registry:
node: 5000
oci_image_registry:
name: oci-image-registry
namespace: oci-image-registry
auth:
enabled: false
memcached:
username: memcached
password: password
hosts:
default: localhost
host_fqdn_override:
default: null
port:
registry:
default: null
oslo_cache:
namespace: null
host_fqdn_override:
default: null
hosts:
default: memcached
port:
memcache:
default: 11211
metrics:
default: 9150
kube_dns:
namespace: kube-system
name: kubernetes-dns
hosts:
default: kube-dns
host_fqdn_override:
default: null
path:
default: null
scheme: http
port:
dns_tcp:
default: 53
dns:
default: 53
protocol: UDP
network_policy:
memcached:
ingress:
- {}
egress:
- {}
monitoring:
prometheus:
enabled: false
memcached_exporter:
scrape: true
images:
pull_policy: IfNotPresent
tags:
dep_check: 'quay.io/airshipit/kubernetes-entrypoint:9ff5d2e488ad18187bccc48e9595f197d27110c4-ubuntu_jammy'
memcached: 'docker.io/library/memcached:1.5.5'
prometheus_memcached_exporter: null
image_repo_sync: null
pre_apply_cleanup: docker.io/starlingx/stx-vault-manager:master-debian-stable-latest
local_registry:
active: false
exclude:
- dep_check
- image_repo_sync
labels:
isApplication: false
server:
@@ -14,28 +121,82 @@ labels:
prometheus_memcached_exporter:
node_selector_key: openstack-control-plane
node_selector_value: enabled
manifests:
configmap_bin: true
deployment: true
job_pre_apply_cleanup: true
job_image_repo_sync: true
network_policy: false
service: true
secret_registry: true
pod:
security_context:
server:
pod:
runAsUser: 65534
container:
memcached:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
memcached_exporter:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
affinity:
anti:
topologyKey:
default: kubernetes.io/hostname
type:
default: preferredDuringSchedulingIgnoredDuringExecution
weight:
default: 10
tolerations:
memcached:
enabled: true
tolerations:
- key: node-role.kubernetes.io/master
operator: Exists
effect: NoSchedule
- key: node-role.kubernetes.io/control-plane
operator: Exists
effect: NoSchedule
- key: openstack-compute-node
operator: Exists
effect: NoSchedule
lifecycle:
upgrades:
deployments:
pod_replacement_strategy: RollingUpdate
revision_history: 3
rolling_update:
max_surge: 3
max_unavailable: 1
termination_grace_period:
memcached:
timeout: 30
replicas:
server: 1
resources:
enabled: false
memcached:
limits:
cpu: "2000m"
memory: "1024Mi"
requests:
cpu: "500m"
memory: "128Mi"
prometheus_memcached_exporter:
limits:
memory: "1024Mi"
cpu: "2000m"
requests:
cpu: 500m
memory: 128Mi
jobs:
image_repo_sync:
requests:
memory: "128Mi"
cpu: "100m"
limits:
memory: "1024Mi"
cpu: "2000m"
...


@@ -6,30 +6,225 @@
---
release_group: osh-openstack-openvswitch
images:
tags:
openvswitch_db_server: docker.io/starlingx/stx-ovs:master-debian-stable-latest
openvswitch_vswitchd: docker.io/starlingx/stx-ovs:master-debian-stable-latest
dep_check: quay.io/airshipit/kubernetes-entrypoint:9ff5d2e488ad18187bccc48e9595f197d27110c4-ubuntu_jammy
image_repo_sync: null
pre_apply_cleanup: docker.io/starlingx/stx-vault-manager:master-debian-stable-latest
pull_policy: "IfNotPresent"
local_registry:
active: false
exclude:
- dep_check
- image_repo_sync
labels:
isApplication: false
ovs:
node_selector_key: openvswitch
node_selector_value: enabled
pod:
tini:
enabled: true
tolerations:
openvswitch:
enabled: true
tolerations:
- key: node-role.kubernetes.io/master
operator: Exists
effect: NoSchedule
- key: node-role.kubernetes.io/control-plane
operator: Exists
effect: NoSchedule
- key: openstack-compute-node
operator: Exists
effect: NoSchedule
probes:
ovs:
ovs_db:
liveness:
enabled: true
params:
initialDelaySeconds: 60
periodSeconds: 30
timeoutSeconds: 5
readiness:
enabled: true
params:
initialDelaySeconds: 90
periodSeconds: 30
timeoutSeconds: 5
ovs_vswitch:
liveness:
enabled: true
params:
initialDelaySeconds: 60
periodSeconds: 30
timeoutSeconds: 5
readiness:
enabled: true
params:
failureThreshold: 3
periodSeconds: 10
timeoutSeconds: 1
security_context:
ovs:
pod:
runAsUser: 42424
container:
perms:
runAsUser: 0
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
server:
runAsUser: 42424
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
modules:
runAsUser: 0
capabilities:
add:
- SYS_MODULE
- SYS_CHROOT
readOnlyRootFilesystem: true
vswitchd:
runAsUser: 0
capabilities:
add:
- NET_ADMIN
readOnlyRootFilesystem: true
dns_policy: "ClusterFirstWithHostNet"
lifecycle:
upgrades:
daemonsets:
pod_replacement_strategy: RollingUpdate
ovs:
enabled: true
min_ready_seconds: 0
max_unavailable: 1
resources:
enabled: false
ovs:
db:
requests:
memory: "128Mi"
cpu: "100m"
limits:
memory: "1024Mi"
cpu: "2000m"
vswitchd:
requests:
memory: "128Mi"
cpu: "100m"
limits:
memory: "1024Mi"
cpu: "2000m"
# set resources to enabled and specify one of the following when using dpdk
# hugepages-1Gi: "1Gi"
# hugepages-2Mi: "512Mi"
jobs:
image_repo_sync:
requests:
memory: "128Mi"
cpu: "100m"
limits:
memory: "1024Mi"
cpu: "2000m"
user:
nova:
uid: 42424
secrets:
oci_image_registry:
openvswitch: openvswitch-oci-image-registry-key
endpoints:
cluster_domain_suffix: cluster.local
local_image_registry:
name: docker-registry
namespace: docker-registry
hosts:
default: localhost
internal: docker-registry
node: localhost
host_fqdn_override:
default: null
port:
registry:
node: 5000
oci_image_registry:
name: oci-image-registry
namespace: oci-image-registry
auth:
enabled: false
openvswitch:
username: openvswitch
password: password
hosts:
default: localhost
host_fqdn_override:
default: null
port:
registry:
default: null
network_policy:
openvswitch:
ingress:
- {}
egress:
- {}
dependencies:
dynamic:
common:
local_image_registry:
jobs:
- openvswitch-image-repo-sync
services:
- endpoint: node
service: local_image_registry
static:
ovs: null
image_repo_sync:
services:
- endpoint: internal
service: local_image_registry
manifests:
configmap_bin: true
daemonset: true
daemonset_ovs_vswitchd: true
job_pre_apply_cleanup: true
job_image_repo_sync: true
network_policy: false
secret_registry: true
conf:
poststart:
timeout: 5
rootUser: "root"
extraCommand: null
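# extraCommand takes an additional shell snippet for the poststart hook; a
# hypothetical example (any valid ovs-vsctl call works):
#   extraCommand: |
#     ovs-vsctl --no-wait set Open_vSwitch . other_config:vlan-limit=2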
openvswitch_db_server:
ptcp_port: null
ovs_other_config:
handler_threads: null
revalidator_threads: null
ovs_hw_offload:
enabled: false
ovs_dpdk:
enabled: false
## Mandatory parameters. Please uncomment when enabling DPDK
# socket_memory: 1024
# hugepages_mountpath: /dev/hugepages
# vhostuser_socket_dir: vhostuser
#
## Optional hardware specific parameters: modify to match NUMA topology
# mem_channels: 4
# lcore_mask: 0x1
# pmd_cpu_mask: 0x4
#
## Optional driver to use. Driver name should be the same as the one
## specified in the ovs_dpdk section in the Neutron values and vice versa
# driver: vfio-pci
#
## Optional security feature
# The vHost IOMMU feature restricts the vhost memory that a virtio device
# can access; available with DPDK v17.11
# vhost_iommu_support: true
...


@@ -6,11 +6,6 @@
---
release_group: osh-openstack-rabbitmq
labels:
isApplication: false
server:
@@ -19,53 +14,439 @@ labels:
prometheus_rabbitmq_exporter:
node_selector_key: openstack-control-plane
node_selector_value: enabled
test:
node_selector_key: openstack-control-plane
node_selector_value: enabled
jobs:
node_selector_key: openstack-control-plane
node_selector_value: enabled
images:
tags:
dep_check: quay.io/airshipit/kubernetes-entrypoint:9ff5d2e488ad18187bccc48e9595f197d27110c4-ubuntu_jammy
image_repo_sync: null
prometheus_rabbitmq_exporter: null
prometheus_rabbitmq_exporter_helm_tests: null
pre_apply_cleanup: docker.io/starlingx/stx-vault-manager:master-debian-stable-latest
rabbitmq_init: docker.io/starlingx/stx-heat:master-debian-stable-latest
scripted_test: docker.io/library/rabbitmq:3.9.29-management
rabbitmq: docker.io/library/rabbitmq:3.9.29
pull_policy: "IfNotPresent"
local_registry:
active: false
exclude:
- dep_check
- image_repo_sync
# forceBoot: executes 'rabbitmqctl force_boot' to force a boot when the
# cluster was shut down unexpectedly or in an unknown order.
# ref: https://www.rabbitmq.com/rabbitmqctl.8.html#force_boot
forceBoot:
enabled: true
pod:
probes:
prometheus_rabbitmq_exporter:
rabbitmq_exporter:
readiness:
enabled: true
params:
initialDelaySeconds: 30
periodSeconds: 30
timeoutSeconds: 5
liveness:
enabled: true
params:
initialDelaySeconds: 120
periodSeconds: 90
timeoutSeconds: 5
rabbitmq:
rabbitmq:
readiness:
enabled: true
params:
initialDelaySeconds: 10
periodSeconds: 30
timeoutSeconds: 10
successThreshold: 1
failureThreshold: 3
liveness:
enabled: true
params:
initialDelaySeconds: 60
periodSeconds: 30
timeoutSeconds: 10
successThreshold: 1
failureThreshold: 5
security_context:
exporter:
pod:
runAsUser: 65534
container:
rabbitmq_exporter:
readOnlyRootFilesystem: true
allowPrivilegeEscalation: false
server:
pod:
runAsUser: 999
container:
rabbitmq_password:
runAsUser: 0
readOnlyRootFilesystem: true
rabbitmq_cookie:
runAsUser: 0
readOnlyRootFilesystem: true
rabbitmq_perms:
runAsUser: 0
readOnlyRootFilesystem: true
rabbitmq:
allowPrivilegeEscalation: false
runAsUser: 999
readOnlyRootFilesystem: false
cluster_wait:
pod:
runAsUser: 999
container:
rabbitmq_cluster_wait:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
rabbitmq_cookie:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
test:
pod:
runAsUser: 999
container:
rabbitmq_test:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
affinity:
anti:
type:
default: requiredDuringSchedulingIgnoredDuringExecution
topologyKey:
default: kubernetes.io/hostname
weight:
default: 10
tolerations:
rabbitmq:
enabled: true
tolerations:
- key: node-role.kubernetes.io/master
operator: Exists
effect: NoSchedule
- key: node-role.kubernetes.io/control-plane
operator: Exists
effect: NoSchedule
- key: openstack-compute-node
operator: Exists
effect: NoSchedule
replicas:
server: 2
prometheus_rabbitmq_exporter: 1
lifecycle:
upgrades:
deployments:
revision_history: 3
pod_replacement_strategy: RollingUpdate
rolling_update:
max_unavailable: 1
max_surge: 3
termination_grace_period:
prometheus_rabbitmq_exporter:
timeout: 30
disruption_budget:
mariadb:
min_available: 0
resources:
enabled: false
prometheus_rabbitmq_exporter:
limits:
memory: "1024Mi"
cpu: "2000m"
requests:
memory: "128Mi"
cpu: "500m"
server:
limits:
memory: "128Mi"
cpu: "500m"
requests:
memory: "128Mi"
cpu: "500m"
jobs:
tests:
limits:
memory: "1024Mi"
cpu: "2000m"
requests:
memory: "128Mi"
cpu: "100m"
image_repo_sync:
requests:
memory: "128Mi"
cpu: "100m"
limits:
memory: "1024Mi"
cpu: "2000m"
conf:
enabled_plugins:
- rabbitmq_management
- rabbitmq_peer_discovery_k8s
prometheus_exporter:
capabilities:
- no_sort
log_level: info
skipverify: 1
skip_queues: "^$"
include_queues: ".*"
rabbit_exporters: "overview,exchange,node,queue"
rabbitmq_mgmt_metrics_collector_disabled: false
# This IP may be IPv4 or IPv6; the TCP port is appended to it and the result is set as rabbitmq.listeners.tcp.1
bind_address: "::"
rabbitmq:
listeners:
tcp:
# NOTE(portdirect): This is always defined via the endpoints section.
1: null
cluster_formation:
peer_discovery_backend: rabbit_peer_discovery_k8s
k8s:
address_type: hostname
node_cleanup:
interval: "10"
only_log_warning: "true"
cluster_partition_handling: autoheal
queue_master_locator: min-masters
loopback_users.guest: "false"
management.load_definitions: "/var/lib/rabbitmq/definitions.json"
rabbit_additonal_conf:
# This configuration is used for non-TLS deployments
management.listener.ip: "::"
management.listener.port: null
rabbit_advanced_config:
enabled: false
default_consumer_prefetch: 250
rabbitmq_exporter:
rabbit_timeout: 30
# Feature flags were introduced in RabbitMQ 3.8.0.
# To deploy with the standard set of features, leave as default.
# To deploy with specific features, separate each feature with a comma.
# To deploy with all features disabled, leave blank or empty.
feature_flags: default
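# For instance, to enable only a specific set of flags one could use
# (flag names from the RabbitMQ 3.8 documentation):
#   feature_flags: quorum_queue,implicit_default_bindings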
dependencies:
dynamic:
common:
local_image_registry:
jobs:
- rabbitmq-image-repo-sync
services:
- endpoint: node
service: local_image_registry
static:
prometheus_rabbitmq_exporter:
services:
- endpoint: internal
service: oslo_messaging
prometheus_rabbitmq_exporter_tests:
services:
- endpoint: internal
service: prometheus_rabbitmq_exporter
- endpoint: internal
service: monitoring
rabbitmq:
jobs: null
tests:
services:
- endpoint: internal
service: oslo_messaging
# NOTE (portdirect): this key is somewhat special, if set to the string
# `cluster_wait` then the job dep will be populated with a single value
# containing the generated name of the `cluster_wait` job.
jobs: cluster_wait
cluster_wait:
services:
- endpoint: internal
service: oslo_messaging
image_repo_sync:
services:
- endpoint: internal
service: local_image_registry
monitoring:
prometheus:
enabled: false
rabbitmq_exporter:
scrape: true
network:
host_namespace: false
management:
ingress:
public: true
classes:
namespace: "nginx-openstack"
cluster: "nginx"
annotations:
nginx.ingress.kubernetes.io/rewrite-target: /
secrets:
oci_image_registry:
rabbitmq: rabbitmq-oci-image-registry-key
tls:
oslo_messaging:
server:
internal: rabbitmq-tls-direct
# typically overridden by environmental
# values, but should include all endpoints
# required by this chart
endpoints:
cluster_domain_suffix: cluster.local
local_image_registry:
name: docker-registry
namespace: docker-registry
hosts:
default: localhost
internal: docker-registry
node: localhost
host_fqdn_override:
default: null
port:
registry:
node: 5000
oci_image_registry:
name: oci-image-registry
namespace: oci-image-registry
auth:
enabled: false
rabbitmq:
username: rabbitmq
password: password
hosts:
default: localhost
host_fqdn_override:
default: null
port:
registry:
default: null
monitoring:
name: prometheus
namespace: null
hosts:
default: prom-metrics
public: prometheus
host_fqdn_override:
default: null
path:
default: null
scheme:
default: 'http'
port:
api:
default: 9090
public: 80
oslo_messaging:
auth:
erlang_cookie: openstack-cookie
user:
username: rabbitmq
password: password
hosts:
default: rabbitmq
# NOTE(portdirect): the public host is only used for the management WUI.
# If left empty, the release name sha, suffixed with mgr, will be used to
# produce a unique hostname.
public: null
host_fqdn_override:
default: null
path: /
scheme: rabbit
port:
clustering:
# NOTE(portdirect): the value for this port is driven by amqp+20000
# it should not be set manually.
default: null
amqp:
default: 5672
http:
default: 15672
public: 80
metrics:
default: 15692
prometheus_rabbitmq_exporter:
namespace: null
hosts:
default: rabbitmq-exporter
host_fqdn_override:
default: null
path:
default: /metrics
scheme:
default: 'http'
port:
metrics:
default: 9095
kube_dns:
namespace: kube-system
name: kubernetes-dns
hosts:
default: kube-dns
host_fqdn_override:
default: null
path:
default: null
scheme: http
port:
dns_tcp:
default: 53
dns:
default: 53
protocol: UDP
network_policy:
prometheus_rabbitmq_exporter:
ingress:
- {}
egress:
- {}
rabbitmq:
ingress:
- {}
egress:
- {}
# TODO: Revert to upstream defaults once the following LP is resolved:
# https://bugs.launchpad.net/starlingx/+bug/1814595. Changing this PV
# size to 1Gi from the default 256Mi avoids the kernel hang caused by the
# filesystem race seen in the LP.
volume:
use_local_path:
enabled: false
host_path: /var/lib/rabbitmq
chown_on_start: true
enabled: true
class_name: general
size: 1Gi
# Hook break for helm2.
# Set helm3_hook to false while using helm2
helm3_hook: true
io_thread_pool:
enabled: false
size: 64
manifests:
certificates: false
configmap_bin: true
configmap_etc: true
config_ipv6: false
ingress_management: true
job_pre_apply_cleanup: true
job_cluster_wait: true
job_image_repo_sync: true
monitoring:
prometheus:
configmap_bin: false
deployment_exporter: false
service_exporter: false
network_policy_exporter: false
network_policy: false
pod_test: true
secret_admin_user: true
secret_erlang_cookie: true
secret_registry: true
service_discovery: true
service_ingress_management: true
service: true
statefulset: true
...