openstack-helm/octavia/values.yaml

# Copyright 2019 Samsung Electronics Co., Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Default values for octavia.
# This is a YAML-formatted file.
# Declare name/value pairs to be passed into your templates.
# name: value
---
release_group: null
labels:
  api:
    node_selector_key: openstack-control-plane
    node_selector_value: enabled
  worker:
    node_selector_key: openstack-control-plane
    node_selector_value: enabled
  housekeeping:
    node_selector_key: openstack-control-plane
    node_selector_value: enabled
  health_manager:
    node_selector_key: openstack-control-plane
    node_selector_value: enabled
  job:
    node_selector_key: openstack-control-plane
    node_selector_value: enabled
images:
  tags:
    test: docker.io/xrally/xrally-openstack:2.0.0
    bootstrap: docker.io/openstackhelm/heat:ocata
    db_init: docker.io/openstackhelm/heat:ocata
    octavia_db_sync: docker.io/loci/octavia:master-ubuntu
    db_drop: docker.io/openstackhelm/heat:ocata
    rabbit_init: docker.io/rabbitmq:3.7-management
    ks_user: docker.io/openstackhelm/heat:ocata
    ks_service: docker.io/openstackhelm/heat:ocata
    ks_endpoints: docker.io/openstackhelm/heat:ocata
    dep_check: quay.io/airshipit/kubernetes-entrypoint:v1.0.0
    image_repo_sync: docker.io/docker:17.07.0
    octavia_api: docker.io/loci/octavia:master-ubuntu
    octavia_worker: docker.io/loci/octavia:master-ubuntu
    octavia_housekeeping: docker.io/loci/octavia:master-ubuntu
    octavia_health_manager: docker.io/loci/octavia:master-ubuntu
    octavia_health_manager_init: docker.io/kolla/ubuntu-source-octavia-health-manager:rocky
    openvswitch_vswitchd: docker.io/kolla/centos-source-openvswitch-vswitchd:rocky
  pull_policy: "IfNotPresent"
  local_registry:
    active: false
    exclude:
      - dep_check
      - image_repo_sync
bootstrap:
  enabled: true
  ks_user: admin
  script: |
    openstack role create --or-show load-balancer_admin
    openstack role create --or-show load-balancer_observer
    openstack role create --or-show load-balancer_global_observer
    openstack role create --or-show load-balancer_quota_admin
    openstack role create --or-show load-balancer_member
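# NOTE: the bootstrap script above pre-creates Octavia's advanced RBAC roles;
# it does not assign them to anyone. As an illustrative follow-up (not part of
# this chart), a role could be granted with the standard OpenStack CLI, where
# "demo" is a hypothetical user and project:
#   openstack role add --user demo --project demo load-balancer_member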
network:
  api:
    ingress:
      public: true
      classes:
        namespace: "nginx"
        cluster: "nginx-cluster"
      annotations:
        nginx.ingress.kubernetes.io/rewrite-target: /
    external_policy_local: false
    node_port:
      enabled: false
      port: 30826
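# NOTE: instead of (or alongside) the ingress, the API can be exposed directly
# on each node via a NodePort service. A minimal override sketch, reusing the
# default port defined above:
#   network:
#     api:
#       node_port:
#         enabled: true
#         port: 30826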
dependencies:
  dynamic:
    common:
      local_image_registry:
        jobs:
          - octavia-image-repo-sync
        services:
          - endpoint: node
            service: local_image_registry
  static:
    api:
      jobs:
        - octavia-db-sync
        - octavia-ks-user
        - octavia-ks-endpoints
        - octavia-rabbit-init
      services:
        - endpoint: internal
          service: oslo_db
        - endpoint: internal
          service: identity
        - endpoint: internal
          service: oslo_messaging
        - endpoint: internal
          service: oslo_cache
        - endpoint: internal
          service: network
    worker:
      jobs:
        - octavia-db-sync
        - octavia-ks-user
        - octavia-ks-endpoints
        - octavia-rabbit-init
      services:
        - endpoint: internal
          service: oslo_db
        - endpoint: internal
          service: identity
        - endpoint: internal
          service: oslo_messaging
        - endpoint: internal
          service: oslo_cache
        - endpoint: internal
          service: network
        - endpoint: internal
          service: load_balancer
    housekeeping:
      jobs:
        - octavia-db-sync
        - octavia-ks-user
        - octavia-ks-endpoints
        - octavia-rabbit-init
      services:
        - endpoint: internal
          service: oslo_db
        - endpoint: internal
          service: identity
        - endpoint: internal
          service: oslo_messaging
        - endpoint: internal
          service: oslo_cache
        - endpoint: internal
          service: network
        - endpoint: internal
          service: load_balancer
    health_manager:
      jobs:
        - octavia-db-sync
        - octavia-ks-user
        - octavia-ks-endpoints
        - octavia-rabbit-init
      services:
        - endpoint: internal
          service: oslo_db
        - endpoint: internal
          service: identity
        - endpoint: internal
          service: oslo_messaging
        - endpoint: internal
          service: oslo_cache
        - endpoint: internal
          service: network
        - endpoint: internal
          service: load_balancer
    db_init:
      services:
        - endpoint: internal
          service: oslo_db
    db_sync:
      jobs:
        - octavia-db-init
      services:
        - endpoint: internal
          service: oslo_db
    ks_endpoints:
      jobs:
        - octavia-ks-service
      services:
        - endpoint: internal
          service: identity
    ks_service:
      services:
        - endpoint: internal
          service: identity
    ks_user:
      services:
        - endpoint: internal
          service: identity
    rabbit_init:
      services:
        - endpoint: internal
          service: oslo_messaging
    image_repo_sync:
      services:
        - endpoint: internal
          service: local_image_registry
conf:
  octavia:
    DEFAULT:
      log_config_append: /etc/octavia/logging.conf
    api_settings:
      api_handler: queue_producer
      bind_host: 0.0.0.0
    database:
      max_retries: -1
    health_manager:
      bind_port: 5555
      bind_ip: 0.0.0.0
      controller_ip_port_list: 0.0.0.0:5555
      heartbeat_key: insecure
    keystone_authtoken:
      auth_type: password
      auth_version: v3
      memcache_security_strategy: ENCRYPT
    certificates:
      ca_private_key_passphrase: foobar
      ca_private_key: /etc/octavia/certs/private/cakey.pem
      ca_certificate: /etc/octavia/certs/ca_01.pem
    haproxy_amphora:
      server_ca: /etc/octavia/certs/ca_01.pem
      client_cert: /etc/octavia/certs/client.pem
      base_path: /var/lib/octavia
      base_cert_dir: /var/lib/octavia/certs
      connection_max_retries: 1500
      connection_retry_interval: 1
      rest_request_conn_timeout: 10
      rest_request_read_timeout: 120
    controller_worker:
      amp_image_owner_id: null
      amp_secgroup_list: null
      amp_flavor_id: null
      amp_boot_network_list: null
      amp_ssh_key_name: octavia_ssh_key
      amp_image_tag: amphora
      network_driver: allowed_address_pairs_driver
      compute_driver: compute_nova_driver
      amphora_driver: amphora_haproxy_rest_driver
      workers: 2
      amp_active_retries: 100
      amp_active_wait_sec: 2
      loadbalancer_topology: SINGLE
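# NOTE: the amp_* values above default to null and must be supplied at deploy
# time to match the amphora image, flavor, security group and boot network
# created for this cloud. The controller locates the amphora image by the
# Glance tag named in amp_image_tag; an illustrative (not chart-managed)
# tagging command would be:
#   openstack image set --tag amphora <amphora-image-uuid>
# loadbalancer_topology also accepts ACTIVE_STANDBY for an HA amphora pair.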
    oslo_messaging:
      topic: octavia_prov
      rpc_thread_pool_size: 2
    oslo_messaging_notifications:
      driver: messagingv2
    house_keeping:
      load_balancer_expiry_age: 3600
      amphora_expiry_age: 3600
    service_auth:
      auth_type: password
      cafile: ""
      auth_version: v3
      memcache_security_strategy: ENCRYPT
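# NOTE: keys under conf.octavia are rendered into octavia.conf by the chart's
# configmap-etc template, so individual options can be overridden per-deploy.
# A hedged install-time example (option names as defined above):
#   helm upgrade --install octavia ./octavia \
#     --set conf.octavia.controller_worker.loadbalancer_topology=ACTIVE_STANDBY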
  logging:
    loggers:
      keys:
        - root
        - octavia
    handlers:
      keys:
        - stdout
        - stderr
        - "null"
    formatters:
      keys:
        - context
        - default
    logger_root:
      level: WARNING
      handlers: 'null'
    logger_octavia:
      level: WARNING
      handlers:
        - stdout
      qualname: octavia
    logger_amqp:
      level: WARNING
      handlers: stderr
      qualname: amqp
    logger_amqplib:
      level: WARNING
      handlers: stderr
      qualname: amqplib
    logger_eventletwsgi:
      level: WARNING
      handlers: stderr
      qualname: eventlet.wsgi.server
    logger_sqlalchemy:
      level: WARNING
      handlers: stderr
      qualname: sqlalchemy
    logger_boto:
      level: WARNING
      handlers: stderr
      qualname: boto
    handler_null:
      class: logging.NullHandler
      formatter: default
      args: ()
    handler_stdout:
      class: StreamHandler
      args: (sys.stdout,)
      formatter: context
    handler_stderr:
      class: StreamHandler
      args: (sys.stderr,)
      formatter: context
    formatter_context:
      class: oslo_log.formatters.ContextFormatter
    formatter_default:
      format: "%(message)s"
  rabbitmq:
    # NOTE(rk760n): add an rmq policy to mirror messages from notification
    # queues and set an expiration time on them
    policies:
      - vhost: "octavia"
        name: "ha_ttl_octavia"
        definition:
          # mirror messages to other nodes in the rmq cluster
          ha-mode: "all"
          ha-sync-mode: "automatic"
          # 70s
          message-ttl: 70000
        priority: 0
        apply-to: all
        pattern: '(notifications)\.'
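# NOTE: the policy above is intended to be applied against the octavia vhost
# by the chart's rabbit-init job. For reference only, a hand-applied
# equivalent (illustrative, not run by the chart) would be:
#   rabbitmqctl set_policy -p octavia ha_ttl_octavia '(notifications)\.' \
#     '{"ha-mode":"all","ha-sync-mode":"automatic","message-ttl":70000}' \
#     --priority 0 --apply-to all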
secrets:
  identity:
    admin: octavia-keystone-admin
    octavia: octavia-keystone-user
    test: octavia-keystone-test
  oslo_db:
    admin: octavia-db-admin
    octavia: octavia-db-user
  oslo_messaging:
    admin: octavia-rabbitmq-admin
    octavia: octavia-rabbitmq-user
  tls:
    load_balancer:
      api:
        public: octavia-tls-public
endpoints:
  cluster_domain_suffix: cluster.local
  local_image_registry:
    name: docker-registry
    namespace: docker-registry
    hosts:
      default: localhost
      internal: docker-registry
      node: localhost
    host_fqdn_override:
      default: null
    port:
      registry:
        node: 5000
  identity:
    name: keystone
    auth:
      admin:
        region_name: RegionOne
        username: admin
        password: password
        project_name: admin
        user_domain_name: default
        project_domain_name: default
      octavia:
        role: admin
        region_name: RegionOne
        username: octavia
        password: password
        project_name: service
        user_domain_name: service
        project_domain_name: service
      test:
        role: admin
        region_name: RegionOne
        username: test
        password: password
        project_name: test
        user_domain_name: service
        project_domain_name: service
    hosts:
      default: keystone
      internal: keystone-api
    host_fqdn_override:
      default: null
    path:
      default: /v3
    scheme:
      default: 'http'
    port:
      api:
        default: 80
        internal: 5000
  load_balancer:
    name: octavia
    hosts:
      default: octavia-api
      public: octavia
    host_fqdn_override:
      default: null
    path:
      default: null
    scheme:
      default: http
    port:
      api:
        default: 9876
        public: 80
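# NOTE: a public FQDN (and, together with the octavia-tls-public secret, TLS
# termination at the ingress) is driven from host_fqdn_override. A hedged
# override sketch, where the hostname is an example only:
#   endpoints:
#     load_balancer:
#       host_fqdn_override:
#         public:
#           host: octavia.example.com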
  oslo_db:
    auth:
      admin:
        username: root
        password: password
      octavia:
        username: octavia
        password: password
    hosts:
      default: mariadb
    host_fqdn_override:
      default: null
    path: /octavia
    scheme: mysql+pymysql
    port:
      mysql:
        default: 3306
  oslo_cache:
    auth:
      # NOTE(portdirect): this is used to define the value for keystone
      # authtoken cache encryption key, if not set it will be populated
      # automatically with a random value, but to take advantage of
      # this feature all services should be set to use the same key,
      # and memcache service.
      memcache_secret_key: null
    hosts:
      default: memcached
    host_fqdn_override:
      default: null
    port:
      memcache:
        default: 11211
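# NOTE: per the note above, leaving memcache_secret_key null yields a random
# per-release key. To share one cache-encryption key across services, set the
# same value in every chart's overrides, e.g. (value is a placeholder):
#   endpoints:
#     oslo_cache:
#       auth:
#         memcache_secret_key: some-shared-secret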
  oslo_messaging:
    auth:
      admin:
        username: rabbitmq
        password: password
      octavia:
        username: octavia
        password: password
    hosts:
      default: rabbitmq
    host_fqdn_override:
      default: null
    path: /octavia
    scheme: rabbit
    port:
      amqp:
        default: 5672
      http:
        default: 15672
  network:
    name: neutron
    hosts:
      default: neutron-server
      public: neutron
    host_fqdn_override:
      default: null
    path:
      default: null
    scheme:
      default: 'http'
    port:
      api:
        default: 9696
        public: 80
pod:
  user:
    octavia:
      uid: 42424
  affinity:
    anti:
      type:
        default: preferredDuringSchedulingIgnoredDuringExecution
      topologyKey:
        default: kubernetes.io/hostname
  mounts:
    octavia_api:
      init_container: null
      octavia_api:
        volumeMounts:
        volumes:
    octavia_worker:
      init_container: null
      octavia_worker:
        volumeMounts:
        volumes:
    octavia_housekeeping:
      init_container: null
      octavia_housekeeping:
        volumeMounts:
        volumes:
    octavia_health_manager:
      init_container: null
      octavia_health_manager:
        volumeMounts:
        volumes:
    octavia_bootstrap:
      init_container: null
      octavia_bootstrap:
        volumeMounts:
        volumes:
  replicas:
    api: 1
    worker: 1
    housekeeping: 1
  lifecycle:
    upgrades:
      deployments:
        revision_history: 3
        pod_replacement_strategy: RollingUpdate
        rolling_update:
          max_unavailable: 1
          max_surge: 3
      daemonsets:
        pod_replacement_strategy: RollingUpdate
        health_manager:
          enabled: true
          min_ready_seconds: 0
          max_unavailable: 1
    disruption_budget:
      api:
        min_available: 0
    termination_grace_period:
      api:
        timeout: 30
  resources:
    enabled: false
    api:
      requests:
        memory: "128Mi"
        cpu: "100m"
      limits:
        memory: "1024Mi"
        cpu: "2000m"
    worker:
      requests:
        memory: "128Mi"
        cpu: "100m"
      limits:
        memory: "1024Mi"
        cpu: "2000m"
    housekeeping:
      requests:
        memory: "128Mi"
        cpu: "100m"
      limits:
        memory: "1024Mi"
        cpu: "2000m"
    health_manager:
      requests:
        memory: "128Mi"
        cpu: "100m"
      limits:
        memory: "1024Mi"
        cpu: "2000m"
    jobs:
      bootstrap:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      rabbit_init:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      db_init:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      db_sync:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      db_drop:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      ks_endpoints:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      ks_service:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      ks_user:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      tests:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      image_repo_sync:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
network_policy:
  octavia:
    ingress:
      - {}
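# NOTE: the empty rule above admits all ingress traffic once the
# network_policy manifest is enabled below. A hedged sketch of narrowing it to
# a single namespace (the label value is illustrative):
#   network_policy:
#     octavia:
#       ingress:
#         - from:
#             - namespaceSelector:
#                 matchLabels:
#                   name: openstack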
manifests:
  configmap_bin: true
  configmap_etc: true
  daemonset_health_manager: true
  deployment_api: true
  deployment_worker: true
  deployment_housekeeping: true
  ingress_api: true
  job_bootstrap: true
  job_db_init: true
  job_db_sync: true
  job_db_drop: false
  job_image_repo_sync: true
  job_rabbit_init: true
  job_ks_endpoints: true
  job_ks_service: true
  job_ks_user: true
  pdb_api: true
  pod_rally_test: false
  network_policy: false
  secret_credential_keys: true
  secret_db: true
  secret_ingress_tls: true
  secret_keystone: true
  secret_rabbitmq: true
  service_ingress_api: true
  service_api: true
...