# etc/kolla-kubernetes/kolla-kubernetes.yml
---
# Any config options specified here will overwrite anything in globals.yml
# at run time.
##############################
# Kolla Kubernetes options
##############################
# For now, set kolla_internal_vip_address in /etc/kolla/globals.yml to use as
# the IP address for all the services.
# kolla_internal_vip_address: "10.10.10.254"
# The address below is used in ALL public endpoints and serves as the entry
# point into the kolla-kubernetes cluster; it must be changed by the operator.
kolla_kubernetes_external_vip: "10.57.120.254"
kolla_kubernetes_external_subnet: "24"
kolla_kubernetes_namespace: "kolla"
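# All kolla services are created in this namespace; after deployment they
# can be inspected with, e.g. (a sketch, assuming kubectl is already
# configured for the cluster):
#   kubectl -n kolla get pods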
##############################
# Kolla Kubernetes labels
##############################
# Labels can be overridden like so:
#kolla_kubernetes_hostlabel_specific_thing:
#  key: kolla_other
#  value: 'true'
# To split your network nodes off from
# the controllers, do something like
# this:
#kolla_kubernetes_hostlabel_network:
#  key: kolla_network
#  value: 'true'
kolla_kubernetes_hostlabel_controller:
  key: kolla_controller
  value: 'true'
kolla_kubernetes_hostlabel_compute:
  key: kolla_compute
  value: 'true'
#kolla_kubernetes_hostlabel_storage_lvm:
#  key: kolla_storage
#  value: 'true'
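# For pods to schedule, the kubernetes nodes must actually carry the
# matching labels. A minimal sketch, assuming a node named "node1"
# (the node name is illustrative, not part of this file):
#   kubectl label node node1 kolla_controller=true kolla_compute=true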
########################
# Kubernetes Cluster
########################
keystone_replicas: "1" # may be > 1
memcached_replicas: "1" # must == 1 even for multinode
rabbitmq_replicas: "1" # must == 1 even for multinode
horizon_replicas: "1" # may be > 1
glance_api_replicas: "1" # may be > 1
glance_registry_replicas: "1" # must == 1 even for multinode
neutron_server_replicas: "1" # may be > 1
nova_api_replicas: "1"
nova_conductor_replicas: "1"
nova_scheduler_replicas: "1"
cinder_api_replicas: "1"
cinder_scheduler_replicas: "1"
# !!!ALERT!!! Changing the number of replicas for the cinder
# volume manager backed by ceph is not safe unless you are
# running at least Newton and have configured a lock manager.
# This is not done out of the box currently.
cinder_volume_ceph_replicas: "1"
nova_consoleauth_replicas: "1"
nova_novncproxy_replicas: "1"
# !!!ALERT!!! Changing the number of replicas for elasticsearch
# might cause issues and possible data corruption, as Kubernetes
# currently does not support multi-write access. This might
# change in the future.
elasticsearch_replicas: "1"
kibana_replicas: "1"
enable_openvswitch_tcp: "no"
enable_libvirt_tcp: "no"
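# For a multinode deployment, the services marked "may be > 1" above can
# simply have their replica counts raised, e.g. (a sketch; the values are
# illustrative):
#keystone_replicas: "3"
#horizon_replicas: "3"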
#################################
# Kubernetes Cluster DNS setting
#################################
dns_replicas: "1"
#dns_server_ip: ""
dns_domain_name: "openstack.kolla"
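# Assuming dns_domain_name is consumed by the cluster's kube-dns as its
# cluster domain, services resolve inside the cluster as
# <service>.<namespace>.svc.<dns_domain_name>, e.g. with the defaults
# above:
#   mariadb.kolla.svc.openstack.kolla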
########################
# Persistent Storage
########################
storage_provider: "host" # host, ceph, gce, aws
storage_provider_fstype: "ext4"
storage_ceph:
  # - WARNING: These sample defaults configure ceph access using the
  #   ceph "admin" user/key, because it involves the least amount of
  #   work for a user to get ceph volumes working. However, it is
  #   highly recommended that the operator create a dedicated ceph
  #   user/key with access only to the ceph pool to be used by this
  #   Kubernetes cluster.
  #
  # Kubernetes nodes act as ceph clients because they must mount ceph
  # volumes on behalf of pods. For a particular ceph user, there
  # are two ways to pass the ceph secret keyring to Kubernetes.
  #
  # 1) The ceph user secret keyring may be loaded as a kubernetes
  #    secret. The base64-encoded secret must be referenced by
  #    storage_ceph.key. To disable this method, comment out the
  #    storage_ceph.key definition or set the value to "". The
  #    encoded secret may be created with this command:
  #      $ ssh ceph-mon cat /etc/ceph/path/to/ceph.client.keyring \
  #          | grep key | awk '{print $3}' | base64
  #
  # 2) The ceph user secret keyring may be stored on the Kubernetes
  #    node's filesystem, and then referenced by
  #    storage_ceph.keyring. To disable this method, comment out
  #    the storage_ceph.keyring definition or set the value to "".
  #
  # If both configurations are defined, method 1) above takes
  # precedence over method 2). Prefer method 1) to avoid
  # provisioning the ceph key on every node, which is difficult if
  # using cloud provider auto-provisioning.
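  #
  # As recommended above, a dedicated ceph user restricted to a single
  # pool can be created with something like the following (a sketch; the
  # user name "client.kube" is illustrative):
  #   $ ceph auth get-or-create client.kube \
  #       mon 'allow r' osd 'allow rwx pool=rbd'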
  #
  # List of ceph monitor nodes
  monitors:
    - x.x.x.x
    - y.y.y.y
  # Default ceph user for authenticated access
  user: admin
  # The default pool to locate ceph volumes
  pool: rbd
  # Default user to use in order to run remote SSH commands
  # e.g. kolla-kubernetes may execute:
  #   ssh root@ceph-mon rbd create pool/resourceName --size 1024
  ssh_user: root
  # Any unique secret string within the kube env
  secretName: ceph-secret
  # The base64-encoded secret key which nodes need for mounting ceph volumes
  key: EXAMPLEEXAMPLEEXAMPLEEXAMPLEEXAMPLEEXAMPLE=
  # The ceph keyring file location on each kubernetes node's filesystem
  keyring: /etc/ceph/ceph.client.admin.keyring
  initial_mon: minikube
################################
# Persistent volumes sizes in GB
################################
#glance_volume_size: ""
keystone_auth_url: "http://keystone-admin:35357"
########################
# Glance variables
########################
openstack_glance_auth: "{'auth_url':'{{ keystone_auth_url }}','username':'{{ openstack_auth.username }}','password':'$KEYSTONE_ADMIN_PASSWORD','project_name':'{{ openstack_auth.project_name }}','domain_name':'default'}"
glance_admin_endpoint: "http://glance-api:{{ glance_api_port }}"
glance_public_endpoint: "http://{{ kolla_kubernetes_external_vip }}:{{ glance_api_port }}"
glance_internal_endpoint: "http://glance-api:{{ glance_api_port }}"
########################
# Neutron variables
########################
openstack_neutron_auth: "{'auth_url':'{{ keystone_auth_url }}','username':'{{ openstack_auth.username }}','password':'$KEYSTONE_ADMIN_PASSWORD','project_name':'{{ openstack_auth.project_name }}','domain_name':'default'}"
neutron_admin_endpoint: "http://neutron-server:{{ neutron_server_port }}"
neutron_public_endpoint: "http://{{ kolla_kubernetes_external_vip }}:{{ neutron_server_port }}"
neutron_internal_endpoint: "http://neutron-server:{{ neutron_server_port }}"
########################
# Keystone variables
########################
keystone_admin_url: "{{ admin_protocol }}://keystone-admin:{{ keystone_admin_port }}/v3"
keystone_internal_url: "{{ internal_protocol }}://keystone-internal:{{ keystone_public_port }}/v3"
keystone_public_url: "{{ public_protocol }}://{{ kolla_kubernetes_external_vip }}:{{ keystone_public_port }}/v3"
keystone_database_address: "mariadb"
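# These URLs are registered in the keystone service catalog; after
# deployment they can be checked with, e.g. (a sketch, assuming the
# openstack CLI and admin credentials are available):
#   openstack endpoint list --service identity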
########################
# NOVA variables
########################
openstack_nova_auth: "{'auth_url':'{{ keystone_auth_url }}','username':'{{ openstack_auth.username }}','password':'$KEYSTONE_ADMIN_PASSWORD','project_name':'{{ openstack_auth.project_name }}','domain_name':'default'}"
nova_admin_endpoint: "http://nova-api:{{ nova_api_port }}/v2/%(tenant_id)s"
nova_public_endpoint: "http://{{ kolla_kubernetes_external_vip }}:{{ nova_api_port }}/v2/%(tenant_id)s"
nova_internal_endpoint: "http://nova-api:{{ nova_api_port }}/v2/%(tenant_id)s"
novncproxy_base_url: "{{ public_protocol }}://{{ kolla_kubernetes_external_vip }}:{{ nova_novncproxy_port }}/vnc_auto.html"
########################
# Cinder variables
########################
openstack_cinder_auth: "{'auth_url':'{{ keystone_auth_url }}','username':'{{ openstack_auth.username }}','password':'$KEYSTONE_ADMIN_PASSWORD','project_name':'{{ openstack_auth.project_name }}','domain_name':'default'}"
cinder_admin_endpoint: "http://cinder-api:{{ cinder_api_port }}/v1/%(tenant_id)s"
cinder_public_endpoint: "http://{{ kolla_kubernetes_external_vip }}:{{ cinder_api_port }}/v1/%(tenant_id)s"
cinder_internal_endpoint: "http://cinder-api:{{ cinder_api_port }}/v1/%(tenant_id)s"
cinder_admin_endpointv2: "http://cinder-api:{{ cinder_api_port }}/v2/%(tenant_id)s"
cinder_public_endpointv2: "http://{{ kolla_kubernetes_external_vip }}:{{ cinder_api_port }}/v2/%(tenant_id)s"
cinder_internal_endpointv2: "http://cinder-api:{{ cinder_api_port }}/v2/%(tenant_id)s"
#######################
# The configuration below allows multiple backends to be configured for
# Cinder. At this point only the iSCSI/LVM2 backend is supported. For
# Cinder to use the iSCSI/LVM2 backend, the IP address of the server
# hosting the volume group as well as the VG name must be specified in
# the following format. iscsi_target_1 and iscsi_target_2 are just names
# and can be changed as long as they are unique. For each ip/vg_name pair,
# cinder.conf will have a new section created, and an autogenerated
# backend name of the form "ip"_"vg_name" will be added to the
# "enabled_backends" parameter. For example, with these two iscsi targets,
# cinder.conf automatically gets these lines:
#[DEFAULT]
#enabled_backends = {already existing backends},10.57.120.14_cinder-volumes,10.57.120.13_local-volumes
#
#[10.57.120.14_cinder-volumes]
#volume_group = cinder-volumes
#volume_backend_name = 10.57.120.14_cinder-volumes
#volume_driver = cinder.volume.drivers.lvm.LVMVolumeDriver
#iscsi_helper = tgtadm
#iscsi_protocol = iscsi
#
#[10.57.120.13_local-volumes]
#volume_group = local-volumes
#volume_backend_name = 10.57.120.13_local-volumes
#volume_driver = cinder.volume.drivers.lvm.LVMVolumeDriver
#iscsi_helper = tgtadm
#iscsi_protocol = iscsi
#######################
cinder_storage_list:
  - lvm_backends:
      - iscsi_target_1:
          ip: "10.57.120.14"
          vg_name: "cinder-volumes"
      - iscsi_target_2:
          ip: "10.57.120.13"
          vg_name: "local-volumes"
storage_interface: "eth0"
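# The volume groups referenced above must already exist on the target
# hosts. A minimal sketch for creating one (the device /dev/sdb is
# illustrative):
#   pvcreate /dev/sdb
#   vgcreate cinder-volumes /dev/sdb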
########################
# Workaround variables
########################
kolla_kubernetes_ovs_setup_bridge: "yes"
kolla_kubernetes_ovs_add_port: "yes"
########################
# Ceph variables
########################
# WARNING! Reminder: this ceph setup is only intended for testing.
ceph_osd_journal_dev:
- "/dev/loop0p1"
- "/dev/loop1p1"
ceph_osd_data_dev:
- "/dev/loop0p2"
- "/dev/loop1p2"
########################
# Rabbitmq variables
########################
rabbitmq_management_external: "no"
########################
# Nodeport variables
########################
#horizon_http_nodeport: "30080"
#horizon_https_nodeport: "30433"
#cinder_api_port_nodeport: "30877"
#rabbitmq_management_nodeport: "30877"
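# NodePort values must fall within the Kubernetes service-node-port-range
# (30000-32767 by default).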
########################
# List of PODs with disabled logging
########################
log_disable:
- memcached
- nova-consoleauth
- openvswitch-ovsdb-compute
- openvswitch-ovsdb-network
- openvswitch-vswitchd-compute
- openvswitch-vswitchd-network
- neutron-dhcp-agent
- kube-dns-v11
- iscsi-iscsid
- iscsi-tgtd
- elasticsearch
- keepalived