---
# {{ ansible_managed }}
# You can use this file to override _any_ variable throughout Kolla.
# Additional options can be found in the
# 'kolla-ansible/ansible/group_vars/all.yml' file. The default values of all
# commented parameters are shown here. To override a default value, uncomment
# the parameter and change its value.
###############
# Kolla options
###############
# Valid options are [ COPY_ONCE, COPY_ALWAYS ]
config_strategy: "COPY_ALWAYS"
# Valid options are ['centos', 'debian', 'oraclelinux', 'rhel', 'ubuntu']
kolla_base_distro: "{{ kolla_base_distro }}"
# Valid options are [ binary, source ]
kolla_install_type: "{{ kolla_install_type }}"
# Valid option is a Docker repository tag
openstack_release: "{{ kolla_openstack_release }}"
# Location of configuration overrides
node_custom_config: "{{ kolla_node_custom_config_path }}"
# This should be a VIP, an unused IP on your network that will float between
# the hosts running keepalived for high-availability. If you want to run an
# All-In-One without haproxy and keepalived, you can set enable_haproxy to no
# in "OpenStack options" section, and set this value to the IP of your
# 'network_interface' as set in the Networking section below.
kolla_internal_vip_address: "{{ kolla_internal_vip_address }}"
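# Example (hypothetical all-in-one without HAProxy): set enable_haproxy to no
# in the "OpenStack options" section and use the host's own address on
# network_interface, e.g.:
#kolla_internal_vip_address: "192.168.33.10"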
# This is the DNS name that maps to the kolla_internal_vip_address VIP. By
# default it is the same as kolla_internal_vip_address.
kolla_internal_fqdn: "{{ kolla_internal_fqdn }}"
{% if kolla_external_vip_address %}
# This should be a VIP, an unused IP on your network that will float between
# the hosts running keepalived for high-availability. It defaults to the
# kolla_internal_vip_address, allowing internal and external communication to
# share the same address. Specify a kolla_external_vip_address to separate
# internal and external requests between two VIPs.
kolla_external_vip_address: "{{ kolla_external_vip_address }}"
{% endif %}
{% if kolla_external_fqdn %}
# The public address used to communicate with OpenStack, as set in the
# public_url of the endpoints that will be created. This DNS name should map to
# kolla_external_vip_address.
kolla_external_fqdn: "{{ kolla_external_fqdn }}"
{% endif %}
################
# Docker options
################
### Example: Private repository with authentication
#docker_registry: "172.16.0.10:4000"
{% if kolla_docker_registry %}
docker_registry: "{{ kolla_docker_registry }}"
{% endif %}
docker_namespace: "{{ kolla_docker_namespace }}"
{% if kolla_docker_registry_username %}
docker_registry_username: "{{ kolla_docker_registry_username }}"
{% endif %}
docker_storage_driver: "{{ docker_storage_driver }}"
docker_custom_config: {{ kolla_docker_custom_config | to_nice_json | indent(2) }}
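# Example (hypothetical): a Kayobe kolla_docker_custom_config value allowing an
# insecure local registry would render here roughly as:
#docker_custom_config: {
#    "insecure-registries": [
#      "172.16.0.10:4000"
#    ]
#  }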
#docker_configure_for_zun: "no"
###################
# Messaging options
###################
# Below is an example of a separate backend that provides brokerless
# messaging for oslo.messaging RPC communication
#om_rpc_transport: "amqp"
#om_rpc_user: "{% raw %}{{ qdrouterd_user }}{% endraw %}"
#om_rpc_password: "{% raw %}{{ qdrouterd_password }}{% endraw %}"
#om_rpc_port: "{% raw %}{{ qdrouterd_port }}{% endraw %}"
#om_rpc_group: "qdrouterd"
##############################
# Neutron - Networking Options
##############################
# This interface is what all your api services will be bound to by default.
# Additionally, all vxlan/tunnel and storage network traffic will go over this
# interface by default. This interface must contain an IPv4 address.
# It is possible for hosts to have non-matching interface names - these can
# be set in an inventory file per host or per group, or stored separately; see
# http://docs.ansible.com/ansible/intro_inventory.html
# Yet another way to work around the naming problem is to create a bond for the
# interface on all hosts and give the bond name here. A similar strategy can be
# followed for other types of interfaces.
#
# NOTE: Most network interfaces are configured via the inventory and are
# therefore not set here.
# Configure the address family (AF) per network.
# Valid options are [ ipv4, ipv6 ]
#network_address_family: "ipv4"
#api_address_family: "{% raw %}{{ network_address_family }}{% endraw %}"
#storage_address_family: "{% raw %}{{ network_address_family }}{% endraw %}"
#cluster_address_family: "{% raw %}{{ network_address_family }}{% endraw %}"
#swift_storage_address_family: "{% raw %}{{ storage_address_family }}{% endraw %}"
#swift_replication_address_family: "{% raw %}{{ swift_storage_address_family }}{% endraw %}"
#migration_address_family: "{% raw %}{{ network_address_family }}{% endraw %}"
#tunnel_address_family: "{% raw %}{{ network_address_family }}{% endraw %}"
#octavia_network_address_family: "{% raw %}{{ api_address_family }}{% endraw %}"
#bifrost_network_address_family: "{% raw %}{{ network_address_family }}{% endraw %}"
#dns_address_family: "{% raw %}{{ network_address_family }}{% endraw %}"
# Valid options are [ openvswitch, linuxbridge ]
neutron_plugin_agent: "openvswitch"
# Valid options are [ internal, infoblox ]
#neutron_ipam_driver: "internal"
# Configure the Neutron upgrade option. Kolla currently supports two upgrade
# strategies for Neutron: legacy_upgrade and rolling_upgrade.
# Setting "neutron_enable_rolling_upgrade: yes" enables rolling_upgrade;
# setting it to "no" selects legacy_upgrade.
# Rolling upgrade is enabled by default.
#neutron_enable_rolling_upgrade: "yes"
# Comma-separated names of neutron ML2 type drivers.
{% if kolla_neutron_ml2_type_drivers %}
neutron_type_drivers: {{ kolla_neutron_ml2_type_drivers | join(',') }}
{% endif %}
# Comma-separated types of tenant networks (should be listed in 'neutron_type_drivers')
# NOTE: for ironic this list should also contain 'flat'
{% if kolla_neutron_ml2_tenant_network_types %}
neutron_tenant_network_types: {{ kolla_neutron_ml2_tenant_network_types | join(',') }}
{% endif %}
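# Example (rendered output, assuming Kayobe sets kolla_neutron_ml2_type_drivers
# to [flat, vlan, vxlan] and kolla_neutron_ml2_tenant_network_types to
# [vlan, vxlan]):
#neutron_type_drivers: flat,vlan,vxlan
#neutron_tenant_network_types: vlan,vxlan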
####################
# keepalived options
####################
# Arbitrary unique number from 0..255
# This should be changed from the default in the event of a multi-region deployment
# where the VIPs of different regions reside on a common subnet.
#keepalived_virtual_router_id: "51"
###################
# Dimension options
###################
# This provides an extra option to deploy containers with resource constraints.
# We call these constraints dimensions here.
# The dimensions for each container are defined by a mapping, where each
# dimension value should be a string.
# Reference docs:
# https://docs.docker.com/config/containers/resource_constraints/
# e.g.:
# <container_name>_dimensions:
#   blkio_weight:
#   cpu_period:
#   cpu_quota:
#   cpu_shares:
#   cpuset_cpus:
#   cpuset_mems:
#   mem_limit:
#   mem_reservation:
#   memswap_limit:
#   kernel_memory:
#   ulimits:
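# Example (hypothetical): constrain memory and CPU shares of the nova_libvirt
# container:
#nova_libvirt_dimensions:
#  mem_limit: "4g"
#  cpu_shares: "512"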
#############
# TLS options
#############
# To provide encryption and authentication on the kolla_external_vip_interface,
# TLS can be enabled. When TLS is enabled, certificates must be provided to
# allow clients to perform authentication.
kolla_enable_tls_internal: {{ kolla_enable_tls_internal | bool }}
kolla_enable_tls_external: {{ kolla_enable_tls_external | bool }}
{% if kolla_external_tls_cert is not none and kolla_external_tls_cert | length > 0 %}
kolla_external_fqdn_cert: "{{ kolla_external_fqdn_cert }}"
{% endif %}
{% if kolla_internal_tls_cert is not none and kolla_internal_tls_cert | length > 0 %}
kolla_internal_fqdn_cert: "{{ kolla_internal_fqdn_cert }}"
{% endif %}
kolla_external_fqdn_cacert: "{{ kolla_external_fqdn_cacert }}"
kolla_internal_fqdn_cacert: "{{ kolla_internal_fqdn_cacert }}"
################
# Region options
################
# Use this option to change the name of this region.
#openstack_region_name: "RegionOne"
# Use this option to define a list of region names - only needs to be configured
# in a multi-region deployment, and then only in the *first* region.
#multiple_regions_names: ["{% raw %}{{ openstack_region_name }}{% endraw %}"]
###################
# OpenStack options
###################
# Use these options to set the various log levels across all OpenStack projects
# Valid options are [ True, False ]
openstack_logging_debug: {{ kolla_openstack_logging_debug | bool }}
# Valid options are [ none, novnc, spice, rdp ]
#nova_console: "novnc"
# OpenStack services can be enabled or disabled with these options
{% for feature_flag in kolla_feature_flags %}
{% if ('kolla_enable_' ~ feature_flag) in hostvars[inventory_hostname] %}
enable_{{ feature_flag }}: {{ hostvars[inventory_hostname]['kolla_enable_' ~ feature_flag] | bool }}
{% endif %}
{% endfor %}
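# These enable_* flags are generated from Kayobe's kolla_enable_* variables.
# For example, kolla_enable_cinder: true in Kayobe configuration renders as:
#enable_cinder: True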
##################
# RabbitMQ options
##################
# Options passed to RabbitMQ server startup script via the
# RABBITMQ_SERVER_ADDITIONAL_ERL_ARGS environment var.
# See Kolla Ansible docs RabbitMQ section for details.
# These are appended to args already provided by Kolla Ansible
# to configure IPv6 in RabbitMQ server.
#rabbitmq_server_additional_erl_args: ""
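# Example (a sketch using standard Erlang runtime flags; verify against the
# RabbitMQ runtime docs): limit the Erlang VM to two scheduler threads:
#rabbitmq_server_additional_erl_args: "+S 2:2"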
##############
# Ceph options
##############
# Ceph can be set up with a cache tier to improve performance. To use the cache
# you must provide disks separate from those used by the OSDs
#ceph_enable_cache: "no"
# Set to no if using external Ceph without cephx.
#external_ceph_cephx_enabled: "yes"
# Ceph is not able to determine the size of a cache pool automatically,
# so an absolute size must be configured here, otherwise flushing/evicting will not work.
#ceph_target_max_bytes: ""
#ceph_target_max_objects: ""
# Valid options are [ forward, none, writeback ]
#ceph_cache_mode: "writeback"
# Using erasure-coded pools requires a cache tier to be set up
# Valid options are [ erasure, replicated ]
#ceph_pool_type: "replicated"
# Integrate the Ceph RADOS object gateway with OpenStack Keystone
#enable_ceph_rgw_keystone: "no"
# Set the pg_num and pgp_num values for pools
# WARNING! These values are dependent on the size and shape of your cluster -
# the default values are not suitable for production use. Please refer to the
# Kolla Ceph documentation for more information.
#ceph_pool_pg_num: 8
#ceph_pool_pgp_num: 8
#############################
# Keystone - Identity Options
#############################
# Valid options are [ fernet ]
#keystone_token_provider: 'fernet'
#keystone_admin_user: "admin"
#keystone_admin_project: "admin"
# Interval at which to rotate fernet keys (in seconds). Must be one of
# 60(1 min), 120(2 min), 180(3 min), 240(4 min), 300(5 min), 360(6 min),
# 600(10 min), 720(12 min), 900(15 min), 1200(20 min), 1800(30 min),
# 3600(1 hour), 7200(2 hour), 10800(3 hour), 14400(4 hour), 21600(6 hour),
# 28800(8 hour), 43200(12 hour), 86400(1 day), 604800(1 week).
#fernet_token_expiry: 86400
########################
# Glance - Image Options
########################
# Configure image backend.
#glance_backend_ceph: "no"
#glance_backend_file: "yes"
#glance_backend_swift: "no"
#glance_backend_vmware: "no"
#enable_glance_image_cache: "no"
# Configure the glance upgrade option.
# As this feature is experimental in glance,
# the default value is "no".
#glance_enable_rolling_upgrade: "no"
####################
# Osprofiler options
####################
# valid values: ["elasticsearch", "redis"]
#osprofiler_backend: "elasticsearch"
##################
# Barbican options
##################
# Valid options are [ simple_crypto, p11_crypto ]
#barbican_crypto_plugin: "simple_crypto"
#barbican_library_path: "/usr/lib/libCryptoki2_64.so"
################
## Panko options
################
# Valid options are [ mysql ]
#panko_database_type: "mysql"
#################
# Gnocchi options
#################
# Valid options are [ file, ceph ]
#gnocchi_backend_storage: "{% raw %}{{ 'ceph' if enable_ceph|bool else 'file' }}{% endraw %}"
# Valid options are [redis, '']
#gnocchi_incoming_storage: "{% raw %}{{ 'redis' if enable_redis | bool else '' }}{% endraw %}"
################################
# Cinder - Block Storage Options
################################
# Enable / disable Cinder backends
#cinder_backend_ceph: "{% raw %}{{ enable_ceph }}{% endraw %}"
#cinder_backend_vmwarevc_vmdk: "no"
#cinder_volume_group: "cinder-volumes"
# Valid options are [ '', redis, etcd ]
#cinder_coordination_backend: "{% raw %}{{ 'redis' if enable_redis|bool else 'etcd' if enable_etcd|bool else '' }}{% endraw %}"
# Valid options are [ nfs, swift, ceph ]
#cinder_backup_driver: "ceph"
#cinder_backup_share: ""
#cinder_backup_mount_options_nfs: ""
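# Example (hypothetical NFS backup target):
#cinder_backup_driver: "nfs"
#cinder_backup_share: "192.168.0.10:/cinder_backup"
#cinder_backup_mount_options_nfs: "vers=4"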
#######################
# Cloudkitty options
#######################
# Valid option is gnocchi
#cloudkitty_collector_backend: "gnocchi"
# Valid options are 'sqlalchemy' or 'influxdb'. The default value is
# 'influxdb', which matches the default in Cloudkitty since the Stein release.
# When the backend is "influxdb", we also enable Influxdb.
# Also, when using 'influxdb' as the backend, we trigger the configuration/use
# of Cloudkitty storage backend version 2.
#cloudkitty_storage_backend: "influxdb"
###################
# Designate options
###################
# Valid options are [ bind9 ]
#designate_backend: "bind9"
#designate_ns_record: "sample.openstack.org"
# Valid options are [ '', redis, etcd ]
#designate_coordination_backend: "{% raw %}{{ 'redis' if enable_redis|bool else 'etcd' if enable_etcd|bool else '' }}{% endraw %}"
########################
# Nova - Compute Options
########################
#nova_backend_ceph: "{% raw %}{{ enable_ceph }}{% endraw %}"
# Valid options are [ qemu, kvm, vmware ]
#nova_compute_virt_type: "kvm"
# The number of fake drivers per compute node
#num_nova_fake_per_node: 5
# The flag "nova_safety_upgrade" need to be consider when
# "nova_enable_rolling_upgrade" is enabled. The "nova_safety_upgrade"
# controls whether the nova services are all stopped before rolling
# upgrade to the new version, for the safety and availability.
# If "nova_safety_upgrade" is "yes", that will stop all nova services (except
# nova-compute) for no failed API operations before upgrade to the
# new version. And opposite.
#nova_safety_upgrade: "no"
# Valid options are [ none, novnc, spice, rdp ]
#nova_console: "novnc"
#################
# Hyper-V options
#################
# Hyper-V can be used as a hypervisor
#hyperv_username: "user"
#hyperv_password: "password"
#vswitch_name: "vswitch"
# URL from which Nova Hyper-V MSI is downloaded
#nova_msi_url: "https://www.cloudbase.it/downloads/HyperVNovaCompute_Beta.msi"
#############################
# Horizon - Dashboard Options
#############################
#horizon_backend_database: "{% raw %}{{ enable_murano | bool }}{% endraw %}"
#############################
# Ironic options
#############################
{% if kolla_inspector_dhcp_pool_start and kolla_inspector_dhcp_pool_end %}
ironic_dnsmasq_dhcp_range: {{ kolla_inspector_dhcp_pool_start }},{{ kolla_inspector_dhcp_pool_end }}
{% endif %}
{% if kolla_inspector_default_gateway %}
ironic_dnsmasq_default_gateway: {{ kolla_inspector_default_gateway }}
{% endif %}
{% if kolla_inspector_extra_kernel_options %}
ironic_inspector_kernel_cmdline_extras:
{{ kolla_inspector_extra_kernel_options | to_nice_yaml }}
{% endif %}
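# For example, with kolla_inspector_dhcp_pool_start set to 192.168.33.100 and
# kolla_inspector_dhcp_pool_end set to 192.168.33.199 in Kayobe (hypothetical
# values), the rendered output would be:
#ironic_dnsmasq_dhcp_range: 192.168.33.100,192.168.33.199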
# PXE bootloader file for Ironic Inspector, relative to /tftpboot.
#ironic_dnsmasq_boot_file: "pxelinux.0"
# Configure the ironic upgrade option. Kolla currently supports two upgrade
# strategies for ironic: legacy_upgrade and rolling_upgrade.
# Setting "ironic_enable_rolling_upgrade: yes" enables rolling_upgrade;
# setting it to "no" selects legacy_upgrade.
# Rolling upgrade is enabled by default.
#ironic_enable_rolling_upgrade: "yes"
# List of extra kernel parameters passed to the kernel used during inspection
#ironic_inspector_kernel_cmdline_extras: []
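# Example (hypothetical): enable ironic-python-agent debug logging during
# inspection:
#ironic_inspector_kernel_cmdline_extras: ['ipa-debug=1']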
######################################
# Manila - Shared File Systems Options
######################################
# HNAS backend configuration
#hnas_ip:
#hnas_user:
#hnas_password:
#hnas_evs_id:
#hnas_evs_ip:
#hnas_file_system_name:
################################
# Swift - Object Storage Options
################################
# Swift expects block devices to be available for storage. Two types of storage
# are supported: 1 - a storage device with a special partition name and
# filesystem label, 2 - an unpartitioned disk with a filesystem. The label of
# this filesystem is used to detect the disk which Swift will be using.
# Swift supports two matching modes, valid options are [ prefix, strict ]
#swift_devices_match_mode: "strict"
# This parameter defines the matching pattern: if "strict" mode was selected
# for swift_devices_match_mode, then swift_devices_name should specify the name
# of the special swift partition, for example "KOLLA_SWIFT_DATA"; if "prefix"
# mode was selected, then swift_devices_name should specify a pattern matching
# the labels of filesystems prepared for swift.
#swift_devices_name: "KOLLA_SWIFT_DATA"
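# Example (prefix mode, assuming hypothetical filesystem labels swift_d1,
# swift_d2, ...):
#swift_devices_match_mode: "prefix"
#swift_devices_name: "swift_d"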
# Configure the swift upgrade option. Kolla currently supports two upgrade
# strategies for swift: legacy_upgrade and rolling_upgrade.
# Setting "swift_enable_rolling_upgrade: yes" enables rolling_upgrade;
# setting it to "no" selects legacy_upgrade.
# Rolling upgrade is enabled by default.
#swift_enable_rolling_upgrade: "yes"
################################################
# Tempest - The OpenStack Integration Test Suite
################################################
# The following values must be set when enabling tempest
#tempest_image_id:
#tempest_flavor_ref_id:
#tempest_public_network_id:
#tempest_floating_network_name:
# tempest_image_alt_id: "{% raw %}{{ tempest_image_id }}{% endraw %}"
# tempest_flavor_ref_alt_id: "{% raw %}{{ tempest_flavor_ref_id }}{% endraw %}"
###################################
# VMware - OpenStack VMware support
###################################
#vmware_vcenter_host_ip:
#vmware_vcenter_host_username:
#vmware_vcenter_host_password:
#vmware_datastore_name:
#vmware_vcenter_name:
#vmware_vcenter_cluster_name:
############
# Prometheus
############
#enable_prometheus_haproxy_exporter: "{% raw %}{{ enable_haproxy | bool }}{% endraw %}"
#enable_prometheus_mysqld_exporter: "{% raw %}{{ enable_mariadb | bool }}{% endraw %}"
#enable_prometheus_node_exporter: "{% raw %}{{ enable_prometheus | bool }}{% endraw %}"
#enable_prometheus_cadvisor: "{% raw %}{{ enable_prometheus | bool }}{% endraw %}"
#enable_prometheus_memcached: "{% raw %}{{ enable_prometheus | bool }}{% endraw %}"
#enable_prometheus_alertmanager: "{% raw %}{{ enable_prometheus | bool }}{% endraw %}"
#enable_prometheus_ceph_mgr_exporter: "{% raw %}{{ enable_prometheus | bool and enable_ceph | bool }}{% endraw %}"
#enable_prometheus_openstack_exporter: "{% raw %}{{ enable_prometheus | bool }}{% endraw %}"
#enable_prometheus_elasticsearch_exporter: "{% raw %}{{ enable_prometheus | bool and enable_elasticsearch | bool }}{% endraw %}"
#enable_prometheus_blackbox_exporter: "{% raw %}{{ enable_prometheus | bool }}{% endraw %}"
# List of extra command line parameters passed to prometheus. You can add as
# many as you like to the list.
#prometheus_cmdline_extras:
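# Example (hypothetical, using a standard Prometheus v2 flag):
#prometheus_cmdline_extras: "--storage.tsdb.retention.time=30d"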
#########
# Freezer
#########
# Freezer can utilize two different database backends: elasticsearch or mariadb.
# Elasticsearch is preferred, however it is not compatible with the version
# deployed by kolla-ansible. You must first set up an external elasticsearch
# 2.3.0 instance. By default, the mariadb deployed by kolla-ansible is used as
# the database backend.
#freezer_database_backend: "mariadb"
##########
# Telegraf
##########
# Configure telegraf to use the docker daemon itself as an input for
# telemetry data.
#telegraf_enable_docker_input: "no"
#####################################
# Bifrost - Bare Metal Provisioning
#####################################
# The Bifrost deployment image only supports a source installation.
bifrost_install_type: source
#################################
# Monasca - Monitoring & Alerting
#################################
# Monasca doesn't support binary type container builds
monasca_install_type: source
############################################
# Grafana - Data visualisation & Monitoring
############################################
# When using the Monasca fork of Grafana, if an OpenStack user with the
# same name as the admin user logs into Grafana, it overwrites user data
# in the Grafana database, breaking the local admin account and
# preventing admin API calls to Grafana. To reduce the chance of this
# happening, the local admin account is renamed here.
{% if kolla_enable_grafana and grafana_local_admin_user_name is defined %}
grafana_admin_username: "{{ grafana_local_admin_user_name }}"
{% endif %}
#########################################
# Bootstrap-servers - Host Configuration
#########################################
{% if kolla_selinux_state is not none %}
selinux_state: {{ kolla_selinux_state }}
{% endif %}
install_epel: {{ kolla_ansible_install_epel | bool }}
{% if kolla_enable_host_ntp is not none %}
enable_host_ntp: {{ kolla_enable_host_ntp | bool }}
{% endif %}
# Kayobe performs creation of the Kolla Ansible user account, so there is no
# need for Kolla Ansible to repeat this.
create_kolla_user: false
# User account to use for Kolla SSH access.
kolla_user: "{{ kolla_ansible_user }}"
# Primary group of Kolla SSH user.
kolla_group: "{{ kolla_ansible_group }}"
{% if kolla_ansible_target_venv %}
virtualenv: {{ kolla_ansible_target_venv }}
{% endif %}
{% if kolla_extra_globals %}
#######################
# Extra configuration
#######################
{{ kolla_extra_globals|to_nice_yaml }}
{% endif %}
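# Extra globals come from Kayobe's kolla_extra_globals variable. For example,
# setting the following in Kayobe's kolla.yml (hypothetical value):
#   kolla_extra_globals:
#     haproxy_client_timeout: "5m"
# would render here as:
#haproxy_client_timeout: 5m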