# ansible-playbooks/playbookconfig/src/playbooks/host_vars/default.yml
---
# SYSTEM PROPERTIES
# =================
system_mode: simplex
# Configure the distributed cloud role; valid values are 'none',
# 'systemcontroller', and 'subcloud'. However, subclouds are automatically
# provisioned during their creation in dcmanager and are not meant to be
# configured manually by the user.
distributed_cloud_role: none
timezone: UTC
# At least one DNS server is required and a maximum of 3 servers is allowed.
dns_servers:
- 8.8.8.8
- 8.8.4.4
# NETWORK PROPERTIES
# ==================
#
# Unless specified in the host override file, the start and end addresses of
# each subnet are derived from the provided CIDR as follows:
# For pxeboot, management and cluster host subnets:
# - start address: index 2 of CIDR
# - end address: index -2 of CIDR
# e.g. management_subnet (provided/default): 192.168.204.0/28
# management_start_address (derived): 192.168.204.2
# management_end_address (derived): 192.168.204.14
#
# For cluster pod, cluster service, oam and multicast subnets:
# - start address: index 1 of CIDR
# - end address: index -2 of CIDR
# e.g. multicast_subnet (provided/default): 239.1.1.0/28
# multicast_start_address (derived): 239.1.1.1
# multicast_end_address (derived): 239.1.1.14
#
# Unless specified, the external_oam_node_0_address and external_oam_node_1_address
# are derived from the external_oam_floating_address as follows:
# external_oam_node_0_address: next address after external_oam_floating_address
# external_oam_node_1_address: next address after external_oam_node_0_address
# e.g. external_oam_floating_address (provided/default): 10.10.10.2
# external_oam_node_0_address (derived): 10.10.10.3
# external_oam_node_1_address (derived): 10.10.10.4
#
# These addresses are only applicable to duplex or duplex-direct system mode.
#
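# As an illustration only (the values below simply restate the derived
# defaults shown above), the management range could instead be pinned
# explicitly in a host override file:
#
#   management_start_address: 192.168.204.2
#   management_end_address: 192.168.204.14
#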
pxeboot_subnet: 169.254.202.0/24
# pxeboot_start_address:
# pxeboot_end_address:
management_subnet: 192.168.204.0/28
# management_start_address:
# management_end_address:
cluster_host_subnet: 192.168.206.0/24
# cluster_host_start_address:
# cluster_host_end_address:
cluster_pod_subnet: 172.16.0.0/16
# cluster_pod_start_address:
# cluster_pod_end_address:
cluster_service_subnet: 10.96.0.0/12
# cluster_service_start_address:
# cluster_service_end_address:
external_oam_subnet: 10.10.10.0/24
external_oam_gateway_address: 10.10.10.1
external_oam_floating_address: 10.10.10.2
# external_oam_start_address:
# external_oam_end_address:
# external_oam_node_0_address:
# external_oam_node_1_address:
management_multicast_subnet: 239.1.1.0/28
# management_multicast_start_address:
# management_multicast_end_address:
# Management network address allocation (true = dynamic, false = static)
management_dynamic_address_allocation: true
# Cluster-host network address allocation (true = dynamic, false = static)
cluster_host_dynamic_address_allocation: true
# DOCKER PROXIES
# ==============
#
# If the host OAM network is behind a proxy, Docker must be configured with
# the same proxy. When an http and/or https proxy is provided, a no-proxy
# address list can optionally be provided. This list will be added to the
# default no-proxy list derived from localhost, loopback, management and oam
# floating addresses at run time. Each address in the list must not contain
# a wildcard and must not be in subnet format.
# docker_http_proxy: http://proxy.com:1234
# docker_https_proxy: https://proxy.com:1234
# docker_no_proxy:
#   - 1.2.3.4
#   - 5.6.7.8
# DOCKER REGISTRIES
# =================
#
# docker_registries is a map of known registry keys and their source
# attributes. Each key is a fully scoped registry name and the same name is
# used as the default url attribute. Other attributes include username and
# password for authenticated registries. For instance, the k8s.gcr.io
# registry, which hosts Kubernetes related images, has a default registry
# url value of k8s.gcr.io.
#
# To overwrite a particular registry url, use the original registry name as
# the key followed by a custom IP address or domain for the value. If the
# registry is authenticated, also specify username and password,
# e.g.
#   docker_registries:
#     k8s.gcr.io:
#       url: my.k8sregistry.io
#       username: k8sreguser
#       password: K8sregPass*
#
# The "unified" is a special registry key. Defining and giving
# it a value implies all images are to be retrieved from this
# single source. Hence, registry values of all other registry keys
# if specified will be ignored.
#
# The docker registries map can be extended with new custom keys in
# the near future.
#
# The valid formats for a registry url value are:
# - domain (e.g. example.domain)
# - domain with port (e.g. example.domain:5000)
# - IPv4 address (e.g. 1.2.3.4)
# - IPv4 address with port (e.g. 1.2.3.4:5000)
# - IPv6 address (e.g. FD01::0100)
# - IPv6 address with port (e.g. [FD01::0100]:5000)
#
# The is_secure_registry parameter is only relevant when a unified registry
# is used.
docker_registries:
  k8s.gcr.io:
    url:
  gcr.io:
    url:
  quay.io:
    url:
  docker.io:
    url:
  # unified:
  #   url: example.domain
  #   is_secure_registry: True
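# As an illustrative sketch only (the registry address and credentials below
# are made-up placeholders), a single authenticated mirror could be configured
# through the unified key:
#
# docker_registries:
#   unified:
#     url: my.registry.example.com:5000
#     username: reguser
#     password: RegPass*
#     is_secure_registry: False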
# CERTIFICATES
# ============
#
# These values provide a means to install certificates onto the system.
# For example, the ssl_ca_cert parameter may be used to install a trusted CA
# on the system. A trusted CA certificate is required if the end user
# configures a private docker registry whose certificate is signed by an
# unknown Certificate Authority.
#
# The certificate value is the absolute path of the certificate file.
# The certificate must be in PEM format.
# The supported certificates are:
# - ssl_ca_cert
# - k8s_root_ca_cert
# - k8s_root_ca_key
#
# ssl_ca_cert: /path/to/ssl_ca_cert_file
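#
# As a further illustration (the paths below are placeholders), a custom
# Kubernetes root CA certificate and key could be supplied together:
#
# k8s_root_ca_cert: /path/to/k8s_root_ca_cert_file
# k8s_root_ca_key: /path/to/k8s_root_ca_key_file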
# KUBERNETES PARAMETERS
# =====================
#
# These values provide a means to specify different configuration parameters
# for Kubernetes.
#
# The apiserver_cert_sans parameter is a list of Subject Alternative Names
# that will be applied to the apiserver certificate. This value must be a
# list and each entry in the list must be a domain name or an IP address.
# apiserver_cert_sans:
#   - hostname.domain
#   - 198.51.100.75
# The apiserver_oidc values are used to configure the Kubernetes cluster for
# authentication with OpenID Connect. By default, apiserver_oidc is disabled.
# When all three required fields of the apiserver_oidc parameter are defined,
# the feature is considered active and their values are used to configure
# the Kubernetes cluster accordingly.
#
# apiserver_oidc:
#   client_id:
#   issuer_url:
#   username_claim:
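#
# As an illustrative sketch only (the client id, issuer url and claim below
# are made-up values for an assumed external OIDC provider):
#
# apiserver_oidc:
#   client_id: my-oidc-client
#   issuer_url: https://oidc.example.com/dex
#   username_claim: email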
# ADMIN CREDENTIALS
# =================
#
# WARNING: It is strongly recommended to store these settings in an Ansible
# vault file named "secret" under the override files directory. Configuration
# parameters stored in vault must start with the vault_ prefix
# (e.g. vault_admin_username, vault_admin_password).
#
admin_username: admin
admin_password: St8rlingX*
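# As an illustration only (the values below are placeholders), the "secret"
# vault file under the override files directory could carry:
#
#   vault_admin_username: admin
#   vault_admin_password: <your-admin-password>
#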
# INITIAL PASSWORD CHANGE RESPONSE SEQUENCE
# =========================================
#
# This parameter is only relevant when the target host is bootstrapped remotely
# and the user wishes to change the initial sysadmin password as part of the
# bootstrap.
#
# WARNING: It is strongly recommended to store this setting in an Ansible
# vault file named "secret" under the override files directory. Configuration
# parameters stored in vault must start with the vault_ prefix
# (e.g. vault_password_change_responses).
#
password_change_responses:
  yes/no: 'yes'
  sysadmin*: 'sysadmin'
  \(current\) UNIX password: 'sysadmin'
  (?i)New password: 'St8rlingX*'
  (?i)Retype new password: 'St8rlingX*'
# OVERRIDE FILES DIRECTORY
# ========================
#
# Default directory where user override file(s) can be found
#
override_files_dir: "{{ lookup('env', 'HOME') }}"
# BACKUP AND RESTORE
# ==================
#
# Location where the backup tar file is placed to perform the restore. This
# location must be specified on the command line via the ansible-playbook -e option.
initial_backup_dir:
# This variable refers to the tar file that is generated by the backup
# procedure and used in the restore phase. The filename must be specified
# on the command line via the ansible-playbook -e option.
backup_filename:
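# As an illustration only (the playbook path, backup directory and tarball
# name below are assumptions), both values could be passed on the command
# line like this:
#
#   ansible-playbook /path/to/the/restore/playbook.yml \
#     -e "initial_backup_dir=/home/sysadmin backup_filename=my_platform_backup.tgz"
#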
# Default directory where the backup tar file(s) can be found
# on the active controller
backup_dir: /opt/backups
# The platform backup tarball will be named in this format:
# <platform_backup_filename_prefix>_<timestamp>.tgz
#
platform_backup_filename_prefix: "{{ inventory_hostname }}_platform_backup"
# The stx-openstack application backup tarball will be named in this format:
# <openstack_backup_filename_prefix>_<timestamp>.tgz
#
openstack_backup_filename_prefix: "{{ inventory_hostname }}_openstack_backup"
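# For illustration, with inventory_hostname "controller-0" the platform backup
# tarball would be named controller-0_platform_backup_<timestamp>.tgz.
#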
# An indication of whether it is a full restore or a partial restore.
# true: a full restore where the storage partition(s) are wiped during the
# platform restore and Ceph data needs to be restored
# false: a partial restore where Ceph data remains intact during the restore
#
# This variable is used for StarlingX OpenStack application restore only
#
restore_ceph_data: false
# Default directory where the system backup tarballs fetched from the
# active controller can be found
#
host_backup_dir: "{{ lookup('env', 'HOME') }}"
# Flag file to indicate if platform restore is in progress
#
restore_in_progress_flag: /etc/platform/.restore_in_progress