Since ansible 2.8, usage of dashes in group names is against convention, so by default Ansible issues a warning for such group names. This patch aims to finally rename groups for all new deployments, while making the transition for existing ones as smooth as possible. For existing deployments old definitions will be respected, and a new set of groups produced. It's up to operators to perform the clean-up of old groups. We also change the value of ANSIBLE_TRANSFORM_INVALID_GROUP_CHARS to show the warning, but convert group names to the new format. This is needed to ensure roles and playbooks will have the expected group names in them, even though this may negatively affect cases where OSA deploy hosts are also being used for internal operations. Depends-On: https://review.opendev.org/c/openstack/openstack-ansible-plugins/+/978458 Change-Id: I94907bcce7947df48f8cd19c3a79a8c7242dbc2d Signed-off-by: Dmitriy Rabotyagov <dmitriy.rabotyagov@cleura.com>
320 lines
7.2 KiB
Plaintext
---
|
|
cidr_networks:
|
|
management: 172.29.236.0/22
|
|
tunnel: 172.29.240.0/22
|
|
storage: 172.29.244.0/22
|
|
|
|
used_ips:
|
|
- "172.29.236.1,172.29.236.50"
|
|
- "172.29.240.1,172.29.240.50"
|
|
- "172.29.244.1,172.29.244.50"
|
|
- "172.29.248.1,172.29.248.50"
|
|
|
|
global_overrides:
|
|
internal_lb_vip_address: 172.29.236.9
|
|
#
|
|
# The below domain name must resolve to an IP address
|
|
# in the CIDR specified in haproxy_keepalived_external_vip_cidr.
|
|
# If using different protocols (https/http) for the public/internal
|
|
# endpoints the two addresses must be different.
|
|
#
|
|
external_lb_vip_address: openstack.example.com
|
|
management_bridge: "br-mgmt"
|
|
provider_networks:
|
|
- network:
|
|
container_bridge: "br-mgmt"
|
|
container_type: "veth"
|
|
container_interface: "eth1"
|
|
ip_from_q: "management"
|
|
type: "raw"
|
|
group_binds:
|
|
- all_containers
|
|
- hosts
|
|
is_management_address: true
|
|
- network:
|
|
container_bridge: "br-vxlan"
|
|
container_type: "veth"
|
|
container_interface: "eth10"
|
|
ip_from_q: "tunnel"
|
|
type: "geneve"
|
|
range: "1:1000"
|
|
net_name: "geneve"
|
|
group_binds:
|
|
- neutron_ovn_controller
|
|
- network:
|
|
container_bridge: "br-vlan"
|
|
container_type: "veth"
|
|
container_interface: "eth12"
|
|
host_bind_override: "eth12"
|
|
type: "flat"
|
|
net_name: "physnet1"
|
|
group_binds:
|
|
- neutron_ovn_controller
|
|
- network:
|
|
container_bridge: "br-vlan"
|
|
container_type: "veth"
|
|
container_interface: "eth11"
|
|
type: "vlan"
|
|
range: "101:200,301:400"
|
|
net_name: "physnet2"
|
|
group_binds:
|
|
- neutron_ovn_controller
|
|
- network:
|
|
container_bridge: "br-storage"
|
|
container_type: "veth"
|
|
container_interface: "eth2"
|
|
ip_from_q: "storage"
|
|
type: "raw"
|
|
group_binds:
|
|
- glance_api
|
|
- cinder_api
|
|
- cinder_volume
|
|
- nova_compute
|
|
|
|
###
|
|
### Infrastructure
|
|
###
|
|
|
|
# galera, memcache, rabbitmq, utility
|
|
shared_infra_hosts:
|
|
infra1:
|
|
ip: 172.29.236.11
|
|
infra2:
|
|
ip: 172.29.236.12
|
|
infra3:
|
|
ip: 172.29.236.13
|
|
|
|
# zookeeper
|
|
coordination_hosts:
|
|
infra1:
|
|
ip: 172.29.236.11
|
|
infra2:
|
|
ip: 172.29.236.12
|
|
infra3:
|
|
ip: 172.29.236.13
|
|
|
|
# repository (apt cache, python packages, etc)
|
|
repo_infra_hosts:
|
|
infra1:
|
|
ip: 172.29.236.11
|
|
infra2:
|
|
ip: 172.29.236.12
|
|
infra3:
|
|
ip: 172.29.236.13
|
|
|
|
# load balancer
|
|
# Ideally the load balancer should not use the Infrastructure hosts.
|
|
# Dedicated hardware is best for improved performance and security.
|
|
load_balancer_hosts:
|
|
infra1:
|
|
ip: 172.29.236.11
|
|
infra2:
|
|
ip: 172.29.236.12
|
|
infra3:
|
|
ip: 172.29.236.13
|
|
|
|
###
|
|
### OpenStack
|
|
###
|
|
|
|
# keystone
|
|
identity_hosts:
|
|
infra1:
|
|
ip: 172.29.236.11
|
|
infra2:
|
|
ip: 172.29.236.12
|
|
infra3:
|
|
ip: 172.29.236.13
|
|
|
|
# cinder api services
|
|
storage_infra_hosts:
|
|
infra1:
|
|
ip: 172.29.236.11
|
|
infra2:
|
|
ip: 172.29.236.12
|
|
infra3:
|
|
ip: 172.29.236.13
|
|
|
|
# glance
|
|
# The settings here are repeated for each infra host.
|
|
# They could instead be applied as global settings in
|
|
# user_variables, but are left here to illustrate that
|
|
# each container could have different storage targets.
|
|
image_hosts:
|
|
infra1:
|
|
ip: 172.29.236.11
|
|
container_vars:
|
|
limit_container_types: glance
|
|
glance_remote_client:
|
|
- what: "172.29.244.15:/images"
|
|
where: "/var/lib/glance/images"
|
|
type: "nfs"
|
|
options: "_netdev,auto"
|
|
infra2:
|
|
ip: 172.29.236.12
|
|
container_vars:
|
|
limit_container_types: glance
|
|
glance_remote_client:
|
|
- what: "172.29.244.15:/images"
|
|
where: "/var/lib/glance/images"
|
|
type: "nfs"
|
|
options: "_netdev,auto"
|
|
infra3:
|
|
ip: 172.29.236.13
|
|
container_vars:
|
|
limit_container_types: glance
|
|
glance_remote_client:
|
|
- what: "172.29.244.15:/images"
|
|
where: "/var/lib/glance/images"
|
|
type: "nfs"
|
|
options: "_netdev,auto"
|
|
|
|
# placement
|
|
placement_infra_hosts:
|
|
infra1:
|
|
ip: 172.29.236.11
|
|
infra2:
|
|
ip: 172.29.236.12
|
|
infra3:
|
|
ip: 172.29.236.13
|
|
|
|
# nova api, conductor, etc services
|
|
compute_infra_hosts:
|
|
infra1:
|
|
ip: 172.29.236.11
|
|
infra2:
|
|
ip: 172.29.236.12
|
|
infra3:
|
|
ip: 172.29.236.13
|
|
|
|
# heat
|
|
orchestration_hosts:
|
|
infra1:
|
|
ip: 172.29.236.11
|
|
infra2:
|
|
ip: 172.29.236.12
|
|
infra3:
|
|
ip: 172.29.236.13
|
|
|
|
# horizon
|
|
dashboard_hosts:
|
|
infra1:
|
|
ip: 172.29.236.11
|
|
infra2:
|
|
ip: 172.29.236.12
|
|
infra3:
|
|
ip: 172.29.236.13
|
|
|
|
# neutron api
|
|
network_infra_hosts:
|
|
infra1:
|
|
ip: 172.29.236.11
|
|
infra2:
|
|
ip: 172.29.236.12
|
|
infra3:
|
|
ip: 172.29.236.13
|
|
|
|
network_northd_hosts:
|
|
infra1:
|
|
ip: 172.29.236.11
|
|
infra2:
|
|
ip: 172.29.236.12
|
|
infra3:
|
|
ip: 172.29.236.13
|
|
|
|
# neutron ovn controller
|
|
network_gateway_hosts:
|
|
net1:
|
|
ip: 172.29.236.21
|
|
net2:
|
|
ip: 172.29.236.22
|
|
net3:
|
|
ip: 172.29.236.23
|
|
|
|
# ceilometer (telemetry data collection)
|
|
metering_infra_hosts:
|
|
infra1:
|
|
ip: 172.29.236.11
|
|
infra2:
|
|
ip: 172.29.236.12
|
|
infra3:
|
|
ip: 172.29.236.13
|
|
|
|
# aodh (telemetry alarm service)
|
|
metering_alarm_hosts:
|
|
infra1:
|
|
ip: 172.29.236.11
|
|
infra2:
|
|
ip: 172.29.236.12
|
|
infra3:
|
|
ip: 172.29.236.13
|
|
|
|
# gnocchi (telemetry metrics storage)
|
|
metrics_hosts:
|
|
infra1:
|
|
ip: 172.29.236.11
|
|
infra2:
|
|
ip: 172.29.236.12
|
|
infra3:
|
|
ip: 172.29.236.13
|
|
|
|
# nova hypervisors
|
|
compute_hosts:
|
|
compute1:
|
|
ip: 172.29.236.16
|
|
compute2:
|
|
ip: 172.29.236.17
|
|
|
|
# ceilometer compute agent (telemetry data collection)
|
|
metering_compute_hosts:
|
|
compute1:
|
|
ip: 172.29.236.16
|
|
compute2:
|
|
ip: 172.29.236.17
|
|
|
|
# cinder volume hosts (NFS-backed)
|
|
# The settings here are repeated for each infra host.
|
|
# They could instead be applied as global settings in
|
|
# user_variables, but are left here to illustrate that
|
|
# each container could have different storage targets.
|
|
storage_hosts:
|
|
infra1:
|
|
ip: 172.29.236.11
|
|
container_vars:
|
|
cinder_backends:
|
|
limit_container_types: cinder_volume
|
|
nfs_volume:
|
|
volume_backend_name: NFS_VOLUME1
|
|
volume_driver: cinder.volume.drivers.nfs.NfsDriver
|
|
nfs_mount_options: "rsize=65535,wsize=65535,timeo=1200,actimeo=120"
|
|
nfs_shares_config: /etc/cinder/nfs_shares
|
|
shares:
|
|
- ip: "172.29.244.15"
|
|
share: "/vol/cinder"
|
|
infra2:
|
|
ip: 172.29.236.12
|
|
container_vars:
|
|
cinder_backends:
|
|
limit_container_types: cinder_volume
|
|
nfs_volume:
|
|
volume_backend_name: NFS_VOLUME1
|
|
volume_driver: cinder.volume.drivers.nfs.NfsDriver
|
|
nfs_mount_options: "rsize=65535,wsize=65535,timeo=1200,actimeo=120"
|
|
nfs_shares_config: /etc/cinder/nfs_shares
|
|
shares:
|
|
- ip: "172.29.244.15"
|
|
share: "/vol/cinder"
|
|
infra3:
|
|
ip: 172.29.236.13
|
|
container_vars:
|
|
cinder_backends:
|
|
limit_container_types: cinder_volume
|
|
nfs_volume:
|
|
volume_backend_name: NFS_VOLUME1
|
|
volume_driver: cinder.volume.drivers.nfs.NfsDriver
|
|
nfs_mount_options: "rsize=65535,wsize=65535,timeo=1200,actimeo=120"
|
|
nfs_shares_config: /etc/cinder/nfs_shares
|
|
shares:
|
|
- ip: "172.29.244.15"
|
|
share: "/vol/cinder"
|