19ccee7dd4
Update docs so that they mention the nspawn removal. Also drop nspawn from the example configurations as much as we can. Change-Id: I45dc64ef785cc4a61d09af5ca9e5181dff066400

---
cidr_networks:
  container: 172.29.236.0/22
  tunnel: 172.29.240.0/22
  storage: 172.29.244.0/22

used_ips:
  - "172.29.236.1,172.29.236.50"
  - "172.29.240.1,172.29.240.50"
  - "172.29.244.1,172.29.244.50"
  - "172.29.248.1,172.29.248.50"

global_overrides:
  internal_lb_vip_address: 172.29.236.9
  #
  # The below domain name must resolve to an IP address
  # in the CIDR specified in haproxy_keepalived_external_vip_cidr.
  # If using different protocols (https/http) for the public/internal
  # endpoints the two addresses must be different.
  #
  external_lb_vip_address: openstack.example.com
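  #
  # Illustrative sketch only (not part of this file): the keepalived VIP
  # settings referenced above are normally defined in
  # /etc/openstack_deploy/user_variables.yml. The interface names below are
  # assumptions for this example topology.
  #
  #   haproxy_keepalived_external_vip_cidr: "<external VIP address>/32"
  #   haproxy_keepalived_internal_vip_cidr: "172.29.236.9/32"
  #   haproxy_keepalived_external_interface: bond1
  #   haproxy_keepalived_internal_interface: br-mgmt
  #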
  management_bridge: "br-mgmt"
  provider_networks:
    - network:
        container_bridge: "br-mgmt"
        container_type: "veth"
        container_interface: "eth1"
        ip_from_q: "container"
        type: "raw"
        group_binds:
          - all_containers
          - hosts
        is_container_address: true
    #
    # The below provider network defines details related to overlay traffic,
    # including the range of VXLAN VNIs to assign to project/tenant networks
    # and other attributes.
    #
    - network:
        container_bridge: "br-vxlan"
        container_type: "veth"
        container_interface: "eth10"
        ip_from_q: "tunnel"
        type: "vxlan"
        range: "1:1000"
        net_name: "vxlan"
        group_binds:
          - neutron_linuxbridge_agent
    #
    # The below provider network defines details related to a given provider
    # network: physnet1. Details include the name of the veth interface to
    # connect to the bridge when the agent's on_metal setting is False
    # (container_interface), or the physical interface to connect to the
    # bridge when on_metal is True (host_bind_override), as well as the
    # network type. The provider network name (net_name) will be used to
    # build a physical network mapping to a network interface using either
    # container_interface or host_bind_override (when defined).
    #
    # The network details will be used to populate the respective network
    # configuration file(s) on the members of the listed groups. In this
    # example, host_bind_override specifies the bond1 interface and applies
    # only to the members of the neutron_linuxbridge_agent inventory group.
    #
    - network:
        container_bridge: "br-vlan"
        container_type: "veth"
        container_interface: "eth11"
        host_bind_override: "bond1"
        type: "vlan"
        range: "101:200,301:400"
        net_name: "physnet1"
        group_binds:
          - neutron_linuxbridge_agent
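    #
    # For reference (not a key in this file): with the above definition, the
    # linuxbridge agent's physical network mapping resolves to
    # "physnet1:bond1" on metal hosts (via host_bind_override) and
    # "physnet1:eth11" inside containers (via container_interface).
    #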
    #
    # The below provider network defines details related to storage traffic.
    #
    - network:
        container_bridge: "br-storage"
        container_type: "veth"
        container_interface: "eth2"
        ip_from_q: "storage"
        type: "raw"
        group_binds:
          - glance_api
          - cinder_api
          - cinder_volume
          - nova_compute
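    #
    # Optional sketch (assumption, not part of this example): a provider
    # network entry can also carry static routes that are written into the
    # generated interface configuration, for example to reach a remote
    # storage subnet. The CIDR and gateway below are placeholders.
    #
    #     static_routes:
    #       - cidr: 10.176.0.0/12
    #         gateway: 172.29.244.1
    #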

###
### Infrastructure
###

# galera, memcache, rabbitmq, utility
shared-infra_hosts:
  infra1:
    ip: 172.29.236.11
  infra2:
    ip: 172.29.236.12
  infra3:
    ip: 172.29.236.13

# repository (apt cache, python packages, etc)
repo-infra_hosts:
  infra1:
    ip: 172.29.236.11
  infra2:
    ip: 172.29.236.12
  infra3:
    ip: 172.29.236.13

# load balancer
# Ideally the load balancer should not use the Infrastructure hosts.
# Dedicated hardware is best for improved performance and security.
haproxy_hosts:
  infra1:
    ip: 172.29.236.11
  infra2:
    ip: 172.29.236.12
  infra3:
    ip: 172.29.236.13

# rsyslog server
log_hosts:
  log1:
    ip: 172.29.236.14

###
### OpenStack
###

# keystone
identity_hosts:
  infra1:
    ip: 172.29.236.11
  infra2:
    ip: 172.29.236.12
  infra3:
    ip: 172.29.236.13

# cinder api services
storage-infra_hosts:
  infra1:
    ip: 172.29.236.11
  infra2:
    ip: 172.29.236.12
  infra3:
    ip: 172.29.236.13

# glance
# The settings here are repeated for each infra host.
# They could instead be applied as global settings in
# user_variables, but are left here to illustrate that
# each container could have different storage targets.
image_hosts:
  infra1:
    ip: 172.29.236.11
    container_vars:
      limit_container_types: glance
      glance_nfs_client:
        - server: "172.29.244.15"
          remote_path: "/images"
          local_path: "/var/lib/glance/images"
          type: "nfs"
          options: "_netdev,auto"
  infra2:
    ip: 172.29.236.12
    container_vars:
      limit_container_types: glance
      glance_nfs_client:
        - server: "172.29.244.15"
          remote_path: "/images"
          local_path: "/var/lib/glance/images"
          type: "nfs"
          options: "_netdev,auto"
  infra3:
    ip: 172.29.236.13
    container_vars:
      limit_container_types: glance
      glance_nfs_client:
        - server: "172.29.244.15"
          remote_path: "/images"
          local_path: "/var/lib/glance/images"
          type: "nfs"
          options: "_netdev,auto"
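#
# Illustrative only: if the same NFS target were applied globally instead of
# per host, the equivalent entry in /etc/openstack_deploy/user_variables.yml
# would look like the following (values copied from the example above):
#
#   glance_nfs_client:
#     - server: "172.29.244.15"
#       remote_path: "/images"
#       local_path: "/var/lib/glance/images"
#       type: "nfs"
#       options: "_netdev,auto"
#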

# nova api, conductor, etc services
compute-infra_hosts:
  infra1:
    ip: 172.29.236.11
  infra2:
    ip: 172.29.236.12
  infra3:
    ip: 172.29.236.13

# heat
orchestration_hosts:
  infra1:
    ip: 172.29.236.11
  infra2:
    ip: 172.29.236.12
  infra3:
    ip: 172.29.236.13

# horizon
dashboard_hosts:
  infra1:
    ip: 172.29.236.11
  infra2:
    ip: 172.29.236.12
  infra3:
    ip: 172.29.236.13

# neutron server, agents (L3, etc)
network_hosts:
  infra1:
    ip: 172.29.236.11
  infra2:
    ip: 172.29.236.12
  infra3:
    ip: 172.29.236.13

# ceilometer (telemetry data collection)
metering-infra_hosts:
  infra1:
    ip: 172.29.236.11
  infra2:
    ip: 172.29.236.12
  infra3:
    ip: 172.29.236.13

# aodh (telemetry alarm service)
metering-alarm_hosts:
  infra1:
    ip: 172.29.236.11
  infra2:
    ip: 172.29.236.12
  infra3:
    ip: 172.29.236.13

# gnocchi (telemetry metrics storage)
metrics_hosts:
  infra1:
    ip: 172.29.236.11
  infra2:
    ip: 172.29.236.12
  infra3:
    ip: 172.29.236.13

# nova hypervisors
compute_hosts:
  compute1:
    ip: 172.29.236.16
  compute2:
    ip: 172.29.236.17

# ceilometer compute agent (telemetry data collection)
metering-compute_hosts:
  compute1:
    ip: 172.29.236.16
  compute2:
    ip: 172.29.236.17

# cinder volume hosts (NFS-backed)
# The settings here are repeated for each infra host.
# They could instead be applied as global settings in
# user_variables, but are left here to illustrate that
# each container could have different storage targets.
storage_hosts:
  infra1:
    ip: 172.29.236.11
    container_vars:
      cinder_backends:
        limit_container_types: cinder_volume
        nfs_volume:
          volume_backend_name: NFS_VOLUME1
          volume_driver: cinder.volume.drivers.nfs.NfsDriver
          nfs_mount_options: "rsize=65535,wsize=65535,timeo=1200,actimeo=120"
          nfs_shares_config: /etc/cinder/nfs_shares
          shares:
            - ip: "172.29.244.15"
              share: "/vol/cinder"
  infra2:
    ip: 172.29.236.12
    container_vars:
      cinder_backends:
        limit_container_types: cinder_volume
        nfs_volume:
          volume_backend_name: NFS_VOLUME1
          volume_driver: cinder.volume.drivers.nfs.NfsDriver
          nfs_mount_options: "rsize=65535,wsize=65535,timeo=1200,actimeo=120"
          nfs_shares_config: /etc/cinder/nfs_shares
          shares:
            - ip: "172.29.244.15"
              share: "/vol/cinder"
  infra3:
    ip: 172.29.236.13
    container_vars:
      cinder_backends:
        limit_container_types: cinder_volume
        nfs_volume:
          volume_backend_name: NFS_VOLUME1
          volume_driver: cinder.volume.drivers.nfs.NfsDriver
          nfs_mount_options: "rsize=65535,wsize=65535,timeo=1200,actimeo=120"
          nfs_shares_config: /etc/cinder/nfs_shares
          shares:
            - ip: "172.29.244.15"
              share: "/vol/cinder"
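#
# Alternative sketch (assumption, not part of this example): an LVM-backed
# cinder volume host would replace the nfs_volume backend above with
# something like the following; the volume group name and iSCSI address are
# placeholders.
#
#   cinder_backends:
#     limit_container_types: cinder_volume
#     lvm:
#       volume_group: cinder-volumes
#       volume_driver: cinder.volume.drivers.lvm.LVMVolumeDriver
#       volume_backend_name: LVM_iSCSI
#       iscsi_ip_address: "172.29.244.16"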