Add documentation to user config file

Change-Id: I9c30d5beb4d5106ec544050c5b0216c5bf40cb44
Author: Matthew Kassawara 2015-05-05 14:57:00 -05:00
parent ac95fc29bf
commit b0933ed26b


@@ -12,217 +12,613 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Overview
# ========
#
# This file contains the configuration for OpenStack Ansible Deployment
# (OSAD) core services. Optional service configuration resides in the
# conf.d directory.
#
# You can customize the options in this file and copy it to
# /etc/openstack_deploy/openstack_user_config.yml or create a new
# file containing only necessary options for your environment
# before deployment.
#
# OSAD implements PyYAML to parse YAML files and therefore supports structure
# and formatting options that augment traditional YAML. For example, aliases
# or references. For more information on PyYAML, see the documentation at
#
# http://pyyaml.org/wiki/PyYAMLDocumentation
#
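# As a minimal sketch of the aliasing feature mentioned above, an anchor (&)
# labels a node and an alias (*) reuses it, so two host groups can share one
# definition. The hostnames and addresses here are illustrative only:
#
# shared-infra_hosts: &infra_hosts
#   infra1:
#     ip: 172.29.236.101
#   infra2:
#     ip: 172.29.236.102
#   infra3:
#     ip: 172.29.236.103
# repo-infra_hosts: *infra_hosts
#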
# Configuration reference
# =======================
#
# Level: cidr_networks (required)
# Contains an arbitrary list of networks for the deployment. For each network,
# the inventory generator uses the IP address range to create a pool of IP
# addresses for network interfaces inside containers. A deployment requires
# at least one network for management.
#
# Option: <value> (required, string)
# Name of network and IP address range in CIDR notation. This IP address
# range coincides with the IP address range of the bridge for this network
# on the target host.
#
# Example:
#
# Define networks for a typical deployment.
#
# - Management network on 172.29.236.0/22. Control plane for infrastructure
#   services, OpenStack APIs, and horizon.
# - Tunnel network on 172.29.240.0/22. Data plane for project (tenant) VXLAN
#   networks.
# - Storage network on 172.29.244.0/22. Data plane for storage services such
#   as cinder and swift.
#
# cidr_networks:
#   management: 172.29.236.0/22
#   tunnel: 172.29.240.0/22
#   storage: 172.29.244.0/22
#
# Example:
#
# Define additional service network on 172.29.248.0/22 for deployment in a
# Rackspace data center.
#
#   snet: 172.29.248.0/22
#
# --------
#
# Level: used_ips (optional)
# For each network in the 'cidr_networks' level, specify a list of IP addresses
# or a range of IP addresses that the inventory generator should exclude from
# the pools of IP addresses for network interfaces inside containers. To use a
# range, specify the lower and upper IP addresses (inclusive) with a comma
# separator.
#
# Example:
#
# The management network includes a router (gateway) on 172.29.236.1 and
# DNS servers on 172.29.236.11-12. The deployment includes seven target
# servers on 172.29.236.101-103, 172.29.236.111, 172.29.236.121, and
# 172.29.236.131. However, the inventory generator automatically excludes
# these IP addresses. Network policy at this particular example organization
# also reserves 231-254 in the last octet at the high end of the range for
# network device management.
#
# used_ips:
#   - 172.29.236.1
#   - 172.29.236.11,172.29.236.12
#   - 172.29.239.231,172.29.239.254
#
# --------
#
# Level: global_overrides (required)
# Contains global options that require customization for a deployment. For
# example, load balancer virtual IP addresses (VIP). This level also provides
# a mechanism to override other options defined in the playbook structure.
#
# Option: internal_lb_vip_address (required, string)
# Load balancer VIP for the following items:
#
# - Local package repository
# - Galera SQL database cluster
# - Administrative and internal API endpoints for all OpenStack services
# - Glance registry
# - Nova compute source of images
# - Cinder source of images
# - Instance metadata
#
# Option: external_lb_vip_address (required, string)
# Load balancer VIP for the following items:
#
# - Public API endpoints for all OpenStack services
# - Horizon
#
# Option: management_bridge (required, string)
# Name of management network bridge on target hosts. Typically 'br-mgmt'.
#
# Option: tunnel_bridge (optional, string)
# Name of tunnel network bridge on target hosts. Typically 'br-vxlan'.
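#
# Example:
#
# Define the load balancer VIPs and network bridges. A minimal sketch; the
# VIP addresses are illustrative values for the example networks above, not
# defaults:
#
# global_overrides:
#   internal_lb_vip_address: 172.29.236.10
#   external_lb_vip_address: 192.168.1.1
#   management_bridge: "br-mgmt"
#   tunnel_bridge: "br-vxlan"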
#
# Level: provider_networks (required)
# List of container and bare metal networks on target hosts.
#
# Level: network (required)
# Defines a container or bare metal network. Create a level for each
# network.
#
# Option: type (required, string)
# Type of network. Networks other than those for neutron such as
# management and storage typically use 'raw'. Neutron networks use
# 'flat', 'vlan', or 'vxlan'. Coincides with ML2 plug-in configuration
# options.
#
# Option: container_bridge (required, string)
# Name of unique bridge on target hosts to use for this network. Typical
# values include 'br-mgmt', 'br-storage', 'br-vlan', 'br-vxlan', etc.
#
# Option: container_interface (required, string)
# Name of unique interface in containers to use for this network.
# Typical values include 'eth1', 'eth2', etc.
#
# Option: container_type (required, string)
# Name of mechanism that connects interfaces in containers to the bridge
# on target hosts for this network. Typically 'veth'.
#
# Option: ip_from_q (optional, string)
# Name of network in 'cidr_networks' level to use for IP address pool. Only
# valid for 'raw' and 'vxlan' types.
#
# Option: is_container_address (required, boolean)
# If true, the load balancer uses this IP address to access services
# in the container. Only valid for networks with 'ip_from_q' option.
#
# Option: is_ssh_address (required, boolean)
# If true, Ansible uses this IP address to access the container via SSH.
# Only valid for networks with 'ip_from_q' option.
#
# Option: group_binds (required, string)
# List of one or more Ansible groups that contain this network. For more
# information, see the openstack_environment.yml file.
#
# Option: net_name (optional, string)
# Name of network for 'flat' or 'vlan' types. Only valid for these
# types. Coincides with ML2 plug-in configuration options.
#
# Option: range (optional, string)
# For 'vxlan' type neutron networks, range of VXLAN network identifiers
# (VNI). For 'vlan' type neutron networks, range of VLAN tags. Coincides
# with ML2 plug-in configuration options.
#
# Example:
#
# Define a typical network architecture:
#
# - Network of type 'raw' that uses the 'br-mgmt' bridge and 'management'
#   IP address pool. Maps to the 'eth1' device in containers. Applies to all
#   containers and bare metal hosts. Both the load balancer and Ansible
#   use this network to access containers and services.
# - Network of type 'raw' that uses the 'br-storage' bridge and 'storage'
#   IP address pool. Maps to the 'eth2' device in containers. Applies to
#   nova compute and all storage service containers. Optionally applies to
#   the swift proxy service.
# - Network of type 'vxlan' that contains neutron VXLAN tenant networks
#   1 to 1000 and uses the 'br-vxlan' bridge on target hosts. Maps to the
#   'eth10' device in containers. Applies to all neutron agent containers
#   and neutron agents on bare metal hosts.
# - Network of type 'vlan' that contains neutron VLAN networks 101 to 200
#   and uses the 'br-vlan' bridge on target hosts. Maps to the 'eth11' device
#   in containers. Applies to all neutron agent containers and neutron agents
#   on bare metal hosts.
# - Network of type 'flat' that contains one neutron flat network and uses
#   the 'br-vlan' bridge on target hosts. Maps to the 'eth12' device in
#   containers. Applies to all neutron agent containers and neutron agents
#   on bare metal hosts.
#
# Note: A pair of 'vlan' and 'flat' networks can use the same bridge because
# one only handles tagged frames and the other only handles untagged frames
# (the native VLAN in some parlance). However, additional 'vlan' or 'flat'
# networks require additional bridges.
#
# provider_networks:
#   - network:
#       group_binds:
#         - all_containers
#         - hosts
#       type: "raw"
#       container_bridge: "br-mgmt"
#       container_interface: "eth1"
#       container_type: "veth"
#       ip_from_q: "management"
#       is_container_address: true
#       is_ssh_address: true
#   - network:
#       group_binds:
#         - glance_api
#         - cinder_api
#         - cinder_volume
#         - nova_compute
#         # Uncomment the next line if using swift with a storage network.
#         # - swift_proxy
#       type: "raw"
#       container_bridge: "br-storage"
#       container_type: "veth"
#       container_interface: "eth2"
#       ip_from_q: "storage"
#   - network:
#       group_binds:
#         - neutron_linuxbridge_agent
#       container_bridge: "br-vxlan"
#       container_type: "veth"
#       container_interface: "eth10"
#       ip_from_q: "tunnel"
#       type: "vxlan"
#       range: "1:1000"
#       net_name: "vxlan"
#   - network:
#       group_binds:
#         - neutron_linuxbridge_agent
#       container_bridge: "br-vlan"
#       container_type: "veth"
#       container_interface: "eth11"
#       type: "vlan"
#       range: "101:200"
#       net_name: "vlan"
#   - network:
#       group_binds:
#         - neutron_linuxbridge_agent
#       container_bridge: "br-vlan"
#       container_type: "veth"
#       container_interface: "eth12"
#       host_bind_override: "eth12"
#       type: "flat"
#       net_name: "flat"
#
# Example:
#
# Add the service network to the previous example:
#
# - Network of type 'raw' that uses the 'br-snet' bridge and 'snet' IP
# address pool. Maps to the 'eth3' device in containers. Applies to
# glance, nova compute, neutron agent containers, and any of these
# services on bare metal hosts.
#
# provider_networks:
#   - network:
#       group_binds:
#         - glance_api
#         - nova_compute
#         - neutron_linuxbridge_agent
#       type: "raw"
#       container_bridge: "br-snet"
#       container_type: "veth"
#       container_interface: "eth3"
#       ip_from_q: "snet"
#
# --------
#
# Level: shared-infra_hosts (required)
# List of target hosts on which to deploy shared infrastructure services
# including the Galera SQL database cluster, RabbitMQ, and Memcached. Recommend
# three minimum target hosts for these services.
#
# Level: <value> (required, string)
# Hostname of a target host.
#
# Option: ip (required, string)
# IP address of this target host, typically the IP address assigned to
# the management bridge.
#
# Example:
#
# Define three shared infrastructure hosts:
#
# shared-infra_hosts:
#   infra1:
#     ip: 172.29.236.101
#   infra2:
#     ip: 172.29.236.102
#   infra3:
#     ip: 172.29.236.103
#
# --------
#
# Level: repo-infra_hosts (optional)
# List of target hosts on which to deploy the package repository. Recommend
# three minimum target hosts for this service. Typically contains the same
# target hosts as the 'shared-infra_hosts' level.
#
# Level: <value> (required, string)
# Hostname of a target host.
#
# Option: ip (required, string)
# IP address of this target host, typically the IP address assigned to
# the management bridge.
#
# Example:
#
# Define three package repository hosts:
#
# repo-infra_hosts:
#   infra1:
#     ip: 172.29.236.101
#   infra2:
#     ip: 172.29.236.102
#   infra3:
#     ip: 172.29.236.103
#
# --------
#
# Level: os-infra_hosts (required)
# List of target hosts on which to deploy the glance API, nova API, heat API,
# and horizon. Recommend three minimum target hosts for these services.
# Typically contains the same target hosts as 'shared-infra_hosts' level.
#
# Level: <value> (required, string)
# Hostname of a target host.
#
# Option: ip (required, string)
# IP address of this target host, typically the IP address assigned to
# the management bridge.
#
# Example:
#
# Define three OpenStack infrastructure hosts:
#
# os-infra_hosts:
#   infra1:
#     ip: 172.29.236.101
#   infra2:
#     ip: 172.29.236.102
#   infra3:
#     ip: 172.29.236.103
#
# --------
#
# Level: identity_hosts (required)
# List of target hosts on which to deploy the keystone service. Recommend
# three minimum target hosts for this service. Typically contains the same
# target hosts as the 'shared-infra_hosts' level.
#
# Level: <value> (required, string)
# Hostname of a target host.
#
# Option: ip (required, string)
# IP address of this target host, typically the IP address assigned to
# the management bridge.
#
# Example:
#
# Define three OpenStack identity hosts:
#
# identity_hosts:
#   infra1:
#     ip: 172.29.236.101
#   infra2:
#     ip: 172.29.236.102
#   infra3:
#     ip: 172.29.236.103
#
# --------
#
# Level: network_hosts (required)
# List of target hosts on which to deploy neutron services. Recommend three
# minimum target hosts for this service. Typically contains the same target
# hosts as the 'shared-infra_hosts' level.
#
# Level: <value> (required, string)
# Hostname of a target host.
#
# Option: ip (required, string)
# IP address of this target host, typically the IP address assigned to
# the management bridge.
#
# Example:
#
# Define three OpenStack network hosts:
#
# network_hosts:
#   infra1:
#     ip: 172.29.236.101
#   infra2:
#     ip: 172.29.236.102
#   infra3:
#     ip: 172.29.236.103
#
# --------
#
# Level: compute_hosts (required)
# List of target hosts on which to deploy the nova compute service. Recommend
# one minimum target host for this service. Typically contains target hosts
# that do not reside in other levels.
#
# Level: <value> (required, string)
# Hostname of a target host.
#
# Option: ip (required, string)
# IP address of this target host, typically the IP address assigned to
# the management bridge.
#
# Example:
#
# Define an OpenStack compute host:
#
# compute_hosts:
#   compute1:
#     ip: 172.29.236.111
#
# --------
#
# Level: storage-infra_hosts (required)
# List of target hosts on which to deploy the cinder API. Recommend three
# minimum target hosts for this service. Typically contains the same target
# hosts as the 'shared-infra_hosts' level.
#
# Level: <value> (required, string)
# Hostname of a target host.
#
# Option: ip (required, string)
# IP address of this target host, typically the IP address assigned to
# the management bridge.
#
# Example:
#
# Define three OpenStack storage infrastructure hosts:
#
# storage-infra_hosts:
#   infra1:
#     ip: 172.29.236.101
#   infra2:
#     ip: 172.29.236.102
#   infra3:
#     ip: 172.29.236.103
#
# --------
#
# Level: storage_hosts (required)
# List of target hosts on which to deploy the cinder volume service. Recommend
# one minimum target host for this service. Typically contains target hosts
# that do not reside in other levels.
#
# Level: <value> (required, string)
# Hostname of a target host.
#
# Option: ip (required, string)
# IP address of this target host, typically the IP address assigned to
# the management bridge.
#
# Level: container_vars (required)
# Contains storage options for this target host.
#
# Option: cinder_storage_availability_zone (optional, string)
# Cinder availability zone.
#
# Option: cinder_default_availability_zone (optional, string)
# If the deployment contains more than one cinder availability zone,
# specify a default availability zone.
#
# Level: cinder_backends (required)
# Contains cinder backends.
#
# Option: limit_container_types (optional, string)
# Substring to match against container names; these options apply only
# to containers whose names contain it. Typically any container with
# 'cinder_volume' in the name.
#
# Level: <value> (required, string)
# Arbitrary name of the backend. Each backend contains one or more
# options for the particular backend driver. The template for the
# cinder.conf file can generate configuration for any backend
# providing that it includes the necessary driver options.
#
# Option: volume_backend_name (required, string)
# Name of backend, arbitrary.
#
# The following options apply to the LVM backend driver:
#
# Option: volume_driver (required, string)
# Name of volume driver, typically
# 'cinder.volume.drivers.lvm.LVMVolumeDriver'.
#
# Option: volume_group (required, string)
# Name of LVM volume group, typically 'cinder-volumes'.
#
# The following options apply to the NFS backend driver:
#
# Option: volume_driver (required, string)
# Name of volume driver,
# 'cinder.volume.drivers.nfs.NfsDriver'.
#
# Option: nfs_shares_config (optional, string)
# File containing list of NFS shares available to cinder, typically
# '/etc/cinder/nfs_shares'.
#
# Option: nfs_mount_point_base (optional, string)
# Location in which to mount NFS shares, typically
# '$state_path/mnt'.
#
# The following options apply to the NetApp backend driver:
#
# Option: volume_driver (required, string)
# Name of volume driver,
# 'cinder.volume.drivers.netapp.common.NetAppDriver'.
#
# Option: netapp_storage_family (required, string)
# Access method, typically 'ontap_7mode' or 'ontap_cluster'.
#
# Option: netapp_storage_protocol (required, string)
# Transport method, typically 'iscsi' or 'nfs'. NFS transport also
# requires the 'nfs_shares_config' option.
#
# Option: nfs_shares_config (required, string)
# For NFS transport, name of the file containing shares. Typically
# '/etc/cinder/nfs_shares'.
#
# Option: netapp_server_hostname (required, string)
# NetApp server hostname.
#
# Option: netapp_server_port (required, integer)
# NetApp server port, typically 80 or 443.
#
# Option: netapp_login (required, string)
# NetApp server username.
#
# Option: netapp_password (required, string)
# NetApp server password.
#
# Level: cinder_nfs_client (optional)
# Automates management of the file that cinder references for a list of
# NFS mounts.
#
# Option: nfs_shares_config (required, string)
# File containing list of NFS shares available to cinder, typically
# '/etc/cinder/nfs_shares'.
#
# Level: shares (required)
# List of shares to populate the 'nfs_shares_config' file. Each share
# uses the following format:
#
# - { ip: "{{ ip_nfs_server }}", share: "/vol/cinder" }
#
# Example:
#
# Define an OpenStack storage host:
#
# storage_hosts:
#   storage1:
#     ip: 172.29.236.121
#
# Example:
#
# Use the LVM iSCSI backend in availability zone 'cinderAZ_1':
#
# container_vars:
#   cinder_storage_availability_zone: cinderAZ_1
#   cinder_default_availability_zone: cinderAZ_1
#   cinder_backends:
#     lvm:
#       volume_backend_name: LVM_iSCSI
#       volume_driver: cinder.volume.drivers.lvm.LVMVolumeDriver
#       volume_group: cinder-volumes
#     limit_container_types: cinder_volume
#
# Example:
#
# Use the NetApp iSCSI backend via Data ONTAP 7-mode in availability zone
# 'cinderAZ_2':
#
# container_vars:
#   cinder_storage_availability_zone: cinderAZ_2
#   cinder_default_availability_zone: cinderAZ_1
#   cinder_backends:
#     netapp:
#       volume_backend_name: NETAPP_iSCSI
#       volume_driver: cinder.volume.drivers.netapp.common.NetAppDriver
#       netapp_storage_family: ontap_7mode
#       netapp_storage_protocol: iscsi
#       netapp_server_hostname: hostname
#       netapp_server_port: 443
#       netapp_login: username
#       netapp_password: password
#
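# Example:
#
# Use the NFS backend in availability zone 'cinderAZ_1'. A minimal sketch
# assembled from the NFS options documented above; the 'nfs_volume' backend
# name and the 172.29.244.15 server address are illustrative placeholders:
#
# container_vars:
#   cinder_storage_availability_zone: cinderAZ_1
#   cinder_default_availability_zone: cinderAZ_1
#   cinder_backends:
#     nfs_volume:
#       volume_backend_name: NFS_VOLUME
#       volume_driver: cinder.volume.drivers.nfs.NfsDriver
#       nfs_shares_config: /etc/cinder/nfs_shares
#     limit_container_types: cinder_volume
#   cinder_nfs_client:
#     nfs_shares_config: /etc/cinder/nfs_shares
#     shares:
#       - { ip: "172.29.244.15", share: "/vol/cinder" }
#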
# --------
#
# Level: log_hosts (required)
# List of target hosts on which to deploy logging services. Recommend
# one minimum target host for this service.
#
# Level: <value> (required, string)
# Hostname of a target host.
#
# Option: ip (required, string)
# IP address of this target host, typically the IP address assigned to
# the management bridge.
#
# Example:
#
# Define a logging host:
#
# log_hosts:
#   log1:
#     ip: 172.29.236.131