---
# Copyright 2014, Rackspace US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# This is the MD5 checksum of the environment file;
# it ensures consistency when deploying.
environment_version: 5e7155d022462c5a82384c1b2ed8b946
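# (If the environment file is modified, this checksum would typically be
# regenerated to match it, e.g. with "md5sum rpc_environment.yml"; the exact
# path is deployment specific.)
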
# User defined container networks in CIDR notation. The inventory generator
# assigns IP addresses to network interfaces inside containers from these
# ranges.
cidr_networks:
  # Management (same range as br-mgmt on the target hosts)
  container: 172.29.236.0/22
  # Service (optional, same range as br-snet on the target hosts)
  snet: 172.29.248.0/22
  # Tunnel endpoints for VXLAN tenant networks
  # (same range as br-vxlan on the target hosts)
  tunnel: 172.29.240.0/22
  # Storage (same range as br-storage on the target hosts)
  storage: 172.29.244.0/22
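  # The keys above (container, snet, tunnel, storage) are the names that
  # provider_networks entries reference via "ip_from_q" below.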

# User defined list of consumed IP addresses that may intersect
# with the provided CIDRs.
used_ips:
  - 172.29.236.1,172.29.236.50
  - 172.29.244.1,172.29.244.50
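  # A single address is also accepted as an entry, alongside the
  # "first,last" range form used above; for example (illustrative only):
  # - 172.29.248.1
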
# As a user you can define anything that you may wish to "globally"
# override from within the rpc_deploy configuration file. Anything
# specified here takes precedence over the same setting defined anywhere else.
global_overrides:
  # Internal management VIP address
  internal_lb_vip_address: 172.29.236.10
  # External DMZ VIP address
  external_lb_vip_address: 192.168.1.1
  # Name of the load balancer
  lb_name: lb_name_in_core
  # Bridged interface to use with tunnel type networks
  tunnel_bridge: "br-vxlan"
  # Bridged interface to build containers with
  management_bridge: "br-mgmt"
  # Define your add-on container networks.
  #   group_binds: bind a provided network to a particular group
  #   container_bridge: the bridge on the host side of the veth pair that
  #                     inventory plugs the container into
  #   container_interface: interface name within a container
  #   ip_from_q: name of a cidr to pull an IP address from
  #   type: networks must have a type; valid types are:
  #         ["raw", "vxlan", "flat", "vlan"]
  #   range: optional value used in "vxlan" and "vlan" type networks
  #   net_name: optional value used in mapping network names used in neutron ml2
  # You must have a management network.
  provider_networks:
    - network:
        group_binds:
          - all_containers
          - hosts
        type: "raw"
        container_bridge: "br-mgmt"
        container_interface: "eth1"
        ip_from_q: "container"
    - network:
        group_binds:
          - glance_api
          - cinder_api
          - cinder_volume
          - nova_compute
          # If you are using the storage network for swift_proxy, add it
          # to the group_binds:
          # - swift_proxy
        type: "raw"
        container_bridge: "br-storage"
        container_interface: "eth2"
        ip_from_q: "storage"
    - network:
        group_binds:
          - glance_api
          - nova_compute
          - neutron_linuxbridge_agent
        type: "raw"
        container_bridge: "br-snet"
        container_interface: "eth3"
        ip_from_q: "snet"
    - network:
        group_binds:
          - neutron_linuxbridge_agent
        container_bridge: "br-vxlan"
        container_interface: "eth10"
        ip_from_q: "tunnel"
        type: "vxlan"
        range: "1:1000"
        net_name: "vxlan"
    - network:
        group_binds:
          - neutron_linuxbridge_agent
        container_bridge: "br-vlan"
        container_interface: "eth11"
        type: "flat"
        net_name: "vlan"
    - network:
        group_binds:
          - neutron_linuxbridge_agent
        container_bridge: "br-vlan"
        container_interface: "eth11"
        type: "vlan"
        range: "1:1"
        net_name: "vlan"
  # Other options you may want
  debug: True

### Cinder default volume type option
# # This can be set to use a specific volume type. It is an optional
# # variable because you may have differently named volume types on
# # different hosts. If you choose to set it, use the name of one of
# # the volume types you have configured.
# cinder_default_volume_type: lvm
### Cinder default volume type option

# User defined Infrastructure Hosts; this is a required group
infra_hosts:
  infra1:
    ip: 10.240.0.100
  infra2:
    ip: 10.240.0.101
  infra3:
    ip: 10.240.0.102

# User defined Compute Hosts; this is a required group
compute_hosts:
  compute1:
    ip: 10.240.0.103
    host_vars:
      host_networks:
        - { type: raw, device_name: eth0, bond_master: bond0, bond_primary: true }
        - { type: raw, device_name: eth4, bond_master: bond0, bond_primary: false }
        - { type: vlan_tagged, device_name: bond0, tagged_device_name: bond0.2176 }
        - { type: vlan_tagged, device_name: bond1, tagged_device_name: bond1.1998 }
        - { type: bonded, device_name: bond0 }
        - { type: bridged, device_name: br-mgmt, bridge_ports: ["bond0.2176"], address: "172.29.236.103", netmask: "255.255.255.0", gateway: "172.29.236.1", dns_nameservers: ["69.20.0.164", "69.20.0.196"] }
        - { type: bridged, device_name: br-vxlan, bridge_ports: ["bond1.1998"], address: "172.29.240.103", netmask: "255.255.255.0" }
        - { type: bridged, device_name: br-vlan, bridge_ports: ["bond1"] }
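        # Note: bond1 is referenced by the tagged and bridged entries above,
        # but its member interfaces are not shown. In a real deployment they
        # would be declared like bond0's members; the interface names below
        # are assumptions, shown for illustration only:
        # - { type: raw, device_name: eth1, bond_master: bond1, bond_primary: true }
        # - { type: raw, device_name: eth5, bond_master: bond1, bond_primary: false }
        # - { type: bonded, device_name: bond1 }
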
# User defined Storage Hosts; this is a required group
storage_hosts:
  cinder1:
    ip: 172.29.236.104
    container_vars:
      cinder_storage_availability_zone: cinderAZ_1
      cinder_default_availability_zone: cinderAZ_1
      cinder_backends:
        limit_container_types: cinder_volume
        lvm:
          volume_group: cinder-volumes
          volume_driver: cinder.volume.drivers.lvm.LVMISCSIDriver
          volume_backend_name: LVM_iSCSI
  cinder2:
    ip: 172.29.236.105
    container_vars:
      cinder_storage_availability_zone: cinderAZ_2
      cinder_default_availability_zone: cinderAZ_1
      cinder_backends:
        limit_container_types: cinder_volume
        lvm_ssd:
          volume_group: cinder-volumes
          volume_driver: cinder.volume.drivers.lvm.LVMISCSIDriver
          volume_backend_name: LVM_iSCSI_SSD
  cinder3:
    ip: 10.240.0.106
    container_vars:
      cinder_storage_availability_zone: cinderAZ_3
      cinder_default_availability_zone: cinderAZ_1
      cinder_backends:
        limit_container_types: cinder_volume
        netapp:
          netapp_storage_family: ontap_7mode
          netapp_storage_protocol: iscsi
          netapp_server_hostname: "{{ cinder_netapp_hostname }}"
          netapp_server_port: 80
          netapp_login: "{{ cinder_netapp_username }}"
          netapp_password: "{{ cinder_netapp_password }}"
          volume_driver: cinder.volume.drivers.netapp.common.NetAppDriver
          volume_backend_name: NETAPP_iSCSI
          nfs_client:
            nfs_shares_config: /etc/cinder/nfs_shares
            shares:
              - { ip: "{{ cinder_netapp_hostname }}", share: "/vol/cinder" }
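              # Each share entry is rendered into the file named by
              # nfs_shares_config using cinder's "<ip>:<share>" form,
              # e.g. (hostname is illustrative): netapp.example.com:/vol/cinder
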
# User defined Logging Hosts; this is a required group
log_hosts:
  logger1:
    ip: 10.240.0.107

# User defined Networking Hosts; this is a required group
network_hosts:
  network1:
    ip: 10.240.0.108
    host_vars:
      host_networks:
        - { type: raw, device_name: eth0, bond_master: bond0, bond_primary: true }
        - { type: raw, device_name: eth4, bond_master: bond0, bond_primary: false }
        - { type: vlan_tagged, device_name: bond0, tagged_device_name: bond0.2176 }
        - { type: vlan_tagged, device_name: bond1, tagged_device_name: bond1.1998 }
        - { type: bonded, device_name: bond0 }
        - { type: bridged, device_name: br-mgmt, bridge_ports: ["bond0.2176"], address: "172.29.236.108", netmask: "255.255.255.0", gateway: "172.29.236.1", dns_nameservers: ["69.20.0.164", "69.20.0.196"] }
        - { type: bridged, device_name: br-vxlan, bridge_ports: ["bond1.1998"], address: "172.29.240.108", netmask: "255.255.255.0" }
        - { type: bridged, device_name: br-vlan, bridge_ports: ["bond1"] }

# Other hosts can be added whenever needed. Note that containers will not be
# assigned to "other" hosts by default. If you would like containers assigned
# to hosts outside of the predefined groups, you will need to edit the
# rpc_environment.yml file.
# haproxy_hosts:
#   haproxy1:
#     ip: 10.0.0.12
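
# As an illustrative sketch only (the group names below are assumptions, and
# the exact skeleton lives in rpc_environment.yml): a new host group is
# typically registered by pairing "*_hosts" and "*_containers" entries under
# physical_skel, e.g.:
# physical_skel:
#   haproxy_containers:
#     belongs_to:
#       - all_containers
#   haproxy_hosts:
#     belongs_to:
#       - hosts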