openstack-ansible/etc/openstack_deploy/openstack_user_config.yml.example
Commit f9d2d574b5 by git-harry: Update cinder services to kilo
The project is moving to support kilo in master. This requires that the
cinder galaxy role be updated to support installing the kilo release of
cinder.

This commit makes changes not added by the minimum viable kilo install
patch - https://review.openstack.org/#/c/166986/

Changes:

cinder.conf
- [DEFAULT] backup_metadata_version is now configurable because the
version has changed. The default is 2; in juno it was 1.
- [DEFAULT] client_socket_timeout is now configurable because the value
has changed. The default is 900; in juno it was 0.
- [profiler] profiler_enabled is now configurable but disabled by
default. Although this feature is part of juno, the api-paste.ini file
was not updated in os-a-d juno to make use of it.
- [profiler] trace_sqlalchemy is now configurable but disabled by
default.
- [DEFAULT] rabbit_port -> [oslo_messaging_rabbit] rabbit_port
- [DEFAULT] rabbit_userid -> [oslo_messaging_rabbit] rabbit_userid
- [DEFAULT] rabbit_password -> [oslo_messaging_rabbit] rabbit_password
- [DEFAULT] rabbit_hosts -> [oslo_messaging_rabbit] rabbit_hosts
- [DEFAULT] lock_path -> [oslo_concurrency] lock_path
- [DEFAULT] enable_v1_api is now configurable. The default is true.
This has been added because the v1 API is deprecated and will be
removed in liberty.
- [DEFAULT] enable_v2_api is now configurable. The default is true.
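
For illustration, the relocated options above land in dedicated oslo sections
of cinder.conf. A minimal sketch (values are placeholders, not the role's
defaults):

  [oslo_messaging_rabbit]
  rabbit_port = 5672
  rabbit_userid = cinder
  rabbit_password = secrete
  rabbit_hosts = 172.29.236.10:5672

  [oslo_concurrency]
  lock_path = /var/lock/cinder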

policy.json
- Update policy.json from the icehouse default to the kilo default. This
adds/modifies a number of rules and also updates the format of the file
to the current version.

api-paste.ini
- Add the osprofiler filter. This file is now deployed using a template
so that the hmac_keys configuration option can be set using the var
cinder_profiler_hmac_key.
- Replace deprecated middleware with the oslo_middleware versions.
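
A sketch of the templated filter, assuming the standard kilo osprofiler
factory (only hmac_keys is driven by the new variable):

  [filter:osprofiler]
  paste.filter_factory = osprofiler.web:WsgiMiddleware.factory
  hmac_keys = {{ cinder_profiler_hmac_key }}
  enabled = yes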

rootwrap.conf
- Update the file to match the kilo default.

volume.filters
- Update the file to match the kilo default.

The volume_driver var has been updated to use the new LVM driver class.
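(For reference, this presumably replaces the older
cinder.volume.drivers.lvm.LVMISCSIDriver class, which kilo deprecates in
favor of LVMVolumeDriver combined with a separate iSCSI target driver.)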

The permissions on the signing_dir, /var/cache/cinder, changed from 0755
to 0700 to fix a warning from keystonemiddleware.

Implements: blueprint master-kilofication
Change-Id: I91f2385969568b18635bc534a98138d3dd5c5af2
2015-04-03 18:03:17 +00:00

---
# Copyright 2014, Rackspace US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# This is the md5 of the environment file
# this will ensure consistency when deploying.
environment_version: 35946eced47eb8461f1eea62fa01bcf0

# User defined container networks in CIDR notation. The inventory generator
# assigns IP addresses to network interfaces inside containers from these
# ranges.
cidr_networks:
  # Management (same range as br-mgmt on the target hosts)
  management: 172.29.236.0/22
  # Service (optional, same range as br-snet on the target hosts)
  snet: 172.29.248.0/22
  # Tunnel endpoints for VXLAN tenant networks
  # (same range as br-vxlan on the target hosts)
  tunnel: 172.29.240.0/22
  # Storage (same range as br-storage on the target hosts)
  storage: 172.29.244.0/22

# User defined list of consumed IP addresses that may intersect
# with the provided CIDRs. If you want to reserve a range, give the
# lower and upper IP addresses of the range separated by a comma,
# e.g. "10.0.0.1,10.0.0.100".
used_ips:
  - 10.240.0.1,10.240.0.50
  - 172.29.244.1,172.29.244.50

# As a user you can define anything that you may wish to "globally"
# override from within the openstack_deploy configuration file. Anything
# specified here will take precedence over anything else anywhere.
global_overrides:
  # Internal management VIP address
  internal_lb_vip_address: 10.240.0.1
  # External DMZ VIP address
  external_lb_vip_address: 192.168.1.1
  # Bridged interface to use with tunnel type networks
  tunnel_bridge: "br-vxlan"
  # Bridged interface to build containers with
  management_bridge: "br-mgmt"
  # Define your add-on container networks.
  #   group_binds: bind a provided network to a particular group
  #   container_bridge: instructs the inventory where a bridge is plugged
  #                     into on the host side of a veth pair
  #   container_interface: interface name within a container
  #   ip_from_q: name of a CIDR to pull an IP address from
  #   type: networks must have a type; types are ["raw", "vxlan", "flat", "vlan"]
  #   range: optional value used in "vxlan" and "vlan" type networks
  #   net_name: optional value used in mapping network names used in neutron ml2
  # You must have a management network.
  provider_networks:
    - network:
        group_binds:
          - all_containers
          - hosts
        type: "raw"
        container_bridge: "br-mgmt"
        container_interface: "eth1"
        container_type: "veth"
        ip_from_q: "management"
        is_container_address: true
        is_ssh_address: true
    - network:
        group_binds:
          - glance_api
          - cinder_api
          - cinder_volume
          - nova_compute
          # - swift_proxy ## If you are using the storage network for swift_proxy add it to the group_binds
        type: "raw"
        container_bridge: "br-storage"
        container_type: "veth"
        container_interface: "eth2"
        ip_from_q: "storage"
    - network:
        group_binds:
          - glance_api
          - nova_compute
          - neutron_linuxbridge_agent
        type: "raw"
        container_bridge: "br-snet"
        container_type: "veth"
        container_interface: "eth3"
        ip_from_q: "snet"
    - network:
        group_binds:
          - neutron_linuxbridge_agent
        container_bridge: "br-vxlan"
        container_type: "veth"
        container_interface: "eth10"
        ip_from_q: "tunnel"
        type: "vxlan"
        range: "1:1000"
        net_name: "vxlan"
    - network:
        group_binds:
          - neutron_linuxbridge_agent
        container_bridge: "br-vlan"
        container_type: "veth"
        container_interface: "eth11"
        type: "vlan"
        range: "1:1"
        net_name: "vlan"
    - network:
        group_binds:
          - neutron_linuxbridge_agent
        container_bridge: "br-vlan"
        container_type: "veth"
        container_interface: "eth12"
        host_bind_override: "eth12"
        type: "flat"
        net_name: "flat"

# Shared infrastructure parts
shared-infra_hosts:
  infra1:
    ip: 10.240.0.100
  infra2:
    ip: 10.240.0.101
  infra3:
    ip: 10.240.0.102

# OpenStack Compute infrastructure parts
os-infra_hosts:
  infra1:
    ip: 10.240.0.100
  infra2:
    ip: 10.240.0.101
  infra3:
    ip: 10.240.0.102

# OpenStack Storage infrastructure parts
storage-infra_hosts:
  infra1:
    ip: 10.240.0.100
  infra2:
    ip: 10.240.0.101
  infra3:
    ip: 10.240.0.102

# Keystone Identity infrastructure parts
identity_hosts:
  infra1:
    ip: 10.240.0.100
  infra2:
    ip: 10.240.0.101
  infra3:
    ip: 10.240.0.102

# User defined Compute Hosts; this should be a required group
compute_hosts:
  compute1:
    ip: 10.240.0.103

# User defined Storage Hosts; this should be a required group
storage_hosts:
  cinder1:
    ip: 10.240.0.104
    # "container_vars" can be set outside of all other options as
    # host specific optional variables.
    container_vars:
      # If you would like to define a cinder availability zone, this can
      # be done with the namespaced variable.
      cinder_storage_availability_zone: cinderAZ_1
      # When creating more than ONE availability zone you should define a
      # sane default for the system to use when scheduling volume creation.
      cinder_default_availability_zone: cinderAZ_1
      # In this example we are defining which cinder volumes are
      # on a given host.
      cinder_backends:
        # If the "limit_container_types" argument is set within
        # the top level key of the provided option, the inventory
        # process will perform a string match on the container name using
        # the value found within the "limit_container_types" argument.
        # If any part of that value is found within the container
        # name, the options are appended as host_vars inside of inventory.
        limit_container_types: cinder_volume
        lvm:
          volume_group: cinder-volumes
          volume_driver: cinder.volume.drivers.lvm.LVMVolumeDriver
          volume_backend_name: LVM_iSCSI
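        # For reference, each key under "cinder_backends" (other than
        # "limit_container_types") is expected to be rendered as a named
        # backend section in cinder.conf; a rough sketch of the "lvm" entry
        # above, not the literal template output:
        #   [lvm]
        #   volume_group = cinder-volumes
        #   volume_driver = cinder.volume.drivers.lvm.LVMVolumeDriver
        #   volume_backend_name = LVM_iSCSI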
      # The ``cinder_nfs_client`` value is an optional component available
      # when configuring cinder.
      cinder_nfs_client:
        nfs_shares_config: /etc/cinder/nfs_shares
        shares:
          - { ip: "{{ ip_nfs_server }}", share: "/vol/cinder" }
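          # "{{ ip_nfs_server }}" is a placeholder Jinja variable; it is
          # assumed to be defined elsewhere by the deployer (for example in
          # user_variables.yml), not in this file.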
  cinder2:
    ip: 10.240.0.105
    container_vars:
      cinder_storage_availability_zone: cinderAZ_2
      cinder_default_availability_zone: cinderAZ_1
      cinder_backends:
        limit_container_types: cinder_volume
        lvm_ssd:
          volume_group: cinder-volumes
          volume_driver: cinder.volume.drivers.lvm.LVMVolumeDriver
          volume_backend_name: LVM_iSCSI_SSD

# User defined Logging Hosts; this should be a required group
log_hosts:
  logger1:
    ip: 10.240.0.107

# User defined Networking Hosts; this should be a required group
network_hosts:
  network1:
    ip: 10.240.0.108

# User defined Repository Hosts; this is an optional group
repo_hosts:
  infra1:
    ip: 10.240.0.100
  infra2:
    ip: 10.240.0.101
  infra3:
    ip: 10.240.0.102