d3cfb2052a
Due to poor planning of our variable names we have a situation where "internal_address" must be a VIP, while "external_address" should be a DNS name. Now, with two VIPs, "external_vip_address" is a new variable. This corrects that issue by deprecating kolla_internal_address and replacing it with four clearly named variables:

kolla_internal_vip_address
kolla_internal_fqdn
kolla_external_vip_address
kolla_external_fqdn

The default behaviour remains the same: because of the way the variable inheritance is set up, kolla_internal_address can still be set in globals.yml and propagate out to these four new variables as it normally would, but every other reference to kolla_internal_address has been removed.

Change-Id: I4556dcdbf4d91a8d2751981ef9c64bad44a719e5
Partially-Implements: blueprint ssl-kolla
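As a rough sketch of the backwards-compatible behaviour described above (the file path, the example VIP value and the exact chaining of defaults are illustrative assumptions, not taken from this change), the propagation could be expressed with Jinja defaults in Ansible group_vars:

    # ansible/group_vars/all.yml -- hypothetical sketch
    # Deprecated variable; still honoured when set in globals.yml.
    kolla_internal_address: "10.10.10.254"

    # The four new variables default to the deprecated one, so an existing
    # globals.yml keeps working unchanged.
    kolla_internal_vip_address: "{{ kolla_internal_address }}"
    kolla_internal_fqdn: "{{ kolla_internal_vip_address }}"
    kolla_external_vip_address: "{{ kolla_internal_vip_address }}"
    kolla_external_fqdn: "{{ kolla_external_vip_address }}"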
88 lines
2.7 KiB
Django/Jinja
[DEFAULT]
debug = {{ cinder_logging_debug }}

log_dir = /var/log/kolla/cinder
use_forwarded_for = true

# Set use_stderr to False or the logs will also be sent to stderr
# and collected by Docker
use_stderr = False

enable_v1_api = false
volume_name_template = %s
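# Glance API endpoint, reached over the internal protocol and FQDN/VIP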
glance_api_servers = {{ internal_protocol }}://{{ kolla_internal_fqdn }}:{{ glance_api_port }}
glance_api_version = 2

os_region_name = {{ openstack_region_name }}

{% if cinder_volume_driver == "lvm" %}
default_volume_type = lvmdriver-1
enabled_backends = lvmdriver-1
{% elif cinder_volume_driver == "ceph" %}
default_volume_type = rbd-1
enabled_backends = rbd-1
{% endif %}

{% if service_name == "cinder-backup" and cinder_volume_driver == "ceph" %}
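# Ceph backup driver settings, rendered only for the cinder-backup service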
backup_driver = cinder.backup.drivers.ceph
backup_ceph_conf = /etc/ceph/ceph.conf
backup_ceph_user = cinder-backup
backup_ceph_chunk_size = 134217728
backup_ceph_pool = {{ ceph_cinder_backup_pool_name }}
backup_ceph_stripe_unit = 0
backup_ceph_stripe_count = 0
restore_discard_excess_bytes = true
{% endif %}
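# Bind the volume API to this host's address on the api_interface network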
osapi_volume_listen = {{ hostvars[inventory_hostname]['ansible_' + api_interface]['ipv4']['address'] }}
osapi_volume_listen_port = {{ cinder_api_port }}

api_paste_config = /etc/cinder/api-paste.ini
nova_catalog_info = compute:nova:internalURL

auth_strategy = keystone

[database]
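# SQLAlchemy connection string for the Cinder database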
connection = mysql+pymysql://{{ cinder_database_user }}:{{ cinder_database_password }}@{{ cinder_database_address }}/{{ cinder_database_name }}

[keystone_authtoken]
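# Service credentials Cinder uses to validate user tokens with Keystone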
auth_uri = {{ internal_protocol }}://{{ kolla_internal_fqdn }}:{{ keystone_public_port }}
auth_url = {{ admin_protocol }}://{{ kolla_internal_fqdn }}:{{ keystone_admin_port }}
auth_type = password
project_domain_id = default
user_domain_id = default
project_name = service
username = {{ cinder_keystone_user }}
password = {{ cinder_keystone_password }}

[oslo_concurrency]
lock_path = /var/lib/cinder/tmp

[oslo_messaging_rabbit]
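# RabbitMQ connection settings; rabbit_hosts is built from the rabbitmq inventory group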
rabbit_userid = {{ rabbitmq_user }}
rabbit_password = {{ rabbitmq_password }}
rabbit_ha_queues = true
rabbit_hosts = {% for host in groups['rabbitmq'] %}{{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}:{{ rabbitmq_port }}{% if not loop.last %},{% endif %}{% endfor %}
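# Per-backend sections referenced by enabled_backends above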
{% if cinder_volume_driver == "lvm" %}
[lvmdriver-1]
lvm_type = default
volume_group = cinder-volumes
volume_driver = cinder.volume.drivers.lvm.LVMVolumeDriver
volume_backend_name = lvmdriver-1
{% elif cinder_volume_driver == "ceph" %}
[rbd-1]
volume_driver = cinder.volume.drivers.rbd.RBDDriver
rbd_pool = {{ ceph_cinder_pool_name }}
rbd_ceph_conf = /etc/ceph/ceph.conf
rbd_flatten_volume_from_snapshot = false
rbd_max_clone_depth = 5
rbd_store_chunk_size = 4
rados_connect_timeout = -1
rbd_user = cinder
rbd_secret_uuid = {{ rbd_secret_uuid }}
{% endif %}