Commit c81772024c by Pierre Riteau: Reduce the use of SQLAlchemy connection pooling
When the internal VIP is moved in the event of a failure of the active
controller, OpenStack services can become unresponsive as they try to
talk to MariaDB over stale connections from the SQLAlchemy pool.

It has been argued that OpenStack doesn't really need to use connection
pooling with MariaDB [1]. This commit reduces the use of connection
pooling via two configuration options:

- max_pool_size is set to 1 to allow only a single connection in the
  pool (it is not possible to disable connection pooling entirely via
  oslo.db, and max_pool_size = 0 means unlimited pool size)
- connection_recycle_time is lowered from the default of one hour to 10
  seconds, which means the single connection in the pool is recreated
  regularly

These settings have been shown to improve the responsiveness of the
system in the event of a failover.
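
For reference, a minimal sketch of the rendered [database] section with
these settings; the credentials and host are placeholder values:

    [database]
    connection = mysql+pymysql://cinder:SECRET@192.0.2.10:3306/cinder
    connection_recycle_time = 10
    max_pool_size = 1
    max_retries = -1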

[1] http://lists.openstack.org/pipermail/openstack-dev/2015-April/061808.html

Change-Id: Ib6a62d4428db9b95569314084090472870417f3d
Closes-Bug: #1896635
2020-09-22 17:54:45 +02:00

[DEFAULT]
debug = {{ cinder_logging_debug }}
log_dir = /var/log/kolla/cinder
use_forwarded_for = true
# Set use_stderr to False or the logs will also be sent to stderr
# and collected by Docker
use_stderr = False
my_ip = {{ api_interface_address }}
osapi_volume_workers = {{ openstack_service_workers }}
volume_name_template = volume-%s
glance_api_servers = {{ glance_internal_endpoint }}
glance_num_retries = {{ groups['glance-api'] | length }}
glance_api_version = 2
glance_ca_certificates_file = {{ openstack_cacert }}
os_region_name = {{ openstack_region_name }}
{% if cinder_enabled_backends %}
enabled_backends = {{ cinder_enabled_backends|map(attribute='name')|join(',') }}
{% endif %}
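# Backup settings, applied only to the cinder-backup service; the driver
# is selected via the cinder_backup_driver variable (ceph, nfs, or swift).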
{% if service_name == "cinder-backup" and enable_cinder_backup | bool %}
{% if cinder_backup_driver == "ceph" %}
backup_driver = cinder.backup.drivers.ceph.CephBackupDriver
backup_ceph_conf = /etc/ceph/ceph.conf
backup_ceph_user = {{ ceph_cinder_backup_user }}
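# 134217728 bytes = 128 MiB per chunk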
backup_ceph_chunk_size = 134217728
backup_ceph_pool = {{ ceph_cinder_backup_pool_name }}
backup_ceph_stripe_unit = 0
backup_ceph_stripe_count = 0
restore_discard_excess_bytes = true
{% elif cinder_backup_driver == "nfs" %}
backup_driver = cinder.backup.drivers.nfs.NFSBackupDriver
backup_mount_options = {{ cinder_backup_mount_options_nfs }}
backup_mount_point_base = /var/lib/cinder/backup
backup_share = {{ cinder_backup_share }}
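# 327680000 bytes = 312.5 MiB per backup file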
backup_file_size = 327680000
{% elif enable_swift | bool and cinder_backup_driver == "swift" %}
backup_driver = cinder.backup.drivers.swift.SwiftBackupDriver
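# The trailing AUTH_ is intentional: the project ID is appended at
# runtime to form the AUTH_<project_id> Swift account URL.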
backup_swift_url = {{ swift_internal_base_endpoint }}/v1/AUTH_
backup_swift_auth = per_user
backup_swift_auth_version = 1
backup_swift_user =
backup_swift_key =
{% endif %}
{% endif %}
osapi_volume_listen = {{ api_interface_address }}
osapi_volume_listen_port = {{ cinder_api_listen_port }}
api_paste_config = /etc/cinder/api-paste.ini
auth_strategy = keystone
transport_url = {{ rpc_transport_url }}
[oslo_messaging_notifications]
transport_url = {{ notify_transport_url }}
{% if cinder_enabled_notification_topics %}
driver = messagingv2
topics = {{ cinder_enabled_notification_topics | map(attribute='name') | join(',') }}
{% else %}
driver = noop
{% endif %}
[oslo_middleware]
enable_proxy_headers_parsing = True
{% if cinder_policy_file is defined %}
[oslo_policy]
policy_file = {{ cinder_policy_file }}
{% endif %}
[nova]
interface = internal
auth_url = {{ keystone_admin_url }}
auth_type = password
project_domain_id = {{ default_project_domain_id }}
user_domain_id = {{ default_user_domain_id }}
region_name = {{ openstack_region_name }}
project_name = service
username = {{ nova_keystone_user }}
password = {{ nova_keystone_password }}
cafile = {{ openstack_cacert }}
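# Connection pooling is deliberately limited (see the commit message
# above): the defaults of database_max_pool_size = 1 and
# database_connection_recycle_time = 10 keep a single pooled connection
# that is recycled quickly after an internal VIP failover.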
[database]
connection = mysql+pymysql://{{ cinder_database_user }}:{{ cinder_database_password }}@{{ cinder_database_address }}/{{ cinder_database_name }}
connection_recycle_time = {{ database_connection_recycle_time }}
max_pool_size = {{ database_max_pool_size }}
max_retries = -1
[keystone_authtoken]
www_authenticate_uri = {{ keystone_internal_url }}
auth_url = {{ keystone_admin_url }}
auth_type = password
project_domain_id = {{ default_project_domain_id }}
user_domain_id = {{ default_user_domain_id }}
project_name = service
username = {{ cinder_keystone_user }}
password = {{ cinder_keystone_password }}
cafile = {{ openstack_cacert }}
memcache_security_strategy = ENCRYPT
memcache_secret_key = {{ memcache_secret_key }}
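# Builds a comma-separated list of host:port endpoints from all hosts in
# the memcached inventory group.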
memcached_servers = {% for host in groups['memcached'] %}{{ 'api' | kolla_address(host) | put_address_in_context('memcache') }}:{{ memcached_port }}{% if not loop.last %},{% endif %}{% endfor %}
[oslo_concurrency]
lock_path = /var/lib/cinder/tmp
{% if enable_cinder_backend_lvm | bool %}
[lvm-1]
volume_group = {{ cinder_volume_group }}
volume_driver = cinder.volume.drivers.lvm.LVMVolumeDriver
volume_backend_name = lvm-1
target_helper = {{ cinder_target_helper }}
target_protocol = iscsi
{% endif %}
{% if cinder_backend_ceph | bool %}
[rbd-1]
volume_driver = cinder.volume.drivers.rbd.RBDDriver
volume_backend_name = rbd-1
rbd_pool = {{ ceph_cinder_pool_name }}
rbd_ceph_conf = /etc/ceph/ceph.conf
rbd_flatten_volume_from_snapshot = false
rbd_max_clone_depth = 5
rbd_store_chunk_size = 4
rados_connect_timeout = 5
rbd_user = {{ ceph_cinder_user }}
rbd_secret_uuid = {{ cinder_rbd_secret_uuid }}
report_discard_supported = True
image_upload_use_cinder_backend = True
{% endif %}
{% if enable_cinder_backend_nfs | bool %}
[nfs-1]
volume_driver = cinder.volume.drivers.nfs.NfsDriver
volume_backend_name = nfs-1
nfs_shares_config = /etc/cinder/nfs_shares
nfs_snapshot_support = True
nas_secure_file_permissions = False
nas_secure_file_operations = False
{% endif %}
{% if enable_cinder_backend_hnas_nfs | bool %}
[hnas-nfs]
volume_driver = cinder.volume.drivers.hitachi.hnas_nfs.HNASNFSDriver
nfs_shares_config = /home/cinder/nfs_shares
volume_backend_name = {{ hnas_nfs_backend }}
hnas_username = {{ hnas_nfs_username }}
hnas_password = {{ hnas_nfs_password }}
hnas_mgmt_ip0 = {{ hnas_nfs_mgmt_ip0 }}
hnas_svc0_volume_type = {{ hnas_nfs_svc0_volume_type }}
hnas_svc0_hdp = {{ hnas_nfs_svc0_hdp }}
{% endif %}
{% if cinder_backend_vmwarevc_vmdk | bool %}
[vmwarevc-vmdk]
volume_backend_name = vmwarevc-vmdk
volume_driver = cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver
vmware_host_ip = {{ vmware_vcenter_host_ip }}
vmware_host_username = {{ vmware_vcenter_host_username }}
vmware_host_password = {{ vmware_vcenter_host_password }}
vmware_cluster_name = {{ vmware_vcenter_cluster_name }}
vmware_insecure = True
{% endif %}
{% if enable_cinder_backend_zfssa_iscsi | bool %}
[zfssa-iscsi]
volume_backend_name = {{ zfssa_iscsi_backend }}
volume_driver = cinder.volume.drivers.zfssa.zfssaiscsi.ZFSSAISCSIDriver
san_ip = {{ zfssa_iscsi_san_ip }}
san_login = {{ zfssa_iscsi_login }}
san_password = {{ zfssa_iscsi_password }}
zfssa_pool = {{ zfssa_iscsi_pool }}
zfssa_project = {{ zfssa_iscsi_project }}
zfssa_initiator_group = {{ zfssa_iscsi_initiator_group }}
zfssa_target_portal = {{ zfssa_iscsi_target_portal }}
zfssa_target_interfaces = {{ zfssa_iscsi_target_interfaces }}
{% endif %}
{% if enable_cinder_backend_quobyte | bool %}
[QuobyteHD]
volume_driver = cinder.volume.drivers.quobyte.QuobyteDriver
quobyte_volume_url = quobyte://{{ quobyte_storage_host }}/{{ quobyte_storage_volume }}
{% endif %}
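# Launch the privsep helper through cinder-rootwrap so that it starts
# with the elevated privileges it needs.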
[privsep_entrypoint]
helper_command = sudo cinder-rootwrap /etc/cinder/rootwrap.conf privsep-helper --config-file /etc/cinder/cinder.conf
{% if enable_osprofiler | bool %}
[profiler]
enabled = true
trace_sqlalchemy = true
hmac_keys = {{ osprofiler_secret }}
connection_string = {{ osprofiler_backend_connection_string }}
{% endif %}
{% if enable_barbican | bool %}
[barbican]
auth_endpoint = {{ keystone_internal_url }}
barbican_endpoint_type = internal
verify_ssl_path = {{ openstack_cacert }}
{% endif %}
[coordination]
{% if cinder_coordination_backend == 'redis' %}
backend_url = {{ redis_connection_string }}
{% elif cinder_coordination_backend == 'etcd' %}
# NOTE(yoctozepto): etcd-compatible tooz drivers do not support multiple endpoints here (verified in Stein, Train)
# NOTE(yoctozepto): we must use etcd3gw (aka etcd3+http) due to issues between the alternative driver (etcd3) and eventlet (as used by cinder)
# see https://bugs.launchpad.net/kolla-ansible/+bug/1854932
# and https://review.opendev.org/466098 for details
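# Renders to a single endpoint, e.g. etcd3+http://192.0.2.10:2379
# (address shown is illustrative).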
backend_url = etcd3+{{ etcd_protocol }}://{{ 'api' | kolla_address(groups['etcd'][0]) | put_address_in_context('url') }}:{{ etcd_client_port }}
{% endif %}