
After all of the discussions we had on https://review.opendev.org/#/c/670626/2, I studied all projects that have an "oslo_messaging" section. Afterwards, I applied the same method that is already used in the "oslo_messaging" section of Nova, Cinder, and others. This guarantees that we have a consistent way to enable/disable notifications across projects, based on whether components such as Ceilometer are enabled or disabled (a minimal sketch of the resulting pattern follows the list below). Here follows the list of components and the respective changes I made:

* Aodh: the section is declared, but it is not used. Therefore, it will be removed in an upcoming PR.
* Congress: the section is declared, but it is not used. Therefore, it will be removed in an upcoming PR.
* Cinder: it was already properly configured.
* Octavia: the section is declared, but it is not used. Therefore, it will be removed in an upcoming PR.
* Heat: it was already using a similar scheme; I only adjusted it slightly to match the other components.
* Ceilometer: Ceilometer publishes some messages to RabbitMQ. However, its default driver is "messagingv2", not '' (empty) as defined in Oslo; these settings are defined in ceilometer/publisher/messaging.py. Therefore, we do not need to do anything in the "oslo_messaging_notifications" section for Ceilometer.
* Tacker: it was already using a similar scheme; I only adjusted it slightly to match the other components.
* Neutron: it was already properly configured.
* Nova: it was already properly configured. However, we found another issue with its configuration. Kolla-ansible does not configure Nova notifications as it should: if Searchlight is not installed (enabled), the 'notification_format' should be 'unversioned'. The default is 'both', so Nova sends notifications to the 'versioned_notifications' queue, but that queue has no consumer when Searchlight is disabled. In our case, the queue accumulated 511k messages, and the huge backlog of "stuck" messages made the RabbitMQ cluster unstable.
  https://bugzilla.redhat.com/show_bug.cgi?id=1478274
  https://bugs.launchpad.net/ceilometer/+bug/1665449
* Nova_hyperv: I added the same configuration as in the Nova project.
* Vitrage: it was already using a similar scheme; I only adjusted it slightly to match the other components.
* Searchlight: I created a mechanism similar to what we have in AODH, Cinder, Nova, and others.
* Ironic: I created a mechanism similar to what we have in AODH, Cinder, Nova, and others.
* Glance: it was already properly configured.
* Trove: it was already using a similar scheme; I only adjusted it slightly to match the other components.
* Blazar: it was already using a similar scheme; I only adjusted it slightly to match the other components.
* Sahara: it was already using a similar scheme; I only adjusted it slightly to match the other components.
* Watcher: I created a mechanism similar to what we have in AODH, Cinder, Nova, and others.
* Barbican: I created a mechanism similar to what we have in Cinder, Nova, and others. I also added a configuration to the 'keystone_notifications' section. Barbican needs its own queue to capture events from Keystone; otherwise, it has an impact on Ceilometer and other systems that are connected to the default "notifications" queue.
* Keystone: Keystone is the system that triggered this work, with the discussions that followed on https://review.opendev.org/#/c/670626/2. After a long discussion, we agreed to apply the same approach that we have in Nova, Cinder, and other systems to Keystone, and that is what we did. Moreover, we introduce a new topic, "barbican_notifications", when Barbican is enabled. We also removed the variable 'enable_cadf_notifications', as it is obsolete and the default in Keystone is CADF.
* Mistral: the driver was hardcoded to "noop". However, that does not seem to be a good practice. Instead, I applied the same standard of using the driver and pushing to the "notifications" queue if Ceilometer is enabled.
* Cyborg: I created a mechanism similar to what we have in AODH, Cinder, Nova, and others.
* Murano: it was already using a similar scheme; I only adjusted it slightly to match the other components.
* Senlin: it was already using a similar scheme; I only adjusted it slightly to match the other components.
* Manila: it was already using a similar scheme; I only adjusted it slightly to match the other components.
* Zun: the section is declared, but it is not used. Therefore, it will be removed in an upcoming PR.
* Designate: it was already using a similar scheme; I only adjusted it slightly to match the other components.
* Magnum: it was already using a similar scheme; I only adjusted it slightly to match the other components.

Closes-Bug: #1838985
Change-Id: I88bdb004814f37c81c9a9c4e5e491fac69f6f202
Signed-off-by: Rafael Weingärtner <rafael@apache.org>
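The shared pattern referenced above gates the notification driver on whether any notification topic is actually enabled. Below is a minimal sketch of such a service configuration template, not the exact template shipped in every role: it assumes the Designate variable name (designate_enabled_notification_topics) defined in the defaults file that follows, and other projects would substitute their own prefix.

    [oslo_messaging_notifications]
    {% if designate_enabled_notification_topics %}
    driver = messagingv2
    topics = {{ designate_enabled_notification_topics | map(attribute='name') | join(',') }}
    {% else %}
    driver = noop
    {% endif %}

When no topic is enabled (for example, because Ceilometer and other consumers are disabled), the driver falls back to "noop", so messages no longer pile up in unconsumed queues — the failure mode described above for Nova's versioned notifications.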
---
project_name: "designate"

designate_services:
  designate-api:
    container_name: designate_api
    group: designate-api
    enabled: true
    image: "{{ designate_api_image_full }}"
    volumes: "{{ designate_api_default_volumes + designate_api_extra_volumes }}"
    dimensions: "{{ designate_api_dimensions }}"
    haproxy:
      designate_api:
        enabled: "{{ enable_designate }}"
        mode: "http"
        external: false
        port: "{{ designate_api_port }}"
        listen_port: "{{ designate_api_listen_port }}"
      designate_api_external:
        enabled: "{{ enable_designate }}"
        mode: "http"
        external: true
        port: "{{ designate_api_port }}"
        listen_port: "{{ designate_api_listen_port }}"
  designate-backend-bind9:
    container_name: designate_backend_bind9
    group: designate-backend-bind9
    enabled: "{{ designate_backend == 'bind9' }}"
    image: "{{ designate_backend_bind9_image_full }}"
    volumes: "{{ designate_backend_bind9_default_volumes + designate_backend_bind9_extra_volumes }}"
    dimensions: "{{ designate_backend_bind9_dimensions }}"
  designate-central:
    container_name: designate_central
    group: designate-central
    enabled: true
    image: "{{ designate_central_image_full }}"
    volumes: "{{ designate_central_default_volumes + designate_central_extra_volumes }}"
    dimensions: "{{ designate_central_dimensions }}"
  designate-mdns:
    container_name: designate_mdns
    group: designate-mdns
    enabled: true
    image: "{{ designate_mdns_image_full }}"
    volumes: "{{ designate_mdns_default_volumes + designate_mdns_extra_volumes }}"
    dimensions: "{{ designate_mdns_dimensions }}"
  designate-producer:
    container_name: designate_producer
    group: designate-producer
    enabled: true
    image: "{{ designate_producer_image_full }}"
    volumes: "{{ designate_producer_default_volumes + designate_producer_extra_volumes }}"
    dimensions: "{{ designate_producer_dimensions }}"
  designate-worker:
    container_name: designate_worker
    group: designate-worker
    enabled: true
    image: "{{ designate_worker_image_full }}"
    volumes: "{{ designate_worker_default_volumes + designate_worker_extra_volumes }}"
    dimensions: "{{ designate_worker_dimensions }}"
  designate-sink:
    container_name: designate_sink
    group: designate-sink
    enabled: true
    image: "{{ designate_sink_image_full }}"
    volumes: "{{ designate_sink_default_volumes + designate_sink_extra_volumes }}"
    dimensions: "{{ designate_sink_dimensions }}"


####################
# Database
####################
designate_database_name: "designate"
designate_database_user: "{% if use_preconfigured_databases | bool and use_common_mariadb_user | bool %}{{ database_user }}{% else %}designate{% endif %}"
designate_database_address: "{{ database_address }}:{{ database_port }}"

designate_pool_manager_database_name: "designate_pool_manager"
designate_pool_manager_database_user: "{% if use_preconfigured_databases | bool and use_common_mariadb_user | bool %}{{ database_user }}{% else %}designate_pool_manager{% endif %}"
designate_pool_manager_database_address: "{{ database_address }}:{{ database_port }}"


####################
# Docker
####################
designate_install_type: "{{ kolla_install_type }}"
designate_tag: "{{ openstack_release }}"

designate_central_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ designate_install_type }}-designate-central"
designate_central_tag: "{{ designate_tag }}"
designate_central_image_full: "{{ designate_central_image }}:{{ designate_central_tag }}"

designate_producer_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ designate_install_type }}-designate-producer"
designate_producer_tag: "{{ designate_tag }}"
designate_producer_image_full: "{{ designate_producer_image }}:{{ designate_producer_tag }}"

designate_api_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ designate_install_type }}-designate-api"
designate_api_tag: "{{ designate_tag }}"
designate_api_image_full: "{{ designate_api_image }}:{{ designate_api_tag }}"

designate_backend_bind9_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ designate_install_type }}-designate-backend-bind9"
designate_backend_bind9_tag: "{{ designate_tag }}"
designate_backend_bind9_image_full: "{{ designate_backend_bind9_image }}:{{ designate_backend_bind9_tag }}"

designate_mdns_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ designate_install_type }}-designate-mdns"
designate_mdns_tag: "{{ designate_tag }}"
designate_mdns_image_full: "{{ designate_mdns_image }}:{{ designate_mdns_tag }}"

designate_sink_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ designate_install_type }}-designate-sink"
designate_sink_tag: "{{ designate_tag }}"
designate_sink_image_full: "{{ designate_sink_image }}:{{ designate_sink_tag }}"

designate_worker_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ designate_install_type }}-designate-worker"
designate_worker_tag: "{{ designate_tag }}"
designate_worker_image_full: "{{ designate_worker_image }}:{{ designate_worker_tag }}"

designate_api_dimensions: "{{ default_container_dimensions }}"
designate_backend_bind9_dimensions: "{{ default_container_dimensions }}"
designate_central_dimensions: "{{ default_container_dimensions }}"
designate_mdns_dimensions: "{{ default_container_dimensions }}"
designate_producer_dimensions: "{{ default_container_dimensions }}"
designate_worker_dimensions: "{{ default_container_dimensions }}"
designate_sink_dimensions: "{{ default_container_dimensions }}"

designate_api_default_volumes:
  - "{{ node_config_directory }}/designate-api/:{{ container_config_directory }}/:ro"
  - "/etc/localtime:/etc/localtime:ro"
  - "kolla_logs:/var/log/kolla/"
  - "{{ kolla_dev_repos_directory ~ '/designate/designate:/var/lib/kolla/venv/lib/python2.7/site-packages/designate' if designate_dev_mode | bool else '' }}"
designate_backend_bind9_default_volumes:
  - "{{ node_config_directory }}/designate-backend-bind9/:{{ container_config_directory }}/:ro"
  - "/etc/localtime:/etc/localtime:ro"
  - "kolla_logs:/var/log/kolla/"
  - "designate_backend_bind9:/var/lib/named/"
designate_central_default_volumes:
  - "{{ node_config_directory }}/designate-central/:{{ container_config_directory }}/:ro"
  - "/etc/localtime:/etc/localtime:ro"
  - "kolla_logs:/var/log/kolla/"
  - "{{ kolla_dev_repos_directory ~ '/designate/designate:/var/lib/kolla/venv/lib/python2.7/site-packages/designate' if designate_dev_mode | bool else '' }}"
designate_mdns_default_volumes:
  - "{{ node_config_directory }}/designate-mdns/:{{ container_config_directory }}/:ro"
  - "/etc/localtime:/etc/localtime:ro"
  - "kolla_logs:/var/log/kolla/"
  - "{{ kolla_dev_repos_directory ~ '/designate/designate:/var/lib/kolla/venv/lib/python2.7/site-packages/designate' if designate_dev_mode | bool else '' }}"
designate_producer_default_volumes:
  - "{{ node_config_directory }}/designate-producer/:{{ container_config_directory }}/:ro"
  - "/etc/localtime:/etc/localtime:ro"
  - "kolla_logs:/var/log/kolla/"
  - "{{ kolla_dev_repos_directory ~ '/designate/designate:/var/lib/kolla/venv/lib/python2.7/site-packages/designate' if designate_dev_mode | bool else '' }}"
designate_worker_default_volumes:
  - "{{ node_config_directory }}/designate-worker/:{{ container_config_directory }}/:ro"
  - "/etc/localtime:/etc/localtime:ro"
  - "kolla_logs:/var/log/kolla/"
  - "{{ kolla_dev_repos_directory ~ '/designate/designate:/var/lib/kolla/venv/lib/python2.7/site-packages/designate' if designate_dev_mode | bool else '' }}"
designate_sink_default_volumes:
  - "{{ node_config_directory }}/designate-sink/:{{ container_config_directory }}/:ro"
  - "/etc/localtime:/etc/localtime:ro"
  - "kolla_logs:/var/log/kolla/"
  - "{{ kolla_dev_repos_directory ~ '/designate/designate:/var/lib/kolla/venv/lib/python2.7/site-packages/designate' if designate_dev_mode | bool else '' }}"

designate_extra_volumes: "{{ default_extra_volumes }}"
designate_api_extra_volumes: "{{ designate_extra_volumes }}"
designate_backend_bind9_extra_volumes: "{{ designate_extra_volumes }}"
designate_central_extra_volumes: "{{ designate_extra_volumes }}"
designate_mdns_extra_volumes: "{{ designate_extra_volumes }}"
designate_producer_extra_volumes: "{{ designate_extra_volumes }}"
designate_worker_extra_volumes: "{{ designate_extra_volumes }}"
designate_sink_extra_volumes: "{{ designate_extra_volumes }}"

####################
# OpenStack
####################
designate_admin_endpoint: "{{ admin_protocol }}://{{ designate_internal_fqdn }}:{{ designate_api_port }}"
designate_internal_endpoint: "{{ internal_protocol }}://{{ designate_internal_fqdn }}:{{ designate_api_port }}"
designate_public_endpoint: "{{ public_protocol }}://{{ designate_external_fqdn }}:{{ designate_api_port }}"

designate_logging_debug: "{{ openstack_logging_debug }}"

designate_keystone_user: "designate"

openstack_designate_auth: "{{ openstack_auth }}"


####################
# Kolla
####################
designate_git_repository: "{{ kolla_dev_repos_git }}/{{ project_name }}"
designate_dev_repos_pull: "{{ kolla_dev_repos_pull }}"
designate_dev_mode: "{{ kolla_dev_mode }}"
designate_source_version: "{{ kolla_source_version }}"

####################
## Designate
####################
designate_dnssec_validation: "yes"
designate_recursion: "no"
## Example for designate_forwarders_addresses: "10.199.200.1; 10.199.100.1"
designate_forwarders_addresses: ""

####################
# Infoblox
####################
designate_backend_infoblox_nameservers: ""
designate_infoblox_host: ""
designate_infoblox_wapi_url: ""
designate_infoblox_ssl_verify: "true"
designate_infoblox_auth_username: ""
designate_infoblox_auth_password: ""
designate_infoblox_multi_tenant: "false"
designate_infoblox_ns_group: ""


####################
# Notifications
####################
designate_notifications_topic_name: "notifications_designate"

designate_notification_topics:
  - name: "{{ designate_notifications_topic_name }}"
    enabled: True

designate_enabled_notification_topics: "{{ designate_notification_topics | selectattr('enabled', 'equalto', true) | list }}"
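For illustration, with the defaults above the selectattr filter keeps every topic whose enabled flag is true, so designate_enabled_notification_topics resolves to a one-element list containing the "notifications_designate" topic and notifications stay active. An operator wanting to silence them could override the topic list, for example in /etc/kolla/globals.yml; this override is a hypothetical example, not part of the change:

    designate_notification_topics:
      - name: "notifications_designate"
        enabled: false

With that override, designate_enabled_notification_topics resolves to an empty list, and the template pattern sketched after the commit message above would fall back to the "noop" driver.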