22a6223b1b
After all of the discussions we had on "https://review.opendev.org/#/c/670626/2", I studied all projects that have an "oslo_messaging" section. Afterwards, I applied the same method that is already used in "oslo_messaging" section in Nova, Cinder, and others. This guarantees that we have a consistent method to enable/disable notifications across projects based on components (e.g. Ceilometer) being enabled or disabled. Here follows the list of components, and the respective changes I did. * Aodh: The section is declared, but it is not used. Therefore, it will be removed in an upcoming PR. * Congress: The section is declared, but it is not used. Therefore, it will be removed in an upcoming PR. * Cinder: It was already properly configured. * Octavia: The section is declared, but it is not used. Therefore, it will be removed in an upcoming PR. * Heat: It was already using a similar scheme; I just modified it a little bit to be the same as we have in all other components * Ceilometer: Ceilometer publishes some messages in RabbitMQ. However, the default driver is "messagingv2", and not ''(empty) as defined in Oslo; these configurations are defined in ceilometer/publisher/messaging.py. Therefore, we do not need to do anything for the "oslo_messaging_notifications" section in Ceilometer * Tacker: It was already using a similar scheme; I just modified it a little bit to be the same as we have in all other components * Neutron: It was already properly configured. * Nova It was already properly configured. However, we found another issue with its configuration. Kolla-ansible does not configure nova notifications as it should. If 'searchlight' is not installed (enabled) the 'notification_format' should be 'unversioned'. The default is 'both'; so nova will send a notification to the queue versioned_notifications; but that queue has no consumer when 'searchlight' is disabled. In our case, the queue got 511k messages. 
The huge amount of "stuck" messages made the Rabbitmq cluster unstable. https://bugzilla.redhat.com/show_bug.cgi?id=1478274 https://bugs.launchpad.net/ceilometer/+bug/1665449 * Nova_hyperv: I added the same configurations as in Nova project. * Vitrage It was already using a similar scheme; I just modified it a little bit to be the same as we have in all other components * Searchlight I created a mechanism similar to what we have in AODH, Cinder, Nova, and others. * Ironic I created a mechanism similar to what we have in AODH, Cinder, Nova, and others. * Glance It was already properly configured. * Trove It was already using a similar scheme; I just modified it a little bit to be the same as we have in all other components * Blazar It was already using a similar scheme; I just modified it a little bit to be the same as we have in all other components * Sahara It was already using a similar scheme; I just modified it a little bit to be the same as we have in all other components * Watcher I created a mechanism similar to what we have in AODH, Cinder, Nova, and others. * Barbican I created a mechanism similar to what we have in Cinder, Nova, and others. I also added a configuration to 'keystone_notifications' section. Barbican needs its own queue to capture events from Keystone. Otherwise, it has an impact on Ceilometer and other systems that are connected to the "notifications" default queue. * Keystone Keystone is the system that triggered this work with the discussions that followed on https://review.opendev.org/#/c/670626/2. After a long discussion, we agreed to apply the same approach that we have in Nova, Cinder and other systems in Keystone. That is what we did. Moreover, we introduce a new topic "barbican_notifications" when barbican is enabled. We also removed the "variable" enable_cadf_notifications, as it is obsolete, and the default in Keystone is CADF. * Mistral: It was hardcoded "noop" as the driver. However, that does not seem a good practice. 
Instead, I applied the same standard of using the driver and pushing to "notifications" queue if Ceilometer is enabled. * Cyborg: I created a mechanism similar to what we have in AODH, Cinder, Nova, and others. * Murano It was already using a similar scheme; I just modified it a little bit to be the same as we have in all other components * Senlin It was already using a similar scheme; I just modified it a little bit to be the same as we have in all other components * Manila It was already using a similar scheme; I just modified it a little bit to be the same as we have in all other components * Zun The section is declared, but it is not used. Therefore, it will be removed in an upcoming PR. * Designate It was already using a similar scheme; I just modified it a little bit to be the same as we have in all other components * Magnum It was already using a similar scheme; I just modified it a little bit to be the same as we have in all other components Closes-Bug: #1838985 Change-Id: I88bdb004814f37c81c9a9c4e5e491fac69f6f202 Signed-off-by: Rafael Weingärtner <rafael@apache.org>
76 lines
2.2 KiB
Django/Jinja
76 lines
2.2 KiB
Django/Jinja
# nova-compute configuration for a Hyper-V compute node (Jinja2 template,
# rendered by kolla-ansible; consumed by oslo.config, which accepts '#' comments).
[DEFAULT]
# Hyper-V driver and on-disk locations for this compute host.
compute_driver = compute_hyperv.driver.HyperVDriver
instances_path = C:\OpenStack\Instances
use_cow_images = true
flat_injected = true
mkisofs_cmd = C:\Program Files\Cloudbase Solutions\OpenStack\Nova\bin\mkisofs.exe

debug = {{ openstack_logging_debug }}

allow_resize_to_same_host = true
# Reap instances that are deleted in the DB but still running on the host;
# poll every 120 seconds, auto-confirm resizes after 5 seconds.
running_deleted_instance_action = reap
running_deleted_instance_poll_interval = 120
resize_confirm_window = 5
resume_guests_state_on_host_boot = true

rpc_response_timeout = 1800
lock_path = C:\OpenStack\Log

# Do not fail instance boot if Neutron VIF plugging events are late/missing.
vif_plugging_is_fatal = false
vif_plugging_timeout = 60

# NOTE(review): rpc_backend is deprecated in oslo.messaging and superseded by
# transport_url below — confirm the target OpenStack release still reads it
# before removing.
rpc_backend = rabbit

log_dir = C:\OpenStack\Log
log_file = nova-compute.log

# Normalized from "True" to lowercase for consistency with the other booleans
# in this file (oslo.config accepts both spellings).
force_config_drive = true

transport_url = {{ rpc_transport_url }}

# Keystone credentials nova-compute uses to talk to the Placement API.
[placement]
auth_type = password
auth_url = {{ keystone_admin_url }}/v3
project_name = service
username = {{ placement_keystone_user }}
password = {{ placement_keystone_password }}
project_domain_name = {{ default_project_domain_name }}
user_domain_name = {{ default_user_domain_name }}
os_region_name = {{ openstack_region_name }}

# Image service endpoint used when fetching instance images.
[glance]
# NOTE(review): api_servers is deprecated in newer Nova releases in favour of
# keystoneauth endpoint lookup / endpoint_override — verify against the target
# release before changing.
api_servers = {{ internal_protocol }}://{{ glance_internal_fqdn }}:{{ glance_api_port }}

# Hyper-V driver specifics (compute-hyperv).
[hyperv]
vswitch_name = {{ vswitch_name }}
limit_cpu_features = false
# Inject the admin password via config drive (ISO image, see config_drive_cdrom).
config_drive_inject_password = true
qemu_img_cmd = C:\Program Files\Cloudbase Solutions\OpenStack\Nova\bin\qemu-img.exe
config_drive_cdrom = true
# 1 disables Hyper-V dynamic memory (no memory overcommit).
dynamic_memory_ratio = 1
enable_instance_metrics_collection = false

# RDP console access for Hyper-V instances (FreeRDP HTML5 gateway).
[rdp]
enabled = true
# NOTE(review): this mixes public_protocol with the *internal* VIP address —
# looks inconsistent with the other endpoints in this file; confirm intended.
html5_proxy_base_url = {{ public_protocol }}://{{ kolla_internal_vip_address }}:{{ rdp_port }}

# Keystone credentials and endpoint nova-compute uses to talk to Neutron.
[neutron]
url = {{ internal_protocol }}://{{ neutron_internal_fqdn }}:{{ neutron_server_port }}
auth_strategy = keystone
# NOTE(review): domains are hardcoded to "default" here while [placement] uses
# the default_*_domain_name variables — confirm whether that divergence is
# deliberate.
project_domain_name = default
project_name = service
user_domain_name = default
username = {{ neutron_keystone_user }}
password = {{ neutron_keystone_password }}
auth_url = {{ keystone_admin_url }}/v3
# NOTE(review): "v3password" is a legacy keystoneauth alias; [placement] uses
# plain "password" — consider unifying.
auth_type = v3password

# Notification emission. When no consumer (e.g. Ceilometer/Searchlight) is
# enabled, the driver is set to noop so no messages pile up unconsumed in
# RabbitMQ.
[oslo_messaging_notifications]
transport_url = {{ notify_transport_url }}
{% if nova_enabled_notification_topics %}
driver = messagingv2
topics = {{ nova_enabled_notification_topics | map(attribute='name') | join(',') }}
{% else %}
driver = noop
{% endif %}

# Versioned-notification format. Searchlight is the only consumer of the
# versioned_notifications queue; without it, emitting "both" (the Nova
# default) leaves versioned messages accumulating with no consumer, so emit
# unversioned only.
[notifications]
{% if not enable_searchlight | bool %}
notification_format = unversioned
{% else %}
notification_format = both
{% endif %}