tripleo-heat-templates/deployment/haproxy/haproxy-pacemaker-puppet.yaml


heat_template_version: wallaby
description: >
OpenStack containerized HAproxy service for pacemaker
parameters:
ContainerHAProxyImage:
    description: The container image to use for the haproxy service
type: string
tags:
- role_specific
ContainerHAProxyConfigImage:
description: The container image to use for the haproxy config_volume
type: string
tags:
- role_specific
ClusterCommonTag:
default: false
description: When set to false, a pacemaker service is configured
to use a floating tag for its container image name,
e.g. 'REGISTRY/NAMESPACE/IMAGENAME:pcmklatest'. When
set to true, the service uses a floating prefix as
well, e.g. 'cluster.common.tag/IMAGENAME:pcmklatest'.
type: boolean
ClusterFullTag:
default: false
description: When set to true, the pacemaker service uses a fully
constant tag for its container image name, e.g.
'cluster.common.tag/SERVICENAME:pcmklatest'.
type: boolean
ServiceData:
default: {}
description: Dictionary packing service data
type: json
ServiceNetMap:
default: {}
description: Mapping of service_name -> network name. Typically set
via parameter_defaults in the resource registry. Use
parameter_merge_strategies to merge it with the defaults.
type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
via parameter_defaults in the resource registry.
type: json
SSLCertificate:
default: ''
description: >
The content of the SSL certificate (without Key) in PEM format.
type: string
PublicSSLCertificateAutogenerated:
default: false
description: >
Whether the public SSL certificate was autogenerated or not.
type: boolean
EnablePublicTLS:
default: true
description: >
Whether to enable TLS on the public interface or not.
type: boolean
DeployedSSLCertificatePath:
default: '/etc/pki/tls/private/overcloud_endpoint.pem'
description: >
The filepath of the certificate as it will be stored in the controller.
type: string
RoleName:
default: ''
description: Role name on which the service is applied
type: string
RoleParameters:
default: {}
description: Parameters specific to the role
type: json
EnableInternalTLS:
type: boolean
default: false
InternalTLSCAFile:
default: '/etc/ipa/ca.crt'
type: string
description: Specifies the default CA cert to use if TLS is used for
services in the internal network.
HAProxyInternalTLSCertsDirectory:
default: '/etc/pki/tls/certs/haproxy'
type: string
HAProxyInternalTLSKeysDirectory:
default: '/etc/pki/tls/private/haproxy'
type: string
HAProxyLoggingSource:
type: json
default:
tag: openstack.haproxy
file: /var/log/containers/haproxy/haproxy.log
startmsg.regex: "^[a-zA-Z]{3} [0-9]{2} [:0-9]{8}"
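      # NOTE: startmsg.regex above matches the leading syslog timestamp (e.g. "Aug 03 17:07:11"),
      # so the log collector can treat each timestamped line as the start of a new record and
      # fold continuation lines into the previous one.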
HAProxySyslogAddress:
default: /dev/log
    description: Syslog address where HAProxy will send its logs
type: string
HAProxySyslogFacility:
default: local0
description: Syslog facility HAProxy will use for its logs
type: string
ConfigDebug:
default: false
description: Whether to run config management (e.g. Puppet) in debug mode.
type: boolean
ContainerCli:
type: string
default: 'podman'
description: CLI tool used to manage containers.
constraints:
- allowed_values: ['podman']
DeployIdentifier:
default: ''
type: string
description: >
Setting this to a unique value will re-run any deployment tasks which
perform configuration on a Heat stack-update.
conditions:
public_tls_enabled:
and:
- {get_param: EnablePublicTLS}
- or:
- not:
equals:
- {get_param: SSLCertificate}
- ""
- {get_param: PublicSSLCertificateAutogenerated}
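  # public_tls_enabled is true when EnablePublicTLS is set and either an SSL certificate
  # was provided via SSLCertificate or the public certificate is autogenerated
  # (PublicSSLCertificateAutogenerated).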
resources:
ContainersCommon:
type: ../containers-common.yaml
HAProxyBase:
type: ./haproxy-container-puppet.yaml
properties:
ServiceData: {get_param: ServiceData}
ServiceNetMap: {get_param: ServiceNetMap}
EndpointMap: {get_param: EndpointMap}
RoleName: {get_param: RoleName}
RoleParameters: {get_param: RoleParameters}
HAProxyPublicTLS:
type: OS::TripleO::Services::HAProxyPublicTLS
properties:
ServiceData: {get_param: ServiceData}
ServiceNetMap: {get_param: ServiceNetMap}
EndpointMap: {get_param: EndpointMap}
RoleName: {get_param: RoleName}
RoleParameters: {get_param: RoleParameters}
HAProxyInternalTLS:
type: OS::TripleO::Services::HAProxyInternalTLS
properties:
ServiceData: {get_param: ServiceData}
ServiceNetMap: {get_param: ServiceNetMap}
EndpointMap: {get_param: EndpointMap}
RoleName: {get_param: RoleName}
RoleParameters: {get_param: RoleParameters}
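  # The nested map_replace below resolves per-role image overrides: ContainerHAProxyImage and
  # ContainerHAProxyConfigImage are tagged role_specific, so a value set in RoleParameters
  # (inner map_replace) takes precedence, while any key left untouched falls back to the
  # global parameter value (outer map_replace). Example with hypothetical values: a role that
  # sets ContainerHAProxyImage: registry.example.com/haproxy:role-tag keeps that value, while
  # ContainerHAProxyConfigImage still resolves to the global parameter.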
RoleParametersValue:
type: OS::Heat::Value
properties:
type: json
value:
map_replace:
- map_replace:
- ContainerHAProxyImage: ContainerHAProxyImage
ContainerHAProxyConfigImage: ContainerHAProxyConfigImage
- values: {get_param: [RoleParameters]}
- values:
ContainerHAProxyImage: {get_param: ContainerHAProxyImage}
ContainerHAProxyConfigImage: {get_param: ContainerHAProxyConfigImage}
outputs:
role_data:
description: Role data for the HAproxy role.
value:
service_name: haproxy
monitoring_subscription: {get_attr: [HAProxyBase, role_data, monitoring_subscription]}
ansible_group_vars: {get_attr: [HAProxyBase, role_data, ansible_group_vars]}
config_settings:
map_merge:
- get_attr: [HAProxyBase, role_data, config_settings]
- tripleo::haproxy::haproxy_service_manage: false
tripleo::haproxy::mysql_clustercheck: true
tripleo::haproxy::haproxy_log_address: {get_param: HAProxySyslogAddress}
tripleo::haproxy::haproxy_log_facility: {get_param: HAProxySyslogFacility}
- haproxy_docker: true
tripleo::profile::pacemaker::haproxy_bundle::container_backend: {get_param: ContainerCli}
            # the list of directories that contain the certs to bind mount in the container
            # bind-mounting the directories rather than all the cert, key and pem files ensures
            # that docker won't create directories on the host when the pem files do not exist
tripleo::profile::pacemaker::haproxy_bundle::tls_mapping: &tls_mapping
list_concat:
- if:
- public_tls_enabled
- - get_param: DeployedSSLCertificatePath
- if:
- {get_param: EnableInternalTLS}
- - get_param: InternalTLSCAFile
- get_param: HAProxyInternalTLSKeysDirectory
- get_param: HAProxyInternalTLSCertsDirectory
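            # For illustration, with the parameter defaults and both public and internal TLS
            # enabled, tls_mapping resolves to ['/etc/pki/tls/private/overcloud_endpoint.pem',
            # '/etc/ipa/ca.crt', '/etc/pki/tls/private/haproxy', '/etc/pki/tls/certs/haproxy'].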
            # The init bundle uses the container_puppet_apply_volumes list. That already contains InternalTLSCAFile
            # and newer podman versions refuse to start with duplicated mountpoints. That is why we cannot use
            # tls_mapping but need a new mapping
tripleo::profile::pacemaker::haproxy_bundle::tls_mapping_init_bundle: &tls_mapping_init_bundle
list_concat:
- if:
- public_tls_enabled
- - get_param: DeployedSSLCertificatePath
- if:
- {get_param: EnableInternalTLS}
- - get_param: HAProxyInternalTLSKeysDirectory
- get_param: HAProxyInternalTLSCertsDirectory
tripleo::profile::pacemaker::haproxy_bundle::internal_certs_directory: {get_param: HAProxyInternalTLSCertsDirectory}
tripleo::profile::pacemaker::haproxy_bundle::internal_keys_directory: {get_param: HAProxyInternalTLSKeysDirectory}
tripleo::profile::pacemaker::haproxy_bundle::haproxy_docker_image: &haproxy_image_pcmklatest
if:
- {get_param: ClusterFullTag}
- "cluster.common.tag/haproxy:pcmklatest"
- yaql:
data:
if:
- {get_param: ClusterCommonTag}
- yaql:
data: {get_attr: [RoleParametersValue, value, ContainerHAProxyImage]}
expression: concat("cluster.common.tag/", $.data.rightSplit(separator => "/", maxSplits => 1)[1])
- {get_attr: [RoleParametersValue, value, ContainerHAProxyImage]}
expression: concat($.data.rightSplit(separator => ":", maxSplits => 1)[0], ":pcmklatest")
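            # Worked example (hypothetical image name): if ContainerHAProxyImage is
            # registry.example.com/tripleo/openstack-haproxy:17.0, the yaql expression keeps the
            # repository and swaps the tag -> registry.example.com/tripleo/openstack-haproxy:pcmklatest.
            # With ClusterCommonTag: true the registry part is replaced first, giving
            # cluster.common.tag/openstack-haproxy:pcmklatest, and with ClusterFullTag: true the
            # constant name cluster.common.tag/haproxy:pcmklatest is used as-is.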
service_config_settings:
rsyslog:
tripleo_logging_sources_haproxy:
- {get_param: HAProxyLoggingSource}
# BEGIN DOCKER SETTINGS
puppet_config:
config_volume: haproxy
puppet_tags: haproxy_config
step_config:
list_join:
- "\n"
- - "exec {'wait-for-settle': command => '/bin/true' }"
- "['pcmk_bundle', 'pcmk_resource', 'pcmk_property', 'pcmk_constraint', 'pcmk_resource_default'].each |String $val| { noop_resource($val) }"
- 'include tripleo::profile::pacemaker::haproxy_bundle'
config_image: {get_attr: [RoleParametersValue, value, ContainerHAProxyConfigImage]}
volumes: &deployed_cert_mount
yaql:
expression: $.data.select($+":"+$+":ro")
data: *tls_mapping
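        # The yaql select above turns every path P in tls_mapping into the bind-mount spec
        # "P:P:ro", e.g. the default public certificate path becomes
        # /etc/pki/tls/private/overcloud_endpoint.pem:/etc/pki/tls/private/overcloud_endpoint.pem:ro.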
kolla_config:
/var/lib/kolla/config_files/haproxy.json:
          # HAProxy 1.8 doesn't ship haproxy-systemd-wrapper; we have to use a new
          # dedicated option for live config reload.
          # Note: we can't use quotes in the kolla command, hence the workaround
command: bash -c $* -- eval if [ -f /usr/sbin/haproxy-systemd-wrapper ]; then exec /usr/sbin/haproxy-systemd-wrapper -f /etc/haproxy/haproxy.cfg; else exec /usr/sbin/haproxy -f /etc/haproxy/haproxy.cfg -Ws; fi
config_files:
- source: "/var/lib/kolla/config_files/src/*"
dest: "/"
merge: true
preserve_properties: true
optional: true
- source: "/var/lib/kolla/config_files/src-tls/*"
dest: "/"
merge: true
optional: true
preserve_properties: true
permissions:
- path: /var/lib/haproxy
owner: haproxy:haproxy
recurse: true
- path:
list_join:
- ''
- - {get_param: HAProxyInternalTLSCertsDirectory}
- '/*'
owner: haproxy:haproxy
perm: '0600'
optional: true
- path:
list_join:
- ''
- - {get_param: HAProxyInternalTLSKeysDirectory}
- '/*'
owner: haproxy:haproxy
perm: '0600'
optional: true
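          # The '/*' globs limit the ownership/mode changes to the files inside the internal
          # TLS cert and key directories; optional: true is meant to let the entries be skipped
          # rather than fail when internal TLS is disabled and the paths do not exist.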
container_config_scripts: {get_attr: [ContainersCommon, container_config_scripts]}
host_prep_tasks: {get_attr: [HAProxyBase, role_data, host_prep_tasks]}
metadata_settings:
{get_attr: [HAProxyBase, role_data, metadata_settings]}
deploy_steps_tasks:
list_concat:
- - name: Configure rsyslog for HAproxy container managed by Pacemaker
when: step|int == 1
block:
- name: Check if rsyslog exists
shell: systemctl is-active rsyslog
register: rsyslog_config
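            # NOTE: a shell task always reports 'changed', so the effective gate for the block
            # below is the return code of 'systemctl is-active' (rc == 0 means rsyslog is active).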
- when:
- rsyslog_config is changed
- rsyslog_config.rc == 0
block:
- name: Forward logging to haproxy.log file
blockinfile:
content: |
if $syslogfacility-text == '{{facility}}' and $programname == 'haproxy' then -/var/log/containers/haproxy/haproxy.log
& stop
create: true
path: /etc/rsyslog.d/openstack-haproxy.conf
vars:
facility: {get_param: HAProxySyslogFacility}
register: logconfig
- name: restart rsyslog service after logging conf change
service:
name: rsyslog
state: restarted
when: logconfig is changed
- name: Validate SSLCertificate is properly defined if PublicSSLCertificateAutogenerated is False
when:
- {get_param: EnablePublicTLS}
- step|int == 2
vars:
ssl_cert: {get_param: SSLCertificate}
auto_gen: {get_param: PublicSSLCertificateAutogenerated}
protocol: {get_param: [EndpointMap, KeystonePublic, protocol]}
block:
- name: Verify SSL certificate
shell: |
cat << EOF | openssl verify
{{ssl_cert}}
EOF
register: openssl_output
when:
- ( ssl_cert | length ) > 512
- protocol == "https"
failed_when:
( ( "self signed certificate" not in openssl_output.stderr ) and ( "OK" not in openssl_output.stdout ) ) or ("expired" in openssl_output.stderr)
- fail:
msg: >
              SSLCertificate is empty or too short, PublicSSLCertificateAutogenerated
              is False, and at least one endpoint is configured with https
when:
- ( ssl_cert | length ) < 512
- not ( auto_gen | bool )
- protocol == "https"
- name: HAproxy tag container image for pacemaker
when: step|int == 1
import_role:
name: tripleo_container_tag
vars:
container_image: {get_attr: [RoleParametersValue, value, ContainerHAProxyImage]}
container_image_latest: *haproxy_image_pcmklatest
- name: HAproxy HA Wrappers Step
when: step|int == 2
block: &haproxy_puppet_bundle
- name: HAproxy puppet bundle
import_role:
name: tripleo_ha_wrapper
vars:
tripleo_ha_wrapper_service_name: haproxy
tripleo_ha_wrapper_resource_name: haproxy-bundle
tripleo_ha_wrapper_bundle_name: haproxy-bundle
tripleo_ha_wrapper_resource_state: Started
tripleo_ha_wrapper_puppet_config_volume: haproxy
tripleo_ha_wrapper_puppet_execute: 'include ::tripleo::profile::base::pacemaker; include ::tripleo::profile::pacemaker::haproxy_bundle'
tripleo_ha_wrapper_puppet_tags: 'pacemaker::resource::bundle,pacemaker::property,pacemaker::resource::ip,pacemaker::resource::ocf,pacemaker::constraint::order,pacemaker::constraint::colocation'
tripleo_ha_wrapper_puppet_debug: {get_param: ConfigDebug}
- if:
- public_tls_enabled
- get_attr: [HAProxyPublicTLS, role_data, deploy_steps_tasks]
- []
- if:
- {get_param: EnableInternalTLS}
- get_attr: [HAProxyInternalTLS, role_data, deploy_steps_tasks]
update_tasks:
- name: Set HAProxy upgrade facts
block: &haproxy_update_upgrade_facts
- name: set is_haproxy_bootstrap_node fact
tags: common
set_fact: is_haproxy_bootstrap_node={{haproxy_short_bootstrap_node_name|lower == ansible_facts['hostname']|lower}}
when:
- haproxy_short_bootstrap_node_name|default(false)
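        # The update flow below only touches the CIB on the bootstrap node at step 1: if the
        # haproxy-cert storage mapping is missing (cibadmin exits with rc 6), the bundle is
        # disabled, the public certificate bind mount is added on TLS-enabled deployments, and
        # the bundle is re-enabled.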
- name: Mount TLS cert if needed
when:
- step|int == 1
- is_haproxy_bootstrap_node
block:
- name: Check haproxy public certificate configuration in pacemaker
command: cibadmin --query --xpath "//storage-mapping[@id='haproxy-cert']"
failed_when: false
register: haproxy_cert_mounted
- name: Disable the haproxy cluster resource
pacemaker_resource:
resource: haproxy-bundle
state: disable
wait_for_resource: true
register: output
retries: 5
until: output.rc == 0
# rc == 6 means the configuration doesn't exist in the CIB
when: haproxy_cert_mounted.rc == 6
- name: Set HAProxy public cert volume mount fact
set_fact:
haproxy_public_cert_path: {get_param: DeployedSSLCertificatePath}
haproxy_public_tls_enabled: {if: [public_tls_enabled, true, false]}
- name: Add a bind mount for public certificate in the haproxy bundle
command: pcs resource bundle update haproxy-bundle storage-map add id=haproxy-cert source-dir={{ haproxy_public_cert_path }} target-dir=/var/lib/kolla/config_files/src-tls/{{ haproxy_public_cert_path }} options=ro
when: haproxy_cert_mounted.rc == 6 and haproxy_public_tls_enabled|bool
- name: Enable the haproxy cluster resource
pacemaker_resource:
resource: haproxy-bundle
state: enable
wait_for_resource: true
register: output
retries: 5
until: output.rc == 0
when: haproxy_cert_mounted.rc == 6
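        # Minor-update image handling: pull the new haproxy image and, only when its id differs
        # from the image currently tagged :pcmklatest, move the floating tag so pacemaker picks
        # up the new image on its next restart.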
- name: Haproxy fetch and retag container image for pacemaker
when:
- step|int == 2
block: &haproxy_fetch_retag_container_tasks
- name: Get container haproxy image
set_fact:
haproxy_image: {get_attr: [RoleParametersValue, value, ContainerHAProxyImage]}
haproxy_image_latest: *haproxy_image_pcmklatest
- name: Pull latest haproxy images
command: "{{container_cli}} pull {{haproxy_image}}"
register: result
retries: 3
delay: 3
until: result.rc == 0
- name: Get previous haproxy image id
shell: "{{container_cli}} inspect --format '{{'{{'}}.Id{{'}}'}}' {{haproxy_image_latest}}"
register: old_haproxy_image_id
failed_when: false
- name: Get new haproxy image id
shell: "{{container_cli}} inspect --format '{{'{{'}}.Id{{'}}'}}' {{haproxy_image}}"
register: new_haproxy_image_id
- name: Retag pcmklatest to latest haproxy image
include_role:
name: tripleo_container_tag
vars:
container_image: "{{haproxy_image}}"
container_image_latest: "{{haproxy_image_latest}}"
when:
- old_haproxy_image_id.stdout != new_haproxy_image_id.stdout
post_update_tasks:
- name: HAProxy bundle post update
when: step|int == 1
block: *haproxy_puppet_bundle
vars:
tripleo_ha_wrapper_minor_update: true
upgrade_tasks:
- name: Prepare switch of haproxy image name
when:
- step|int == 0
block:
- name: Get haproxy image id currently used by pacemaker
shell: "pcs resource config haproxy-bundle | grep -Eo 'image=[^ ]+' | awk -F= '{print $2;}'"
register: haproxy_image_current_res
failed_when: false
- name: Image facts for haproxy
set_fact:
haproxy_image_latest: *haproxy_image_pcmklatest
haproxy_image_current: "{{haproxy_image_current_res.stdout}}"
- name: Temporarily tag the current haproxy image id with the upgraded image name
import_role:
name: tripleo_container_tag
vars:
container_image: "{{haproxy_image_current}}"
container_image_latest: "{{haproxy_image_latest}}"
pull_image: false
when:
- haproxy_image_current != ''
- haproxy_image_current != haproxy_image_latest
# During an OS Upgrade, the cluster may not exist so we use
# the shell module instead.
# TODO(odyssey4me):
# Fix the pacemaker_resource module to handle the exception
# for a non-existent cluster more gracefully.
- name: Check haproxy cluster resource status
shell: pcs resource config haproxy-bundle
failed_when: false
changed_when: false
register: haproxy_pcs_res_result
- name: Set upgrade haproxy facts
set_fact:
haproxy_pcs_res: "{{haproxy_pcs_res_result.rc == 0}}"
is_haproxy_bootstrap_node: "{{haproxy_short_bootstrap_node_name|lower == ansible_facts['hostname']|lower}}"
- name: Update haproxy pcs resource bundle for new container image
when:
- step|int == 1
- is_haproxy_bootstrap_node|bool
- haproxy_pcs_res|bool
- haproxy_image_current != haproxy_image_latest
block:
- name: Disable the haproxy cluster resource before container upgrade
pacemaker_resource:
resource: haproxy-bundle
state: disable
wait_for_resource: true
register: output
retries: 5
until: output.rc == 0
- name: Expose HAProxy stats socket on the host and mount TLS cert if needed
block:
- name: Check haproxy stats socket configuration in pacemaker
command: cibadmin --query --xpath "//storage-mapping[@id='haproxy-var-lib']"
failed_when: false
register: haproxy_stats_exposed
- name: Check haproxy public certificate configuration in pacemaker
command: cibadmin --query --xpath "//storage-mapping[@id='haproxy-cert']"
failed_when: false
register: haproxy_cert_mounted
- name: Add a bind mount for stats socket in the haproxy bundle
command: pcs resource bundle update haproxy-bundle storage-map add id=haproxy-var-lib source-dir=/var/lib/haproxy target-dir=/var/lib/haproxy options=rw
# rc == 6 means the configuration doesn't exist in the CIB
when: haproxy_stats_exposed.rc == 6
- name: Set HAProxy public cert volume mount fact
set_fact:
haproxy_public_cert_path: {get_param: DeployedSSLCertificatePath}
haproxy_public_tls_enabled: {if: [public_tls_enabled, true, false]}
- name: Add a bind mount for public certificate in the haproxy bundle
command: pcs resource bundle update haproxy-bundle storage-map add id=haproxy-cert source-dir={{ haproxy_public_cert_path }} target-dir=/var/lib/kolla/config_files/src-tls/{{ haproxy_public_cert_path }} options=ro
when:
- haproxy_cert_mounted.rc == 6
- haproxy_public_tls_enabled|bool
- name: Update the haproxy bundle to use the new container image name
command: "pcs resource bundle update haproxy-bundle container image={{haproxy_image_latest}}"
- name: Enable the haproxy cluster resource
pacemaker_resource:
resource: haproxy-bundle
state: enable
wait_for_resource: true
register: output
retries: 5
until: output.rc == 0
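        # Staged upgrade bookkeeping: derive the list of already-upgraded haproxy nodes from
        # the ansible --limit option, publish it to hiera as haproxy_short_node_names_override,
        # and remove that override again once every haproxy node has been upgraded.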
- name: Create hiera data to upgrade haproxy in a stepwise manner.
when:
- step|int == 1
- cluster_recreate|bool
block:
- name: set haproxy upgrade node facts in a single-node environment
set_fact:
haproxy_short_node_names_upgraded: "{{ haproxy_short_node_names }}"
cacheable: false
when: groups['haproxy'] | length <= 1
- name: set haproxy upgrade node facts from the limit option
set_fact:
haproxy_short_node_names_upgraded: "{{ haproxy_short_node_names_upgraded|default([]) + [item.split('.')[0]] }}"
cacheable: false
when:
- groups['haproxy'] | length > 1
- item.split('.')[0] in ansible_limit.split(':')
loop: "{{ haproxy_short_node_names | default([]) }}"
- fail:
msg: >
            You can't upgrade haproxy without a staged
            upgrade. You need to use the limit option in order
            to do so.
when: >-
haproxy_short_node_names_upgraded is not defined or
haproxy_short_node_names_upgraded | length == 0
- debug:
msg: "Prepare haproxy upgrade for {{ haproxy_short_node_names_upgraded }}"
- name: remove haproxy init container on upgrade-scaleup to force re-init
include_role:
name: tripleo_container_rm
vars:
tripleo_containers_to_rm:
- haproxy_init_bundle
when:
- haproxy_short_node_names_upgraded | length > 1
- name: add the haproxy short name to hiera data for the upgrade.
include_role:
name: tripleo_upgrade_hiera
tasks_from: set.yml
vars:
tripleo_upgrade_key: haproxy_short_node_names_override
tripleo_upgrade_value: "{{haproxy_short_node_names_upgraded}}"
- name: remove the extra hiera data needed for the upgrade.
include_role:
name: tripleo_upgrade_hiera
tasks_from: remove.yml
vars:
tripleo_upgrade_key: haproxy_short_node_names_override
when: haproxy_short_node_names_upgraded | length == haproxy_short_node_names | length
- name: Retag the pacemaker image if containerized
when:
- step|int == 3
block: *haproxy_fetch_retag_container_tasks