heat_template_version: wallaby

description: >
  OpenStack containerized Rabbitmq service

parameters:
  ContainerRabbitmqImage:
    description: The container image to use for the rabbitmq service
    type: string
  ContainerRabbitmqConfigImage:
    description: The container image to use for the rabbitmq config_volume
    type: string
  ClusterCommonTag:
    default: false
    description: When set to false, a pacemaker service is configured
                 to use a floating tag for its container image name,
                 e.g. 'REGISTRY/NAMESPACE/IMAGENAME:pcmklatest'. When
                 set to true, the service uses a floating prefix as
                 well, e.g. 'cluster.common.tag/IMAGENAME:pcmklatest'.
    type: boolean
  ClusterFullTag:
    default: false
    description: When set to true, the pacemaker service uses a fully
                 constant tag for its container image name, e.g.
                 'cluster.common.tag/SERVICENAME:pcmklatest'.
    type: boolean
  EndpointMap:
    default: {}
    description: Mapping of service endpoint -> protocol. Typically set
                 via parameter_defaults in the resource registry.
    type: json
  ServiceData:
    default: {}
    description: Dictionary packing service data
    type: json
  ServiceNetMap:
    default: {}
    description: Mapping of service_name -> network name. Typically set
                 via parameter_defaults in the resource registry. Use
                 parameter_merge_strategies to merge it with the defaults.
    type: json
  RabbitCookie:
    type: string
    default: ''
    hidden: true
  RoleName:
    default: ''
    description: Role name on which the service is applied
    type: string
  RoleParameters:
    default: {}
    description: Parameters specific to the role
    type: json
  ConfigDebug:
    default: false
    description: Whether to run config management (e.g. Puppet) in debug mode.
    type: boolean
  ContainerCli:
    type: string
    default: 'podman'
    description: CLI tool used to manage containers.
    constraints:
      - allowed_values: ['docker', 'podman']
  DeployIdentifier:
    default: ''
    type: string
    description: >
      Setting this to a unique value will re-run any deployment tasks which
      perform configuration on a Heat stack-update.
resources:

  ContainersCommon:
    type: ../containers-common.yaml

  RabbitmqBase:
    type: ./rabbitmq-messaging-notify-container-puppet.yaml
    properties:
      EndpointMap: {get_param: EndpointMap}
      ServiceData: {get_param: ServiceData}
      ServiceNetMap: {get_param: ServiceNetMap}
      RoleName: {get_param: RoleName}
      RoleParameters: {get_param: RoleParameters}
outputs:
  role_data:
    description: Role data for the Rabbitmq role.
    value:
      service_name: {get_attr: [RabbitmqBase, role_data, service_name]}
      firewall_rules:
        '109 rabbitmq-bundle':
          dport:
            - 3122
            - 4369
            - 5672
            - 25672
            - 25673-25683
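            # Quick port reference (a summary only, not consumed by the template):
            #   3122        pacemaker_remoted control port for the bundle
            #   4369        epmd (Erlang port mapper)
            #   5672        AMQP
            #   25672       Erlang inter-node distribution
            #   25673-25683 extra range reserved for inter-node/CLI traffic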
      global_config_settings: {get_attr: [RabbitmqBase, role_data, global_config_settings]}
      config_settings:
        map_merge:
          - {get_attr: [RabbitmqBase, role_data, config_settings]}
          - rabbitmq::service_manage: false
            tripleo::profile::pacemaker::rabbitmq_bundle::rabbitmq_docker_image: &rabbitmq_image_pcmklatest
              if:
                - {get_param: ClusterFullTag}
                - "cluster.common.tag/rabbitmq:pcmklatest"
                - yaql:
                    data:
                      if:
                        - {get_param: ClusterCommonTag}
                        - yaql:
                            data: {get_param: ContainerRabbitmqImage}
                            expression: concat("cluster.common.tag/", $.data.rightSplit(separator => "/", maxSplits => 1)[1])
                        - {get_param: ContainerRabbitmqImage}
                    expression: concat($.data.rightSplit(separator => ":", maxSplits => 1)[0], ":pcmklatest")
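            # Rough walk-through of the value produced above, assuming a hypothetical
            # ContainerRabbitmqImage of 'registry.example.com/ns/rabbitmq:2023.1':
            #   ClusterFullTag: true    -> cluster.common.tag/rabbitmq:pcmklatest
            #   ClusterCommonTag: true  -> cluster.common.tag/rabbitmq:pcmklatest
            #   both false              -> registry.example.com/ns/rabbitmq:pcmklatest
            # i.e. the image tag is always replaced by the floating 'pcmklatest' tag, and
            # the registry/namespace prefix is only replaced when a common tag is requested.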
            tripleo::profile::pacemaker::rabbitmq_bundle::rabbitmq_docker_control_port: 3122
            tripleo::profile::pacemaker::rabbitmq_bundle::container_backend: {get_param: ContainerCli}
      service_config_settings: {get_attr: [RabbitmqBase, role_data, service_config_settings]}
      # BEGIN DOCKER SETTINGS
      puppet_config:
        config_volume: rabbitmq
        puppet_tags: file
        step_config:
          list_join:
            - "\n"
            - - "['Rabbitmq_policy', 'Rabbitmq_user'].each |String $val| { noop_resource($val) }"
              - "include tripleo::profile::base::rabbitmq"
        config_image: {get_param: ContainerRabbitmqConfigImage}
      kolla_config:
        /var/lib/kolla/config_files/rabbitmq.json:
          command: /usr/sbin/pacemaker_remoted
          config_files:
            - dest: /etc/libqb/force-filesystem-sockets
              source: /dev/null
              owner: root
              perm: '0644'
            - dest: /var/log/btmp
              source: /dev/null
              owner: root:utmp
              perm: '0600'
            - source: "/var/lib/kolla/config_files/src/*"
              dest: "/"
              merge: true
              preserve_properties: true
            - source: "/var/lib/kolla/config_files/src-tls/*"
              dest: "/"
              merge: true
              optional: true
              preserve_properties: true
          permissions:
            - path: /var/lib/rabbitmq
              owner: rabbitmq:rabbitmq
              recurse: true
            - path: /var/log/rabbitmq
              owner: rabbitmq:rabbitmq
              recurse: true
            - path: /etc/pki/tls/certs/rabbitmq.crt
              owner: rabbitmq:rabbitmq
              perm: '0600'
              optional: true
            - path: /etc/pki/tls/private/rabbitmq.key
              owner: rabbitmq:rabbitmq
              perm: '0600'
              optional: true
      # When using pacemaker we don't launch the container here; that is done by
      # pacemaker itself.
      container_config_scripts: {get_attr: [ContainersCommon, container_config_scripts]}
      docker_config:
        step_1:
          rabbitmq_bootstrap:
            start_order: 0
            image: {get_param: ContainerRabbitmqImage}
            net: host
            privileged: false
            volumes:
              - /var/lib/kolla/config_files/rabbitmq.json:/var/lib/kolla/config_files/config.json:ro
              - /var/lib/config-data/puppet-generated/rabbitmq:/var/lib/kolla/config_files/src:ro
              - /etc/hosts:/etc/hosts:ro
              - /etc/localtime:/etc/localtime:ro
              - /var/lib/rabbitmq:/var/lib/rabbitmq:z
            environment:
              KOLLA_CONFIG_STRATEGY: COPY_ALWAYS
              KOLLA_BOOTSTRAP: true
              # NOTE: this should force this container to re-run on each
              # update (scale-out, etc.)
              TRIPLEO_DEPLOY_IDENTIFIER: {get_param: DeployIdentifier}
              RABBITMQ_CLUSTER_COOKIE: {get_param: RabbitCookie}
        step_2:
          rabbitmq_wait_bundle:
            start_order: 0
            detach: false
            net: host
            ipc: host
            user: root
            command: # '/container_puppet_apply.sh "STEP" "TAGS" "CONFIG" "DEBUG"'
              list_concat:
                - - '/container_puppet_apply.sh'
                  - '2'
                  - 'file,file_line,concat,augeas,rabbitmq_policy,rabbitmq_user,rabbitmq_ready'
                  - 'include tripleo::profile::pacemaker::rabbitmq_bundle'
                - if:
                    - {get_param: ConfigDebug}
                    - - '--debug'
            image: {get_param: ContainerRabbitmqImage}
            volumes:
              list_concat:
                - {get_attr: [ContainersCommon, container_puppet_apply_volumes]}
                - - /bin/true:/bin/epmd
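                # Mounting /bin/true over /bin/epmd keeps this short-lived puppet-apply
                # container (which shares the host network) from starting its own Erlang
                # port mapper on the host.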
            environment:
              # https://launchpad.net/bugs/1822673 (LANG/LC_ALL set to UTF-8 is an Elixir requirement)
              LANG: 'en_US.UTF-8'
              LC_ALL: 'en_US.UTF-8'
              # NOTE: this should force this container to re-run on each
              # update (scale-out, etc.)
              TRIPLEO_DEPLOY_IDENTIFIER: {get_param: DeployIdentifier}
      host_prep_tasks:
        - name: create persistent directories
          file:
            path: "{{ item.path }}"
            state: directory
            setype: "{{ item.setype }}"
            mode: "{{ item.mode|default(omit) }}"
          with_items:
            - { 'path': /var/lib/rabbitmq, 'setype': container_file_t }
            - { 'path': /var/log/containers/rabbitmq, 'setype': container_file_t, 'mode': '0750' }
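        # The next task moves the host's epmd off port 4369 (ERL_EPMD_PORT=4370, loopback
        # only) and kills any running instance, so the epmd inside the rabbitmq bundle can
        # own 4369, the port opened in firewall_rules above.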
        - name: stop the Erlang port mapper on the host and make sure it cannot bind to the port used by the container
          shell: |
            echo 'export ERL_EPMD_ADDRESS=127.0.0.1' > /etc/rabbitmq/rabbitmq-env.conf
            echo 'export ERL_EPMD_PORT=4370' >> /etc/rabbitmq/rabbitmq-env.conf
            for pid in $(pgrep epmd --ns 1 --nslist pid); do kill $pid; done
      metadata_settings:
        get_attr: [RabbitmqBase, role_data, metadata_settings]
      deploy_steps_tasks:
        list_concat:
          - get_attr: [RabbitmqBase, role_data, deploy_steps_tasks]
          - - name: RabbitMQ tag container image for pacemaker
              when: step|int == 1
              import_role:
                name: tripleo_container_tag
              vars:
                container_image: {get_param: ContainerRabbitmqImage}
                container_image_latest: *rabbitmq_image_pcmklatest
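            # The block below is anchored as &oslo_messaging_notify_puppet_bundle so that
            # post_update_tasks further down can re-run the same HA wrapper after a minor
            # update.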
            - name: RabbitMQ Notify HA Wrappers Step
              when: step|int == 2
              block: &oslo_messaging_notify_puppet_bundle
                - name: RabbitMQ notify puppet bundle
                  import_role:
                    name: tripleo_ha_wrapper
                  vars:
                    tripleo_ha_wrapper_service_name: oslo_messaging_notify
                    tripleo_ha_wrapper_resource_name: rabbitmq
                    tripleo_ha_wrapper_bundle_name: rabbitmq-bundle
                    tripleo_ha_wrapper_resource_state: Started
                    tripleo_ha_wrapper_puppet_config_volume: rabbitmq
                    tripleo_ha_wrapper_puppet_execute: '["Rabbitmq_policy", "Rabbitmq_user"].each |String $val| { noop_resource($val) }; include ::tripleo::profile::base::pacemaker; include ::tripleo::profile::pacemaker::rabbitmq_bundle'
                    tripleo_ha_wrapper_puppet_tags: 'pacemaker::resource::bundle,pacemaker::property,pacemaker::resource::ip,pacemaker::resource::ocf,pacemaker::constraint::order,pacemaker::constraint::colocation'
                    tripleo_ha_wrapper_puppet_debug: {get_param: ConfigDebug}
      update_tasks:
        - name: Tear-down non-HA rabbitmq container
          when:
            - step|int == 1
          block: &rabbitmq_teardown_nonha
            - name: Remove non-HA rabbitmq container
              include_role:
                name: tripleo_container_rm
              vars:
                tripleo_container_cli: "{{ container_cli }}"
                tripleo_containers_to_rm:
                  - rabbitmq
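        # Minor-update image handling: the new image is pulled, compared by id with what
        # the floating 'pcmklatest' tag currently points to, and only retagged when it
        # changed; the pacemaker bundle definition itself keeps referencing 'pcmklatest'.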
        - name: Rabbit fetch and retag container image for pacemaker
          when: step|int == 2
          block: &rabbitmq_fetch_retag_container_tasks
            - name: Get container rabbitmq image
              set_fact:
                rabbitmq_image: {get_param: ContainerRabbitmqImage}
                rabbitmq_image_latest: *rabbitmq_image_pcmklatest
            - name: Pull latest rabbitmq images
              command: "{{container_cli}} pull {{rabbitmq_image}}"
              register: result
              retries: 3
              delay: 3
              until: result.rc == 0
            - name: Get previous rabbitmq image id
              shell: "{{container_cli}} inspect --format '{{'{{'}}.Id{{'}}'}}' {{rabbitmq_image_latest}}"
              register: old_rabbitmq_image_id
              failed_when: false
            - name: Get new rabbitmq image id
              shell: "{{container_cli}} inspect --format '{{'{{'}}.Id{{'}}'}}' {{rabbitmq_image}}"
              register: new_rabbitmq_image_id
            - name: Retag pcmklatest to latest rabbitmq image
              include_role:
                name: tripleo_container_tag
              vars:
                container_image: "{{rabbitmq_image}}"
                container_image_latest: "{{rabbitmq_image_latest}}"
              when:
                - old_rabbitmq_image_id.stdout != new_rabbitmq_image_id.stdout
      post_update_tasks:
        - name: Rabbitmq notify bundle post update
          when: step|int == 1
          block: *oslo_messaging_notify_puppet_bundle
          vars:
            tripleo_ha_wrapper_minor_update: true

      upgrade_tasks:
        - name: Tear-down non-HA rabbitmq container
          when:
            - step|int == 0
          block: *rabbitmq_teardown_nonha
        - name: Prepare switch of rabbitmq image name
          when:
            - step|int == 0
          block:
            - name: Get rabbitmq image id currently used by pacemaker
              shell: "pcs resource config rabbitmq-bundle | grep -Eo 'image=[^ ]+' | awk -F= '{print $2;}'"
              register: rabbitmq_image_current_res
              failed_when: false
            - name: Image facts for rabbitmq
              set_fact:
                rabbitmq_image_latest: *rabbitmq_image_pcmklatest
                rabbitmq_image_current: "{{rabbitmq_image_current_res.stdout}}"
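            # The image id currently used by pacemaker is temporarily given the new
            # 'pcmklatest' name here, so the running bundle stays valid until the bundle
            # definition is switched to the new image at step 1.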
            - name: Prepare the switch to new rabbitmq container image name in pacemaker
              block:
                - name: Temporarily tag the current rabbitmq image id with the upgraded image name
                  import_role:
                    name: tripleo_container_tag
                  vars:
                    container_image: "{{rabbitmq_image_current}}"
                    container_image_latest: "{{rabbitmq_image_latest}}"
                    pull_image: false
              when:
                - rabbitmq_image_current != ''
                - rabbitmq_image_current != rabbitmq_image_latest
            - name: Check rabbitmq cluster resource status
              shell: pcs resource config rabbitmq-bundle
              failed_when: false
              register: rabbitmq_pcs_res_result
            - name: Set fact rabbitmq_pcs_res
              set_fact:
                rabbitmq_pcs_res: "{{rabbitmq_pcs_res_result.rc == 0}}"
            - name: set is_notify_rabbitmq_bootstrap_node fact
              set_fact: is_notify_rabbitmq_bootstrap_node={{oslo_messaging_notify_short_bootstrap_node_name|lower == ansible_facts['hostname']|lower}}
        - name: Update rabbitmq-bundle pcs resource bundle for new container image
          when:
            - step|int == 1
            - is_notify_rabbitmq_bootstrap_node|bool
            - rabbitmq_pcs_res|bool
            - rabbitmq_image_current != rabbitmq_image_latest
          block:
            - name: Disable the rabbitmq cluster resource before container upgrade
              pacemaker_resource:
                resource: rabbitmq-bundle
                state: disable
                wait_for_resource: true
              register: output
              retries: 5
              until: output.rc == 0
            - name: Move rabbitmq logging to /var/log/containers
              block:
                - name: Check rabbitmq logging configuration in pacemaker
                  command: cibadmin --query --xpath "//storage-mapping[@id='rabbitmq-log']"
                  failed_when: false
                  register: rabbitmq_logs_moved
                - name: Add a bind mount for logging in the rabbitmq bundle
                  # rc == 6 means the configuration doesn't exist in the CIB
                  when: rabbitmq_logs_moved.rc == 6
                  command: pcs resource bundle update rabbitmq-bundle storage-map add id=rabbitmq-log source-dir=/var/log/containers/rabbitmq target-dir=/var/log/rabbitmq options=rw
            - name: Update the rabbitmq bundle to use the new container image name
              command: "pcs resource bundle update rabbitmq-bundle container image={{rabbitmq_image_latest}}"
            - name: Enable the rabbitmq cluster resource
              pacemaker_resource:
                resource: rabbitmq-bundle
                state: enable
                wait_for_resource: true
              register: output
              retries: 5
              until: output.rc == 0
        - name: Create hiera data to upgrade oslo messaging notify in a stepwise manner.
          when:
            - step|int == 1
            - cluster_recreate|bool
          block:
            - name: set oslo_messaging_notify upgrade node facts in a single-node environment
              set_fact:
                oslo_messaging_notify_short_node_names_upgraded: "{{ oslo_messaging_notify_short_node_names }}"
                oslo_messaging_notify_node_names_upgraded: "{{ oslo_messaging_notify_node_names }}"
                cacheable: no
              when: groups['oslo_messaging_notify'] | length <= 1
            - name: set oslo_messaging_notify upgrade node facts from the limit option
              set_fact:
                oslo_messaging_notify_short_node_names_upgraded: "{{ oslo_messaging_notify_short_node_names_upgraded|default([]) + [item.split('.')[0]] }}"
                oslo_messaging_notify_node_names_upgraded: "{{ oslo_messaging_notify_node_names_upgraded|default([]) + [item] }}"
                cacheable: no
              when:
                - groups['oslo_messaging_notify'] | length > 1
                - item.split('.')[0] in ansible_limit.split(':')
              loop: "{{ oslo_messaging_notify_node_names | default([]) }}"
            - fail:
                msg: >
                  You can't upgrade oslo_messaging_notify without a
                  staged upgrade. You need to use the limit option in order
                  to do so.
              when: >-
                oslo_messaging_notify_short_node_names_upgraded is not defined or
                oslo_messaging_notify_short_node_names_upgraded | length == 0 or
                oslo_messaging_notify_node_names_upgraded is not defined or
                oslo_messaging_notify_node_names_upgraded | length == 0
            - debug:
                msg: "Prepare oslo_messaging_notify upgrade for {{ oslo_messaging_notify_short_node_names_upgraded }}"
            - name: remove rabbitmq init container on upgrade-scaleup to force re-init
              include_role:
                name: tripleo_container_rm
              vars:
                tripleo_containers_to_rm:
                  - rabbitmq_wait_bundle
              when:
                - oslo_messaging_notify_short_node_names_upgraded | length > 1
            - name: add the oslo_messaging_notify short name to hiera data for the upgrade
              include_role:
                name: tripleo_upgrade_hiera
                tasks_from: set.yml
              vars:
                tripleo_upgrade_key: oslo_messaging_notify_short_node_names_override
                tripleo_upgrade_value: "{{oslo_messaging_notify_short_node_names_upgraded}}"
            - name: add the oslo_messaging_notify long name to hiera data for the upgrade
              include_role:
                name: tripleo_upgrade_hiera
                tasks_from: set.yml
              vars:
                tripleo_upgrade_key: oslo_messaging_notify_node_names_override
                tripleo_upgrade_value: "{{oslo_messaging_notify_node_names_upgraded}}"
            - name: remove the extra hiera data needed for the upgrade
              include_role:
                name: tripleo_upgrade_hiera
                tasks_from: remove.yml
              vars:
                tripleo_upgrade_key: "{{item}}"
              loop:
                - oslo_messaging_notify_short_node_names_override
                - oslo_messaging_notify_node_names_override
              when: oslo_messaging_notify_short_node_names_upgraded | length == oslo_messaging_notify_node_names | length
        - name: Retag the pacemaker image if containerized
          when:
            - step|int == 3
          block: *rabbitmq_fetch_retag_container_tasks