[train-only][ffwd] Fix pacemaker container selinux context

Change If796d3c1d37c72655bd6b99e851a3b3e7617b9a4 introduced
upgrade tasks that fix the SELinux context, but in some cases
only for the non-pacemaker-controlled services, while the
majority of installations use the pacemaker-controlled ones.

Resolves-bug: rhbz#2021525
Change-Id: I0038872197f34d8a1acd954f90b5b69281cea7dc
Author: Lukas Bezdicka
Date:   2022-02-10 21:51:23 +01:00
Parent: d6f2a3b3c2
Commit: 8f4faae199

3 changed files with 288 additions and 275 deletions
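
On deployments where the HA services run as Pacemaker bundles rather than
plain container services, the relabel tasks from the earlier change never
ran; the hunks below add the relabel to the bundle templates. As a rough
illustration of what the relabel does, a minimal Ansible sketch (the check
task and its registered variable are hypothetical, not part of this change):

    - name: Check the current SELinux type on the MySQL data directory
      command: ls -Zd /var/lib/mysql
      register: mysql_label
      changed_when: false
    - name: Relabel so the containerized service can read and write the files
      file:
        path: /var/lib/mysql
        setype: svirt_sandbox_file_t
        recurse: true
      when: "'svirt_sandbox_file_t' not in mysql_label.stdout"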

@@ -373,6 +373,16 @@ outputs:
tripleo_ha_wrapper_minor_update: true
upgrade_tasks:
- name: "Ensure correct label on {{ item }}"
when:
- step|int == 0
file:
path: "{{ item }}"
setype: svirt_sandbox_file_t
recurse: true
loop:
- "/var/lib/mysql"
- "/var/log/containers/mysql"
- vars:
mysql_upgrade_persist: {get_param: MysqlUpgradePersist}
when:
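
Note that setype on the file module applies the label directly, in the
manner of chcon; it survives a full relabel only if a matching file-context
rule exists in the policy. A hedged sketch of making the label persistent
(hypothetical hardening, not part of this commit; assumes the
community.general collection is installed):

    - name: Persist the container file context for the MySQL paths
      # hypothetical: the commit itself only relabels files in place
      community.general.sefcontext:
        target: "{{ item }}(/.*)?"
        setype: svirt_sandbox_file_t
        state: present
      loop:
        - /var/lib/mysql
        - /var/log/containers/mysql
    - name: Apply the persisted context to existing files
      command: restorecon -R /var/lib/mysql /var/log/containers/mysql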

@@ -422,142 +422,143 @@ outputs:
tripleo_ha_wrapper_minor_update: true
upgrade_tasks:
- name: Prepare switch of haproxy image name
when:
- step|int == 0
block:
- name: Get haproxy image id currently used by pacemaker
shell: "pcs resource config haproxy-bundle | grep -Eo 'image=[^ ]+' | awk -F= '{print $2;}'"
register: haproxy_image_current_res
failed_when: false
- name: Image facts for haproxy
set_fact:
haproxy_image_latest: *haproxy_image_pcmklatest
haproxy_image_current: "{{haproxy_image_current_res.stdout}}"
- name: Temporarily tag the current haproxy image id with the upgraded image name
import_role:
name: tripleo-container-tag
vars:
container_image: "{{haproxy_image_current}}"
container_image_latest: "{{haproxy_image_latest}}"
pull_image: false
when:
- haproxy_image_current != ''
- haproxy_image_current != haproxy_image_latest
# During an OS Upgrade, the cluster may not exist so we use
# the shell module instead.
# TODO(odyssey4me):
# Fix the pacemaker_resource module to handle the exception
# for a non-existent cluster more gracefully.
- name: Check haproxy cluster resource status
shell: pcs resource config haproxy-bundle
failed_when: false
changed_when: false
register: haproxy_pcs_res_result
- name: Set upgrade haproxy facts
set_fact:
haproxy_pcs_res: "{{haproxy_pcs_res_result.rc == 0}}"
is_haproxy_bootstrap_node: "{{haproxy_short_bootstrap_node_name|lower == ansible_facts['hostname']|lower}}"
- name: Update haproxy pcs resource bundle for new container image
when:
- step|int == 1
- is_haproxy_bootstrap_node|bool
- haproxy_pcs_res|bool
- haproxy_image_current != haproxy_image_latest
block:
- name: Disable the haproxy cluster resource before container upgrade
pacemaker_resource:
resource: haproxy-bundle
state: disable
wait_for_resource: true
register: output
retries: 5
until: output.rc == 0
- name: Expose HAProxy stats socket on the host and mount TLS cert if needed
block:
- name: Check haproxy stats socket configuration in pacemaker
command: cibadmin --query --xpath "//storage-mapping[@id='haproxy-var-lib']"
failed_when: false
register: haproxy_stats_exposed
- name: Check haproxy public certificate configuration in pacemaker
command: cibadmin --query --xpath "//storage-mapping[@id='haproxy-cert']"
failed_when: false
register: haproxy_cert_mounted
- name: Add a bind mount for stats socket in the haproxy bundle
command: pcs resource bundle update haproxy-bundle storage-map add id=haproxy-var-lib source-dir=/var/lib/haproxy target-dir=/var/lib/haproxy options=rw
# rc == 6 means the configuration doesn't exist in the CIB
when: haproxy_stats_exposed.rc == 6
- name: Set HAProxy public cert volume mount fact
set_fact:
haproxy_public_cert_path: {get_param: DeployedSSLCertificatePath}
haproxy_public_tls_enabled: {if: [public_tls_enabled, true, false]}
- name: Add a bind mount for public certificate in the haproxy bundle
command: pcs resource bundle update haproxy-bundle storage-map add id=haproxy-cert source-dir={{ haproxy_public_cert_path }} target-dir=/var/lib/kolla/config_files/src-tls/{{ haproxy_public_cert_path }} options=ro
when:
- haproxy_cert_mounted.rc == 6
- haproxy_public_tls_enabled|bool
- name: Update the haproxy bundle to use the new container image name
command: "pcs resource bundle update haproxy-bundle container image={{haproxy_image_latest}}"
- name: Enable the haproxy cluster resource
pacemaker_resource:
resource: haproxy-bundle
state: enable
wait_for_resource: true
register: output
retries: 5
until: output.rc == 0
- name: Create hiera data to upgrade haproxy in a stepwise manner.
when:
- step|int == 1
- cluster_recreate|bool
block:
- name: set haproxy upgrade node facts in a single-node environment
set_fact:
haproxy_short_node_names_upgraded: "{{ haproxy_short_node_names }}"
cacheable: no
when: groups['haproxy'] | length <= 1
- name: set haproxy upgrade node facts from the limit option
set_fact:
haproxy_short_node_names_upgraded: "{{ haproxy_short_node_names_upgraded|default([]) + [item.split('.')[0]] }}"
cacheable: no
when:
- groups['haproxy'] | length > 1
- item.split('.')[0] in ansible_limit.split(':')
loop: "{{ haproxy_short_node_names }}"
- fail:
msg: >
You can't upgrade haproxy without a staged
upgrade. You need to use the limit option in order
to do so.
when: >-
haproxy_short_node_names_upgraded is not defined or
haproxy_short_node_names_upgraded | length == 0
- debug:
msg: "Prepare haproxy upgrade for {{ haproxy_short_node_names_upgraded }}"
- name: remove haproxy init container on upgrade-scaleup to force re-init
include_role:
name: tripleo-container-rm
vars:
tripleo_containers_to_rm:
- haproxy_init_bundle
when:
- haproxy_short_node_names_upgraded | length > 1
- name: add the haproxy short name to hiera data for the upgrade.
include_role:
name: tripleo-upgrade-hiera
tasks_from: set.yml
vars:
tripleo_upgrade_key: haproxy_short_node_names_override
tripleo_upgrade_value: "{{haproxy_short_node_names_upgraded}}"
- name: remove the extra hiera data needed for the upgrade.
include_role:
name: tripleo-upgrade-hiera
tasks_from: remove.yml
vars:
tripleo_upgrade_key: haproxy_short_node_names_override
when: haproxy_short_node_names_upgraded | length == haproxy_short_node_names | length
- name: Retag the pacemaker image if containerized
when:
- step|int == 3
block: *haproxy_fetch_retag_container_tasks
list_concat:
- {get_attr: [HAProxyBase, role_data, upgrade_tasks]}
- - name: Prepare switch of haproxy image name
when:
- step|int == 0
block:
- name: Get haproxy image id currently used by pacemaker
shell: "pcs resource config haproxy-bundle | grep -Eo 'image=[^ ]+' | awk -F= '{print $2;}'"
register: haproxy_image_current_res
failed_when: false
- name: Image facts for haproxy
set_fact:
haproxy_image_latest: *haproxy_image_pcmklatest
haproxy_image_current: "{{haproxy_image_current_res.stdout}}"
- name: Temporarily tag the current haproxy image id with the upgraded image name
import_role:
name: tripleo-container-tag
vars:
container_image: "{{haproxy_image_current}}"
container_image_latest: "{{haproxy_image_latest}}"
pull_image: false
when:
- haproxy_image_current != ''
- haproxy_image_current != haproxy_image_latest
# During an OS Upgrade, the cluster may not exist so we use
# the shell module instead.
# TODO(odyssey4me):
# Fix the pacemaker_resource module to handle the exception
# for a non-existent cluster more gracefully.
- name: Check haproxy cluster resource status
shell: pcs resource config haproxy-bundle
failed_when: false
changed_when: false
register: haproxy_pcs_res_result
- name: Set upgrade haproxy facts
set_fact:
haproxy_pcs_res: "{{haproxy_pcs_res_result.rc == 0}}"
is_haproxy_bootstrap_node: "{{haproxy_short_bootstrap_node_name|lower == ansible_facts['hostname']|lower}}"
- name: Update haproxy pcs resource bundle for new container image
when:
- step|int == 1
- is_haproxy_bootstrap_node|bool
- haproxy_pcs_res|bool
- haproxy_image_current != haproxy_image_latest
block:
- name: Disable the haproxy cluster resource before container upgrade
pacemaker_resource:
resource: haproxy-bundle
state: disable
wait_for_resource: true
register: output
retries: 5
until: output.rc == 0
- name: Expose HAProxy stats socket on the host and mount TLS cert if needed
block:
- name: Check haproxy stats socket configuration in pacemaker
command: cibadmin --query --xpath "//storage-mapping[@id='haproxy-var-lib']"
failed_when: false
register: haproxy_stats_exposed
- name: Check haproxy public certificate configuration in pacemaker
command: cibadmin --query --xpath "//storage-mapping[@id='haproxy-cert']"
failed_when: false
register: haproxy_cert_mounted
- name: Add a bind mount for stats socket in the haproxy bundle
command: pcs resource bundle update haproxy-bundle storage-map add id=haproxy-var-lib source-dir=/var/lib/haproxy target-dir=/var/lib/haproxy options=rw
# rc == 6 means the configuration doesn't exist in the CIB
when: haproxy_stats_exposed.rc == 6
- name: Set HAProxy public cert volume mount fact
set_fact:
haproxy_public_cert_path: {get_param: DeployedSSLCertificatePath}
haproxy_public_tls_enabled: {if: [public_tls_enabled, true, false]}
- name: Add a bind mount for public certificate in the haproxy bundle
command: pcs resource bundle update haproxy-bundle storage-map add id=haproxy-cert source-dir={{ haproxy_public_cert_path }} target-dir=/var/lib/kolla/config_files/src-tls/{{ haproxy_public_cert_path }} options=ro
when:
- haproxy_cert_mounted.rc == 6
- haproxy_public_tls_enabled|bool
- name: Update the haproxy bundle to use the new container image name
command: "pcs resource bundle update haproxy-bundle container image={{haproxy_image_latest}}"
- name: Enable the haproxy cluster resource
pacemaker_resource:
resource: haproxy-bundle
state: enable
wait_for_resource: true
register: output
retries: 5
until: output.rc == 0
- name: Create hiera data to upgrade haproxy in a stepwise manner.
when:
- step|int == 1
- cluster_recreate|bool
block:
- name: set haproxy upgrade node facts in a single-node environment
set_fact:
haproxy_short_node_names_upgraded: "{{ haproxy_short_node_names }}"
cacheable: no
when: groups['haproxy'] | length <= 1
- name: set haproxy upgrade node facts from the limit option
set_fact:
haproxy_short_node_names_upgraded: "{{ haproxy_short_node_names_upgraded|default([]) + [item.split('.')[0]] }}"
cacheable: no
when:
- groups['haproxy'] | length > 1
- item.split('.')[0] in ansible_limit.split(':')
loop: "{{ haproxy_short_node_names }}"
- fail:
msg: >
You can't upgrade haproxy without a staged
upgrade. You need to use the limit option in order
to do so.
when: >-
haproxy_short_node_names_upgraded is not defined or
haproxy_short_node_names_upgraded | length == 0
- debug:
msg: "Prepare haproxy upgrade for {{ haproxy_short_node_names_upgraded }}"
- name: remove haproxy init container on upgrade-scaleup to force re-init
include_role:
name: tripleo-container-rm
vars:
tripleo_containers_to_rm:
- haproxy_init_bundle
when:
- haproxy_short_node_names_upgraded | length > 1
- name: add the haproxy short name to hiera data for the upgrade.
include_role:
name: tripleo-upgrade-hiera
tasks_from: set.yml
vars:
tripleo_upgrade_key: haproxy_short_node_names_override
tripleo_upgrade_value: "{{haproxy_short_node_names_upgraded}}"
- name: remove the extra hiera data needed for the upgrade.
include_role:
name: tripleo-upgrade-hiera
tasks_from: remove.yml
vars:
tripleo_upgrade_key: haproxy_short_node_names_override
when: haproxy_short_node_names_upgraded | length == haproxy_short_node_names | length
- name: Retag the pacemaker image if containerized
when:
- step|int == 3
block: *haproxy_fetch_retag_container_tasks
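
The structural change in this hunk is the list_concat wrapper: upgrade_tasks
previously held only the pacemaker-specific tasks, and now concatenates the
base role's upgrade_tasks (which carry the new relabel task) with that same
list. A minimal sketch of the Heat pattern, with placeholder task content:

    upgrade_tasks:
      list_concat:
        - {get_attr: [HAProxyBase, role_data, upgrade_tasks]}
        - # a nested list: these tasks run in addition to the base tasks
          - name: Pacemaker-specific task (placeholder)
            debug:
              msg: "appended after the HAProxyBase upgrade_tasks"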

@@ -309,139 +309,141 @@ outputs:
tripleo_ha_wrapper_minor_update: true
upgrade_tasks:
- name: Prepare switch of rabbitmq image name
when:
- step|int == 0
block:
- name: Get rabbitmq image id currently used by pacemaker
shell: "pcs resource config rabbitmq-bundle | grep -Eo 'image=[^ ]+' | awk -F= '{print $2;}'"
register: rabbitmq_image_current_res
failed_when: false
- name: Image facts for rabbitmq
set_fact:
rabbitmq_image_latest: *rabbitmq_image_pcmklatest
rabbitmq_image_current: "{{rabbitmq_image_current_res.stdout}}"
- name: Prepare the switch to new rabbitmq container image name in pacemaker
block:
- name: Temporarily tag the current rabbitmq image id with the upgraded image name
import_role:
name: tripleo-container-tag
vars:
container_image: "{{rabbitmq_image_current}}"
container_image_latest: "{{rabbitmq_image_latest}}"
pull_image: false
when:
- rabbitmq_image_current != ''
- rabbitmq_image_current != rabbitmq_image_latest
- name: Check rabbitmq cluster resource status
shell: pcs resource config rabbitmq-bundle
failed_when: false
register: rabbitmq_pcs_res_result
- name: Set fact rabbitmq_pcs_res
set_fact:
rabbitmq_pcs_res: "{{rabbitmq_pcs_res_result.rc == 0}}"
- name: set is_rpc_rabbitmq_bootstrap_node fact
set_fact: is_rpc_rabbitmq_bootstrap_node={{oslo_messaging_rpc_short_bootstrap_node_name|lower == ansible_facts['hostname']|lower}}
- name: Update rabbitmq-bundle pcs resource bundle for new container image
when:
- step|int == 1
- is_rpc_rabbitmq_bootstrap_node|bool
- rabbitmq_pcs_res|bool
- rabbitmq_image_current != rabbitmq_image_latest
block:
- name: Disable the rabbitmq cluster resource before container upgrade
pacemaker_resource:
resource: rabbitmq-bundle
state: disable
wait_for_resource: true
register: output
retries: 5
until: output.rc == 0
- name: Move rabbitmq logging to /var/log/containers
block:
- name: Check rabbitmq logging configuration in pacemaker
command: cibadmin --query --xpath "//storage-mapping[@id='rabbitmq-log']"
failed_when: false
register: rabbitmq_logs_moved
- name: Add a bind mount for logging in the rabbitmq bundle
# rc == 6 means the configuration doesn't exist in the CIB
when: rabbitmq_logs_moved.rc == 6
command: pcs resource bundle update rabbitmq-bundle storage-map add id=rabbitmq-log source-dir=/var/log/containers/rabbitmq target-dir=/var/log/rabbitmq options=rw
- name: Update the rabbitmq bundle to use the new container image name
command: "pcs resource bundle update rabbitmq-bundle container image={{rabbitmq_image_latest}}"
- name: Enable the rabbitmq cluster resource
pacemaker_resource:
resource: rabbitmq-bundle
state: enable
wait_for_resource: true
register: output
retries: 5
until: output.rc == 0
- name: Create hiera data to upgrade oslo messaging rpc in a stepwise manner.
when:
- step|int == 1
- cluster_recreate|bool
block:
- name: set oslo_messaging_rpc upgrade node facts in a single-node environment
set_fact:
oslo_messaging_rpc_short_node_names_upgraded: "{{ oslo_messaging_rpc_short_node_names }}"
oslo_messaging_rpc_node_names_upgraded: "{{ oslo_messaging_rpc_node_names }}"
cacheable: no
when: groups['oslo_messaging_rpc'] | length <= 1
- name: set oslo_messaging_rpc upgrade node facts from the limit option
set_fact:
oslo_messaging_rpc_short_node_names_upgraded: "{{ oslo_messaging_rpc_short_node_names_upgraded|default([]) + [item.split('.')[0]] }}"
oslo_messaging_rpc_node_names_upgraded: "{{ oslo_messaging_rpc_node_names_upgraded|default([]) + [item] }}"
cacheable: no
when:
- groups['oslo_messaging_rpc'] | length > 1
- item.split('.')[0] in ansible_limit.split(':')
loop: "{{ oslo_messaging_rpc_node_names }}"
- fail:
msg: >
You can't upgrade oslo_messaging_rpc without
a staged upgrade. You need to use the limit option in order
to do so.
when: >-
oslo_messaging_rpc_short_node_names_upgraded is not defined or
oslo_messaging_rpc_short_node_names_upgraded | length == 0 or
oslo_messaging_rpc_node_names_upgraded is not defined or
oslo_messaging_rpc_node_names_upgraded | length == 0
- debug:
msg: "Prepare oslo_messaging_rpc upgrade for {{ oslo_messaging_rpc_short_node_names_upgraded }}"
- name: remove rabbitmq init container on upgrade-scaleup to force re-init
include_role:
name: tripleo-container-rm
vars:
tripleo_containers_to_rm:
- rabbitmq_wait_bundle
when:
- oslo_messaging_rpc_short_node_names_upgraded | length > 1
- name: add the oslo_messaging_rpc short name to hiera data for the upgrade.
include_role:
name: tripleo-upgrade-hiera
tasks_from: set.yml
vars:
tripleo_upgrade_key: oslo_messaging_rpc_short_node_names_override
tripleo_upgrade_value: "{{oslo_messaging_rpc_short_node_names_upgraded}}"
- name: add the oslo_messaging_rpc long name to hiera data for the upgrade
include_role:
name: tripleo-upgrade-hiera
tasks_from: set.yml
vars:
tripleo_upgrade_key: oslo_messaging_rpc_node_names_override
tripleo_upgrade_value: "{{oslo_messaging_rpc_node_names_upgraded}}"
- name: remove the extra hiera data needed for the upgrade.
include_role:
name: tripleo-upgrade-hiera
tasks_from: remove.yml
vars:
tripleo_upgrade_key: "{{item}}"
loop:
- oslo_messaging_rpc_short_node_names_override
- oslo_messaging_rpc_node_names_override
when: oslo_messaging_rpc_short_node_names_upgraded | length == oslo_messaging_rpc_node_names | length
- name: Retag the pacemaker image if containerized
when:
- step|int == 3
block: *rabbitmq_fetch_retag_container_tasks
list_concat:
- {get_attr: [RabbitmqBase, role_data, upgrade_tasks]}
- - name: Prepare switch of rabbitmq image name
when:
- step|int == 0
block:
- name: Get rabbitmq image id currently used by pacemaker
shell: "pcs resource config rabbitmq-bundle | grep -Eo 'image=[^ ]+' | awk -F= '{print $2;}'"
register: rabbitmq_image_current_res
failed_when: false
- name: Image facts for rabbitmq
set_fact:
rabbitmq_image_latest: *rabbitmq_image_pcmklatest
rabbitmq_image_current: "{{rabbitmq_image_current_res.stdout}}"
- name: Prepare the switch to new rabbitmq container image name in pacemaker
block:
- name: Temporarily tag the current rabbitmq image id with the upgraded image name
import_role:
name: tripleo-container-tag
vars:
container_image: "{{rabbitmq_image_current}}"
container_image_latest: "{{rabbitmq_image_latest}}"
pull_image: false
when:
- rabbitmq_image_current != ''
- rabbitmq_image_current != rabbitmq_image_latest
- name: Check rabbitmq cluster resource status
shell: pcs resource config rabbitmq-bundle
failed_when: false
register: rabbitmq_pcs_res_result
- name: Set fact rabbitmq_pcs_res
set_fact:
rabbitmq_pcs_res: "{{rabbitmq_pcs_res_result.rc == 0}}"
- name: set is_rpc_rabbitmq_bootstrap_node fact
set_fact: is_rpc_rabbitmq_bootstrap_node={{oslo_messaging_rpc_short_bootstrap_node_name|lower == ansible_facts['hostname']|lower}}
- name: Update rabbitmq-bundle pcs resource bundle for new container image
when:
- step|int == 1
- is_rpc_rabbitmq_bootstrap_node|bool
- rabbitmq_pcs_res|bool
- rabbitmq_image_current != rabbitmq_image_latest
block:
- name: Disable the rabbitmq cluster resource before container upgrade
pacemaker_resource:
resource: rabbitmq-bundle
state: disable
wait_for_resource: true
register: output
retries: 5
until: output.rc == 0
- name: Move rabbitmq logging to /var/log/containers
block:
- name: Check rabbitmq logging configuration in pacemaker
command: cibadmin --query --xpath "//storage-mapping[@id='rabbitmq-log']"
failed_when: false
register: rabbitmq_logs_moved
- name: Add a bind mount for logging in the rabbitmq bundle
# rc == 6 means the configuration doesn't exist in the CIB
when: rabbitmq_logs_moved.rc == 6
command: pcs resource bundle update rabbitmq-bundle storage-map add id=rabbitmq-log source-dir=/var/log/containers/rabbitmq target-dir=/var/log/rabbitmq options=rw
- name: Update the rabbitmq bundle to use the new container image name
command: "pcs resource bundle update rabbitmq-bundle container image={{rabbitmq_image_latest}}"
- name: Enable the rabbitmq cluster resource
pacemaker_resource:
resource: rabbitmq-bundle
state: enable
wait_for_resource: true
register: output
retries: 5
until: output.rc == 0
- name: Create hiera data to upgrade oslo messaging rpc in a stepwise manner.
when:
- step|int == 1
- cluster_recreate|bool
block:
- name: set oslo_messaging_rpc upgrade node facts in a single-node environment
set_fact:
oslo_messaging_rpc_short_node_names_upgraded: "{{ oslo_messaging_rpc_short_node_names }}"
oslo_messaging_rpc_node_names_upgraded: "{{ oslo_messaging_rpc_node_names }}"
cacheable: no
when: groups['oslo_messaging_rpc'] | length <= 1
- name: set oslo_messaging_rpc upgrade node facts from the limit option
set_fact:
oslo_messaging_rpc_short_node_names_upgraded: "{{ oslo_messaging_rpc_short_node_names_upgraded|default([]) + [item.split('.')[0]] }}"
oslo_messaging_rpc_node_names_upgraded: "{{ oslo_messaging_rpc_node_names_upgraded|default([]) + [item] }}"
cacheable: no
when:
- groups['oslo_messaging_rpc'] | length > 1
- item.split('.')[0] in ansible_limit.split(':')
loop: "{{ oslo_messaging_rpc_node_names }}"
- fail:
msg: >
You can't upgrade oslo_messaging_rpc without
a staged upgrade. You need to use the limit option in order
to do so.
when: >-
oslo_messaging_rpc_short_node_names_upgraded is not defined or
oslo_messaging_rpc_short_node_names_upgraded | length == 0 or
oslo_messaging_rpc_node_names_upgraded is not defined or
oslo_messaging_rpc_node_names_upgraded | length == 0
- debug:
msg: "Prepare oslo_messaging_rpc upgrade for {{ oslo_messaging_rpc_short_node_names_upgraded }}"
- name: remove rabbitmq init container on upgrade-scaleup to force re-init
include_role:
name: tripleo-container-rm
vars:
tripleo_containers_to_rm:
- rabbitmq_wait_bundle
when:
- oslo_messaging_rpc_short_node_names_upgraded | length > 1
- name: add the oslo_messaging_rpc short name to hiera data for the upgrade.
include_role:
name: tripleo-upgrade-hiera
tasks_from: set.yml
vars:
tripleo_upgrade_key: oslo_messaging_rpc_short_node_names_override
tripleo_upgrade_value: "{{oslo_messaging_rpc_short_node_names_upgraded}}"
- name: add the oslo_messaging_rpc long name to hiera data for the upgrade
include_role:
name: tripleo-upgrade-hiera
tasks_from: set.yml
vars:
tripleo_upgrade_key: oslo_messaging_rpc_node_names_override
tripleo_upgrade_value: "{{oslo_messaging_rpc_node_names_upgraded}}"
- name: remove the extra hiera data needed for the upgrade.
include_role:
name: tripleo-upgrade-hiera
tasks_from: remove.yml
vars:
tripleo_upgrade_key: "{{item}}"
loop:
- oslo_messaging_rpc_short_node_names_override
- oslo_messaging_rpc_node_names_override
when: oslo_messaging_rpc_short_node_names_upgraded | length == oslo_messaging_rpc_node_names | length
- name: Retag the pacemaker image if containerized
when:
- step|int == 3
block: *rabbitmq_fetch_retag_container_tasks
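
The *rabbitmq_image_pcmklatest and *rabbitmq_fetch_retag_container_tasks
references are YAML aliases whose anchors are declared earlier in the same
template, outside this hunk. A minimal sketch of the anchor/alias mechanics,
with a hypothetical task body standing in for the real anchored tasks:

    # the anchor is declared once (body here is hypothetical; the real
    # tasks live where the anchor is defined, outside this diff):
    _retag_tasks: &rabbitmq_fetch_retag_container_tasks
      - name: Retag the running image with the pcmklatest name
        debug:
          msg: "retag to {{ rabbitmq_image_latest }}"
    # each alias then expands to the same task list:
    upgrade_tasks:
      - name: Retag the pacemaker image if containerized
        when:
          - step|int == 3
        block: *rabbitmq_fetch_retag_container_tasks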