Merge "Normalise all pacemaker resource upgrade tasks for staged upgrades"

This commit is contained in:
Zuul 2019-10-17 11:44:59 +00:00 committed by Gerrit Code Review
commit 6df0fbfa83
7 changed files with 280 additions and 224 deletions

View File

@ -224,65 +224,76 @@ outputs:
container_image: {get_param: ContainerCinderBackupImage}
container_image_latest: *cinder_backup_image_pcmklatest
update_tasks:
- name: Cinder-Backup fetch and retag container image for pacemaker
- name: cinder_backup fetch and retag container image for pacemaker
when: step|int == 2
block: &cinder_backup_fetch_retag_container_tasks
- name: Get docker Cinder-Backup image
- name: Get container cinder_backup image
set_fact:
docker_image: {get_param: ContainerCinderBackupImage}
docker_image_latest: *cinder_backup_image_pcmklatest
- name: Get previous Cinder-Backup image id
shell: "{{container_cli}} images | awk '/cinder-backup.* pcmklatest/{print $3}' | uniq"
register: cinder_backup_image_id
cinder_backup_image: {get_param: ContainerCinderBackupImage}
cinder_backup_image_latest: *cinder_backup_image_pcmklatest
- name: Pull latest cinder_backup images
command: "{{container_cli}} pull {{cinder_backup_image}}"
- name: Get previous cinder_backup image id
shell: "{{container_cli}} inspect --format '{{'{{'}}.Id{{'}}'}}' {{cinder_backup_image_latest}}"
register: old_cinder_backup_image_id
failed_when: false
- name: Get new cinder_backup image id
shell: "{{container_cli}} inspect --format '{{'{{'}}.Id{{'}}'}}' {{cinder_backup_image}}"
register: new_cinder_backup_image_id
- name: Retag pcmklatest to latest cinder_backup image
include_role:
name: tripleo-container-tag
vars:
container_image: "{{cinder_backup_image}}"
container_image_latest: "{{cinder_backup_image_latest}}"
when:
- old_cinder_backup_image_id.stdout != new_cinder_backup_image_id.stdout
- block:
- name: Get a list of container using Cinder-Backup image
shell: "{{container_cli}} ps -a -q -f 'ancestor={{cinder_backup_image_id.stdout}}'"
- name: Get a list of containers using cinder_backup image
shell: "{{container_cli}} ps -a -q -f 'ancestor={{old_cinder_backup_image_id.stdout}}'"
register: cinder_backup_containers_to_destroy
# It will be recreated with the deploy step.
- name: Remove any container using the same Cinder-Backup image
- name: Remove any containers using the same cinder_backup image
shell: "{{container_cli}} rm -fv {{item}}"
with_items: "{{ cinder_backup_containers_to_destroy.stdout_lines }}"
- name: Remove previous Cinder-Backup images
shell: "{{container_cli}} rmi -f {{cinder_backup_image_id.stdout}}"
- name: Remove previous cinder_backup images
shell: "{{container_cli}} rmi -f {{old_cinder_backup_image_id.stdout}}"
when:
- cinder_backup_image_id.stdout != ''
- name: Pull latest Cinder-Backup images
command: "{{container_cli}} pull {{docker_image}}"
- name: Retag pcmklatest to latest Cinder-Backup image
- old_cinder_backup_image_id.stdout != ''
- old_cinder_backup_image_id.stdout != new_cinder_backup_image_id.stdout
upgrade_tasks:
- name: Prepare switch of cinder_backup image name
when:
- step|int == 0
block:
- name: Get cinder_backup image id currently used by pacemaker
shell: "pcs resource config openstack-cinder-backup | grep -Eo 'image=[^ ]+' | awk -F= '{print $2;}'"
register: cinder_backup_image_current_res
failed_when: false
- name: cinder_backup image facts
set_fact:
cinder_backup_image_latest: *cinder_backup_image_pcmklatest
cinder_backup_image_current: "{{cinder_backup_image_current_res.stdout}}"
- name: Temporarily tag the current cinder_backup image id with the upgraded image name
import_role:
name: tripleo-container-tag
vars:
container_image: "{{docker_image}}"
container_image_latest: "{{docker_image_latest}}"
# Got to check that pacemaker_is_active is working fine with bundle.
# TODO: pacemaker_is_active resource doesn't support bundle.
upgrade_tasks:
- when: step|int == 0
tags: common
block:
- name: Get docker Cinder-Backup image
set_fact:
cinder_backup_docker_image_latest: *cinder_backup_image_pcmklatest
- name: Prepare the switch to new cinder_backup container image name in pacemaker
when: cinder_backup_containerized|bool
block:
- name: Get cinder_backup image id currently used by pacemaker
shell: "{{container_cli}} images | awk '/cinder-backup.* pcmklatest/{print $3}' | uniq"
register: cinder_backup_current_pcmklatest_id
- name: Temporarily tag the current cinder_backup image id with the upgraded image name
when: cinder_backup_current_pcmklatest_id.stdout != ''
import_role:
name: tripleo-container-tag
vars:
container_image: "{{cinder_backup_current_pcmklatest_id.stdout}}"
container_image_latest: "{{cinder_backup_docker_image_latest}}"
pull_image: false
container_image: "{{cinder_backup_current_pcmklatest_id.stdout}}"
container_image_latest: "{{cinder_backup_docker_image_latest}}"
pull_image: false
when:
- cinder_backup_image_current != ''
- cinder_backup_image_current != cinder_backup_image_latest
# During an OS Upgrade, the cluster may not exist so we use
# the shell module instead.
# TODO(odyssey4me):
# Fix the pacemaker_resource module to handle the exception
# for a non-existent cluster more gracefully.
- name: Check openstack-cinder-backup cluster resource status
pacemaker_resource:
resource: openstack-cinder-backup
state: show
check_mode: false
ignore_errors: true
shell: pcs resource config openstack-cinder-backup
failed_when: false
changed_when: false
register: cinder_backup_pcs_res_result
- name: Set fact cinder_backup_pcs_res
set_fact:
@ -295,6 +306,7 @@ outputs:
- step|int == 1
- is_cinder_backup_bootstrap_node
- cinder_backup_pcs_res|bool
- cinder_backup_image_current != cinder_backup_image_latest
block:
- name: Disable the cinder_backup cluster resource before container upgrade
pacemaker_resource:
@ -305,7 +317,7 @@ outputs:
retries: 5
until: output.rc == 0
- name: Update the cinder_backup bundle to use the new container image name
command: "pcs resource bundle update openstack-cinder-backup container image={{cinder_backup_docker_image_latest}}"
command: "pcs resource bundle update openstack-cinder-backup container image={{cinder_backup_image_latest}}"
- name: Enable the cinder_backup cluster resource
pacemaker_resource:
resource: openstack-cinder-backup
@ -318,6 +330,7 @@ outputs:
when:
- step|int == 3
block: *cinder_backup_fetch_retag_container_tasks
fast_forward_upgrade_tasks:
- when:
- step|int == 0
@ -329,7 +342,7 @@ outputs:
resource: openstack-cinder-backup
state: show
check_mode: false
ignore_errors: true
failed_when: false
register: cinder_backup_res_result
- name: Set fact cinder_backup_res
set_fact:

View File

@ -235,11 +235,11 @@ outputs:
when:
- old_cinder_volume_image_id.stdout != new_cinder_volume_image_id.stdout
- block:
- name: Get a list of container using cinder_volume image
- name: Get a list of containers using cinder_volume image
shell: "{{container_cli}} ps -a -q -f 'ancestor={{old_cinder_volume_image_id.stdout}}'"
register: cinder_volume_containers_to_destroy
# It will be recreated with the deploy step.
- name: Remove any container using the same cinder_volume image
- name: Remove any containers using the same cinder_volume image
shell: "{{container_cli}} rm -fv {{item}}"
with_items: "{{ cinder_volume_containers_to_destroy.stdout_lines }}"
- name: Remove previous cinder_volume images
@ -261,20 +261,24 @@ outputs:
set_fact:
cinder_volume_image_latest: *cinder_volume_image_pcmklatest
cinder_volume_image_current: "{{cinder_volume_image_current_res.stdout}}"
- name: Prepare the switch to new cinder_volume container image name in pacemaker
block:
- name: Temporarily tag the current cinder_volume image id with the upgraded image name
import_role:
name: tripleo-container-tag
vars:
container_image: "{{cinder_volume_image_current}}"
container_image_latest: "{{cinder_volume_image_latest}}"
pull_image: false
when:
- cinder_volume_image_current != ''
- cinder_volume_image_current != cinder_volume_image_latest
- name: Temporarily tag the current cinder_volume image id with the upgraded image name
import_role:
name: tripleo-container-tag
vars:
container_image: "{{cinder_volume_image_current}}"
container_image_latest: "{{cinder_volume_image_latest}}"
pull_image: false
when:
- cinder_volume_image_current != ''
- cinder_volume_image_current != cinder_volume_image_latest
# During an OS Upgrade, the cluster may not exist so we use
# the shell module instead.
# TODO(odyssey4me):
# Fix the pacemaker_resource module to handle the exception
# for a non-existent cluster more gracefully.
- name: Check openstack-cinder-volume cluster resource status
shell: pcs resource config openstack-cinder-volume
changed_when: false
failed_when: false
register: cinder_volume_pcs_res_result
- name: Set fact cinder_volume_pcs_res
@ -301,7 +305,6 @@ outputs:
- name: pcs resource bundle update cinder_volume for new container image name
command: "pcs resource bundle update openstack-cinder-volume container image={{cinder_volume_image_latest}}"
- name: Enable the cinder_volume cluster resource
when:
pacemaker_resource:
resource: openstack-cinder-volume
state: enable

View File

@ -407,21 +407,25 @@ outputs:
set_fact:
galera_image_latest: *mysql_image_pcmklatest
galera_image_current: "{{galera_image_current_res.stdout}}"
- name: Prepare the switch to new galera container image name in pacemaker
block:
- name: Temporarily tag the current galera image id with the upgraded image name
import_role:
name: tripleo-container-tag
vars:
container_image: "{{galera_image_current}}"
container_image_latest: "{{galera_image_latest}}"
pull_image: false
when:
- galera_image_current != ''
- galera_image_current != galera_image_latest
- name: Temporarily tag the current galera image id with the upgraded image name
import_role:
name: tripleo-container-tag
vars:
container_image: "{{galera_image_current}}"
container_image_latest: "{{galera_image_latest}}"
pull_image: false
when:
- galera_image_current != ''
- galera_image_current != galera_image_latest
# During an OS Upgrade, the cluster may not exist so we use
# the shell module instead.
# TODO(odyssey4me):
# Fix the pacemaker_resource module to handle the exception
# for a non-existent cluster more gracefully.
- name: Check galera cluster resource status
shell: pcs resource config galera-bundle
failed_when: false
changed_when: false
register: galera_pcs_res_result
- name: Set fact galera_pcs_res
set_fact:

View File

@ -327,11 +327,11 @@ outputs:
when:
- old_redis_image_id.stdout != new_redis_image_id.stdout
- block:
- name: Get a list of container using redis image
- name: Get a list of containers using redis image
shell: "{{container_cli}} ps -a -q -f 'ancestor={{old_redis_image_id.stdout}}'"
register: redis_containers_to_destroy
# It will be recreated with the deploy step.
- name: Remove any container using the same redis image
- name: Remove any containers using the same redis image
shell: "{{container_cli}} rm -fv {{item}}"
with_items: "{{ redis_containers_to_destroy.stdout_lines }}"
- name: Remove previous redis images
@ -353,21 +353,25 @@ outputs:
set_fact:
redis_image_latest: *redis_image_pcmklatest
redis_image_current: "{{redis_image_current_res.stdout}}"
- name: Prepare the switch to new redis container image name in pacemaker
block:
- name: Temporarily tag the current redis image id with the upgraded image name
import_role:
name: tripleo-container-tag
vars:
container_image: "{{redis_image_current}}"
container_image_latest: "{{redis_image_latest}}"
pull_image: false
when:
- redis_image_current != ''
- redis_image_current != redis_image_latest
- name: Temporarily tag the current redis image id with the upgraded image name
import_role:
name: tripleo-container-tag
vars:
container_image: "{{redis_image_current}}"
container_image_latest: "{{redis_image_latest}}"
pull_image: false
when:
- redis_image_current != ''
- redis_image_current != redis_image_latest
# During an OS Upgrade, the cluster may not exist so we use
# the shell module instead.
# TODO(odyssey4me):
# Fix the pacemaker_resource module to handle the exception
# for a non-existent cluster more gracefully.
- name: Check redis cluster resource status
shell: pcs resource config redis-bundle
failed_when: false
changed_when: false
register: redis_pcs_res_result
- name: Set upgrade redis facts
set_fact:

View File

@ -399,7 +399,8 @@ outputs:
until: output.rc == 0
when: haproxy_cert_mounted.rc == 6
- name: Haproxy fetch and retag container image for pacemaker
when: step|int == 2
when:
- step|int == 2
block: &haproxy_fetch_retag_container_tasks
- name: Get container haproxy image
set_fact:
@ -449,21 +450,25 @@ outputs:
set_fact:
haproxy_image_latest: *haproxy_image_pcmklatest
haproxy_image_current: "{{haproxy_image_current_res.stdout}}"
- name: Prepare the switch to new haproxy container image name in pacemaker
block:
- name: Temporarily tag the current haproxy image id with the upgraded image name
import_role:
name: tripleo-container-tag
vars:
container_image: "{{haproxy_image_current}}"
container_image_latest: "{{haproxy_image_latest}}"
pull_image: false
when:
- haproxy_image_current != ''
- haproxy_image_current != haproxy_image_latest
- name: Temporarily tag the current haproxy image id with the upgraded image name
import_role:
name: tripleo-container-tag
vars:
container_image: "{{haproxy_image_current}}"
container_image_latest: "{{haproxy_image_latest}}"
pull_image: false
when:
- haproxy_image_current != ''
- haproxy_image_current != haproxy_image_latest
# During an OS Upgrade, the cluster may not exist so we use
# the shell module instead.
# TODO(odyssey4me):
# Fix the pacemaker_resource module to handle the exception
# for a non-existent cluster more gracefully.
- name: Check haproxy cluster resource status
shell: pcs resource config haproxy-bundle
failed_when: false
changed_when: false
register: haproxy_pcs_res_result
- name: Set upgrade haproxy facts
set_fact:

View File

@ -212,64 +212,76 @@ outputs:
container_image: {get_param: ContainerManilaShareImage}
container_image_latest: *manila_share_image_pcmklatest
update_tasks:
- name: Manila-Share fetch and retag container image for pacemaker
- name: manila_share fetch and retag container image for pacemaker
when: step|int == 2
block: &manila_share_fetch_retag_container_tasks
- name: Get docker Manila-Share image
- name: Get container manila_share image
set_fact:
docker_image: {get_param: ContainerManilaShareImage}
docker_image_latest: *manila_share_image_pcmklatest
- name: Get previous Manila-Share image id
shell: "{{container_cli}} images | awk '/manila-share.* pcmklatest/{print $3}' | uniq"
register: manila_share_image_id
manila_share_image: {get_param: ContainerManilaShareImage}
manila_share_image_latest: *manila_share_image_pcmklatest
- name: Pull latest manila_share images
command: "{{container_cli}} pull {{manila_share_image}}"
- name: Get previous manila_share image id
shell: "{{container_cli}} inspect --format '{{'{{'}}.Id{{'}}'}}' {{manila_share_image_latest}}"
register: old_manila_share_image_id
failed_when: false
- name: Get new manila_share image id
shell: "{{container_cli}} inspect --format '{{'{{'}}.Id{{'}}'}}' {{manila_share_image}}"
register: new_manila_share_image_id
- name: Retag pcmklatest to latest manila_share image
include_role:
name: tripleo-container-tag
vars:
container_image: "{{manila_share_image}}"
container_image_latest: "{{manila_share_image_latest}}"
when:
- old_manila_share_image_id.stdout != new_manila_share_image_id.stdout
- block:
- name: Get a list of container using Manila-Share image
shell: "{{container_cli}} ps -a -q -f 'ancestor={{manila_share_image_id.stdout}}'"
- name: Get a list of containers using manila_share image
shell: "{{container_cli}} ps -a -q -f 'ancestor={{old_manila_share_image_id.stdout}}'"
register: manila_share_containers_to_destroy
# It will be recreated with the deploy step.
- name: Remove any container using the same Manila-Share image
- name: Remove any containers using the same manila_share image
shell: "{{container_cli}} rm -fv {{item}}"
with_items: "{{ manila_share_containers_to_destroy.stdout_lines }}"
- name: Remove previous Manila-Share images
- name: Remove previous manila_share images
shell: "{{container_cli}} rmi -f {{manila_share_image_id.stdout}}"
when:
- manila_share_image_id.stdout != ''
- name: Pull latest Manila-Share images
command: "{{container_cli}} pull {{docker_image}}"
- name: Retag pcmklatest to latest Manila-Share image
- old_manila_share_image_id.stdout != ''
- old_manila_share_image_id.stdout != new_manila_share_image_id.stdout
upgrade_tasks:
- name: Prepare switch of manila_share image name
when:
- step|int == 0
block:
- name: Get manila_share image id currently used by pacemaker
shell: "pcs resource config openstack-manila-share | grep -Eo 'image=[^ ]+' | awk -F= '{print $2;}'"
register: manila_share_image_current_res
failed_when: false
- name: manila_share image facts
set_fact:
manila_share_image_latest: *manila_share_image_pcmklatest
manila_share_image_current: "{{manila_share_image_current_res.stdout}}"
- name: Temporarily tag the current manila_share image id with the upgraded image name
import_role:
name: tripleo-container-tag
vars:
container_image: "{{docker_image}}"
container_image_latest: "{{docker_image_latest}}"
# Got to check that pacemaker_is_active is working fine with bundle.
# TODO: pacemaker_is_active resource doesn't support bundle.
upgrade_tasks:
- when: step|int == 0
tags: common
block:
- name: Get docker Manila-Share image
set_fact:
manila_share_docker_image_latest: *manila_share_image_pcmklatest
- name: Prepare the switch to new Manila-Share container image name in pacemaker
block:
- name: Get Manila-Share image id currently used by pacemaker
shell: "{{container_cli}} images | awk '/manila-share.* pcmklatest/{print $3}' | uniq"
register: manila_share_current_pcmklatest_id
- name: Temporarily tag the current Manila-Share image id with the upgraded image name
import_role:
name: tripleo-container-tag
vars:
container_image: "{{manila_share_current_pcmklatest_id.stdout}}"
container_image_latest: "{{manila_share_docker_image_latest}}"
pull_image: false
when: manila_share_current_pcmklatest_id.stdout != ''
container_image: "{{manila_share_image_current}}"
container_image_latest: "{{manila_share_image_latest}}"
pull_image: false
when:
- manila_share_image_current != ''
- manila_share_image_current != manila_share_image_latest
# During an OS Upgrade, the cluster may not exist so we use
# the shell module instead.
# TODO(odyssey4me):
# Fix the pacemaker_resource module to handle the exception
# for a non-existent cluster more gracefully.
- name: Check openstack-manila-share cluster resource status
pacemaker_resource:
resource: openstack-manila-share
state: show
check_mode: false
ignore_errors: true
shell: pcs resource config openstack-manila-share
failed_when: false
changed_when: false
register: manila_share_pcs_res_result
- name: Set fact manila_share_pcs_res
set_fact:
@ -282,8 +294,9 @@ outputs:
- step|int == 1
- is_manila_share_bootstrap_node
- manila_share_pcs_res|bool
- manila_share_image_current != manila_share_image_latest
block:
- name: Disable the Manila-Share cluster resource before container upgrade
- name: Disable the manila_share cluster resource before container upgrade
pacemaker_resource:
resource: openstack-manila-share
state: disable
@ -291,10 +304,9 @@ outputs:
register: output
retries: 5
until: output.rc == 0
- name: Update the Manila-Share bundle to use the new container image name
command: "pcs resource bundle update openstack-manila-share container image={{manila_share_docker_image_latest}}"
- name: Enable the Manila-Share cluster resource
when:
- name: pcs resource bundle update manila_share for new container image name
command: "pcs resource bundle update openstack-manila-share container image={{manila_share_image_latest}}"
- name: Enable the manila_share cluster resource
pacemaker_resource:
resource: openstack-manila-share
state: enable
@ -306,6 +318,7 @@ outputs:
when:
- step|int == 3
block: *manila_share_fetch_retag_container_tasks
fast_forward_upgrade_tasks:
- name: Check cluster resource status
pacemaker_resource:

View File

@ -253,6 +253,7 @@ outputs:
vars:
container_image: {get_param: ContainerOvnDbsImage}
container_image_latest: *ovn_dbs_image_pcmklatest
update_tasks:
# When a schema change happens, the newer slaves don't connect
# back to the older master and end up timing out. So we clean
@ -273,39 +274,44 @@ outputs:
- step|int == 1
- name: Get docker ovn-dbs image
set_fact:
ovn_dbs_docker_image: {get_param: ContainerOvnDbsImage}
ovn_dbs_docker_image_latest: *ovn_dbs_image_pcmklatest
ovn_dbs_image: {get_param: ContainerOvnDbsImage}
ovn_dbs_image_latest: *ovn_dbs_image_pcmklatest
- name: set is_ovn_dbs_bootstrap_node fact
set_fact: is_ovn_dbs_bootstrap_node={{ovn_dbs_short_bootstrap_node_name|lower == ansible_hostname|lower}}
- name: ovn-dbs fetch and retag container image for pacemaker
when:
- step|int == 3
block: &ovn_dbs_fetch_retag_container_tasks
- name: Get previous ovn-dbs image id
shell: "{{container_cli}} images | awk '/ovn.* pcmklatest/{print $3}' | uniq"
register: ovn_dbs_image_id
- name: Pull latest ovn-dbs images
command: "{{container_cli}} pull {{ovn_dbs_image}}"
- name: Get previous ovn_dbs image id
shell: "{{container_cli}} inspect --format '{{'{{'}}.Id{{'}}'}}' {{ovn_dbs_image_latest}}"
register: old_ovn_dbs_image_id
failed_when: false
- name: Get new ovn_dbs image id
shell: "{{container_cli}} inspect --format '{{'{{'}}.Id{{'}}'}}' {{ovn_dbs_image}}"
register: new_ovn_dbs_image_id
- name: Retag pcmklatest to latest ovn_dbs image
include_role:
name: tripleo-container-tag
vars:
container_image: "{{ovn_dbs_image}}"
container_image_latest: "{{ovn_dbs_image_latest}}"
when:
- old_ovn_dbs_image_id.stdout != new_ovn_dbs_image_id.stdout
- block:
- name: Get a list of container using ovn-dbs image
shell: "{{container_cli}} ps -a -q -f 'ancestor={{ovn_dbs_image_id.stdout}}'"
- name: Get a list of containers using ovn-dbs image
shell: "{{container_cli}} ps -a -q -f 'ancestor={{old_ovn_dbs_image_id.stdout}}'"
register: ovn_dbs_containers_to_destroy
# It will be recreated with the deploy step.
- name: Remove any container using the same ovn-dbs image
- name: Remove any containers using the same ovn-dbs image
shell: "{{container_cli}} rm -fv {{item}}"
with_items: "{{ ovn_dbs_containers_to_destroy.stdout_lines }}"
- name: Remove previous ovn-dbs images
shell: "{{container_cli}} rmi -f {{ovn_dbs_image_id.stdout}}"
shell: "{{container_cli}} rmi -f {{old_ovn_dbs_image_id.stdout}}"
when:
- ovn_dbs_image_id.stdout != ''
- name: Pull latest ovn-dbs images
command: "{{container_cli}} pull {{ovn_dbs_docker_image}}"
- name: Retag pcmklatest to latest ovn-dbs image
import_role:
name: tripleo-container-tag
vars:
container_image: "{{ovn_dbs_docker_image}}"
container_image_latest: "{{ovn_dbs_docker_image_latest}}"
# Got to check that pacemaker_is_active is working fine with bundle.
# TODO: pacemaker_is_active resource doesn't support bundle.
- old_ovn_dbs_image_id.stdout != new_ovn_dbs_image_id.stdout
# We remove any leftover error and remove the ban.
- name: Ensure the cluster converge back even in case of schema change
shell: "pcs resource cleanup ovn-dbs-bundle"
@ -326,71 +332,79 @@ outputs:
- is_ovn_dbs_bootstrap_node
block:
- name: Get the present image used by ovn-dbs-bundle
shell: "pcs resource show ovn-dbs-bundle | grep image | awk '{ split($2, image, \"=\"); print image[2] }'"
shell: "pcs resource config ovn-dbs-bundle | grep -Eo 'image=[^ ]+' | awk -F= '{print $2;}'"
register: ovn_dbs_current_image
- block: &ovn_dbs_update_bundle_with_new_image
- block:
- name: Update the ovn-dbs-bundle to use the new container image name
command: "pcs resource bundle update ovn-dbs-bundle container image={{ovn_dbs_docker_image_latest}}"
command: "pcs resource bundle update ovn-dbs-bundle container image={{ovn_dbs_image_latest}}"
when:
- ovn_dbs_current_image.stdout != ovn_dbs_docker_image_latest
- ovn_dbs_current_image.stdout != ovn_dbs_image_latest
upgrade_tasks:
- when: step|int == 0
- name: Prepare switch of ovn-dbs image name
when:
- step|int == 0
block:
- name: Get docker ovn-dbs image
- name: Get ovn-dbs image id currently used by pacemaker
shell: "pcs resource config ovn-dbs-bundle | grep -Eo 'image=[^ ]+' | awk -F= '{print $2;}'"
register: ovn_dbs_image_current_res
failed_when: false
- name: ovn-dbs image facts
set_fact:
ovn_dbs_docker_image: {get_param: ContainerOvnDbsImage}
ovn_dbs_docker_image_latest: *ovn_dbs_image_pcmklatest
- name: set is_ovn_dbs_bootstrap_node fact
set_fact: is_ovn_dbs_bootstrap_node={{ovn_dbs_short_bootstrap_node_name|lower == ansible_hostname|lower}}
ovn_dbs_image_latest: *ovn_dbs_image_pcmklatest
ovn_dbs_image_current: "{{ovn_dbs_image_current_res.stdout}}"
- name: Temporarily tag the current ovn_dbs image id with the upgraded image name
import_role:
name: tripleo-container-tag
vars:
container_image: "{{ovn_dbs_image_current}}"
container_image_latest: "{{ovn_dbs_image_latest}}"
pull_image: false
when:
- ovn_dbs_image_current != ''
- ovn_dbs_image_current != ovn_dbs_image_latest
# During an OS Upgrade, the cluster may not exist so we use
# the shell module instead.
# TODO(odyssey4me):
# Fix the pacemaker_resource module to handle the exception
# for a non-existent cluster more gracefully.
- name: Check ovn-dbs-bundle cluster resource status
pacemaker_resource:
resource: ovn-dbs-bundle
state: show
check_mode: false
ignore_errors: true
register: ovndbs_pcs_result
- name: Set fact ovndbs_pcs_res
shell: pcs resource config ovn-dbs-bundle
failed_when: false
changed_when: false
register: ovn_dbs_pcs_result
- name: Set fact ovn_dbs_pcs_res
set_fact:
ovndbs_pcs_res: "{{ ovndbs_pcs_result|succeeded }}"
- name: Prepare the switch to new ovn-dbs container image name in pacemaker
block:
- name: Get ovn-dbs image id currently used by pacemaker
shell: "{{container_cli}} images | awk '/ovn.* pcmklatest/{print $3}' | uniq"
register: ovn_dbs_current_pcmklatest_id
- name: Temporarily tag the current ovn-dbs pcmklatest image id with the upgraded image name
import_role:
name: tripleo-container-tag
vars:
container_image: "{{ovn_dbs_current_pcmklatest_id.stdout}}"
container_image_latest: "{{ovn_dbs_docker_image_latest}}"
pull_image: false
when: ovn_dbs_current_pcmklatest_id.stdout != ''
# If ovn-dbs image is not tagged with pcmklatest, then create a new
# tag. This could happen if the stack is upgraded without updating the stack before.
# In the next step, the block 'ovn_dbs_update_bundle_with_new_image'
# will update the ovn-dbs-bundle resource to use the tagged image.
# And in step 3, we will fetch the latest image. Ensure we run these
# steps when the resource is up and running, otherwise the tagging
# will fail.
- block:
- name: Get the present image used by ovn-dbs-bundle
shell: "pcs resource show ovn-dbs-bundle | grep image | awk '{ split($2, image, \"=\"); print image[2] }'"
register: ovn_dbs_current_image
- name: Tag the current image with pcmklatest tag
import_role:
name: tripleo-container-tag
vars:
container_image: "{{ovn_dbs_current_image.stdout}}"
container_image_latest: "{{ovn_dbs_docker_image_latest}}"
when:
- ovn_dbs_current_pcmklatest_id.stdout == ''
- ovndbs_pcs_res
- name: Update ovn-bundle pcs resource bundle for new container image
ovndbs_pcs_res: "{{ ovn_dbs_pcs_result.rc == 0 }}"
- name: set is_ovn_dbs_bootstrap_node fact
tags: common
set_fact: is_ovn_dbs_bootstrap_node={{ovn_dbs_short_bootstrap_node_name|lower == ansible_hostname|lower}}
- name: Update ovn_dbs pcs resource bundle for new container image
when:
- step|int == 1
- is_ovn_dbs_bootstrap_node
- ovndbs_pcs_res
block: *ovn_dbs_update_bundle_with_new_image
- ovn_dbs_pcs_res|bool
- ovn_dbs_image_current != ovn_dbs_image_latest
block:
- name: Disable the ovn-dbs-bundle cluster resource before container upgrade
pacemaker_resource:
resource: ovn-dbs-bundle
state: disable
wait_for_resource: true
register: output
retries: 5
until: output.rc == 0
- name: pcs resource bundle update ovn-dbs for new container image name
command: "pcs resource bundle update ovn-dbs-bundle container image={{ovn_dbs_image_latest}}"
- name: Enable the ovn-dbs-bundle cluster resource
when:
pacemaker_resource:
resource: ovn-dbs-bundle
state: enable
wait_for_resource: true
register: output
retries: 5
until: output.rc == 0
- name: Retag the pacemaker image if containerized
when:
- step|int == 3