Add pacemaker upgrade_tasks for P..Q major upgrade

This adds pacemaker upgrade_tasks for the Pike to Queens major upgrade. We need
to handle both cases:
 - Upgrade from baremetal, so we should stop and delete the pacemaker-managed
   resources and disable the systemd services.
 - Upgrade from containers, so we should remove containers using the old image,
   then pull the new image and retag it as pcmklatest (see the sketch below).
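
All seven templates below follow the same shape for the new upgrade_tasks. A condensed
sketch of that shared pattern, with a placeholder <service> name standing in for the
real per-service parameters and YAML anchors (not taken verbatim from any one file),
looks roughly like this:

upgrade_tasks:
  - name: Get docker <Service> image
    set_fact:
      docker_image: {get_param: Docker<Service>Image}
      docker_image_latest: *<service>_image_pcmklatest
    when: step|int == 2
  - name: Check if <Service> is already containerized
    shell: "docker ps -a | grep {{docker_image_latest}}"
    ignore_errors: true
    register: <service>_containerized
  - name: Retag the pacemaker image if containerized
    when:
      - step|int == 2
      - <service>_containerized|succeeded
    block: *<service>_fetch_retag_container_tasks
  - name: <Service> baremetal to container upgrade tasks
    when:
      - step|int == 2
      - <service>_containerized|failed
    block:
      # On the bootstrap node: disable and delete the pacemaker resource,
      # then disable the old systemd unit so the container takes over.

The fetch-and-retag block itself is shared with update_tasks via a YAML anchor: remove
any containers still using the old pcmklatest image, delete that image, pull the new
image, and retag it as pcmklatest.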

Change-Id: Icacb31b79da3a18b7ab0986779a021dfe6a5553f
marios 2018-01-30 18:23:47 +02:00 committed by Sergii Golovatiuk
parent c93028f254
commit 029ec62b79
7 changed files with 499 additions and 404 deletions

@@ -205,64 +205,84 @@ outputs:
file:
path: /etc/ceph
state: directory
upgrade_tasks:
- name: get bootstrap nodeid
tags: common
command: hiera -c /etc/puppet/hiera.yaml bootstrap_nodeid
register: bootstrap_node
- name: set is_bootstrap_node fact
tags: common
set_fact: is_bootstrap_node={{bootstrap_node.stdout|lower == ansible_hostname|lower}}
- when:
- step|int == 2
- is_bootstrap_node|bool
block:
- name: Disable the openstack-cinder-backup cluster resource
pacemaker_resource:
resource: openstack-cinder-backup
state: disable
wait_for_resource: true
register: output
retries: 5
until: output.rc == 0
- name: Delete the stopped openstack-cinder-backup cluster resource.
pacemaker_resource:
resource: openstack-cinder-backup
state: delete
wait_for_resource: true
register: output
retries: 5
until: output.rc == 0
- name: Disable cinder_backup service
when: step|int == 2
service: name=openstack-cinder-backup enabled=no
update_tasks:
- name: Cinder-Backup fetch and retag container image for pacemaker
when: step|int == 2
block: &cinder_backup_fetch_retag_container_tasks
- name: Get docker Cinder-Backup image
set_fact:
docker_image: {get_param: DockerCinderBackupImage}
docker_image_latest: *cinder_backup_image_pcmklatest
- name: Pull latest Cinder-Backup images
command: "docker pull {{docker_image}}"
- name: Get previous Cinder-Backup image id
shell: "docker images | awk '/cinder-backup.* pcmklatest/{print $3}'"
register: cinder_backup_image_id
- block:
- name: Get a list of container using Cinder-Backup image
shell: "docker ps -a -q -f 'ancestor={{cinder_backup_image_id.stdout}}'"
register: cinder_backup_containers_to_destroy
# It will be recreated with the deploy step.
- name: Remove any container using the same Cinder-Backup image
shell: "docker rm -fv {{item}}"
with_items: "{{ cinder_backup_containers_to_destroy.stdout_lines }}"
- name: Remove previous Cinder-Backup images
shell: "docker rmi -f {{cinder_backup_image_id.stdout}}"
when:
- cinder_backup_image_id.stdout != ''
- name: Retag pcmklatest to latest Cinder-Backup image
shell: "docker tag {{docker_image}} {{docker_image_latest}}"
# Got to check that pacemaker_is_active is working fine with bundle.
# TODO: pacemaker_is_active resource doesn't support bundle.
upgrade_tasks:
- name: Get docker Cinder-Backup image
set_fact:
docker_image: {get_param: DockerCinderBackupImage}
docker_image_latest: *cinder_backup_image_pcmklatest
when: step|int == 2
- name: Pull latest Cinder-Backup images
command: "docker pull {{docker_image}}"
when: step|int == 2
- name: Get previous Cinder-Backup image id
shell: "docker images | awk '/cinder-backup.* pcmklatest/{print $3}'"
register: cinder_backup_image_id
- block:
- name: Get a list of container using Cinder-Backup image
shell: "docker ps -a -q -f 'ancestor={{cinder_backup_image_id.stdout}}'"
register: cinder_backup_containers_to_destroy
# It will be recreated with the deploy step.
- name: Remove any container using the same Cinder-Backup image
shell: "docker rm -fv {{item}}"
with_items: "{{ cinder_backup_containers_to_destroy.stdout_lines }}"
- name: Remove previous Cinder-Backup images
shell: "docker rmi -f {{cinder_backup_image_id.stdout}}"
- name: Check if Cinder-Backup is already containerized
shell: "docker ps -a | grep {{docker_image_latest}}"
ignore_errors: true
register: cinder_backup_containerized
- name: Retag the pacemaker image if containerized
when:
- step|int == 2
- cinder_backup_image_id.stdout != ''
- name: Retag pcmklatest to latest Cinder-Backup image
shell: "docker tag {{docker_image}} {{docker_image_latest}}"
when: step|int == 2
# Got to check that pacemaker_is_active is working fine with bundle.
# TODO: pacemaker_is_active resource doesn't support bundle.
- cinder_backup_containerized|succeeded
block: *cinder_backup_fetch_retag_container_tasks
- name: Cinder-Backup baremetal to container upgrade tasks
when:
- step|int == 2
- cinder_backup_containerized|failed
block:
- name: get bootstrap nodeid
tags: common
command: hiera -c /etc/puppet/hiera.yaml bootstrap_nodeid
register: bootstrap_node
- name: set is_bootstrap_node fact
tags: common
set_fact: is_bootstrap_node={{bootstrap_node.stdout|lower == ansible_hostname|lower}}
- name: Check cluster resource status
pacemaker_resource:
resource: {get_attr: [CinderBackupBase, role_data, service_name]}
state: started
check_mode: true
ignore_errors: true
register: cinder_backup_res
- when: (is_bootstrap_node) and (cinder_backup_res|succeeded)
block:
- name: Disable the openstack-cinder-backup cluster resource
pacemaker_resource:
resource: openstack-cinder-backup
state: disable
wait_for_resource: true
register: output
retries: 5
until: output.rc == 0
- name: Delete the stopped openstack-cinder-backup cluster resource.
pacemaker_resource:
resource: openstack-cinder-backup
state: delete
wait_for_resource: true
register: output
retries: 5
until: output.rc == 0
- name: Disable cinder_backup service
service: name=openstack-cinder-backup enabled=no

@@ -230,64 +230,84 @@ outputs:
executable: /bin/bash
creates: /dev/loop2
when: cinder_enable_iscsi_backend
upgrade_tasks:
- name: get bootstrap nodeid
tags: common
command: hiera -c /etc/puppet/hiera.yaml bootstrap_nodeid
register: bootstrap_node
- name: set is_bootstrap_node fact
tags: common
set_fact: is_bootstrap_node={{bootstrap_node.stdout|lower == ansible_hostname|lower}}
- when:
- step|int == 2
- is_bootstrap_node|bool
block:
- name: Disable the openstack-cinder-volume cluster resource
pacemaker_resource:
resource: openstack-cinder-volume
state: disable
wait_for_resource: true
register: output
retries: 5
until: output.rc == 0
- name: Delete the stopped openstack-cinder-volume cluster resource.
pacemaker_resource:
resource: openstack-cinder-volume
state: delete
wait_for_resource: true
register: output
retries: 5
until: output.rc == 0
- name: Disable cinder_volume service from boot
when: step|int == 2
service: name=openstack-cinder-volume enabled=no
update_tasks:
- name: Cinder-Volume fetch and retag container image for pacemaker
when: step|int == 2
block: &cinder_volume_fetch_retag_container_tasks
- name: Get docker Cinder-Volume image
set_fact:
docker_image: {get_param: DockerCinderVolumeImage}
docker_image_latest: *cinder_volume_image_pcmklatest
- name: Get previous Cinder-Volume image id
shell: "docker images | awk '/cinder-volume.* pcmklatest/{print $3}'"
register: cinder_volume_image_id
- block:
- name: Get a list of container using Cinder-Volume image
shell: "docker ps -a -q -f 'ancestor={{cinder_volume_image_id.stdout}}'"
register: cinder_volume_containers_to_destroy
# It will be recreated with the deploy step.
- name: Remove any container using the same Cinder-Volume image
shell: "docker rm -fv {{item}}"
with_items: "{{ cinder_volume_containers_to_destroy.stdout_lines }}"
- name: Remove previous Cinder-Volume images
shell: "docker rmi -f {{cinder_volume_image_id.stdout}}"
when:
- cinder_volume_image_id.stdout != ''
- name: Pull latest Cinder-Volume images
command: "docker pull {{docker_image}}"
- name: Retag pcmklatest to latest Cinder-Volume image
shell: "docker tag {{docker_image}} {{docker_image_latest}}"
# Got to check that pacemaker_is_active is working fine with bundle.
# TODO: pacemaker_is_active resource doesn't support bundle.
upgrade_tasks:
- name: Get docker Cinder-Volume image
set_fact:
docker_image: {get_param: DockerCinderVolumeImage}
docker_image_latest: *cinder_volume_image_pcmklatest
when: step|int == 2
- name: Get previous Cinder-Volume image id
shell: "docker images | awk '/cinder-volume.* pcmklatest/{print $3}'"
register: cinder_volume_image_id
- block:
- name: Get a list of container using Cinder-Volume image
shell: "docker ps -a -q -f 'ancestor={{cinder_volume_image_id.stdout}}'"
register: cinder_volume_containers_to_destroy
# It will be recreated with the deploy step.
- name: Remove any container using the same Cinder-Volume image
shell: "docker rm -fv {{item}}"
with_items: "{{ cinder_volume_containers_to_destroy.stdout_lines }}"
- name: Remove previous Cinder-Volume images
shell: "docker rmi -f {{cinder_volume_image_id.stdout}}"
- name: Check if Cinder-Volume is already containerized
shell: "docker ps -a | grep {{docker_image_latest}}"
ignore_errors: true
register: cinder_volume_containerized
- name: Retag the pacemaker image if containerized
when:
- step|int == 2
- cinder_volume_image_id.stdout != ''
- name: Pull latest Cinder-Volume images
command: "docker pull {{docker_image}}"
when: step|int == 2
- name: Retag pcmklatest to latest Cinder-Volume image
shell: "docker tag {{docker_image}} {{docker_image_latest}}"
when: step|int == 2
# Got to check that pacemaker_is_active is working fine with bundle.
# TODO: pacemaker_is_active resource doesn't support bundle.
- cinder_volume_containerized|succeeded
block: *cinder_volume_fetch_retag_container_tasks
- name: Cinder-Volume baremetal to container upgrade tasks
when:
- step|int == 2
- cinder_volume_containerized|failed
block:
- name: get bootstrap nodeid
tags: common
command: hiera -c /etc/puppet/hiera.yaml bootstrap_nodeid
register: bootstrap_node
- name: set is_bootstrap_node fact
tags: common
set_fact: is_bootstrap_node={{bootstrap_node.stdout|lower == ansible_hostname|lower}}
- name: Check cluster resource status
pacemaker_resource:
resource: {get_attr: [CinderBase, role_data, service_name]}
state: started
check_mode: true
ignore_errors: true
register: cinder_volume_res
- when: (is_bootstrap_node) and (cinder_volume_res|succeeded)
block:
- name: Disable the openstack-cinder-volume cluster resource
pacemaker_resource:
resource: openstack-cinder-volume
state: disable
wait_for_resource: true
register: output
retries: 5
until: output.rc == 0
- name: Delete the stopped openstack-cinder-volume cluster resource.
pacemaker_resource:
resource: openstack-cinder-volume
state: delete
wait_for_resource: true
register: output
retries: 5
until: output.rc == 0
- name: Disable cinder_volume service from boot
service: name=openstack-cinder-volume enabled=no

@@ -266,80 +266,88 @@ outputs:
state: directory
metadata_settings:
get_attr: [MysqlPuppetBase, role_data, metadata_settings]
upgrade_tasks:
- name: get bootstrap nodeid
tags: common
command: hiera -c /etc/puppet/hiera.yaml bootstrap_nodeid
register: bootstrap_node
- name: set is_bootstrap_node fact
tags: common
set_fact: is_bootstrap_node={{bootstrap_node.stdout|lower == ansible_hostname|lower}}
- name: Check cluster resource status
when: step|int == 2
pacemaker_resource:
resource: galera
state: master
check_mode: true
ignore_errors: true
register: galera_res
- when:
- step|int == 2
- is_bootstrap_node|bool
- galera_res|succeeded
block:
- name: Disable the galera cluster resource
pacemaker_resource:
resource: galera
state: disable
wait_for_resource: true
register: output
retries: 5
until: output.rc == 0
- name: Delete the stopped galera cluster resource.
when: step|int == 2
pacemaker_resource:
resource: galera
state: delete
wait_for_resource: true
register: output
retries: 5
until: output.rc == 0
- name: Disable mysql service
when: step|int == 2
service: name=mariadb enabled=no
- name: Remove clustercheck service from xinetd
when: step|int == 2
file: state=absent path=/etc/xinetd.d/galera-monitor
- name: Restart xinetd service after clustercheck removal
when: step|int == 2
service: name=xinetd state=restarted
update_tasks:
- name: Get docker Mariadb image
set_fact:
docker_image: {get_param: DockerMysqlImage}
docker_image_latest: *mysql_image_pcmklatest
- name: Mariadb fetch and retag container image for pacemaker
when: step|int == 2
- name: Get previous Mariadb image id
shell: "docker images | awk '/mariadb.* pcmklatest/{print $3}'"
register: mariadb_image_id
- block:
- name: Get a list of container using Mariadb image
shell: "docker ps -a -q -f 'ancestor={{mariadb_image_id.stdout}}'"
register: mariadb_containers_to_destroy
# It will be recreated with the deploy step.
- name: Remove any container using the same Mariadb image
shell: "docker rm -fv {{item}}"
with_items: "{{ mariadb_containers_to_destroy.stdout_lines }}"
- name: Remove previous Mariadb images
shell: "docker rmi -f {{mariadb_image_id.stdout}}"
block: &mysql_fetch_retag_container_tasks
- name: Get docker Mariadb image
set_fact:
docker_image: {get_param: DockerMysqlImage}
docker_image_latest: *mysql_image_pcmklatest
- name: Get previous Mariadb image id
shell: "docker images | awk '/mariadb.* pcmklatest/{print $3}'"
register: mariadb_image_id
- block:
- name: Get a list of container using Mariadb image
shell: "docker ps -a -q -f 'ancestor={{mariadb_image_id.stdout}}'"
register: mariadb_containers_to_destroy
# It will be recreated with the deploy step.
- name: Remove any container using the same Mariadb image
shell: "docker rm -fv {{item}}"
with_items: "{{ mariadb_containers_to_destroy.stdout_lines }}"
- name: Remove previous Mariadb images
shell: "docker rmi -f {{mariadb_image_id.stdout}}"
when:
- mariadb_image_id.stdout != ''
- name: Pull latest Mariadb images
command: "docker pull {{docker_image}}"
- name: Retag pcmklatest to latest Mariadb image
shell: "docker tag {{docker_image}} {{docker_image_latest}}"
# Got to check that pacemaker_is_active is working fine with bundle.
# TODO: pacemaker_is_active resource doesn't support bundle.
upgrade_tasks:
- name: Get docker Mysql image
set_fact:
docker_image_latest: *mysql_image_pcmklatest
- name: Check if Mysql is already containerized
shell: "docker ps -a | grep {{docker_image_latest}}"
ignore_errors: true
register: mysql_containerized
- name: Retag the pacemaker image if containerized
when:
- step|int == 2
- mariadb_image_id.stdout != ''
- name: Pull latest Mariadb images
command: "docker pull {{docker_image}}"
when: step|int == 2
- name: Retag pcmklatest to latest Mariadb image
shell: "docker tag {{docker_image}} {{docker_image_latest}}"
when: step|int == 2
# Got to check that pacemaker_is_active is working fine with bundle.
# TODO: pacemaker_is_active resource doesn't support bundle.
- mysql_containerized|succeeded
block: *mysql_fetch_retag_container_tasks
- name: Mysql baremetal to container upgrade tasks
when:
- step|int == 2
- mysql_containerized|failed
block:
- name: get bootstrap nodeid
tags: common
command: hiera -c /etc/puppet/hiera.yaml bootstrap_nodeid
register: bootstrap_node
- name: set is_bootstrap_node fact
tags: common
set_fact: is_bootstrap_node={{bootstrap_node.stdout|lower == ansible_hostname|lower}}
- name: Check cluster resource status
pacemaker_resource:
resource: galera
state: master
check_mode: true
ignore_errors: true
register: galera_res
- when: (is_bootstrap_node) and (galera_res|succeeded)
block:
- name: Disable the galera cluster resource
pacemaker_resource:
resource: galera
state: disable
wait_for_resource: true
register: output
retries: 5
until: output.rc == 0
- name: Delete the stopped galera cluster resource.
pacemaker_resource:
resource: galera
state: delete
wait_for_resource: true
register: output
retries: 5
until: output.rc == 0
- name: Disable mysql service
service: name=mariadb enabled=no
- name: Remove clustercheck service from xinetd
file: state=absent path=/etc/xinetd.d/galera-monitor
- name: Restart xinetd service after clustercheck removal
service: name=xinetd state=restarted

@@ -253,73 +253,84 @@ outputs:
file:
path: /var/lib/redis
state: directory
upgrade_tasks:
- name: get bootstrap nodeid
tags: common
command: hiera -c /etc/puppet/hiera.yaml bootstrap_nodeid
register: bootstrap_node
- name: set is_bootstrap_node fact
tags: common
set_fact: is_bootstrap_node={{bootstrap_node.stdout|lower == ansible_hostname|lower}}
- name: Check cluster resource status
when: step|int == 2
pacemaker_resource:
resource: {get_attr: [RedisBase, role_data, service_name]}
state: master
check_mode: true
ignore_errors: true
register: redis_res
- when:
- step|int == 2
- is_bootstrap_node|bool
- redis_res|succeeded
block:
- name: Disable the redis cluster resource
pacemaker_resource:
resource: {get_attr: [RedisBase, role_data, service_name]}
state: disable
wait_for_resource: true
register: output
retries: 5
until: output.rc == 0
- name: Delete the stopped redis cluster resource.
pacemaker_resource:
resource: {get_attr: [RedisBase, role_data, service_name]}
state: delete
wait_for_resource: true
register: output
retries: 5
until: output.rc == 0
- name: Disable redis service
when: step|int == 2
service: name=redis enabled=no
update_tasks:
- name: Get docker Redis image
set_fact:
docker_image: {get_param: DockerRedisImage}
docker_image_latest: *redis_image_pcmklatest
- name: Redis fetch and retag container image for pacemaker
when: step|int == 2
- name: Get previous Redis image id
shell: "docker images | awk '/redis.* pcmklatest/{print $3}'"
register: redis_image_id
- block:
- name: Get a list of container using Redis image
shell: "docker ps -a -q -f 'ancestor={{redis_image_id.stdout}}'"
register: redis_containers_to_destroy
# It will be recreated with the deploy step.
- name: Remove any container using the same Redis image
shell: "docker rm -fv {{item}}"
with_items: "{{ redis_containers_to_destroy.stdout_lines }}"
- name: Remove previous Redis images
shell: "docker rmi -f {{redis_image_id.stdout}}"
block: &redis_fetch_retag_container_tasks
- name: Get docker Redis image
set_fact:
docker_image: {get_param: DockerRedisImage}
docker_image_latest: *redis_image_pcmklatest
- name: Get previous Redis image id
shell: "docker images | awk '/redis.* pcmklatest/{print $3}'"
register: redis_image_id
- block:
- name: Get a list of container using Redis image
shell: "docker ps -a -q -f 'ancestor={{redis_image_id.stdout}}'"
register: redis_containers_to_destroy
# It will be recreated with the deploy step.
- name: Remove any container using the same Redis image
shell: "docker rm -fv {{item}}"
with_items: "{{ redis_containers_to_destroy.stdout_lines }}"
- name: Remove previous Redis images
shell: "docker rmi -f {{redis_image_id.stdout}}"
when:
- redis_image_id.stdout != ''
- name: Pull latest Redis images
command: "docker pull {{docker_image}}"
- name: Retag pcmklatest to latest Redis image
shell: "docker tag {{docker_image}} {{docker_image_latest}}"
# Got to check that pacemaker_is_active is working fine with bundle.
# TODO: pacemaker_is_active resource doesn't support bundle.
upgrade_tasks:
- name: Get docker redis image
set_fact:
docker_image_latest: *redis_image_pcmklatest
- name: Check if redis is already containerized
shell: "docker ps -a | grep {{docker_image_latest}}"
ignore_errors: true
register: redis_containerized
- name: Retag the pacemaker image if containerized
when:
- step|int == 2
- redis_image_id.stdout != ''
- name: Pull latest Redis images
command: "docker pull {{docker_image}}"
when: step|int == 2
- name: Retag pcmklatest to latest Redis image
shell: "docker tag {{docker_image}} {{docker_image_latest}}"
when: step|int == 2
# Got to check that pacemaker_is_active is working fine with bundle.
# TODO: pacemaker_is_active resource doesn't support bundle.
- redis_containerized|succeeded
block: *redis_fetch_retag_container_tasks
- name: redis baremetal to container upgrade tasks
when:
- step|int == 2
- redis_containerized|failed
block:
- name: get bootstrap nodeid
tags: common
command: hiera -c /etc/puppet/hiera.yaml bootstrap_nodeid
register: bootstrap_node
- name: set is_bootstrap_node fact
tags: common
set_fact: is_bootstrap_node={{bootstrap_node.stdout|lower == ansible_hostname|lower}}
- name: Check cluster resource status
pacemaker_resource:
resource: {get_attr: [RedisBase, role_data, service_name]}
state: master
check_mode: true
ignore_errors: true
register: redis_res
- when: (is_bootstrap_node) and (redis_res|succeeded)
block:
- name: Disable the redis cluster resource
pacemaker_resource:
resource: {get_attr: [RedisBase, role_data, service_name]}
state: disable
wait_for_resource: true
register: output
retries: 5
until: output.rc == 0
- name: Delete the stopped redis cluster resource.
pacemaker_resource:
resource: {get_attr: [RedisBase, role_data, service_name]}
state: delete
wait_for_resource: true
register: output
retries: 5
until: output.rc == 0
- name: Disable redis service
service: name=redis enabled=no

@@ -218,70 +218,82 @@ outputs:
- /dev/shm:/dev/shm:rw
metadata_settings:
get_attr: [HAProxyBase, role_data, metadata_settings]
upgrade_tasks:
- name: get bootstrap nodeid
tags: common
command: hiera -c /etc/puppet/hiera.yaml bootstrap_nodeid
register: bootstrap_node
- name: set is_bootstrap_node fact
tags: common
set_fact: is_bootstrap_node={{bootstrap_node.stdout|lower == ansible_hostname|lower}}
- name: Check cluster resource status
when: step|int == 2
pacemaker_resource:
resource: {get_attr: [HAProxyBase, role_data, service_name]}
state: started
check_mode: true
ignore_errors: true
register: haproxy_res
- when:
- step|int == 2
- is_bootstrap_node|bool
- haproxy_res|succeeded
block:
- name: Disable the haproxy cluster resource.
pacemaker_resource:
resource: {get_attr: [HAProxyBase, role_data, service_name]}
state: disable
wait_for_resource: true
register: output
retries: 5
until: output.rc == 0
- name: Delete the stopped haproxy cluster resource.
pacemaker_resource:
resource: {get_attr: [HAProxyBase, role_data, service_name]}
state: delete
wait_for_resource: true
register: output
retries: 5
until: output.rc == 0
update_tasks:
- name: Get docker Haproxy image
set_fact:
docker_image: {get_param: DockerHAProxyImage}
docker_image_latest: *haproxy_image_pcmklatest
- name: Haproxy fetch and retag container image for pacemaker
when: step|int == 2
- name: Get previous Haproxy image id
shell: "docker images | awk '/haproxy.* pcmklatest/{print $3}'"
register: haproxy_image_id
- block:
- name: Get a list of container using Haproxy image
shell: "docker ps -a -q -f 'ancestor={{haproxy_image_id.stdout}}'"
register: haproxy_containers_to_destroy
# It will be recreated with the deploy step.
- name: Remove any container using the same Haproxy image
shell: "docker rm -fv {{item}}"
with_items: "{{ haproxy_containers_to_destroy.stdout_lines }}"
- name: Remove previous Haproxy images
shell: "docker rmi -f {{haproxy_image_id.stdout}}"
block: &haproxy_fetch_retag_container_tasks
- name: Get docker Haproxy image
set_fact:
docker_image: {get_param: DockerHAProxyImage}
docker_image_latest: *haproxy_image_pcmklatest
- name: Get previous Haproxy image id
shell: "docker images | awk '/haproxy.* pcmklatest/{print $3}'"
register: haproxy_image_id
- block:
- name: Get a list of container using Haproxy image
shell: "docker ps -a -q -f 'ancestor={{haproxy_image_id.stdout}}'"
register: haproxy_containers_to_destroy
# It will be recreated with the deploy step.
- name: Remove any container using the same Haproxy image
shell: "docker rm -fv {{item}}"
with_items: "{{ haproxy_containers_to_destroy.stdout_lines }}"
- name: Remove previous Haproxy images
shell: "docker rmi -f {{haproxy_image_id.stdout}}"
when:
- haproxy_image_id.stdout != ''
- name: Pull latest Haproxy images
command: "docker pull {{docker_image}}"
- name: Retag pcmklatest to latest Haproxy image
shell: "docker tag {{docker_image}} {{docker_image_latest}}"
# Got to check that pacemaker_is_active is working fine with bundle.
# TODO: pacemaker_is_active resource doesn't support bundle.
upgrade_tasks:
- name: Get docker haproxy image
set_fact:
docker_image_latest: *haproxy_image_pcmklatest
- name: Check if haproxy is already containerized
shell: "docker ps -a | grep {{docker_image_latest}}"
ignore_errors: true
register: haproxy_containerized
- name: Retag the pacemaker image if containerized
when:
- step|int == 2
- haproxy_image_id.stdout != ''
- name: Pull latest Haproxy images
command: "docker pull {{docker_image}}"
when: step|int == 2
- name: Retag pcmklatest to latest Haproxy image
shell: "docker tag {{docker_image}} {{docker_image_latest}}"
when: step|int == 2
# Got to check that pacemaker_is_active is working fine with bundle.
# TODO: pacemaker_is_active resource doesn't support bundle.
- haproxy_containerized|succeeded
block: *haproxy_fetch_retag_container_tasks
- name: haproxy baremetal to container upgrade tasks
when:
- step|int == 2
- haproxy_containerized|failed
block:
- name: get bootstrap nodeid
tags: common
command: hiera -c /etc/puppet/hiera.yaml bootstrap_nodeid
register: bootstrap_node
- name: set is_bootstrap_node fact
tags: common
set_fact: is_bootstrap_node={{bootstrap_node.stdout|lower == ansible_hostname|lower}}
- name: Check cluster resource status
pacemaker_resource:
resource: {get_attr: [HAProxyBase, role_data, service_name]}
state: started
check_mode: true
ignore_errors: true
register: haproxy_res
- when: (is_bootstrap_node) and (haproxy_res|succeeded)
block:
- name: Disable the haproxy cluster resource.
pacemaker_resource:
resource: {get_attr: [HAProxyBase, role_data, service_name]}
state: disable
wait_for_resource: true
register: output
retries: 5
until: output.rc == 0
- name: Delete the stopped haproxy cluster resource.
pacemaker_resource:
resource: {get_attr: [HAProxyBase, role_data, service_name]}
state: delete
wait_for_resource: true
register: output
retries: 5
until: output.rc == 0

@@ -176,37 +176,52 @@ outputs:
Log files from manila containers can be found under
/var/log/containers/manila and /var/log/containers/httpd/manila-api.
ignore_errors: true
upgrade_tasks:
- name: Stop and disable manila_share service
when: step|int == 2
service: name=openstack-manila-share state=stopped enabled=no
update_tasks:
- name: Manila-Share fetch and retag container image for pacemaker
when: step|int == 2
block: &manila_share_fetch_retag_container_tasks
- name: Get docker Manila-Share image
set_fact:
docker_image: {get_param: DockerManilaShareImage}
docker_image_latest: *manila_share_image_pcmklatest
- name: Get previous Manila-Share image id
shell: "docker images | awk '/manila-share.* pcmklatest/{print $3}'"
register: manila_share_image_id
- block:
- name: Get a list of container using Manila-Share image
shell: "docker ps -a -q -f 'ancestor={{manila_share_image_id.stdout}}'"
register: manila_share_containers_to_destroy
# It will be recreated with the deploy step.
- name: Remove any container using the same Manila-Share image
shell: "docker rm -fv {{item}}"
with_items: "{{ manila_share_containers_to_destroy.stdout_lines }}"
- name: Remove previous Manila-Share images
shell: "docker rmi -f {{manila_share_image_id.stdout}}"
when:
- manila_share_image_id.stdout != ''
- name: Pull latest Manila-Share images
command: "docker pull {{docker_image}}"
- name: Retag pcmklatest to latest Manila-Share image
shell: "docker tag {{docker_image}} {{docker_image_latest}}"
# Got to check that pacemaker_is_active is working fine with bundle.
# TODO: pacemaker_is_active resource doesn't support bundle.
upgrade_tasks:
- name: Get docker Manila-Share image
set_fact:
docker_image: {get_param: DockerManilaShareImage}
docker_image_latest: *manila_share_image_pcmklatest
when: step|int == 2
- name: Get previous Manila-Share image id
shell: "docker images | awk '/manila-share.* pcmklatest/{print $3}'"
register: manila_share_image_id
- block:
- name: Get a list of container using Manila-Share image
shell: "docker ps -a -q -f 'ancestor={{manila_share_image_id.stdout}}'"
register: manila_share_containers_to_destroy
# It will be recreated with the deploy step.
- name: Remove any container using the same Manila-Share image
shell: "docker rm -fv {{item}}"
with_items: "{{ manila_share_containers_to_destroy.stdout_lines }}"
- name: Remove previous Manila-Share images
shell: "docker rmi -f {{manila_share_image_id.stdout}}"
- name: Check if Manila-Share is already containerized
shell: "docker ps -a | grep {{docker_image_latest}}"
ignore_errors: true
register: manila_share_containerized
- name: Retag the pacemaker image if containerized
when:
- step|int == 2
- manila_share_image_id.stdout != ''
- name: Pull latest Manila-Share images
command: "docker pull {{docker_image}}"
when: step|int == 2
- name: Retag pcmklatest to latest Manila-Share image
shell: "docker tag {{docker_image}} {{docker_image_latest}}"
when: step|int == 2
# Got to check that pacemaker_is_active is working fine with bundle.
# TODO: pacemaker_is_active resource doesn't support bundle.
- manila_share_containerized|succeeded
block: *manila_share_fetch_retag_container_tasks
- name: Manila-Share baremetal to container upgrade tasks
when:
- step|int == 2
- manila_share_containerized|failed
block:
- name: Stop and disable manila_share service
service: name=openstack-manila-share state=stopped enabled=no

@@ -219,73 +219,82 @@ outputs:
for pid in $(pgrep epmd --ns 1 --nslist pid); do kill $pid; done
metadata_settings:
get_attr: [RabbitmqBase, role_data, metadata_settings]
upgrade_tasks:
- name: get bootstrap nodeid
tags: common
command: hiera -c /etc/puppet/hiera.yaml bootstrap_nodeid
register: bootstrap_node
- name: set is_bootstrap_node fact
tags: common
set_fact: is_bootstrap_node={{bootstrap_node.stdout|lower == ansible_hostname|lower}}
- name: Check cluster resource status
when: step|int == 2
pacemaker_resource:
resource: {get_attr: [RabbitmqBase, role_data, service_name]}
state: started
check_mode: true
ignore_errors: true
register: rabbitmq_res
- when:
- step|int == 2
- is_bootstrap_node|bool
- rabbitmq_res|succeeded
block:
- name: Disable the rabbitmq cluster resource.
pacemaker_resource:
resource: {get_attr: [RabbitmqBase, role_data, service_name]}
state: disable
wait_for_resource: true
register: output
retries: 5
until: output.rc == 0
- name: Delete the stopped rabbitmq cluster resource.
pacemaker_resource:
resource: {get_attr: [RabbitmqBase, role_data, service_name]}
state: delete
wait_for_resource: true
register: output
retries: 5
until: output.rc == 0
- name: Disable rabbitmq service
when: step|int == 2
service: name=rabbitmq-server enabled=no
update_tasks:
- name: Rabbit fetch and retag container image for pacemaker
when: step|int == 2
block: &rabbitmq_fetch_retag_container_tasks
- name: Get docker Rabbitmq image
set_fact:
docker_image: {get_param: DockerRabbitmqImage}
docker_image_latest: *rabbitmq_image_pcmklatest
- name: Get previous Rabbitmq image id
shell: "docker images | awk '/rabbitmq.* pcmklatest/{print $3}'"
register: rabbitmq_image_id
- block:
- name: Get a list of container using Rabbitmq image
shell: "docker ps -a -q -f 'ancestor={{rabbitmq_image_id.stdout}}'"
register: rabbitmq_containers_to_destroy
# It will be recreated with the deploy step.
- name: Remove any container using the same Rabbitmq image
shell: "docker rm -fv {{item}}"
with_items: "{{ rabbitmq_containers_to_destroy.stdout_lines }}"
- name: Remove previous Rabbitmq images
shell: "docker rmi -f {{rabbitmq_image_id.stdout}}"
when:
- rabbitmq_image_id.stdout != ''
- name: Pull latest Rabbitmq images
command: "docker pull {{docker_image}}"
- name: Retag pcmklatest to latest Rabbitmq image
shell: "docker tag {{docker_image}} {{docker_image_latest}}"
# Got to check that pacemaker_is_active is working fine with bundle.
# TODO: pacemaker_is_active resource doesn't support bundle.
upgrade_tasks:
- name: Get docker Rabbitmq image
set_fact:
docker_image: {get_param: DockerRabbitmqImage}
docker_image_latest: *rabbitmq_image_pcmklatest
when: step|int == 2
- name: Get previous Rabbitmq image id
shell: "docker images | awk '/rabbitmq.* pcmklatest/{print $3}'"
register: rabbitmq_image_id
- block:
- name: Get a list of container using Rabbitmq image
shell: "docker ps -a -q -f 'ancestor={{rabbitmq_image_id.stdout}}'"
register: rabbitmq_containers_to_destroy
# It will be recreated with the deploy step.
- name: Remove any container using the same Rabbitmq image
shell: "docker rm -fv {{item}}"
with_items: "{{ rabbitmq_containers_to_destroy.stdout_lines }}"
- name: Remove previous Rabbitmq images
shell: "docker rmi -f {{rabbitmq_image_id.stdout}}"
- name: Check if Rabbitmq is already containerized
shell: "docker ps -a | grep {{docker_image_latest}}"
ignore_errors: true
register: rabbit_containerized
- name: Retag the pacemaker image if containerized
when:
- step|int == 2
- rabbitmq_image_id.stdout != ''
- name: Pull latest Rabbitmq images
command: "docker pull {{docker_image}}"
when: step|int == 2
- name: Retag pcmklatest to latest Rabbitmq image
shell: "docker tag {{docker_image}} {{docker_image_latest}}"
when: step|int == 2
# Got to check that pacemaker_is_active is working fine with bundle.
# TODO: pacemaker_is_active resource doesn't support bundle.
- rabbit_containerized|succeeded
block: *rabbitmq_fetch_retag_container_tasks
- name: Rabbitmq baremetal to container upgrade tasks
when:
- step|int == 2
- rabbit_containerized|failed
block:
- name: get bootstrap nodeid
command: hiera -c /etc/puppet/hiera.yaml bootstrap_nodeid
register: bootstrap_node
- name: set is_bootstrap_node fact
set_fact: is_bootstrap_node={{bootstrap_node.stdout|lower == ansible_hostname|lower}}
- name: Check cluster resource status
pacemaker_resource:
resource: {get_attr: [RabbitmqBase, role_data, service_name]}
state: started
check_mode: true
ignore_errors: true
register: rabbitmq_res
- when: (is_bootstrap_node) and (rabbitmq_res|succeeded)
block:
- name: Disable the rabbitmq cluster resource.
pacemaker_resource:
resource: {get_attr: [RabbitmqBase, role_data, service_name]}
state: disable
wait_for_resource: true
register: output
retries: 5
until: output.rc == 0
- name: Delete the stopped rabbitmq cluster resource.
pacemaker_resource:
resource: {get_attr: [RabbitmqBase, role_data, service_name]}
state: delete
wait_for_resource: true
register: output
retries: 5
until: output.rc == 0
- name: Disable rabbitmq service
service: name=rabbitmq-server enabled=no