tripleo-heat-templates/deployment/cinder/cinder-backup-pacemaker-puppet.yaml

heat_template_version: wallaby
description: >
OpenStack containerized Cinder Backup service
parameters:
ContainerCinderBackupImage:
description: The container image to use for the cinder-backup service
type: string
tags:
- role_specific
ContainerCinderConfigImage:
description: The container image to use for the cinder config_volume
type: string
tags:
- role_specific
ClusterCommonTag:
default: false
description: When set to false, a pacemaker service is configured
to use a floating tag for its container image name,
e.g. 'REGISTRY/NAMESPACE/IMAGENAME:pcmklatest'. When
set to true, the service uses a floating prefix as
well, e.g. 'cluster.common.tag/IMAGENAME:pcmklatest'.
type: boolean
ClusterFullTag:
default: false
description: When set to true, the pacemaker service uses a fully
constant tag for its container image name, e.g.
'cluster.common.tag/SERVICENAME:pcmklatest'.
type: boolean
CinderBackupBackend:
default: swift
description: The short name of the Cinder Backup backend to use.
type: string
constraints:
- allowed_values: ['swift', 'ceph', 'nfs', 'gcs', 's3']
CinderBackupRbdPoolName:
default: backups
type: string
CephClientUserName:
default: openstack
type: string
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
via parameter_defaults in the resource registry.
type: json
ServiceData:
default: {}
description: Dictionary packing service data
type: json
ServiceNetMap:
default: {}
description: Mapping of service_name -> network name. Typically set
via parameter_defaults in the resource registry. Use
parameter_merge_strategies to merge it with the defaults.
type: json
RoleName:
default: ''
description: Role name on which the service is applied
type: string
RoleParameters:
default: {}
description: Parameters specific to the role
type: json
ConfigDebug:
default: false
description: Whether to run config management (e.g. Puppet) in debug mode.
type: boolean
ContainerCli:
type: string
default: 'podman'
description: CLI tool used to manage containers.
constraints:
- allowed_values: ['podman']
CephConfigPath:
type: string
default: "/var/lib/tripleo-config/ceph"
description: |
The path where the Ceph Cluster config files are stored on the host.
resources:
ContainersCommon:
type: ../containers-common.yaml
MySQLClient:
type: ../database/mysql-client.yaml
CinderBackupBase:
type: ./cinder-backup-container-puppet.yaml
properties:
EndpointMap: {get_param: EndpointMap}
ServiceData: {get_param: ServiceData}
ServiceNetMap: {get_param: ServiceNetMap}
RoleName: {get_param: RoleName}
RoleParameters: {get_param: RoleParameters}
CinderBackupBackend: {get_param: CinderBackupBackend}
CinderBackupRbdPoolName: {get_param: CinderBackupRbdPoolName}
CephClientUserName: {get_param: CephClientUserName}
CinderCommon:
type: ./cinder-common-container-puppet.yaml
RoleParametersValue:
type: OS::Heat::Value
properties:
type: json
value:
map_replace:
- map_replace:
- ContainerCinderBackupImage: ContainerCinderBackupImage
ContainerCinderConfigImage: ContainerCinderConfigImage
- values: {get_param: [RoleParameters]}
- values:
ContainerCinderBackupImage: {get_param: ContainerCinderBackupImage}
ContainerCinderConfigImage: {get_param: ContainerCinderConfigImage}
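# The nested map_replace above resolves role-specific image overrides: the
# inner map_replace substitutes any matching key found in RoleParameters,
# and the outer one fills the remaining names from the global parameters.
# A hypothetical example: if RoleParameters contains
#   ContainerCinderBackupImage: registry.example.com/tripleo/openstack-cinder-backup:role-specific
# that value is used for this role; otherwise the global ContainerCinderBackupImage applies.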
outputs:
role_data:
description: Role data for the Cinder Backup role.
value:
service_name: cinder_backup
monitoring_subscription: {get_attr: [CinderBackupBase, role_data, monitoring_subscription]}
config_settings:
map_merge:
- get_attr: [CinderBackupBase, role_data, config_settings]
- tripleo::profile::pacemaker::cinder::backup_bundle::cinder_backup_docker_image: &cinder_backup_image_pcmklatest
if:
- {get_param: ClusterFullTag}
- "cluster.common.tag/cinder-backup:pcmklatest"
- yaql:
data:
if:
- {get_param: ClusterCommonTag}
- yaql:
data: {get_attr: [RoleParametersValue, value, ContainerCinderBackupImage]}
expression: concat("cluster.common.tag/", $.data.rightSplit(separator => "/", maxSplits => 1)[1])
- {get_attr: [RoleParametersValue, value, ContainerCinderBackupImage]}
expression: concat($.data.rightSplit(separator => ":", maxSplits => 1)[0], ":pcmklatest")
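# A worked example of the expression above, assuming the hypothetical image
# 'registry.example.com/tripleo/openstack-cinder-backup:current-tripleo':
#   both flags false -> registry.example.com/tripleo/openstack-cinder-backup:pcmklatest
#   ClusterCommonTag -> cluster.common.tag/openstack-cinder-backup:pcmklatest
#   ClusterFullTag   -> cluster.common.tag/cinder-backup:pcmklatest (constant name)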
tripleo::profile::pacemaker::cinder::backup_bundle::docker_volumes: {get_attr: [CinderCommon, cinder_backup_volumes]}
tripleo::profile::pacemaker::cinder::backup_bundle::docker_environment: {get_attr: [CinderCommon, cinder_backup_environment]}
tripleo::profile::pacemaker::cinder::backup_bundle::container_backend: {get_param: ContainerCli}
tripleo::profile::pacemaker::cinder::backup_bundle::ceph_conf_path: {get_param: CephConfigPath}
cinder::backup::manage_service: false
cinder::backup::enabled: false
service_config_settings:
get_attr: [CinderBackupBase, role_data, service_config_settings]
# BEGIN DOCKER SETTINGS
puppet_config:
config_volume: cinder
puppet_tags: cinder_config,file,concat,file_line
step_config:
list_join:
- "\n"
- - {get_attr: [CinderBackupBase, role_data, puppet_config, step_config]}
- "include tripleo::profile::pacemaker::cinder::backup_bundle"
config_image: {get_attr: [RoleParametersValue, value, ContainerCinderConfigImage]}
kolla_config:
/var/lib/kolla/config_files/cinder_backup.json:
command: /usr/bin/cinder-backup --config-file /etc/cinder/cinder.conf
config_files: {get_attr: [CinderCommon, cinder_common_kolla_config_files]}
permissions: {get_attr: [CinderCommon, cinder_common_kolla_permissions]}
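# Broadly, kolla's start script consumes this JSON inside the container: it
# copies each config_files entry into place, applies the listed permissions,
# and then execs the command above.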
container_config_scripts: {get_attr: [ContainersCommon, container_config_scripts]}
docker_config:
step_3:
cinder_backup_init_logs:
start_order: 0
image: {get_attr: [RoleParametersValue, value, ContainerCinderBackupImage]}
net: none
privileged: false
user: root
volumes:
- /var/log/containers/cinder:/var/log/cinder:z
command: ['/bin/bash', '-c', 'chown -R cinder:cinder /var/log/cinder']
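# The cinder_backup_init_logs one-shot container above runs at step 3 as root
# to fix ownership of the host log directory before the pacemaker bundle starts.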
host_prep_tasks: {get_attr: [CinderCommon, cinder_backup_host_prep_tasks]}
deploy_steps_tasks:
- name: Cinder Backup tag container image for pacemaker
when: step|int == 1
import_role:
name: tripleo_container_tag
vars:
container_image: {get_attr: [RoleParametersValue, value, ContainerCinderBackupImage]}
container_image_latest: *cinder_backup_image_pcmklatest
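# The task above applies the floating pcmklatest tag at step 1; it is roughly
# equivalent to running "podman tag <ContainerCinderBackupImage> <pcmklatest name>"
# on each node (a sketch; the tripleo_container_tag role handles the details).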
- name: Cinder Backup HA Wrappers Step
when: step|int == 5
block: &cinder_backup_puppet_bundle
- name: Cinder backup puppet bundle
import_role:
name: tripleo_ha_wrapper
vars:
tripleo_ha_wrapper_service_name: cinder_backup
tripleo_ha_wrapper_resource_name: openstack-cinder-backup
tripleo_ha_wrapper_bundle_name: openstack-cinder-backup
tripleo_ha_wrapper_resource_state: Started
tripleo_ha_wrapper_puppet_config_volume: cinder
tripleo_ha_wrapper_puppet_execute: 'include ::tripleo::profile::base::pacemaker; include ::tripleo::profile::pacemaker::cinder::backup_bundle'
tripleo_ha_wrapper_puppet_tags: 'pacemaker::resource::bundle,pacemaker::property,pacemaker::resource::ip,pacemaker::resource::ocf,pacemaker::constraint::order,pacemaker::constraint::colocation'
tripleo_ha_wrapper_puppet_debug: {get_param: ConfigDebug}
tripleo_ha_wrapper_config_suffix: .cinder_backup_previous_run
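# Broadly, the step 5 block above hands off to the tripleo_ha_wrapper role,
# which runs puppet with the listed tags to create or refresh the
# openstack-cinder-backup pacemaker bundle and, using the config suffix above,
# restarts the resource only when its generated configuration changed.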
update_tasks:
- name: cinder_backup fetch and retag container image for pacemaker
when: step|int == 2
block: &cinder_backup_fetch_retag_container_tasks
- name: Get container cinder_backup image
set_fact:
cinder_backup_image: {get_attr: [RoleParametersValue, value, ContainerCinderBackupImage]}
cinder_backup_image_latest: *cinder_backup_image_pcmklatest
- name: Pull latest cinder_backup images
command: "{{container_cli}} pull {{cinder_backup_image}}"
register: result
retries: 3
delay: 3
until: result.rc == 0
- name: Get previous cinder_backup image id
shell: "{{container_cli}} inspect --format '{{'{{'}}.Id{{'}}'}}' {{cinder_backup_image_latest}}"
register: old_cinder_backup_image_id
failed_when: false
- name: Get new cinder_backup image id
shell: "{{container_cli}} inspect --format '{{'{{'}}.Id{{'}}'}}' {{cinder_backup_image}}"
register: new_cinder_backup_image_id
- name: Retag pcmklatest to latest cinder_backup image
include_role:
name: tripleo_container_tag
vars:
container_image: "{{cinder_backup_image}}"
container_image_latest: "{{cinder_backup_image_latest}}"
when:
- old_cinder_backup_image_id.stdout != new_cinder_backup_image_id.stdout
post_update_tasks:
- name: Cinder backup bundle post update
when: step|int == 1
block: *cinder_backup_puppet_bundle
vars:
tripleo_ha_wrapper_minor_update: true
upgrade_tasks:
- name: Prepare switch of cinder_backup image name
when:
- step|int == 0
block:
- name: Get cinder_backup image id currently used by pacemaker
shell: "pcs resource config openstack-cinder-backup | grep -Eo 'image=[^ ]+' | awk -F= '{print $2;}'"
register: cinder_backup_image_current_res
failed_when: false
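# The pcs/grep/awk pipeline above extracts the value after 'image=' from the
# resource config output; a hypothetical line such as
#   image=registry.example.com/tripleo/openstack-cinder-backup:pcmklatest
# yields that full image reference in cinder_backup_image_current_res.stdout.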
- name: cinder_backup image facts
set_fact:
cinder_backup_image_latest: *cinder_backup_image_pcmklatest
cinder_backup_image_current: "{{cinder_backup_image_current_res.stdout}}"
- name: Temporarily tag the current cinder_backup image id with the upgraded image name
import_role:
name: tripleo_container_tag
vars:
container_image: "{{cinder_backup_image_current}}"
container_image_latest: "{{cinder_backup_image_latest}}"
pull_image: false
when:
- cinder_backup_image_current != ''
- cinder_backup_image_current != cinder_backup_image_latest
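# Tagging the image currently used by pacemaker with the upgraded pcmklatest
# name (task above) keeps the existing bundle startable while the image
# parameters change; the real upgraded image is pulled and retagged later by
# the step 3 retag block at the end of these upgrade_tasks.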
# During an OS Upgrade, the cluster may not exist so we use
# the shell module instead.
# TODO(odyssey4me):
# Fix the pacemaker_resource module to handle the exception
# for a non-existent cluster more gracefully.
- name: Check openstack-cinder-backup cluster resource status
shell: pcs resource config openstack-cinder-backup
failed_when: false
changed_when: false
register: cinder_backup_pcs_res_result
- name: Set fact cinder_backup_pcs_res
set_fact:
cinder_backup_pcs_res: "{{cinder_backup_pcs_res_result.rc == 0}}"
- name: set is_cinder_backup_bootstrap_node fact
tags: common
set_fact: is_cinder_backup_bootstrap_node={{cinder_backup_short_bootstrap_node_name|lower == ansible_facts['hostname']|lower}}
- name: Update cinder_backup pcs resource bundle for new container image
when:
- step|int == 1
- is_cinder_backup_bootstrap_node
- cinder_backup_pcs_res|bool
- cinder_backup_image_current != cinder_backup_image_latest
block:
- name: Disable the cinder_backup cluster resource before container upgrade
pacemaker_resource:
resource: openstack-cinder-backup
state: disable
wait_for_resource: true
register: output
retries: 5
until: output.rc == 0
- name: Update the cinder_backup bundle to use the new container image name
command: "pcs resource bundle update openstack-cinder-backup container image={{cinder_backup_image_latest}}"
- name: Enable the cinder_backup cluster resource
pacemaker_resource:
resource: openstack-cinder-backup
state: enable
wait_for_resource: true
register: output
retries: 5
until: output.rc == 0
- name: Create hiera data to upgrade cinder_backup in a stepwise manner.
when:
- step|int == 1
- cluster_recreate|bool
block:
- name: set cinder_backup upgrade node facts in a single-node environment
set_fact:
cinder_backup_short_node_names_upgraded: "{{ cinder_backup_short_node_names }}"
cinder_backup_node_names_upgraded: "{{ cinder_backup_node_names }}"
cacheable: false
when: groups['cinder_backup'] | length <= 1
- name: set cinder_backup upgrade node facts from the limit option
set_fact:
cinder_backup_short_node_names_upgraded: "{{ cinder_backup_short_node_names_upgraded|default([]) + [item.split('.')[0]] }}"
cinder_backup_node_names_upgraded: "{{ cinder_backup_node_names_upgraded|default([]) + [item] }}"
cacheable: false
when:
- groups['cinder_backup'] | length > 1
- item.split('.')[0] in ansible_limit.split(':')
loop: "{{ cinder_backup_node_names | default([]) }}"
- fail:
msg: >
You can't upgrade cinder_backup without a
staged upgrade. You need to use the limit option in order
to do so.
when: >-
cinder_backup_short_node_names_upgraded is not defined or
cinder_backup_short_node_names_upgraded | length == 0 or
cinder_backup_node_names_upgraded is not defined or
cinder_backup_node_names_upgraded | length == 0
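# Assuming the standard TripleO workflow, a staged upgrade is driven by
# passing a node subset via the limit option, e.g.
# "openstack overcloud upgrade run --limit controller-0".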
- debug:
msg: "Prepare cinder_backup upgrade for {{ cinder_backup_short_node_names_upgraded }}"
- name: remove cinder_backup init container on upgrade-scaleup to force re-init
include_role:
name: tripleo_container_rm
vars:
tripleo_containers_to_rm:
- cinder_backup_init_bundle
when:
- cinder_backup_short_node_names_upgraded | length > 1
- name: add the cinder_backup short name to hiera data for the upgrade.
include_role:
name: tripleo_upgrade_hiera
tasks_from: set.yml
vars:
tripleo_upgrade_key: cinder_backup_short_node_names_override
tripleo_upgrade_value: "{{ cinder_backup_short_node_names_upgraded }}"
- name: add the cinder_backup long name to hiera data for the upgrade
include_role:
name: tripleo_upgrade_hiera
tasks_from: set.yml
vars:
tripleo_upgrade_key: cinder_backup_node_names_override
tripleo_upgrade_value: "{{ cinder_backup_node_names_upgraded }}"
- name: remove the extra hiera data needed for the upgrade.
include_role:
name: tripleo_upgrade_hiera
tasks_from: remove.yml
vars:
tripleo_upgrade_key: "{{ item }}"
loop:
- cinder_backup_short_node_names_override
- cinder_backup_node_names_override
when: cinder_backup_short_node_names_upgraded | length == cinder_backup_node_names | length
- name: Retag the pacemaker image if containerized
when:
- step|int == 3
block: *cinder_backup_fetch_retag_container_tasks