
This implements the creation of the haproxy bundle on the host. The testing protocol used is documented in the Depends-On change. A post_update task is added because, during a minor update, the deployment tasks are not run during the per-node update procedure but only during the final converge. Running the role again at that point ensures that any config change triggers a restart during the minor update itself, so the disruption stays local to the single node being updated. Without this, the final converge could trigger a global restart of the HA bundles, which would be quite disruptive.

Depends-On: Iaa7e89f0d25221c2a6ef0b81eb88a6f496f01696
Change-Id: Ia4399b632257e693fb2c516e487856331149589d
Related-Bug: #1863442
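For reference, the post_update hook pattern described above looks like the excerpt below, taken from the cinder-backup template shown on this page: at step 1 of the post-update stage the same HA wrapper block used at deploy time is re-run with tripleo_ha_wrapper_minor_update enabled, so a config change restarts only the bundle on the node currently being updated.

    post_update_tasks:
      - name: Cinder backup bundle post update
        when: step|int == 1
        block: *cinder_backup_puppet_bundle
        vars:
          tripleo_ha_wrapper_minor_update: true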
heat_template_version: rocky

description: >
  OpenStack containerized Cinder Backup service

parameters:
  ContainerCinderBackupImage:
    description: image
    type: string
  ContainerCinderConfigImage:
    description: The container image to use for the cinder config_volume
    type: string
  ClusterCommonTag:
    default: false
    description: When set to false, a pacemaker service is configured
                 to use a floating tag for its container image name,
                 e.g. 'REGISTRY/NAMESPACE/IMAGENAME:pcmklatest'. When
                 set to true, the service uses a floating prefix as
                 well, e.g. 'cluster.common.tag/IMAGENAME:pcmklatest'.
    type: boolean
  CinderBackupBackend:
    default: swift
    description: The short name of the Cinder Backup backend to use.
    type: string
    constraints:
      - allowed_values: ['swift', 'ceph', 'nfs']
  CinderBackupRbdPoolName:
    default: backups
    type: string
  CephClientUserName:
    default: openstack
    type: string
  EndpointMap:
    default: {}
    description: Mapping of service endpoint -> protocol. Typically set
                 via parameter_defaults in the resource registry.
    type: json
  ServiceData:
    default: {}
    description: Dictionary packing service data
    type: json
  ServiceNetMap:
    default: {}
    description: Mapping of service_name -> network name. Typically set
                 via parameter_defaults in the resource registry. This
                 mapping overrides those in ServiceNetMapDefaults.
    type: json
  DefaultPasswords:
    default: {}
    type: json
  RoleName:
    default: ''
    description: Role name on which the service is applied
    type: string
  RoleParameters:
    default: {}
    description: Parameters specific to the role
    type: json
  ConfigDebug:
    default: false
    description: Whether to run config management (e.g. Puppet) in debug mode.
    type: boolean
  ContainerCli:
    type: string
    default: 'podman'
    description: CLI tool used to manage containers.
    constraints:
      - allowed_values: ['docker', 'podman']
  DeployIdentifier:
    default: ''
    type: string
    description: >
      Setting this to a unique value will re-run any deployment tasks which
      perform configuration on a Heat stack-update.
  DockerCinderBackupLoggingSource:
    type: json
    default:
      tag: openstack.cinder.backup
      file: /var/log/containers/cinder/cinder-backup.log

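# common_tag_enabled selects the cluster-wide 'cluster.common.tag/' image
# prefix described by the ClusterCommonTag parameter above.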
conditions:
  docker_enabled: {equals: [{get_param: ContainerCli}, 'docker']}
  common_tag_enabled: {equals: [{get_param: ClusterCommonTag}, true]}

resources:

  ContainersCommon:
    type: ../containers-common.yaml

  MySQLClient:
    type: ../database/mysql-client.yaml

  CinderBackupBase:
    type: ./cinder-backup-container-puppet.yaml
    properties:
      EndpointMap: {get_param: EndpointMap}
      ServiceData: {get_param: ServiceData}
      ServiceNetMap: {get_param: ServiceNetMap}
      DefaultPasswords: {get_param: DefaultPasswords}
      RoleName: {get_param: RoleName}
      RoleParameters: {get_param: RoleParameters}
      CinderBackupBackend: {get_param: CinderBackupBackend}
      CinderBackupRbdPoolName: {get_param: CinderBackupRbdPoolName}
      CephClientUserName: {get_param: CephClientUserName}

  CinderCommon:
    type: ./cinder-common-container-puppet.yaml

outputs:
  role_data:
    description: Role data for the Cinder Backup role.
    value:
      service_name: cinder_backup
      monitoring_subscription: {get_attr: [CinderBackupBase, role_data, monitoring_subscription]}
      config_settings:
        map_merge:
          - get_attr: [CinderBackupBase, role_data, config_settings]
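          # The &cinder_backup_image_pcmklatest anchor below is reused by the
          # deploy, update and upgrade tasks: pacemaker always references the
          # floating :pcmklatest tag, which is retagged to the new image
          # during updates and upgrades.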
          - tripleo::profile::pacemaker::cinder::backup_bundle::cinder_backup_docker_image: &cinder_backup_image_pcmklatest
              yaql:
                data:
                  if:
                    - common_tag_enabled
                    - yaql:
                        data: {get_param: ContainerCinderBackupImage}
                        expression: concat("cluster.common.tag/", $.data.rightSplit(separator => "/", maxSplits => 1)[1])
                    - {get_param: ContainerCinderBackupImage}
                expression: concat($.data.rightSplit(separator => ":", maxSplits => 1)[0], ":pcmklatest")
            tripleo::profile::pacemaker::cinder::backup_bundle::docker_volumes: {get_attr: [CinderCommon, cinder_backup_volumes]}
            tripleo::profile::pacemaker::cinder::backup_bundle::docker_environment: {get_attr: [CinderCommon, cinder_backup_environment]}
            tripleo::profile::pacemaker::cinder::backup_bundle::container_backend: {get_param: ContainerCli}
            cinder::backup::manage_service: false
            cinder::backup::enabled: false
      service_config_settings:
        map_merge:
          - get_attr: [CinderBackupBase, role_data, service_config_settings]
          - rsyslog:
              tripleo_logging_sources_cinder_backup:
                - {get_param: DockerCinderBackupLoggingSource}
      # BEGIN DOCKER SETTINGS
      puppet_config:
        config_volume: cinder
        puppet_tags: cinder_config,file,concat,file_line
        step_config:
          list_join:
            - "\n"
            - - {get_attr: [CinderBackupBase, role_data, puppet_config, step_config]}
              - "include tripleo::profile::pacemaker::cinder::backup"
        config_image: {get_param: ContainerCinderConfigImage}
      kolla_config:
        /var/lib/kolla/config_files/cinder_backup.json:
          command: /usr/bin/cinder-backup --config-file /usr/share/cinder/cinder-dist.conf --config-file /etc/cinder/cinder.conf
          config_files:
            - source: "/var/lib/kolla/config_files/src/*"
              dest: "/"
              merge: true
              preserve_properties: true
            - source: "/var/lib/kolla/config_files/src-ceph/"
              dest: "/etc/ceph/"
              merge: true
              preserve_properties: true
            - source: "/var/lib/kolla/config_files/src-iscsid/*"
              dest: "/etc/iscsi/"
              merge: true
              preserve_properties: true
            - source: "/var/lib/kolla/config_files/src-tls/*"
              dest: "/"
              merge: true
              preserve_properties: true
              optional: true
          permissions:
            - path: /var/lib/cinder
              owner: cinder:cinder
              recurse: true
            - path: /var/log/cinder
              owner: cinder:cinder
              recurse: true
            - path: /etc/pki/tls/certs/etcd.crt
              owner: cinder:cinder
            - path: /etc/pki/tls/private/etcd.key
              owner: cinder:cinder
      container_config_scripts: {get_attr: [ContainersCommon, container_config_scripts]}
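      # The only plain container started for this service is a short-lived
      # init container that fixes log directory ownership; the cinder-backup
      # process itself runs inside the pacemaker-managed bundle.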
      docker_config:
        step_3:
          cinder_backup_init_logs:
            start_order: 0
            image: {get_param: ContainerCinderBackupImage}
            net: none
            privileged: false
            user: root
            volumes:
              - /var/log/containers/cinder:/var/log/cinder:z
            command: ['/bin/bash', '-c', 'chown -R cinder:cinder /var/log/cinder']
      host_prep_tasks: {get_attr: [CinderCommon, cinder_backup_host_prep_tasks]}
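      # Step 1 tags the service image with the floating :pcmklatest name;
      # step 5 runs the tripleo_ha_wrapper role, which applies the puppet
      # manifest that creates (and, on config change, restarts) the
      # pacemaker bundle.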
      deploy_steps_tasks:
        - name: Cinder Backup tag container image for pacemaker
          when: step|int == 1
          import_role:
            name: tripleo_container_tag
          vars:
            container_image: {get_param: ContainerCinderBackupImage}
            container_image_latest: *cinder_backup_image_pcmklatest
        - name: Cinder Backup HA Wrappers Step
          when: step|int == 5
          block: &cinder_backup_puppet_bundle
            - name: Cinder backup puppet bundle
              import_role:
                name: tripleo_ha_wrapper
              vars:
                tripleo_ha_wrapper_service_name: cinder_backup
                tripleo_ha_wrapper_resource_name: openstack-cinder-backup
                tripleo_ha_wrapper_bundle_name: openstack-cinder-backup
                tripleo_ha_wrapper_resource_state: _ Started
                tripleo_ha_wrapper_puppet_config_volume: cinder
                tripleo_ha_wrapper_puppet_execute: 'include ::tripleo::profile::base::pacemaker; include ::tripleo::profile::pacemaker::cinder::backup_bundle'
                tripleo_ha_wrapper_puppet_tags: 'pacemaker::resource::bundle,pacemaker::property,pacemaker::resource::ip,pacemaker::resource::ocf,pacemaker::constraint::order,pacemaker::constraint::colocation'
                tripleo_ha_wrapper_puppet_debug: {get_param: ConfigDebug}

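      # Minor update: step 1 tears down any non-HA cinder_backup container,
      # step 2 pulls the new image and moves the :pcmklatest tag to it when
      # the image id has changed.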
      update_tasks:
        - name: Tear-down non-HA cinder-backup container
          when:
            - step|int == 1
          block: &cinder_backup_teardown_nonha
            - name: Remove non-HA cinder-backup container
              include_role:
                name: tripleo_container_rm
              vars:
                tripleo_container_cli: "{{ container_cli }}"
                tripleo_containers_to_rm:
                  - cinder_backup
        - name: cinder_backup fetch and retag container image for pacemaker
          when: step|int == 2
          block: &cinder_backup_fetch_retag_container_tasks
            - name: Get container cinder_backup image
              set_fact:
                cinder_backup_image: {get_param: ContainerCinderBackupImage}
                cinder_backup_image_latest: *cinder_backup_image_pcmklatest
            - name: Pull latest cinder_backup images
              command: "{{container_cli}} pull {{cinder_backup_image}}"
            - name: Get previous cinder_backup image id
              shell: "{{container_cli}} inspect --format '{{'{{'}}.Id{{'}}'}}' {{cinder_backup_image_latest}}"
              register: old_cinder_backup_image_id
              failed_when: false
            - name: Get new cinder_backup image id
              shell: "{{container_cli}} inspect --format '{{'{{'}}.Id{{'}}'}}' {{cinder_backup_image}}"
              register: new_cinder_backup_image_id
            - name: Retag pcmklatest to latest cinder_backup image
              include_role:
                name: tripleo_container_tag
              vars:
                container_image: "{{cinder_backup_image}}"
                container_image_latest: "{{cinder_backup_image_latest}}"
              when:
                - old_cinder_backup_image_id.stdout != new_cinder_backup_image_id.stdout

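      # Post-update: re-run the HA wrapper block above with
      # tripleo_ha_wrapper_minor_update enabled, so a config change restarts
      # the bundle while only the node being updated is disrupted, instead of
      # triggering a global restart at the final converge.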
      post_update_tasks:
        - name: Cinder backup bundle post update
          when: step|int == 1
          block: *cinder_backup_puppet_bundle
          vars:
            tripleo_ha_wrapper_minor_update: true

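      # Major upgrade: switch the pacemaker bundle to the :pcmklatest image
      # name, update the bundle definition from the bootstrap node, and stage
      # hiera data so nodes can be upgraded in batches via the limit option.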
      upgrade_tasks:
        - name: Tear-down non-HA cinder_backup container
          when:
            - step|int == 0
          block: *cinder_backup_teardown_nonha
        - name: Prepare switch of cinder_backup image name
          when:
            - step|int == 0
          block:
            - name: Get cinder_backup image id currently used by pacemaker
              shell: "pcs resource config openstack-cinder-backup | grep -Eo 'image=[^ ]+' | awk -F= '{print $2;}'"
              register: cinder_backup_image_current_res
              failed_when: false
            - name: cinder_backup image facts
              set_fact:
                cinder_backup_image_latest: *cinder_backup_image_pcmklatest
                cinder_backup_image_current: "{{cinder_backup_image_current_res.stdout}}"
            - name: Temporarily tag the current cinder_backup image id with the upgraded image name
              import_role:
                name: tripleo_container_tag
              vars:
                container_image: "{{cinder_backup_image_current}}"
                container_image_latest: "{{cinder_backup_image_latest}}"
                pull_image: false
              when:
                - cinder_backup_image_current != ''
                - cinder_backup_image_current != cinder_backup_image_latest
            # During an OS Upgrade, the cluster may not exist so we use
            # the shell module instead.
            # TODO(odyssey4me):
            # Fix the pacemaker_resource module to handle the exception
            # for a non-existent cluster more gracefully.
            - name: Check openstack-cinder-backup cluster resource status
              shell: pcs resource config openstack-cinder-backup
              failed_when: false
              changed_when: false
              register: cinder_backup_pcs_res_result
            - name: Set fact cinder_backup_pcs_res
              set_fact:
                cinder_backup_pcs_res: "{{cinder_backup_pcs_res_result.rc == 0}}"
            - name: set is_cinder_backup_bootstrap_node fact
              tags: common
              set_fact: is_cinder_backup_bootstrap_node={{cinder_backup_short_bootstrap_node_name|lower == ansible_hostname|lower}}
        - name: Update cinder_backup pcs resource bundle for new container image
          when:
            - step|int == 1
            - is_cinder_backup_bootstrap_node
            - cinder_backup_pcs_res|bool
            - cinder_backup_image_current != cinder_backup_image_latest
          block:
            - name: Disable the cinder_backup cluster resource before container upgrade
              pacemaker_resource:
                resource: openstack-cinder-backup
                state: disable
                wait_for_resource: true
              register: output
              retries: 5
              until: output.rc == 0
            - name: Update the cinder_backup bundle to use the new container image name
              command: "pcs resource bundle update openstack-cinder-backup container image={{cinder_backup_image_latest}}"
            - name: Enable the cinder_backup cluster resource
              pacemaker_resource:
                resource: openstack-cinder-backup
                state: enable
                wait_for_resource: true
              register: output
              retries: 5
              until: output.rc == 0
        - name: Create hiera data to upgrade cinder_backup in a stepwise manner.
          when:
            - step|int == 1
            - cluster_recreate|bool
          block:
            - name: set cinder_backup upgrade node facts in a single-node environment
              set_fact:
                cinder_backup_short_node_names_upgraded: "{{ cinder_backup_short_node_names }}"
                cinder_backup_node_names_upgraded: "{{ cinder_backup_node_names }}"
                cacheable: no
              when: groups['cinder_backup'] | length <= 1
            - name: set cinder_backup upgrade node facts from the limit option
              set_fact:
                cinder_backup_short_node_names_upgraded: "{{ cinder_backup_short_node_names_upgraded|default([]) + [item.split('.')[0]] }}"
                cinder_backup_node_names_upgraded: "{{ cinder_backup_node_names_upgraded|default([]) + [item] }}"
                cacheable: no
              when:
                - groups['cinder_backup'] | length > 1
                - item.split('.')[0] in ansible_limit.split(':')
              loop: "{{ cinder_backup_node_names | default([]) }}"
            - fail:
                msg: >
                  You can't upgrade cinder_backup without a
                  staged upgrade. You need to use the limit option in order
                  to do so.
              when: >-
                cinder_backup_short_node_names_upgraded is not defined or
                cinder_backup_short_node_names_upgraded | length == 0 or
                cinder_backup_node_names_upgraded is not defined or
                cinder_backup_node_names_upgraded | length == 0
            - debug:
                msg: "Prepare cinder_backup upgrade for {{ cinder_backup_short_node_names_upgraded }}"
            - name: remove cinder_backup init container on upgrade-scaleup to force re-init
              include_role:
                name: tripleo_container_rm
              vars:
                tripleo_containers_to_rm:
                  - cinder_backup_init_bundle
              when:
                - cinder_backup_short_node_names_upgraded | length > 1
            - name: add the cinder_backup short name to hiera data for the upgrade.
              include_role:
                name: tripleo_upgrade_hiera
                tasks_from: set.yml
              vars:
                tripleo_upgrade_key: cinder_backup_short_node_names_override
                tripleo_upgrade_value: "{{ cinder_backup_short_node_names_upgraded }}"
            - name: add the cinder_backup long name to hiera data for the upgrade
              include_role:
                name: tripleo_upgrade_hiera
                tasks_from: set.yml
              vars:
                tripleo_upgrade_key: cinder_backup_node_names_override
                tripleo_upgrade_value: "{{ cinder_backup_node_names_upgraded }}"
            - name: remove the extra hiera data needed for the upgrade.
              include_role:
                name: tripleo_upgrade_hiera
                tasks_from: remove.yml
              vars:
                tripleo_upgrade_key: "{{ item }}"
              loop:
                - cinder_backup_short_node_names_override
                - cinder_backup_node_names_override
              when: cinder_backup_short_node_names_upgraded | length == cinder_backup_node_names | length
        - name: Retag the pacemaker image if containerized
          when:
            - step|int == 3
          block: *cinder_backup_fetch_retag_container_tasks