tripleo-heat-templates/deployment/pacemaker/clustercheck-container-puppet.yaml
Jose Luis Franco Arza d1035703b7 Force removal of docker container in tripleo-docker-rm.
The tripleo-docker-rm role has been replaced by tripleo-container-rm [0].
That role identifies the container engine via the container_cli variable
and removes the given container with it. However, the tasks in the
post_upgrade_tasks section were intended to remove the old docker
containers after upgrading from Rocky to Stein, where podman becomes
the default container engine.

For that reason, we need to ensure that docker is the engine used to
remove these containers; otherwise we would remove the podman container
and the deployment steps would fail.
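
For illustration, a sketch of the resulting post_upgrade_tasks entry, which
pins the engine explicitly (it mirrors the snippet at the end of the template
below; only the inline comment is added here):

    post_upgrade_tasks:
      - when: step|int == 1
        import_role:
          name: tripleo-docker-rm
        vars:
          containers_to_rm:
            - clustercheck
          # Force docker: the containers being removed were created by docker
          # before the upgrade, not by podman.
          tripleo_container_cli: "docker"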

Closes-Bug: #1836531
[0] - 2135446a35

Depends-On: https://review.opendev.org/#/c/671698/
Change-Id: Ib139a1d77f71fc32a49c9878d1b4a6d07564e9dc
2019-07-19 12:37:35 +00:00


heat_template_version: rocky

description: >
  MySQL HA clustercheck service deployment using puppet
  This service is used by HAProxy in a HA scenario to report whether
  the local galera node is synced

parameters:
  ContainerClustercheckImage:
    description: image
    type: string
  ContainerClustercheckConfigImage:
    description: The container image to use for the clustercheck config_volume
    type: string
  EndpointMap:
    default: {}
    description: Mapping of service endpoint -> protocol. Typically set
                 via parameter_defaults in the resource registry.
    type: json
  ServiceData:
    default: {}
    description: Dictionary packing service data
    type: json
  ServiceNetMap:
    default: {}
    description: Mapping of service_name -> network name. Typically set
                 via parameter_defaults in the resource registry. This
                 mapping overrides those in ServiceNetMapDefaults.
    type: json
  DefaultPasswords:
    default: {}
    type: json
  RoleName:
    default: ''
    description: Role name on which the service is applied
    type: string
  RoleParameters:
    default: {}
    description: Parameters specific to the role
    type: json

resources:

  ContainersCommon:
    type: ../containers-common.yaml

  # We import from the corresponding docker service because otherwise we risk
  # rewriting the tripleo::mysql::firewall_rules key with the baremetal firewall
  # rules (see LP#1728918)
  MysqlPuppetBase:
    type: ../database/mysql-pacemaker-puppet.yaml
    properties:
      EndpointMap: {get_param: EndpointMap}
      ServiceData: {get_param: ServiceData}
      ServiceNetMap: {get_param: ServiceNetMap}
      DefaultPasswords: {get_param: DefaultPasswords}
      RoleName: {get_param: RoleName}
      RoleParameters: {get_param: RoleParameters}

outputs:
  role_data:
    description: Containerized service clustercheck using composable services.
    value:
      service_name: clustercheck
      config_settings: {get_attr: [MysqlPuppetBase, role_data, config_settings]}
      # BEGIN DOCKER SETTINGS #
      puppet_config:
        config_volume: clustercheck
        puppet_tags: file # set this even though file is the default
        step_config: "include ::tripleo::profile::pacemaker::clustercheck"
        config_image: {get_param: ContainerClustercheckConfigImage}
      kolla_config:
        /var/lib/kolla/config_files/clustercheck.json:
          command: /usr/sbin/xinetd -dontfork
          config_files:
            - source: "/var/lib/kolla/config_files/src/*"
              dest: "/"
              merge: true
              preserve_properties: true
      docker_config:
        step_2:
          clustercheck:
            start_order: 1
            image: {get_param: ContainerClustercheckImage}
            restart: always
            net: host
            volumes:
              list_concat:
                - {get_attr: [ContainersCommon, volumes]}
                -
                  - /var/lib/kolla/config_files/clustercheck.json:/var/lib/kolla/config_files/config.json
                  - /var/lib/config-data/puppet-generated/clustercheck/:/var/lib/kolla/config_files/src:ro
                  - /var/lib/mysql:/var/lib/mysql
            environment:
              - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
      host_prep_tasks:
      upgrade_tasks:
      update_tasks:
        # Nothing: It's not managed by pacemaker, so let paunch do it.
      post_upgrade_tasks:
        - when: step|int == 1
          import_role:
            name: tripleo-docker-rm
          vars:
            containers_to_rm:
              - clustercheck
            tripleo_container_cli: "docker"