tripleo-heat-templates/deployment/cinder/cinder-common-container-pup...

heat_template_version: wallaby

description: >
  Provides the list of common container volumes and environment used by
  various cinder services.

parameters:
  EndpointMap:
    default: {}
    description: Mapping of service endpoint -> protocol. Typically set
                 via parameter_defaults in the resource registry.
    type: json
  ServiceData:
    default: {}
    description: Dictionary packing service data
    type: json
  ServiceNetMap:
    default: {}
    description: Mapping of service_name -> network name. Typically set
                 via parameter_defaults in the resource registry. Use
                 parameter_merge_strategies to merge it with the defaults.
    type: json
  RoleName:
    default: ''
    description: Role name on which the service is applied
    type: string
  RoleParameters:
    default: {}
    description: Parameters specific to the role
    type: json
  CinderVolumeOptVolumes:
    default: []
    description: list of optional volumes to be mounted
    type: comma_delimited_list
  CinderBackupOptVolumes:
    default: []
    description: list of optional volumes to be mounted
    type: comma_delimited_list
  CinderVolumeOptEnvVars:
    default: {}
    description: hash of optional environment variables
    type: json
  CinderEnableIscsiBackend:
    default: true
    description: Whether or not to enable the iSCSI backend for Cinder
    type: boolean
  CinderLVMLoopDeviceSize:
    default: 10280
    description: The size (in MB) of the loopback file used by the cinder LVM driver.
    type: number
  MultipathdEnable:
    default: false
    description: Whether to enable the multipath daemon
    type: boolean
  CinderVolumeCluster:
    default: ''
    description: >
      The cluster name used for deploying the cinder-volume service in an
      active-active (A/A) configuration. This configuration requires the
      Cinder backend drivers to support A/A, and the cinder-volume service
      must not be managed by pacemaker. If these criteria are not met, the
      cluster name must be left blank.
    type: string
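  # A minimal sketch (assumed, not taken from this template) of how an
  # environment file might enable active-active cinder-volume; the cluster
  # name "tripleo-aa" is purely illustrative:
  #
  #   parameter_defaults:
  #     CinderVolumeCluster: tripleo-aa
  #
  # Leave the parameter at its default ('') when the backend driver does not
  # support A/A or when cinder-volume is managed by pacemaker.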
  EnableInternalTLS:
    type: boolean
    default: false
  EnableEtcdInternalTLS:
    description: Controls whether etcd and the cinder-volume service use TLS
                 for cinder's lock manager, even when the rest of the internal
                 API network is using TLS.
    type: boolean
    default: true
  CephConfigPath:
    type: string
    default: "/var/lib/tripleo-config/ceph"
    description: |
      The path where the Ceph Cluster config files are stored on the host.
  CephClientUserName:
    default: openstack
    type: string
  CephClusterName:
    type: string
    default: ceph
    description: The Ceph cluster name.
    constraints:
    - allowed_pattern: "[a-zA-Z0-9]+"
      description: >
        The Ceph cluster name must be at least 1 character and contain only
        letters and numbers.
  CinderRbdMultiConfig:
    type: json
    default: {}
    description: |
      Dictionary of settings when configuring multiple RBD backends. The
      hash key is the backend name, and the value is a dictionary of parameter
      values unique to that backend. The following parameters are required,
      and must match the corresponding value defined in CephExternalMultiConfig.
        CephClusterName (must match the CephExternalMultiConfig entry's 'cluster')
        CephClusterFSID (must match the CephExternalMultiConfig entry's 'fsid')
      The following parameters are optional, and override the corresponding
      parameter's default value.
        CephClientUserName
        CinderRbdPoolName
        CinderRbdExtraPools
        CinderRbdAvailabilityZone
        CinderRbdFlattenVolumeFromSnapshot
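  # A hypothetical example of a CinderRbdMultiConfig value, assuming a second
  # Ceph cluster named "ceph2" is declared in CephExternalMultiConfig; the
  # backend name, FSID, user, and pool below are illustrative only:
  #
  #   parameter_defaults:
  #     CinderRbdMultiConfig:
  #       tripleo-ceph2:
  #         CephClusterName: ceph2
  #         CephClusterFSID: 1d0f5b03-0c24-4e31-bf9f-5f5e2f7f0a11
  #         CephClientUserName: openstack
  #         CinderRbdPoolName: volumes2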

conditions:
  cvol_active_active_tls_enabled:
    and:
    - not: {equals: [{get_param: CinderVolumeCluster}, '']}
    - {get_param: EnableInternalTLS}
    - {get_param: EnableEtcdInternalTLS}

resources:
  ContainersCommon:
    type: ../containers-common.yaml

outputs:
  cinder_common_host_prep_tasks:
    description: Common host prep tasks for cinder-volume and cinder-backup services
    value: &cinder_common_host_prep_tasks
      - name: create fcontext entry for cinder data
        sefcontext:
          target: "/var/lib/cinder(/.*)?"
          setype: container_file_t
          state: present
      - name: create persistent directories
        file:
          path: "{{ item.path }}"
          state: directory
          setype: "{{ item.setype }}"
          mode: "{{ item.mode|default(omit) }}"
        with_items:
          - { 'path': /var/log/containers/cinder, 'setype': container_file_t, 'mode': '0750' }
          - { 'path': /var/lib/cinder, 'setype': container_file_t }
      - name: ensure ceph configurations exist
        file:
          path: {get_param: CephConfigPath}
          state: directory

  cinder_common_volumes:
    description: Common volumes for all cinder services
    value: &cinder_common_volumes
      list_concat:
        - {get_attr: [ContainersCommon, volumes]}
        - - /var/lib/config-data/puppet-generated/cinder:/var/lib/kolla/config_files/src:ro
          - /var/log/containers/cinder:/var/log/cinder:z
        - if:
          - cvol_active_active_tls_enabled
          - - /etc/pki/tls/certs/etcd.crt:/var/lib/kolla/config_files/src-tls/etc/pki/tls/certs/etcd.crt:ro
            - /etc/pki/tls/private/etcd.key:/var/lib/kolla/config_files/src-tls/etc/pki/tls/private/etcd.key:ro

  cinder_common_kolla_config_files:
    description: Common kolla config_files for cinder-volume and cinder-backup services
    value:
      - source: "/var/lib/kolla/config_files/src/*"
        dest: "/"
        merge: true
        preserve_properties: true
      - source: "/var/lib/kolla/config_files/src-ceph/"
        dest: "/etc/ceph/"
        merge: true
        preserve_properties: true
      - source: "/var/lib/kolla/config_files/src-iscsid/*"
        dest: "/etc/iscsi/"
        merge: true
        preserve_properties: true
      - source: "/var/lib/kolla/config_files/src-tls/*"
        dest: "/"
        merge: true
        preserve_properties: true
        optional: true

  cinder_common_kolla_permissions:
    description: Common kolla permissions for cinder-volume and cinder-backup services
    value:
      list_concat:
        -
          - path: /var/log/cinder
            owner: cinder:cinder
            recurse: true
          - path:
              str_replace:
                template: /etc/ceph/CLUSTER.client.USER.keyring
                params:
                  CLUSTER: {get_param: CephClusterName}
                  USER: {get_param: CephClientUserName}
            owner: cinder:cinder
            perm: '0600'
          - path: /etc/pki/tls/certs/etcd.crt
            owner: cinder:cinder
          - path: /etc/pki/tls/private/etcd.key
            owner: cinder:cinder
        - repeat:
            template:
              path: /etc/ceph/<%keyring%>
              owner: cinder:cinder
              perm: '0600'
            for_each:
              <%keyring%>:
                yaql:
                  expression: let(u => $.data.default_user) -> $.data.multiconfig.values().select("{0}.client.{1}.keyring".format($.CephClusterName, $.get("CephClientUserName", $u)))
                  data:
                    default_user: {get_param: CephClientUserName}
                    multiconfig: {get_param: CinderRbdMultiConfig}
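  # The yaql expression above yields one keyring file name per CinderRbdMultiConfig
  # entry, using that entry's CephClusterName and its CephClientUserName (falling
  # back to the CephClientUserName parameter), e.g. "ceph2.client.openstack.keyring".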

  cinder_volume_host_prep_tasks:
    description: Host prep tasks for the cinder-volume service (HA or non-HA)
    value:
      list_concat:
        - *cinder_common_host_prep_tasks
        -
          - name: cinder_enable_iscsi_backend fact
            set_fact:
              cinder_enable_iscsi_backend: {get_param: CinderEnableIscsiBackend}
          - when: cinder_enable_iscsi_backend|bool
            block:
              - name: ensure LVM rpm dependencies are installed
                package:
                  name: lvm2
                  state: latest
              - name: cinder create LVM volume group dd
                command:
                  list_join:
                  - ''
                  - - 'dd if=/dev/zero of=/var/lib/cinder/cinder-volumes bs=1 count=0 seek='
                    - str_replace:
                        template: VALUE
                        params:
                          VALUE: {get_param: CinderLVMLoopDeviceSize}
                    - 'M'
                args:
                  creates: /var/lib/cinder/cinder-volumes
              - name: Get or create LVM loopback device
                shell: |-
                  exit_code=0
                  existing_device=$(losetup -j /var/lib/cinder/cinder-volumes -l -n -O NAME)
                  if [[ -z "${existing_device}" ]]; then
                    losetup -f /var/lib/cinder/cinder-volumes --show
                    exit_code=2
                  else
                    echo ${existing_device%$'\n'*}
                  fi
                  exit ${exit_code}
                args:
                  executable: /bin/bash
                register: _loopback_device
                changed_when: _loopback_device.rc == 2
                failed_when: _loopback_device.rc not in [0,2]
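              # The shell task above prints the loop device path on stdout; exit
              # code 2 signals that a new loop device was attached (reported as
              # changed), exit code 0 means an existing device was reused, and
              # any other code fails the task.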
              - name: Create LVM volume group
                lvg:
                  vg: "cinder-volumes"
                  pvs: "{{ _loopback_device.stdout }}"
                  state: present
                when:
                  - not (ansible_check_mode | bool)
              - name: cinder create service to run losetup for LVM on startup
                copy:
                  dest: /etc/systemd/system/cinder-lvm-losetup.service
                  content: |
                    [Unit]
                    Description=Cinder LVM losetup
                    DefaultDependencies=no
                    Conflicts=umount.target
                    Requires=lvm2-monitor.service systemd-udev-settle.service
                    Before=local-fs.target umount.target
                    After=var.mount lvm2-monitor.service systemd-udev-settle.service
                    [Service]
                    Type=oneshot
                    ExecStart=/sbin/losetup {{ _loopback_device.stdout }} /var/lib/cinder/cinder-volumes
                    ExecStop=/sbin/losetup -d {{ _loopback_device.stdout }}
                    RemainAfterExit=yes
                    [Install]
                    WantedBy=local-fs-pre.target
                when:
                  - not (ansible_check_mode | bool)
              - name: cinder enable the LVM losetup service
                systemd:
                  name: cinder-lvm-losetup
                  enabled: yes
                  daemon_reload: yes

  cinder_volume_volumes:
    description: Volumes for the cinder-volume container (HA or non-HA)
    value:
      list_concat:
        - *cinder_common_volumes
        - {get_param: CinderVolumeOptVolumes}
        - - /var/lib/kolla/config_files/cinder_volume.json:/var/lib/kolla/config_files/config.json:ro
          - /etc/iscsi:/var/lib/kolla/config_files/src-iscsid:ro
          - list_join:
            - ':'
            - - {get_param: CephConfigPath}
            - - '/var/lib/kolla/config_files/src-ceph'
            - - 'ro'
          - /lib/modules:/lib/modules:ro
          - /dev/:/dev/
          - /run/:/run/
          - /sys:/sys
          - /var/lib/cinder:/var/lib/cinder:z
          - /var/lib/iscsi:/var/lib/iscsi:z
        - if:
          - {get_param: CinderEnableIscsiBackend}
          - - /etc/target:/etc/target:z
        - if:
          - {get_param: MultipathdEnable}
          - - /etc/multipath:/etc/multipath:z
            - /etc/multipath.conf:/etc/multipath.conf:ro

  cinder_volume_environment:
    description: Docker environment for the cinder-volume container (HA or non-HA)
    value:
      map_merge:
        - {get_param: CinderVolumeOptEnvVars}
        - KOLLA_CONFIG_STRATEGY: COPY_ALWAYS

  cinder_backup_host_prep_tasks:
    description: Host prep tasks for the cinder-backup service (HA or non-HA)
    value: *cinder_common_host_prep_tasks

  cinder_backup_volumes:
    description: Volumes for the cinder-backup container (HA or non-HA)
    value:
      list_concat:
        - *cinder_common_volumes
        - {get_param: CinderBackupOptVolumes}
        -
          - /var/lib/kolla/config_files/cinder_backup.json:/var/lib/kolla/config_files/config.json:ro
          - /etc/iscsi:/var/lib/kolla/config_files/src-iscsid:ro
          - list_join:
            - ':'
            - - {get_param: CephConfigPath}
            - - '/var/lib/kolla/config_files/src-ceph'
            - - 'ro'
          - /dev/:/dev/
          - /run/:/run/
          - /sys:/sys
          - /lib/modules:/lib/modules:ro
          - /var/lib/cinder:/var/lib/cinder:z
          - /var/lib/iscsi:/var/lib/iscsi:z
        - if:
          - {get_param: MultipathdEnable}
          - - /etc/multipath:/etc/multipath:z

  cinder_backup_environment:
    description: Docker environment for the cinder-backup container (HA or non-HA)
    value:
      KOLLA_CONFIG_STRATEGY: COPY_ALWAYS