heat_template_version: rocky

description: >
  Provides the list of common container volumes and environment used by
  various cinder services.

parameters:
  EndpointMap:
    default: {}
    description: Mapping of service endpoint -> protocol. Typically set
                 via parameter_defaults in the resource registry.
    type: json
  ServiceData:
    default: {}
    description: Dictionary packing service data
    type: json
  ServiceNetMap:
    default: {}
    description: Mapping of service_name -> network name. Typically set
                 via parameter_defaults in the resource registry. This
                 mapping overrides those in ServiceNetMapDefaults.
    type: json
  DefaultPasswords:
    default: {}
    type: json
  RoleName:
    default: ''
    description: Role name on which the service is applied
    type: string
  RoleParameters:
    default: {}
    description: Parameters specific to the role
    type: json
  CinderVolumeOptVolumes:
    default: []
    description: List of optional volumes to be mounted
    type: comma_delimited_list
  CinderBackupOptVolumes:
    default: []
    description: List of optional volumes to be mounted
    type: comma_delimited_list
  CinderVolumeOptEnvVars:
    default: {}
    description: Hash of optional environment variables
    type: json
  CinderEnableIscsiBackend:
    default: true
    description: Whether to enable the iSCSI backend for Cinder.
    type: boolean
  CinderLVMLoopDeviceSize:
    default: 10280
    description: The size (in MB) of the loopback file used by the cinder LVM driver.
    type: number
  MultipathdEnable:
    default: false
    description: Whether to enable the multipath daemon
    type: boolean
  CinderVolumeCluster:
    default: ''
    description: >
      The cluster name used for deploying the cinder-volume service in an
      active-active (A/A) configuration. This configuration requires that
      the Cinder backend drivers support A/A, and that the cinder-volume
      service is not managed by pacemaker. If these criteria are not met,
      the cluster name must be left blank.
    type: string
  EnableInternalTLS:
    type: boolean
    default: false
  EnableEtcdInternalTLS:
    description: Controls whether etcd and the cinder-volume service use TLS
                 for cinder's lock manager, even when the rest of the internal
                 API network is using TLS.
    type: boolean
    default: false
  CephClientUserName:
    default: openstack
    type: string
  CephClusterName:
    type: string
    default: ceph
    description: The Ceph cluster name.
    constraints:
      - allowed_pattern: "[a-zA-Z0-9]+"
        description: >
          The Ceph cluster name must be at least 1 character and contain only
          letters and numbers.
  CephExternalMultiConfig:
    type: json
    hidden: true
    description: |
      List of maps describing extra overrides which will be applied when
      configuring extra external Ceph clusters. If this list is non-empty,
      ceph-ansible will run an extra count(list) times using the same
      parameters as the first run, except each parameter within each map will
      override the defaults. If the following were used, the second run would
      configure the overcloud to also use the ceph2 cluster with all the
      previous parameters, except /etc/ceph/ceph2.conf would have a mon_host
      entry containing the value of external_cluster_mon_ips below, and not
      the default CephExternalMonHost. Subsequent ceph-ansible runs are
      restricted to just ceph clients. CephExternalMultiConfig may not be
      used to deploy additional internal Ceph clusters within one Heat stack.
      Each map in the list should contain ceph-ansible parameters, not
      tripleo-heat-templates parameters.
        - cluster: 'ceph2'
          fsid: 'e2cba068-5f14-4b0f-b047-acf375c0004a'
          external_cluster_mon_ips: '172.18.0.5,172.18.0.6,172.18.0.7'
          keys:
            - name: "client.openstack"
              caps:
                mgr: "allow *"
                mon: "profile rbd"
                osd: "osd: profile rbd pool=volumes, profile rbd pool=backups, profile rbd pool=vms, profile rbd pool=images"
              key: "AQCwmeRcAAAAABAA6SQU/bGqFjlfLro5KxrB1Q=="
              mode: "0600"
          dashboard_enabled: false
    default: []
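# The parameters above are normally supplied through an environment file
# rather than by editing this template. The snippet below is only an
# illustrative sketch of such a parameter_defaults block; the values shown
# (cluster name, FSID, monitor IPs) are placeholders for this example, not
# defaults shipped by this template.
#
#   parameter_defaults:
#     CinderEnableIscsiBackend: false
#     CinderVolumeCluster: cinder-aa        # hypothetical A/A cluster name
#     CephExternalMultiConfig:
#       - cluster: 'ceph2'
#         fsid: 'e2cba068-5f14-4b0f-b047-acf375c0004a'
#         external_cluster_mon_ips: '172.18.0.5,172.18.0.6,172.18.0.7'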
conditions:
  cinder_iscsi_backend_enabled: {equals: [{get_param: CinderEnableIscsiBackend}, true]}
  multipathd_enabled: {equals: [{get_param: MultipathdEnable}, true]}
  cvol_active_active_tls_enabled:
    and:
      - not: {equals: [{get_param: CinderVolumeCluster}, '']}
      - equals: [{get_param: EnableInternalTLS}, true]
      - equals: [{get_param: EnableEtcdInternalTLS}, true]

resources:
  ContainersCommon:
    type: ../containers-common.yaml

outputs:

  cinder_common_host_prep_tasks:
    description: Common host prep tasks for cinder-volume and cinder-backup services
    value: &cinder_common_host_prep_tasks
      - name: create persistent directories
        file:
          path: "{{ item.path }}"
          state: directory
          setype: "{{ item.setype }}"
          mode: "{{ item.mode|default(omit) }}"
        with_items:
          - { 'path': /var/log/containers/cinder, 'setype': container_file_t, 'mode': '0750' }
          - { 'path': /var/lib/cinder, 'setype': container_file_t }
      - name: ensure ceph configurations exist
        file:
          path: /etc/ceph
          state: directory

  cinder_common_volumes:
    description: Common volumes for all cinder services
    value: &cinder_common_volumes
      list_concat:
        - {get_attr: [ContainersCommon, volumes]}
        -
          - /var/lib/config-data/puppet-generated/cinder:/var/lib/kolla/config_files/src:ro
          - /var/log/containers/cinder:/var/log/cinder:z
        - if:
            - cvol_active_active_tls_enabled
            -
              - /etc/pki/tls/certs/etcd.crt:/var/lib/kolla/config_files/src-tls/etc/pki/tls/certs/etcd.crt:ro
              - /etc/pki/tls/private/etcd.key:/var/lib/kolla/config_files/src-tls/etc/pki/tls/private/etcd.key:ro
            - []

  cinder_common_kolla_config_files:
    description: Common kolla config_files for cinder-volume and cinder-backup services
    value:
      - source: "/var/lib/kolla/config_files/src/*"
        dest: "/"
        merge: true
        preserve_properties: true
      - source: "/var/lib/kolla/config_files/src-ceph/"
        dest: "/etc/ceph/"
        merge: true
        preserve_properties: true
      - source: "/var/lib/kolla/config_files/src-iscsid/*"
        dest: "/etc/iscsi/"
        merge: true
        preserve_properties: true
      - source: "/var/lib/kolla/config_files/src-tls/*"
        dest: "/"
        merge: true
        preserve_properties: true
        optional: true

  cinder_common_kolla_permissions:
    description: Common kolla permissions for cinder-volume and cinder-backup services
    value:
      list_concat:
        -
          - path: /var/log/cinder
            owner: cinder:cinder
            recurse: true
          - path:
              str_replace:
                template: /etc/ceph/CLUSTER.client.USER.keyring
                params:
                  CLUSTER: {get_param: CephClusterName}
                  USER: {get_param: CephClientUserName}
            owner: cinder:cinder
            perm: '0600'
          - path: /etc/pki/tls/certs/etcd.crt
            owner: cinder:cinder
          - path: /etc/pki/tls/private/etcd.key
            owner: cinder:cinder
        - repeat:
            template:
              path: /etc/ceph/<%cluster%>.client.*.keyring
              owner: cinder:cinder
              perm: '0600'
            for_each:
              <%cluster%>:
                yaql:
                  expression: $.data.multiconfig.select($.cluster)
                  data:
                    multiconfig: {get_param: CephExternalMultiConfig}
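# A rough illustration of what the repeat/yaql block above evaluates to: yaql
# selects the 'cluster' value from every map in CephExternalMultiConfig, and
# repeat then emits one permissions entry per external cluster. With the ceph2
# example from the CephExternalMultiConfig description, the generated list
# item would be approximately:
#
#   - path: /etc/ceph/ceph2.client.*.keyring
#     owner: cinder:cinder
#     perm: '0600'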
  cinder_volume_host_prep_tasks:
    description: Host prep tasks for the cinder-volume service (HA or non-HA)
    value:
      list_concat:
        - *cinder_common_host_prep_tasks
        -
          - name: cinder_enable_iscsi_backend fact
            set_fact:
              cinder_enable_iscsi_backend: {if: [cinder_iscsi_backend_enabled, true, false]}
          - when: cinder_enable_iscsi_backend|bool
            block:
              - name: ensure LVM rpm dependencies are installed
                package:
                  name: lvm2
                  state: latest
              - name: cinder create LVM volume group dd
                command:
                  list_join:
                    - ''
                    - - 'dd if=/dev/zero of=/var/lib/cinder/cinder-volumes bs=1 count=0 seek='
                      - str_replace:
                          template: VALUE
                          params:
                            VALUE: {get_param: CinderLVMLoopDeviceSize}
                      - 'M'
                args:
                  creates: /var/lib/cinder/cinder-volumes
              - name: Get or create LVM loopback device
                shell: |-
                  exit_code=0
                  existing_device=$(losetup -j /var/lib/cinder/cinder-volumes -l -n -O NAME)
                  if [[ -z "${existing_device}" ]]; then
                    losetup -f /var/lib/cinder/cinder-volumes --show
                    exit_code=2
                  else
                    echo ${existing_device%$'\n'*}
                  fi
                  exit ${exit_code}
                args:
                  executable: /bin/bash
                register: _loopback_device
                changed_when: _loopback_device.rc == 2
                failed_when: _loopback_device.rc not in [0,2]
              - name: Create LVM volume group
                lvg:
                  vg: "cinder-volumes"
                  pvs: "{{ _loopback_device.stdout }}"
                  state: present
                when:
                  - not (ansible_check_mode | bool)
              - name: cinder create service to run losetup for LVM on startup
                copy:
                  dest: /etc/systemd/system/cinder-lvm-losetup.service
                  content: |
                    [Unit]
                    Description=Cinder LVM losetup
                    DefaultDependencies=no
                    Conflicts=umount.target
                    Requires=lvm2-monitor.service systemd-udev-settle.service
                    Before=local-fs.target umount.target
                    After=var.mount lvm2-monitor.service systemd-udev-settle.service

                    [Service]
                    Type=oneshot
                    ExecStart=/sbin/losetup {{ _loopback_device.stdout }} /var/lib/cinder/cinder-volumes
                    ExecStop=/sbin/losetup -d {{ _loopback_device.stdout }}
                    RemainAfterExit=yes

                    [Install]
                    WantedBy=local-fs-pre.target
                when:
                  - not (ansible_check_mode | bool)
              - name: cinder enable the LVM losetup service
                systemd:
                  name: cinder-lvm-losetup
                  enabled: yes
                  daemon_reload: yes

  cinder_volume_volumes:
    description: Volumes for the cinder-volume container (HA or non-HA)
    value:
      list_concat:
        - *cinder_common_volumes
        - {get_param: CinderVolumeOptVolumes}
        -
          - /var/lib/kolla/config_files/cinder_volume.json:/var/lib/kolla/config_files/config.json:ro
          - /etc/iscsi:/var/lib/kolla/config_files/src-iscsid:ro
          - /etc/ceph:/var/lib/kolla/config_files/src-ceph:ro
          - /lib/modules:/lib/modules:ro
          - /dev/:/dev/
          - /run/:/run/
          - /sys:/sys
          - /var/lib/cinder:/var/lib/cinder:z
          - /var/lib/iscsi:/var/lib/iscsi:z
        - if:
            - cinder_iscsi_backend_enabled
            -
              - /etc/target:/etc/target:z
            - []
        - if:
            - multipathd_enabled
            -
              - /etc/multipath:/etc/multipath:z
              - /etc/multipath.conf:/etc/multipath.conf:ro
            - []

  cinder_volume_environment:
    description: Docker environment for the cinder-volume container (HA or non-HA)
    value:
      map_merge:
        - {get_param: CinderVolumeOptEnvVars}
        - KOLLA_CONFIG_STRATEGY: COPY_ALWAYS

  cinder_backup_host_prep_tasks:
    description: Host prep tasks for the cinder-backup service (HA or non-HA)
    value: *cinder_common_host_prep_tasks

  cinder_backup_volumes:
    description: Volumes for the cinder-backup container (HA or non-HA)
    value:
      list_concat:
        - *cinder_common_volumes
        - {get_param: CinderBackupOptVolumes}
        -
          - /var/lib/kolla/config_files/cinder_backup.json:/var/lib/kolla/config_files/config.json:ro
          - /etc/iscsi:/var/lib/kolla/config_files/src-iscsid:ro
          - /etc/ceph:/var/lib/kolla/config_files/src-ceph:ro
          - /dev/:/dev/
          - /run/:/run/
          - /sys:/sys
          - /lib/modules:/lib/modules:ro
          - /var/lib/cinder:/var/lib/cinder:z
          - /var/lib/iscsi:/var/lib/iscsi:z
        - if:
            - multipathd_enabled
            -
              - /etc/multipath:/etc/multipath:z
            - []

  cinder_backup_environment:
    description: Docker environment for the cinder-backup container (HA or non-HA)
    value:
      KOLLA_CONFIG_STRATEGY: COPY_ALWAYS
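# Illustrative only: this file is intended to be instantiated as a nested
# resource by the cinder-volume and cinder-backup service templates, which then
# read the outputs above with get_attr. The resource name, relative path, and
# property wiring below are assumptions for the sketch, not defined here.
#
#   resources:
#     CinderCommon:
#       type: ./cinder-common-container-puppet.yaml
#       properties:
#         EndpointMap: {get_param: EndpointMap}
#         ServiceData: {get_param: ServiceData}
#         ServiceNetMap: {get_param: ServiceNetMap}
#         DefaultPasswords: {get_param: DefaultPasswords}
#         RoleName: {get_param: RoleName}
#         RoleParameters: {get_param: RoleParameters}
#
#   # ...and later, inside the container definition:
#   #   volumes: {get_attr: [CinderCommon, cinder_volume_volumes]}
#   #   environment: {get_attr: [CinderCommon, cinder_volume_environment]}
#   #   host_prep_tasks: {get_attr: [CinderCommon, cinder_volume_host_prep_tasks]}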