tripleo-heat-templates/docker/services/messaging/rpc-rabbitmq.yaml
Rabi Mishra 87bc72a37c Use net=none for *_init_log(s) containers
We are currently using the default docker0 (docker) / cni0 (podman)
bridges for these containers even though they need no network access.
Using net=none removes the dependency on those default bridges (a
sketch of the pattern is shown after the list below).

Other containers also changed because they have the same issue:
- ironic_inspector_init_dnsmasq_dhcp_hostsdir
- nova_statedir_owner
- swift_setup_srv
- swift_copy_rings
- horizon_fix_perms
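
The pattern in each template is a one-line switch on the init/log
container definition, e.g. (a sketch only; <service>_init_logs stands
for the container entries listed above):

    <service>_init_logs:
      start_order: 0
      detach: false
      net: none   # previously no net: key, so the default bridge was used
      ...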

Change-Id: Id3e60de7bb5a13e32544bed76e4970d53a9257d4
2019-01-21 13:43:07 +05:30


heat_template_version: rocky

description: >
  OpenStack containerized Rabbitmq service

parameters:
  DockerRabbitmqImage:
    description: image
    type: string
  DockerRabbitmqConfigImage:
    description: The container image to use for the rabbitmq config_volume
    type: string
  EndpointMap:
    default: {}
    description: Mapping of service endpoint -> protocol. Typically set
                 via parameter_defaults in the resource registry.
    type: json
  ServiceData:
    default: {}
    description: Dictionary packing service data
    type: json
  ServiceNetMap:
    default: {}
    description: Mapping of service_name -> network name. Typically set
                 via parameter_defaults in the resource registry. This
                 mapping overrides those in ServiceNetMapDefaults.
    type: json
  DefaultPasswords:
    default: {}
    type: json
  RoleName:
    default: ''
    description: Role name on which the service is applied
    type: string
  RoleParameters:
    default: {}
    description: Parameters specific to the role
    type: json
  RabbitCookie:
    type: string
    default: ''
    hidden: true
  EnableInternalTLS:
    type: boolean
    default: false
  InternalTLSCAFile:
    default: '/etc/ipa/ca.crt'
    type: string
    description: Specifies the default CA cert to use if TLS is used for
                 services in the internal network.

conditions:

  internal_tls_enabled: {equals: [{get_param: EnableInternalTLS}, true]}
resources:

  ContainersCommon:
    type: ../containers-common.yaml

  RabbitmqBase:
    type: ../../../puppet/services/messaging/rpc-rabbitmq.yaml
    properties:
      EndpointMap: {get_param: EndpointMap}
      ServiceData: {get_param: ServiceData}
      ServiceNetMap: {get_param: ServiceNetMap}
      DefaultPasswords: {get_param: DefaultPasswords}
      RoleName: {get_param: RoleName}
      RoleParameters: {get_param: RoleParameters}

outputs:
  role_data:
    description: Role data for the Rabbitmq API role.
    value:
      service_name: {get_attr: [RabbitmqBase, role_data, service_name]}
      # RabbitMQ plugins initialization occurs on every node
      global_config_settings: {get_attr: [RabbitmqBase, role_data, global_config_settings]}
      config_settings:
        map_merge:
          - {get_attr: [RabbitmqBase, role_data, config_settings]}
          - rabbitmq::admin_enable: false
          - if:
            - internal_tls_enabled
            - tripleo::certmonger::rabbitmq::postsave_cmd: "true" # TODO: restart the rabbitmq container here
            - {}
      service_config_settings: {get_attr: [RabbitmqBase, role_data, service_config_settings]}
      # BEGIN DOCKER SETTINGS
      puppet_config:
        config_volume: rabbitmq
        step_config:
          list_join:
            - "\n"
            - - "['Rabbitmq_policy', 'Rabbitmq_user'].each |String $val| { noop_resource($val) }"
              - get_attr: [RabbitmqBase, role_data, step_config]
        config_image: &rabbitmq_config_image {get_param: DockerRabbitmqConfigImage}
      kolla_config:
        /var/lib/kolla/config_files/rabbitmq.json:
          command: /usr/lib/rabbitmq/bin/rabbitmq-server
          config_files:
            - source: "/var/lib/kolla/config_files/src/*"
              dest: "/"
              merge: true
              preserve_properties: true
            - source: "/var/lib/kolla/config_files/src-tls/*"
              dest: "/"
              merge: true
              preserve_properties: true
              optional: true
          permissions:
            - path: /var/lib/rabbitmq
              owner: rabbitmq:rabbitmq
              recurse: true
            - path: /etc/pki/tls/certs/rabbitmq.crt
              owner: rabbitmq:rabbitmq
              optional: true
            - path: /etc/pki/tls/private/rabbitmq.key
              owner: rabbitmq:rabbitmq
              optional: true
      docker_config:
        # Kolla_bootstrap runs before permissions are set by kolla_config
        step_1:
          rabbitmq_init_logs:
            start_order: 0
            detach: false
            image: &rabbitmq_image {get_param: DockerRabbitmqImage}
            net: none
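            # This chown-only init container needs no networking, so it runs
            # with net=none instead of attaching to the default docker0/cni0
            # bridge (see the commit message above).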
            privileged: false
            user: root
            volumes:
              - /var/log/containers/rabbitmq:/var/log/rabbitmq:z
            command: ['/bin/bash', '-c', 'chown -R rabbitmq:rabbitmq /var/log/rabbitmq']
          rabbitmq_bootstrap:
            start_order: 1
            detach: false
            image: *rabbitmq_image
            net: host
            privileged: false
            volumes:
              list_concat:
                - {get_attr: [ContainersCommon, volumes]}
                -
                  - /var/lib/kolla/config_files/rabbitmq.json:/var/lib/kolla/config_files/config.json:ro
                  - /var/lib/config-data/puppet-generated/rabbitmq/:/var/lib/kolla/config_files/src:ro
                  - /var/lib/rabbitmq:/var/lib/rabbitmq:z
                  - /var/log/containers/rabbitmq:/var/log/rabbitmq:z
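                # When internal TLS is enabled, the list_join below renders a
                # read-only bind mount of the CA file at the same path inside
                # the container; with the default InternalTLSCAFile that is
                # /etc/ipa/ca.crt:/etc/ipa/ca.crt:ro.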
                - if:
                  - internal_tls_enabled
                  -
                    - list_join:
                      - ':'
                      - - {get_param: InternalTLSCAFile}
                        - {get_param: InternalTLSCAFile}
                        - 'ro'
                    - /etc/pki/tls/certs/rabbitmq.crt:/var/lib/kolla/config_files/src-tls/etc/pki/tls/certs/rabbitmq.crt:ro
                    - /etc/pki/tls/private/rabbitmq.key:/var/lib/kolla/config_files/src-tls/etc/pki/tls/private/rabbitmq.key:ro
                  - null
            environment:
              - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
              - KOLLA_BOOTSTRAP=True
              -
                list_join:
                  - '='
                  - - 'RABBITMQ_CLUSTER_COOKIE'
                    -
                      yaql:
                        expression: $.data.passwords.where($ != '').first()
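                        # Selects the first non-empty cookie: the
                        # user-supplied RabbitCookie if set, otherwise the
                        # generated DefaultPasswords rabbit_cookie.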
                        data:
                          passwords:
                            - {get_param: RabbitCookie}
                            - {get_param: [DefaultPasswords, rabbit_cookie]}
          rabbitmq:
            start_order: 2
            image: *rabbitmq_image
            net: host
            privileged: false
            restart: always
            healthcheck:
              test: /openstack/healthcheck
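              # Assumes the standard TripleO /openstack/healthcheck script is
              # present in the container image.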
            volumes:
              list_concat:
                - {get_attr: [ContainersCommon, volumes]}
                -
                  - /var/lib/kolla/config_files/rabbitmq.json:/var/lib/kolla/config_files/config.json:ro
                  - /var/lib/config-data/puppet-generated/rabbitmq/:/var/lib/kolla/config_files/src:ro
                  - /var/lib/rabbitmq:/var/lib/rabbitmq:z
                  - /var/log/containers/rabbitmq:/var/log/rabbitmq:z
                - if:
                  - internal_tls_enabled
                  -
                    - list_join:
                      - ':'
                      - - {get_param: InternalTLSCAFile}
                        - {get_param: InternalTLSCAFile}
                        - 'ro'
                    - /etc/pki/tls/certs/rabbitmq.crt:/var/lib/kolla/config_files/src-tls/etc/pki/tls/certs/rabbitmq.crt:ro
                    - /etc/pki/tls/private/rabbitmq.key:/var/lib/kolla/config_files/src-tls/etc/pki/tls/private/rabbitmq.key:ro
                  - null
            environment:
              - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
      docker_puppet_tasks:
        # RabbitMQ users and policies initialization occurs only on a single node
        step_2:
          config_volume: 'rabbit_init_tasks'
          puppet_tags: 'rabbitmq_policy,rabbitmq_user'
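          # Only puppet resources matching the tags above are applied in this
          # one-off docker-puppet run.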
          step_config: 'include ::tripleo::profile::base::rabbitmq'
          config_image: *rabbitmq_config_image
          volumes:
            - /var/lib/config-data/rabbitmq/etc/rabbitmq/:/etc/rabbitmq/:ro
            - /var/lib/rabbitmq:/var/lib/rabbitmq:z
      metadata_settings:
        get_attr: [RabbitmqBase, role_data, metadata_settings]
      host_prep_tasks:
        - name: create persistent directories
          file:
            path: "{{ item.path }}"
            state: directory
            setype: "{{ item.setype }}"
          with_items:
            - { 'path': /var/log/containers/rabbitmq, 'setype': svirt_sandbox_file_t }
            - { 'path': /var/lib/rabbitmq, 'setype': svirt_sandbox_file_t }
            - { 'path': /var/log/rabbitmq, 'setype': svirt_sandbox_file_t }
        - name: rabbitmq logs readme
          copy:
            dest: /var/log/rabbitmq/readme.txt
            content: |
              Log files from rabbitmq containers can be found under
              /var/log/containers/rabbitmq.
          ignore_errors: true
      upgrade_tasks: []
      update_tasks:
        # TODO: Are we sure we want to support this? A rolling update
        # without pacemaker may fail. Do we test this? In any case,
        # this is under paunch control, so the latest image should be
        # pulled in by the deploy steps. The same question applies to
        # other containers usually managed by pacemaker.
      post_upgrade_tasks:
        - when: step|int == 1
          import_role:
            name: tripleo-docker-rm
          vars:
            containers_to_rm:
              - rabbitmq