tripleo-heat-templates/docker/services/ovn-controller.yaml
Jose Luis Franco Arza 40467b0f3f [Rocky/Queens Only] Remove pre-upgrade validation tasks in cont services.
The pre-upgrade validation tasks in the containerized services made
sense when upgrading from a non-containerized to a containerized
overcloud (Ocata to Pike). However, we kept them in Queens and Rocky
with the idea of using them during the undercloud upgrade from
non-containerized to containerized (Queens to Rocky), but they were
skipped there too [0]. In the upgrades currently being run by
operators, these validations are causing more issues than anticipated,
so let's remove them from all the containerized services in Queens
and Rocky.

Closes-Bug: #1829858

[0] - https://github.com/openstack/python-tripleoclient/blob/stable/rocky/tripleoclient/v1/tripleo_deploy.py#L833

Change-Id: If99ea62b6cefb140a9c918b8f6a774658c52d54b
(cherry picked from commit 7e0a4d0e52)
2019-06-04 14:07:47 +02:00

heat_template_version: queens

description: >
  OpenStack containerized Ovn Controller agent.
parameters:
  EndpointMap:
    default: {}
    description: Mapping of service endpoint -> protocol. Typically set
                 via parameter_defaults in the resource registry.
    type: json
  ServiceNetMap:
    default: {}
    description: Mapping of service_name -> network name. Typically set
                 via parameter_defaults in the resource registry. This
                 mapping overrides those in ServiceNetMapDefaults.
    type: json
  ServiceData:
    default: {}
    description: Dictionary packing service data
    type: json
  DockerOvnControllerImage:
    description: The container image to use for the ovn_controller container
    type: string
  DockerOvnControllerConfigImage:
    description: The container image to use for the ovn_controller config_volume
    type: string
  DefaultPasswords:
    default: {}
    type: json
  RoleName:
    default: ''
    description: Role name on which the service is applied
    type: string
  RoleParameters:
    default: {}
    description: Parameters specific to the role
    type: json
resources:

  ContainersCommon:
    type: ./containers-common.yaml

  OvnControllerBase:
    type: ../../puppet/services/ovn-controller.yaml
    properties:
      EndpointMap: {get_param: EndpointMap}
      ServiceData: {get_param: ServiceData}
      ServiceNetMap: {get_param: ServiceNetMap}
      DefaultPasswords: {get_param: DefaultPasswords}
      RoleName: {get_param: RoleName}
      RoleParameters: {get_param: RoleParameters}
outputs:
  role_data:
    description: Role data for the Ovn Controller agent.
    value:
      service_name: {get_attr: [OvnControllerBase, role_data, service_name]}
      config_settings:
        map_merge:
          - get_attr: [OvnControllerBase, role_data, config_settings]
      logging_source: {get_attr: [OvnControllerBase, role_data, logging_source]}
      logging_groups: {get_attr: [OvnControllerBase, role_data, logging_groups]}
      service_config_settings: {get_attr: [OvnControllerBase, role_data, service_config_settings]}
      # BEGIN DOCKER SETTINGS
      puppet_config:
        puppet_tags: vs_config,exec
        config_volume: ovn_controller
        step_config:
          get_attr: [OvnControllerBase, role_data, step_config]
        config_image: {get_param: DockerOvnControllerConfigImage}
        # We need to mount /run for the puppet_config step. This is because
        # puppet-vswitch runs the commands "ovs-vsctl set open_vswitch . external_ids:..."
        # to configure the required parameters in the ovs db which will be read
        # by ovn-controller. And ovs-vsctl talks to the ovsdb-server (hosting conf.db)
        # on the unix domain socket - /run/openvswitch/db.sock
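        # For illustration only (the address below is hypothetical, not set by
        # this template), the calls puppet-vswitch ends up making look roughly like:
        #   ovs-vsctl set open_vswitch . external_ids:ovn-remote=tcp:192.0.2.10:6642
        #   ovs-vsctl set open_vswitch . external_ids:ovn-encap-type=geneve
        # with the actual keys and values coming from the service's hiera data.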
        volumes:
          - /lib/modules:/lib/modules:ro
          - /run/openvswitch:/run/openvswitch
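      # Roughly: the JSON file below is read by kolla_start inside the container,
      # which applies the permissions entries and then execs the given command
      # (KOLLA_CONFIG_STRATEGY further down controls how config files are copied).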
      kolla_config:
        /var/lib/kolla/config_files/ovn_controller.json:
          command: /usr/bin/ovn-controller --pidfile --log-file unix:/run/openvswitch/db.sock
          permissions:
            - path: /var/log/openvswitch
              owner: root:root
              recurse: true
      docker_config:
        step_4:
          configure_cms_options:
            start_order: 0
            detach: false
            net: host
            privileged: true
            user: root
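            # The one-shot command below reads ovn::controller::ovn_cms_options
            # from hiera and, only when it is non-empty, stores it in the local
            # OVS DB as external_ids:ovn-cms-options for ovn-controller to use.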
            command: ['/bin/bash', '-c', 'CMS_OPTS=$(hiera ovn::controller::ovn_cms_options -c /etc/puppet/hiera.yaml); if [ X"$CMS_OPTS" != X ]; then ovs-vsctl set open . external_ids:ovn-cms-options=$CMS_OPTS; fi']
            image: &ovn_controller_image {get_param: DockerOvnControllerImage}
            volumes:
              list_concat:
                - {get_attr: [ContainersCommon, volumes]}
                -
                  - /lib/modules:/lib/modules:ro
                  - /run/openvswitch:/run/openvswitch
          ovn_controller:
            start_order: 1
            image: *ovn_controller_image
            net: host
            privileged: true
            user: root
            restart: always
            volumes:
              - /var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro
              - /lib/modules:/lib/modules:ro
              # TODO(numans): This is temporary. Mount /run/openvswitch once
              # the openvswitch systemd script is fixed to not delete the
              # /run/openvswitch folder on the host when the openvswitch
              # service is stopped.
              - /run:/run
              - /var/log/containers/openvswitch:/var/log/openvswitch
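            # COPY_ALWAYS makes kolla_start re-copy the config files on every
            # container (re)start rather than only on the first start.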
            environment:
              - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
      host_prep_tasks:
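        # Ansible tasks run on the host before the containers are started.
        # svirt_sandbox_file_t is the SELinux type that allows container
        # processes to access these host directories.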
        - name: create persistent directories
          file:
            path: "{{ item.path }}"
            state: directory
            setype: "{{ item.setype }}"
          with_items:
            - { 'path': /var/log/containers/openvswitch, 'setype': svirt_sandbox_file_t }
            - { 'path': /var/log/openvswitch, 'setype': svirt_sandbox_file_t }
        - name: openvswitch logs readme
          copy:
            dest: /var/log/openvswitch/readme.txt
            content: |
              Log files from openvswitch containers can be found under
              /var/log/containers/openvswitch.
          ignore_errors: true
      upgrade_tasks:
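        # Step 0 records whether the baremetal ovn-controller service is
        # enabled; step 2 stops and disables it so the containerized service
        # can take over.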
        - when: step|int == 0
          tags: common
          block:
            - name: Check if ovn_controller is deployed
              command: systemctl is-enabled --quiet ovn-controller
              ignore_errors: True
              register: ovn_controller_enabled_result
            - name: Set fact ovn_controller_enabled
              set_fact:
                ovn_controller_enabled: "{{ ovn_controller_enabled_result.rc == 0 }}"
        - when: step|int == 2
          block:
            - name: Stop and disable ovn-controller service
              when: ovn_controller_enabled|bool
              service: name=ovn-controller state=stopped enabled=no