tripleo-heat-templates/docker/services/opendaylight-api.yaml
Jose Luis Franco Arza 40467b0f3f [Rocky/Queens Only] Remove pre-upgrade validation tasks in cont services.
The pre-upgrade validation tasks in the containerized services made
sense when upgrading from non-containerized to containerized overcloud
(ocata to pike), however we kept them for queens and rocky
release thinking about using them during the undercloud upgrade from
non-containerized to containerized (queens to rocky) but they
were skipped there too [0]. It has been observed in the current
upgrades being carried out by operators that these validations are
causing more issues than anticipated, so let's get rid of
them from all the containerized services in Queens and Rocky.

Closes-Bug: #1829858

[0] - https://github.com/openstack/python-tripleoclient/blob/stable/rocky/tripleoclient/v1/tripleo_deploy.py#L833

Change-Id: If99ea62b6cefb140a9c918b8f6a774658c52d54b
(cherry picked from commit 7e0a4d0e52)
2019-06-04 14:07:47 +02:00

290 lines
12 KiB
YAML

# Containerized OpenDaylight API service for TripleO (Queens).
# Wraps the baremetal puppet template (../../puppet/services/opendaylight-api.yaml)
# with docker/kolla settings plus Ansible host-prep, upgrade and update tasks.
heat_template_version: queens

description: >
  OpenStack containerized OpenDaylight API service

parameters:
  DockerOpendaylightApiImage:
    description: image
    type: string
  DockerOpendaylightConfigImage:
    description: image
    type: string
  EndpointMap:
    default: {}
    description: Mapping of service endpoint -> protocol. Typically set
                 via parameter_defaults in the resource registry.
    type: json
  ServiceData:
    default: {}
    description: Dictionary packing service data
    type: json
  ServiceNetMap:
    default: {}
    description: Mapping of service_name -> network name. Typically set
                 via parameter_defaults in the resource registry. This
                 mapping overrides those in ServiceNetMapDefaults.
    type: json
  DefaultPasswords:
    default: {}
    type: json
  RoleName:
    default: ''
    description: Role name on which the service is applied
    type: string
  RoleParameters:
    default: {}
    description: Parameters specific to the role
    type: json
  EnableInternalTLS:
    type: boolean
    default: false
  InternalTLSCAFile:
    default: '/etc/ipa/ca.crt'
    type: string
    description: Specifies the default CA cert to use if TLS is used for
                 services in the internal network.
  ODLUpdateLevel:
    default: 1
    description: Specify the level of update
    type: number
    constraints:
      - allowed_values:
          - 1
          - 2

conditions:
  internal_tls_enabled: {equals: [{get_param: EnableInternalTLS}, true]}

resources:

  ContainersCommon:
    type: ./containers-common.yaml

  OpenDaylightBase:
    type: ../../puppet/services/opendaylight-api.yaml
    properties:
      EndpointMap: {get_param: EndpointMap}
      ServiceData: {get_param: ServiceData}
      ServiceNetMap: {get_param: ServiceNetMap}
      DefaultPasswords: {get_param: DefaultPasswords}
      RoleName: {get_param: RoleName}
      RoleParameters: {get_param: RoleParameters}

outputs:
  role_data:
    description: Role data for the OpenDaylight API role.
    value:
      service_name: {get_attr: [OpenDaylightBase, role_data, service_name]}
      config_settings:
        map_merge:
          - get_attr: [OpenDaylightBase, role_data, config_settings]
          - if:
              - internal_tls_enabled
              - tripleo::certmonger::opendaylight::postsave_cmd: "true"  # TODO: restart the odl container here
              - {}
      logging_source: {get_attr: [OpenDaylightBase, role_data, logging_source]}
      logging_groups: {get_attr: [OpenDaylightBase, role_data, logging_groups]}
      # BEGIN DOCKER SETTINGS
      puppet_config:
        config_volume: opendaylight
        volumes:
          list_concat:
            - if:
                - internal_tls_enabled
                # TLS cert/key plus the CA file, bind-mounted read-only into
                # the config container so puppet can populate the keystore.
                - - /etc/pki/tls/certs/odl.crt:/etc/pki/tls/certs/odl.crt:ro
                  - /etc/pki/tls/private/odl.key:/etc/pki/tls/private/odl.key:ro
                  - list_join:
                      - ':'
                      - - {get_param: InternalTLSCAFile}
                        - {get_param: InternalTLSCAFile}
                        - 'ro'
                - null
        # 'file,concat,file_line,augeas' are included by default
        puppet_tags: odl_user,odl_keystore
        step_config:
          get_attr: [OpenDaylightBase, role_data, step_config]
        config_image: {get_param: DockerOpendaylightConfigImage}
      kolla_config:
        /var/lib/kolla/config_files/opendaylight_api.json:
          command: /opt/opendaylight/bin/karaf server
          config_files:
            - source: "/var/lib/kolla/config_files/src/*"
              dest: "/"
              merge: true
              preserve_properties: true
          permissions:
            - path: /opt/opendaylight
              owner: odl:odl
              recurse: true
      docker_config:
        step_1:
          opendaylight_api:
            start_order: 0
            image: &odl_api_image {get_param: DockerOpendaylightApiImage}
            privileged: false
            net: host
            detach: true
            user: odl
            restart: unless-stopped
            healthcheck:
              test: /openstack/healthcheck
            volumes:
              list_concat:
                - {get_attr: [ContainersCommon, volumes]}
                -
                  - /var/lib/kolla/config_files/opendaylight_api.json:/var/lib/kolla/config_files/config.json:ro
                  - /var/lib/config-data/puppet-generated/opendaylight/:/var/lib/kolla/config_files/src:ro
                  - /var/lib/opendaylight/journal:/opt/opendaylight/journal
                  - /var/lib/opendaylight/snapshots:/opt/opendaylight/snapshots
                  - /var/lib/opendaylight/data:/opt/opendaylight/data
                  - /var/log/containers/opendaylight/karaf/logs:/opt/opendaylight/data/log
            environment:
              - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
      metadata_settings:
        get_attr: [OpenDaylightBase, role_data, metadata_settings]
      host_prep_tasks:
        - name: Delete cache and karaf folder
          file:
            path: "{{ item }}"
            state: absent
          with_items:
            - /var/lib/opendaylight/data/cache
            - /var/lib/config-data/puppet-generated/opendaylight/opt/opendaylight/etc/opendaylight/karaf
        - name: create persistent directories
          file:
            path: "{{ item.path }}"
            state: directory
            setype: "{{ item.setype }}"
          with_items:
            - { 'path': /var/lib/opendaylight/snapshots, 'setype': svirt_sandbox_file_t }
            - { 'path': /var/lib/opendaylight/journal, 'setype': svirt_sandbox_file_t }
            - { 'path': /var/lib/opendaylight/data, 'setype': svirt_sandbox_file_t }
            - { 'path': /var/log/opendaylight, 'setype': svirt_sandbox_file_t }
            - { 'path': /var/log/containers/opendaylight/karaf/logs, 'setype': svirt_sandbox_file_t }
        - name: opendaylight logs readme
          copy:
            dest: /var/log/opendaylight/readme.txt
            content: |
              Logs from opendaylight container can be found by running "sudo docker logs opendaylight_api" and are also available in /var/log/containers/opendaylight/karaf/logs/karaf.log
          ignore_errors: true
      upgrade_tasks:
        - when: step|int == 0
          tags: common
          block:
            - name: Check if opendaylight is deployed
              command: systemctl is-enabled --quiet opendaylight
              ignore_errors: true
              register: opendaylight_enabled_result
            - name: Set fact opendaylight_enabled
              set_fact:
                opendaylight_enabled: "{{ opendaylight_enabled_result.rc == 0 }}"
        - when: step|int == 2
          block:
            - name: Stop and disable opendaylight_api service
              when: opendaylight_enabled|bool
              service: name=opendaylight state=stopped enabled=no
        # Containerized deployment upgrade steps
        - name: ODL container L2 update and upgrade tasks
          block: &odl_container_upgrade_tasks
            - name: Check if ODL container is present
              shell: "docker ps -a --format '{{ '{{' }}.Names{{ '}}' }}' | grep '^opendaylight_api$'"
              register: opendaylight_api_container_present
              failed_when: false
            # NOTE: using shell module because of
            # https://github.com/ansible/ansible/issues/27960
            - name: Update ODL container restart policy to unless-stopped
              shell: "docker update --restart=unless-stopped opendaylight_api"
              when: opendaylight_api_container_present.rc == 0
            - name: stop previous ODL container
              docker_container:
                name: opendaylight_api
                state: stopped
              when: step|int == 0
            - name: remove data, journal and snapshots
              file:
                path: /var/lib/opendaylight/{{item}}
                state: absent
              with_items:
                - snapshots
                - journal
                - data
              when: step|int == 0
            - name: Set ODL upgrade flag to True
              copy:
                dest: /var/lib/config-data/puppet-generated/opendaylight/opt/opendaylight/etc/opendaylight/datastore/initial/config/genius-mdsalutil-config.xml
                content: |
                  <config xmlns="urn:opendaylight:params:xml:ns:yang:mdsalutil">
                    <upgradeInProgress>true</upgradeInProgress>
                  </config>
                owner: 42462
                group: 42462
                mode: '0644'
              when: step|int == 1
      post_upgrade_tasks: &odl_container_post_upgrade_tasks
        - name: Disable Upgrade in Config File
          copy:
            dest: /var/lib/config-data/puppet-generated/opendaylight/opt/opendaylight/etc/opendaylight/datastore/initial/config/genius-mdsalutil-config.xml
            content: |
              <config xmlns="urn:opendaylight:params:xml:ns:yang:mdsalutil">
                <upgradeInProgress>false</upgradeInProgress>
              </config>
            owner: 42462
            group: 42462
            mode: '0644'
          when: step|int == 0
        # 2 commands in 1 task because the sequence of commands needs to be ensured
        # and that no other task is executed in between.
        - name: Delete Upgrade Flag and Unset it via Rest
          shell:
            str_replace:
              template: >
                curl -k -v --silent --fail -u $ODL_USERNAME:$ODL_PASSWORD
                -H "Content-Type: application/json" -X DELETE
                $ODL_URI/restconf/config/genius-mdsalutil:config;
                curl -k -v --silent --fail -u $ODL_USERNAME:$ODL_PASSWORD
                -H "Content-Type: application/json" -X POST
                $ODL_URI/restconf/config/genius-mdsalutil:config
                -d "{ "upgradeInProgress": false }"
              params:
                $ODL_USERNAME: {get_attr: [OpenDaylightBase, role_data, config_settings, 'opendaylight::username']}
                $ODL_PASSWORD: {get_attr: [OpenDaylightBase, role_data, config_settings, 'opendaylight::password']}
                $ODL_URI: {get_param: [EndpointMap, OpenDaylightInternal, uri]}
          when: step|int == 0
      update_tasks:
        - name: Get ODL update level
          block: &get_odl_update_level
            - name: store update level to update_level variable
              set_fact:
                odl_update_level: {get_param: ODLUpdateLevel}
        - name: Stop ODL container and remove cache
          block:
            - name: Check if ODL container is present
              shell: "docker ps -a --format '{{ '{{' }}.Names{{ '}}' }}' | grep '^opendaylight_api$'"
              register: opendaylight_api_container_present
              failed_when: false
            # NOTE: using shell module because of
            # https://github.com/ansible/ansible/issues/27960
            - name: Update ODL container restart policy to unless-stopped
              shell: "docker update --restart=unless-stopped opendaylight_api"
              when: opendaylight_api_container_present.rc == 0
            - name: Stop previous ODL container
              docker_container:
                name: opendaylight_api
                state: stopped
            - name: Delete cache folder
              file:
                path: /var/lib/opendaylight/data/cache
                state: absent
          when:
            - step|int == 0
            - odl_update_level == 1
        - name: Run L2 update tasks that are similar to upgrade_tasks when update level is 2
          block: *odl_container_upgrade_tasks
          when: odl_update_level == 2
      post_update_tasks:
        - block: *get_odl_update_level
        - block: *odl_container_post_upgrade_tasks
          when: odl_update_level == 2