97173caf8f
The ODL etc directory was being overridden by an empty bind mount before kolla_start copied in the puppet-generated config files. The puppet-generated config only contains the modified configuration files, not all of the default config files, so ODL started without most of its configuration and the container crashed continuously.

This patch removes the unwanted mount that was erasing the /opt/opendaylight/etc directory and moves the upgrade flag file into puppet-generated, so it is copied in at kolla start time during an upgrade. The puppet-generated dir is read-only, so the REST call that disables the upgrade flag in ODL only disables it for the running instance; Ansible therefore writes the file again to keep the flag disabled in case ODL is rebooted.

Closes-Bug: 1755916
Change-Id: Ie380cc41ca50a294a2647d673f339d02111bf6b3
Signed-off-by: Tim Rozet <trozet@redhat.com>
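# Illustration only (not part of this template): a hypothetical host-side
# Ansible task for reading back the upgrade flag that kolla_start will copy
# into the container at its next start. The task name and register variable
# are arbitrary; only the puppet-generated path comes from the template below.
#
#   - name: Read the ODL upgrade flag from the puppet-generated config
#     command: cat /var/lib/config-data/puppet-generated/opendaylight/etc/opendaylight/datastore/initial/config/genius-mdsalutil-config.xml
#     register: odl_upgrade_flag
#     changed_when: false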
heat_template_version: queens

description: >
  OpenStack containerized OpenDaylight API service

parameters:
  DockerOpendaylightApiImage:
    description: image
    type: string
  DockerOpendaylightConfigImage:
    description: image
    type: string
  EndpointMap:
    default: {}
    description: Mapping of service endpoint -> protocol. Typically set
                 via parameter_defaults in the resource registry.
    type: json
  ServiceData:
    default: {}
    description: Dictionary packing service data
    type: json
  ServiceNetMap:
    default: {}
    description: Mapping of service_name -> network name. Typically set
                 via parameter_defaults in the resource registry. This
                 mapping overrides those in ServiceNetMapDefaults.
    type: json
  DefaultPasswords:
    default: {}
    type: json
  RoleName:
    default: ''
    description: Role name on which the service is applied
    type: string
  RoleParameters:
    default: {}
    description: Parameters specific to the role
    type: json
  EnableInternalTLS:
    type: boolean
    default: false
  InternalTLSCAFile:
    default: '/etc/ipa/ca.crt'
    type: string
    description: Specifies the default CA cert to use if TLS is used for
                 services in the internal network.
  ODLUpdateLevel:
    default: 1
    description: Specify the level of update
    type: number
    constraints:
      - allowed_values:
          - 1
          - 2

conditions:

  internal_tls_enabled: {equals: [{get_param: EnableInternalTLS}, true]}

resources:

  ContainersCommon:
    type: ./containers-common.yaml

  OpenDaylightBase:
    type: ../../puppet/services/opendaylight-api.yaml
    properties:
      EndpointMap: {get_param: EndpointMap}
      ServiceData: {get_param: ServiceData}
      ServiceNetMap: {get_param: ServiceNetMap}
      DefaultPasswords: {get_param: DefaultPasswords}
      RoleName: {get_param: RoleName}
      RoleParameters: {get_param: RoleParameters}

outputs:
  role_data:
    description: Role data for the OpenDaylight API role.
    value:
      service_name: {get_attr: [OpenDaylightBase, role_data, service_name]}
      config_settings:
        map_merge:
          - get_attr: [OpenDaylightBase, role_data, config_settings]
          - if:
            - internal_tls_enabled
            - tripleo::certmonger::opendaylight::postsave_cmd: "true"  # TODO: restart the odl container here
            - {}
      logging_source: {get_attr: [OpenDaylightBase, role_data, logging_source]}
      logging_groups: {get_attr: [OpenDaylightBase, role_data, logging_groups]}
      # BEGIN DOCKER SETTINGS
      puppet_config:
        config_volume: opendaylight
        volumes:
          list_concat:
            - if:
              - internal_tls_enabled
              - - /etc/pki/tls/certs/odl.crt:/etc/pki/tls/certs/odl.crt:ro
                - /etc/pki/tls/private/odl.key:/etc/pki/tls/private/odl.key:ro
                - list_join:
                  - ':'
                  - - {get_param: InternalTLSCAFile}
                    - {get_param: InternalTLSCAFile}
                    - 'ro'
              - null
        # 'file,concat,file_line,augeas' are included by default
        puppet_tags: odl_user,odl_keystore
        step_config:
          get_attr: [OpenDaylightBase, role_data, step_config]
        config_image: {get_param: DockerOpendaylightConfigImage}
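      # kolla_start copies the puppet-generated config into the container via
      # the config_files entry below; /opt/opendaylight/etc itself is no longer
      # bind-mounted, so the image's default config files stay in place.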
      kolla_config:
        /var/lib/kolla/config_files/opendaylight_api.json:
          command: /opt/opendaylight/bin/karaf server
          config_files:
            - source: "/var/lib/kolla/config_files/src/*"
              dest: "/"
              merge: true
              preserve_properties: true
          permissions:
            - path: /opt/opendaylight
              owner: odl:odl
              recurse: true
      docker_config:
        step_1:
          opendaylight_api:
            start_order: 0
            image: &odl_api_image {get_param: DockerOpendaylightApiImage}
            privileged: false
            net: host
            detach: true
            user: odl
            restart: always
            healthcheck:
              test: /openstack/healthcheck
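            # Only the kolla config sources and the persistent journal/snapshot
            # directories are mounted; /opt/opendaylight/etc is deliberately
            # left unmounted (see the commit message above).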
            volumes:
              list_concat:
                - {get_attr: [ContainersCommon, volumes]}
                -
                  - /var/lib/kolla/config_files/opendaylight_api.json:/var/lib/kolla/config_files/config.json:ro
                  - /var/lib/config-data/puppet-generated/opendaylight/:/var/lib/kolla/config_files/src:ro
                  - /var/lib/opendaylight/journal:/opt/opendaylight/journal
                  - /var/lib/opendaylight/snapshots:/opt/opendaylight/snapshots
            environment:
              - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
      metadata_settings:
        get_attr: [OpenDaylightBase, role_data, metadata_settings]
      host_prep_tasks:
        - name: create persistent directories
          file:
            path: "{{ item }}"
            state: directory
          with_items:
            - /var/lib/opendaylight/snapshots
            - /var/lib/opendaylight/journal
        - name: opendaylight logs readme
          copy:
            dest: /var/log/opendaylight/readme.txt
            content: |
              Logs from the opendaylight container can be found by running "sudo docker logs opendaylight_api"
          ignore_errors: true
      upgrade_tasks:
        - name: Check if opendaylight is deployed
          command: systemctl is-enabled --quiet opendaylight
          tags: common
          ignore_errors: True
          register: opendaylight_enabled
        - name: "PreUpgrade step0,validation: Check service opendaylight is running"
          command: systemctl is-active --quiet opendaylight
          when:
            - step|int == 0
            - opendaylight_enabled.rc == 0
          tags: validation
        - name: Stop and disable opendaylight_api service
          when:
            - step|int == 2
            - opendaylight_enabled.rc == 0
          service: name=opendaylight state=stopped enabled=no
        # Containerized deployment upgrade steps
        - name: ODL container L2 update and upgrade tasks
          block: &odl_container_upgrade_tasks
            - name: remove journal and snapshots
              when: step|int == 0
              file:
                path: /var/lib/opendaylight/{{item}}
                state: absent
              with_items:
                - snapshots
                - journal
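            # The flag is written to the host's puppet-generated directory (not
            # into the running container) so that kolla_start copies it into
            # ODL's etc tree whenever the container (re)starts during upgrade.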
            - name: Set ODL upgrade flag to True
              copy:
                dest: /var/lib/config-data/puppet-generated/opendaylight/etc/opendaylight/datastore/initial/config/genius-mdsalutil-config.xml
                content: |
                  <config xmlns="urn:opendaylight:params:xml:ns:yang:mdsalutil">
                      <upgradeInProgress>true</upgradeInProgress>
                  </config>
                owner: odl
                group: odl
                mode: 0644
              when: step|int == 1
      post_upgrade_tasks: &odl_container_post_upgrade_tasks
        - name: Disable Upgrade Flag via Rest
          shell:
            str_replace:
              template: >-
                curl -k -v --silent --fail -u $ODL_USERNAME:$ODL_PASSWORD
                -X PUT -d '{"config": {"upgradeInProgress": false}}'
                -H "Content-Type: application/json"
                $ODL_URI/restconf/config/genius-mdsalutil:config
              params:
                $ODL_USERNAME: {get_attr: [OpenDaylightBase, role_data, config_settings, 'opendaylight::username']}
                $ODL_PASSWORD: {get_attr: [OpenDaylightBase, role_data, config_settings, 'opendaylight::password']}
                $ODL_URI: {get_param: [EndpointMap, OpenDaylightInternal, uri]}
          when: step|int == 0
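        # The REST call above only takes effect in the running instance; the
        # puppet-generated directory is mounted read-only in the container, so
        # the flag file is rewritten on the host as well to keep the upgrade
        # flag disabled if ODL is restarted.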
        - name: Disable Upgrade in Config File
          copy:
            dest: /var/lib/config-data/puppet-generated/opendaylight/etc/opendaylight/datastore/initial/config/genius-mdsalutil-config.xml
            content: |
              <config xmlns="urn:opendaylight:params:xml:ns:yang:mdsalutil">
                  <upgradeInProgress>false</upgradeInProgress>
              </config>
            owner: odl
            group: odl
            mode: 0644
          when: step|int == 0
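      # ODLUpdateLevel == 2 reuses the container upgrade and post-upgrade task
      # blocks above through their YAML anchors.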
      update_tasks:
        - name: Get ODL update level
          block: &get_odl_update_level
            - name: store update level to update_level variable
              set_fact:
                odl_update_level: {get_param: ODLUpdateLevel}
        - name: Run L2 update tasks that are similar to upgrade_tasks when update level is 2
          block: *odl_container_upgrade_tasks
          when: odl_update_level == 2
      post_update_tasks:
        - block: *get_odl_update_level
        - block: *odl_container_post_upgrade_tasks
          when: odl_update_level == 2