[Train] Fix Tacker config for the local Glance store

It turned out the previous fix ([1]) was incomplete.
Additionally, it seems we have to limit the Tacker server
to one instance, co-located with the conductor.

[1] https://review.opendev.org/684275
commit b96ade3cf0

Change-Id: I9ce27d5f68f32ef59e245960e23336ae5c5db905
Closes-bug: #1853715
Related-bug: #1845142
(cherry picked from commit c5f9ea9590)
This commit is contained in:
Radosław Piliszek 2019-11-23 21:46:11 +01:00
parent 744f6cb85c
commit c7458b006f
7 changed files with 63 additions and 13 deletions

View File

@ -5,7 +5,7 @@ tacker_services:
tacker-server:
container_name: "tacker_server"
group: "tacker-server"
host_in_groups: "{{ inventory_hostname in groups['tacker-server'] }}"
host_in_groups: "{{ inventory_hostname in tacker_hosts }}"
enabled: true
image: "{{ tacker_server_image_full }}"
volumes: "{{ tacker_server_default_volumes + tacker_server_extra_volumes }}"
@ -16,15 +16,17 @@ tacker_services:
mode: "http"
external: false
port: "{{ tacker_server_port }}"
custom_member_list: "{{ tacker_haproxy_members.split(';') }}"
tacker_server_external:
enabled: "{{ enable_tacker }}"
mode: "http"
external: true
port: "{{ tacker_server_port }}"
custom_member_list: "{{ tacker_haproxy_members.split(';') }}"
tacker-conductor:
container_name: "tacker_conductor"
group: "tacker-conductor"
host_in_groups: "{{ inventory_hostname in tacker_conductor_hosts }}"
host_in_groups: "{{ inventory_hostname in tacker_hosts }}"
enabled: true
image: "{{ tacker_conductor_image_full }}"
volumes: "{{ tacker_conductor_default_volumes + tacker_conductor_extra_volumes }}"
@ -59,19 +61,27 @@ tacker_server_default_volumes:
- "{{ kolla_dev_repos_directory ~ '/tacker/tacker:/var/lib/kolla/venv/lib/python2.7/site-packages/tacker' if tacker_dev_mode | bool else '' }}"
- "/etc/localtime:/etc/localtime:ro"
- "kolla_logs:/var/log/kolla/"
# NOTE(yoctozepto): Starting in Train, this volume must be shared
# between all instances of both Tacker services (Server, Conductor)
- "kolla_tacker_csar_files:/var/lib/tacker/csar_files/"
tacker_conductor_default_volumes:
- "{{ node_config_directory }}/tacker-conductor/:{{ container_config_directory }}/:ro"
- "{{ kolla_dev_repos_directory ~ '/tacker/tacker:/var/lib/kolla/venv/lib/python2.7/site-packages/tacker' if tacker_dev_mode | bool else '' }}"
- "/etc/localtime:/etc/localtime:ro"
- "kolla_logs:/var/log/kolla/"
- "kolla_tacker_vnfpackages:/var/lib/tacker/vnfpackages/"
# NOTE(yoctozepto): Starting in Train, this volume must be shared
# between all instances of both Tacker services (Server, Conductor)
- "kolla_tacker_csar_files:/var/lib/tacker/csar_files/"
tacker_extra_volumes: "{{ default_extra_volumes }}"
tacker_server_extra_volumes: "{{ tacker_extra_volumes }}"
tacker_conductor_extra_volumes: "{{ tacker_extra_volumes }}"
tacker_conductor_hosts: "{{ [groups['tacker-conductor']|first] }}"
# NOTE(yoctozepto): Starting in Train, this variable is used to co-locate all
# Tacker services on one host since we do not provide a shared filesystem
# to satisfy kolla_tacker_csar_files volume needs.
tacker_hosts: "{{ [groups['tacker']|first] }}"
####################
# OpenStack
@ -120,3 +130,8 @@ tacker_ks_users:
user: "{{ tacker_keystone_user }}"
password: "{{ tacker_keystone_password }}"
role: "admin"
####################
# HAProxy
####################
tacker_haproxy_members: "{% for host in tacker_hosts %}server {{ hostvars[host]['ansible_hostname'] }} {{ 'api' | kolla_address(host) }}:{{ tacker_server_port }} check inter 2000 rise 2 fall 5;{% endfor %}"

View File

@ -1,4 +1,35 @@
---
- name: Warn about Train upgrade
debug:
msg: >-
In Train, Tacker started using the local filesystem to store VNF
packages and CSAR files.
Kolla Ansible provides no shared filesystem capabilities, hence only
one instance of each Tacker service is deployed, all on the same host.
Previous multinode deployments will be scaled down when running the upgrade.
- name: Stop and remove extra tacker-conductor containers
vars:
service_name: "tacker-conductor"
service: "{{ tacker_services[service_name] }}"
become: true
kolla_docker:
action: "stop_and_remove_container"
name: "{{ service.container_name }}"
when:
- inventory_hostname not in tacker_hosts
- name: Stop and remove extra tacker-server containers
vars:
service_name: "tacker-server"
service: "{{ tacker_services[service_name] }}"
become: true
kolla_docker:
action: "stop_and_remove_container"
name: "{{ service.container_name }}"
when:
- inventory_hostname not in tacker_hosts
- include_tasks: config.yml
- include_tasks: bootstrap_service.yml

View File

@ -22,13 +22,11 @@
},
{
"path": "/var/lib/tacker/vnfpackages",
"owner": "tacker:tacker",
"recurse": true
"owner": "tacker:tacker"
},
{
"path": "/var/lib/tacker/csar_files",
"owner": "tacker:tacker",
"recurse": true
"owner": "tacker:tacker"
}
]
}

View File

@ -19,6 +19,10 @@
"path": "/var/log/kolla/tacker",
"owner": "tacker:tacker",
"recurse": true
},
{
"path": "/var/lib/tacker/csar_files",
"owner": "tacker:tacker"
}
]
}

View File

@ -67,3 +67,6 @@ driver = noop
[oslo_policy]
policy_file = {{ tacker_policy_file }}
{% endif %}
[glance_store]
filesystem_store_datadir = /var/lib/tacker/csar_files

View File

@ -64,10 +64,10 @@ In order to enable them, you need to edit the file
.. warning::
In Train, Tacker Conductor started using local filesystem to store VNF
In Train, Tacker started using the local filesystem to store VNF
packages and CSAR files.
Kolla Ansible provides no shared filesystem capabilities, hence only
one instance of Tacker Conductor is deployed.
one instance of each Tacker service is deployed, all on the same host.
Deploy tacker and related services.

View File

@ -1,9 +1,8 @@
---
upgrade:
- |
In Train, Tacker Conductor started using local filesystem to store VNF
In Train, Tacker started using the local filesystem to store VNF
packages and CSAR files.
Kolla Ansible provides no shared filesystem capabilities, hence only
one instance of Tacker Conductor is deployed.
Previous multinode deployments must manually remove all Tacker Conductors
before proceeding with upgrade.
one instance of each Tacker service is deployed, all on the same host.
Previous multinode deployments will be scaled down when running the upgrade.