openstack-ansible/playbooks/common-playbooks/cinder.yml
Dmitriy Rabotyagov 9b50edb626 Resolve issue with undefined facts while running with tags
Ansible fixed an "issue" whereby gather_facts ran no matter which tags were supplied.
Now that this behaviour has changed, we need to gather facts regardless of the
provided tags. Some background is available at [1].

[1] https://github.com/ansible/ansible/issues/57529

Change-Id: Idc8be5f490cba79e70a45d159718ab68c78cbcee
2020-05-15 12:54:17 +03:00

---
# Copyright 2014, Rackspace US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
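
# Facts are gathered in a separate play tagged "always" so that they are
# collected no matter which tags are passed on the command line.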
- name: Gather cinder facts
  hosts: "{{ cinder_hosts }}"
  gather_facts: "{{ osa_gather_facts | default(True) }}"
  tags:
    - always

- name: Install cinder services
  hosts: "{{ cinder_hosts }}"
  serial: "{{ cinder_serial }}"
  gather_facts: false
  user: root
  environment: "{{ deployment_environment_variables | default({}) }}"
  vars_files:
    - "../defaults/repo_packages/openstack_services.yml"
    - "../defaults/{{ install_method }}_install.yml"
  tags:
    - cinder
  pre_tasks:

    # In order to ensure that any container, software or
    # config file changes which cause a container/service
    # restart do not cause an unexpected outage, we drain
    # the load balancer back end for this container.
    - include_tasks: ../common-tasks/haproxy-endpoint-manage.yml
      vars:
        haproxy_backend: cinder_api-back
        haproxy_state: disabled
      when:
        - "'cinder_api' in group_names"
        - "groups['cinder_api'] | length > 1"
    - name: Determine storage bridge IP address
      include_tasks: ../common-tasks/dynamic-address-fact.yml
      vars:
        network_address: "storage_address"
      tags:
        - always
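
    # A cinder-volume container using the LVM backend needs access to the
    # host's block devices: disable the LXC-managed /dev, allow all devices
    # in the devices cgroup, and mount devtmpfs at /dev inside the container.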
    - name: Configure container (cinder-volume) when lvm is in-use
      include_tasks: "../common-tasks/os-{{ container_tech | default('lxc') }}-container-setup.yml"
      vars:
        extra_container_config:
          - "lxc.autodev=0"
          - "lxc.cgroup.devices.allow=a *:* rmw"
          - "lxc.mount.entry=udev dev devtmpfs defaults 0 0"
        extra_container_config_no_restart:
          - "lxc.start.order=39"
      when:
        - "not is_metal"
        - "'cinder_volume' in group_names"
        - "cinder_backend_lvm_inuse | bool"

    - name: Configure container (other services)
      include_tasks: "../common-tasks/os-{{ container_tech | default('lxc') }}-container-setup.yml"
      when:
        - "not is_metal"
        - "not ( 'cinder_volume' in group_names and cinder_backend_lvm_inuse | bool )"

    - include_tasks: ../common-tasks/unbound-clients.yml
      when:
        - hostvars['localhost']['resolvconf_enabled'] | bool
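
    # For LXC deployments, find the physical volumes backing each backend's
    # volume_group on the host and pass those block devices through to the
    # container with lxc-device.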
    - name: Add volume group block device to cinder
      shell: |
        {% if item.value.volume_group is defined %}
          if [ "$(pvdisplay | grep -B1 {{ item.value.volume_group }} | awk '/PV/ {print $3}')" ]; then
            for device in `pvdisplay | grep -B1 {{ item.value.volume_group }} | awk '/PV/ {print $3}'`
              do lxc-device -n {{ container_name }} add $device
            done
          fi
        {% else %}
          echo "{{ item.key }} volume_group not defined"
        {% endif %}
      with_dict: "{{ cinder_backends | default({}) }}"
      when:
        - container_tech | default('lxc') == 'lxc'
        - physical_host != container_name
        - cinder_backend_lvm_inuse | bool
      delegate_to: "{{ physical_host }}"
    - name: udevadm trigger
      command: udevadm trigger
      delegate_to: "{{ physical_host }}"
      when: cinder_backend_lvm_inuse | bool
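
  # Deploy the cinder services via the os_cinder role, using the storage
  # network address resolved earlier in this play.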
  roles:
    - role: "os_cinder"
      cinder_storage_address: "{{ storage_address }}"
    - role: "system_crontab_coordination"
      tags:
        - crontab

  post_tasks:

    # Now that container changes are done, we can set
    # the load balancer back end for this container
    # to available again.
    - include_tasks: ../common-tasks/haproxy-endpoint-manage.yml
      vars:
        haproxy_backend: cinder_api-back
        haproxy_state: enabled
      when:
        - "'cinder_api' in group_names"
        - "groups['cinder_api'] | length > 1"