64608d4eda
Ansible throws a warning when we use gather_facts as a variable. This patch changes the variable to osa_gather_facts to avoid the warning.

Closes-Bug: 1722604
Change-Id: Ic0a46b93e1ba62bc8e99e2d0a279c361a168690b
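For illustration, a minimal sketch of the rename (the "before" line is assumed from the bug description, not quoted from the diff):

    # before: referencing the reserved name gather_facts as a variable triggers the warning
    gather_facts: "{{ gather_facts | default(True) }}"
    # after: the play keyword as it now appears in the file below
    gather_facts: "{{ osa_gather_facts | default(True) }}"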
---
# Copyright 2014, Rackspace US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

- name: Install cinder services
  hosts: "{{ cinder_hosts }}"
  serial: "{{ cinder_serial }}"
  gather_facts: "{{ osa_gather_facts | default(True) }}"
  user: root
  environment: "{{ deployment_environment_variables | default({}) }}"
  tags:
    - cinder
  pre_tasks:

    # In order to ensure that any container, software or
    # config file changes which cause a container/service
    # restart do not cause an unexpected outage, we drain
    # the load balancer back end for this container.
    - include: ../common-tasks/haproxy-endpoint-manage.yml
      vars:
        haproxy_backend: cinder_api-back
        haproxy_state: disabled
      when:
        - "'cinder_api' in group_names"
        - "groups['cinder_api'] | length > 1"

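    # Resolve this host's address on the storage network; the resulting
    # storage_address fact is consumed later in this play by the os_cinder
    # role as cinder_storage_address.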
    - name: Determine storage bridge IP address
      include: ../common-tasks/dynamic-address-fact.yml
      vars:
        network_address: "storage_address"

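    # When the LVM backend is in use, the cinder-volume container needs raw
    # access to the host's block devices, so its AppArmor profile is set to
    # unconfined and device cgroup/udev access is granted inside the container.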
    - name: Configure container (cinder-volume)
      include: ../common-tasks/os-lxc-container-setup.yml
      static: no
      vars:
        aa_profile: "unconfined"
        extra_container_config:
          - "lxc.autodev=0"
          - "lxc.cgroup.devices.allow=a *:* rmw"
          - "lxc.mount.entry=udev dev devtmpfs defaults 0 0"
        extra_container_config_no_restart:
          - "lxc.start.order=39"
      when:
        - "'cinder_volume' in group_names"
        - "cinder_backend_lvm_inuse | bool"

    - name: Configure container (other services)
      include: ../common-tasks/os-lxc-container-setup.yml
      static: no
      when:
        - "'cinder_volume' not in group_names"

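    # On metal hosts, map the shared /openstack/log/<host>-cinder directory to
    # the conventional /var/log/cinder location.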
    - name: Configure log directories (on metal)
      include: ../common-tasks/os-log-dir-setup.yml
      vars:
        log_dirs:
          - src: "/openstack/log/{{ inventory_hostname }}-cinder"
            dest: "/var/log/cinder"

    - include: ../common-tasks/unbound-clients.yml
      static: no
      when:
        - hostvars['localhost']['resolvconf_enabled'] | bool

    - name: Configure package proxy cache
      include: ../common-tasks/package-cache-proxy.yml

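    # When cinder-volume runs inside a container, pass the physical volume
    # devices backing each backend's LVM volume group into the container
    # with lxc-device.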
    - name: Add volume group block device to cinder
      shell: |
        {% if item.value.volume_group is defined %}
          if [ "$(pvdisplay | grep -B1 {{ item.value.volume_group }} | awk '/PV/ {print $3}')" ];then
            for device in `pvdisplay | grep -B1 {{ item.value.volume_group }} | awk '/PV/ {print $3}'`
              do lxc-device -n {{ container_name }} add $device
            done
          fi
        {% else %}
          echo "{{ item.key }} volume_group not defined"
        {% endif %}
      with_dict: "{{ cinder_backends | default({}) }}"
      when:
        - physical_host != container_name
        - cinder_backend_lvm_inuse | bool
      delegate_to: "{{ physical_host }}"

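    # Replay udev events on the physical host so the device changes made above
    # are picked up.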
    - name: udevadm trigger
      command: udevadm trigger
      delegate_to: "{{ physical_host }}"
      when: cinder_backend_lvm_inuse | bool

  roles:
    - role: "os_cinder"
      cinder_storage_address: "{{ storage_address }}"

    - role: "rsyslog_client"
      rsyslog_client_log_rotate_file: cinder_log_rotate
      rsyslog_client_log_dir: "/var/log/cinder"
      rsyslog_client_config_name: "99-cinder-rsyslog-client.conf"
      tags:
        - rsyslog

    - role: "system_crontab_coordination"
      tags:
        - crontab

  post_tasks:
    # Now that container changes are done, we can set
    # the load balancer back end for this container
    # to available again.
    - include: ../common-tasks/haproxy-endpoint-manage.yml
      vars:
        haproxy_backend: cinder_api-back
        haproxy_state: enabled
      when:
        - "'cinder_api' in group_names"
        - "groups['cinder_api'] | length > 1"