3f1c6c0626
The common playbooks for cinder, neutron and nova have an initial fact
gathering step. This patch switches from implicit gather_facts to direct use
of the ansible setup module. The minimum set of facts is gathered, plus an
additional filtered set of facts about the target's processor count. The
processor count is used in many service templates to set the number of
threads/processes used. This approach significantly reduces the size of the
gathered facts, which cannot be achieved directly with a gather_subset of
'!all,min,hardware'. In addition, all uses of ansible facts are converted to
ansible_facts[].

Change-Id: Id29f7344da7f71c9513d62c939ce2baead6ef451
---
# Copyright 2014, Rackspace US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

- name: Gather nova facts
  hosts: "{{ nova_hosts }}"
  gather_facts: false
  tags:
    - always
  tasks:
    - name: Gather minimal facts for nova
      setup:
        gather_subset:
          - "!all"
          - min
      when: osa_gather_facts | default(True)

    - name: Gather additional facts for nova
      setup:
        gather_subset: "{{ nova_gather_subset | default('processor_count') }}"
        filter: "{{ nova_gather_filter | default('ansible_processor*') }}"
      when: osa_gather_facts | default(True)
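
# NOTE: The fact gathering above is deliberately minimal. If a different
# subset or filter is required, the variables used above can be overridden
# (for example in the deployment's user_variables.yml). The snippet below is
# only an illustration and simply restates the defaults used in this play:
#
#   osa_gather_facts: true
#   nova_gather_subset: "processor_count"
#   nova_gather_filter: "ansible_processor*"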

- name: Install nova services
  hosts: "{{ nova_hosts }}"
  serial: "{{ nova_serial }}"
  gather_facts: false
  user: root
  environment: "{{ deployment_environment_variables | default({}) }}"
  vars_files:
    - "../defaults/repo_packages/openstack_services.yml"
    - "../defaults/repo_packages/nova_consoles.yml"
    - "../defaults/{{ install_method }}_install.yml"
  tags:
    - nova
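
  # NOTE: "serial" limits how many nova hosts are processed per batch;
  # combined with the haproxy drain/enable handling in pre_tasks and
  # post_tasks below, this allows a rolling update of the nova services.
  # nova_serial itself is defined elsewhere in the deployment variables.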

  pre_tasks:
    # Enable execution of ceph_client on the nova compute hosts if cinder RBD
    # backends are used. This is necessary to ensure that volume-backed Nova
    # instances can function when RBD is the volume backend.
    - name: Set cinder RBD inuse fact
      set_fact:
        nova_cinder_rbd_inuse: "{{ True in groups['cinder_volume'] | map('extract', hostvars, 'cinder_backend_rbd_inuse') }}"
      delegate_to: localhost
      delegate_facts: True
      when:
        - "'nova_compute' in group_names"
        - "inventory_hostname == ((groups['nova_compute'] | intersect(ansible_play_hosts)) | list)[0]"
        - "hostvars['localhost']['nova_cinder_rbd_inuse'] is not defined"
      tags:
        - always
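
    # NOTE: The fact above is only computed on the first nova_compute host in
    # the play and is stored against localhost via delegate_facts, so every
    # other host (and the os_nova role invocation below) can read it back as
    # hostvars['localhost']['nova_cinder_rbd_inuse'] without re-evaluating it.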

    # In order to ensure that any container, software or config file changes
    # which cause a container/service restart do not cause an unexpected
    # outage, we drain the load balancer back end for this container.
    - include_tasks: ../common-tasks/haproxy-endpoint-manage.yml
      vars:
        haproxy_backend: "{{ backend_name }}-back"
        haproxy_state: disabled
      loop_control:
        loop_var: backend_name
      when:
        - "backend_name in group_names"
        - "groups[backend_name] | length > 1"
      with_items:
        - "nova_api_metadata"
        - "nova_api_os_compute"
        - "nova_console"

    - name: Determine management bridge IP address
      include_tasks: ../common-tasks/dynamic-address-fact.yml
      vars:
        network_address: "container_address"
      tags:
        - always

    - name: Configure container
      include_tasks: "../common-tasks/os-{{ container_tech | default('lxc') }}-container-setup.yml"
      when: not is_metal
      vars:
        extra_container_config_no_restart:
          - "lxc.start.order=39"

    - include_tasks: ../common-tasks/unbound-clients.yml
      when:
        - hostvars['localhost']['resolvconf_enabled'] | bool
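
    # The next group of tasks exposes device nodes from the physical host to
    # LXC-based compute containers: /dev/nbd* (typically used via qemu-nbd for
    # image access), /dev/net/tun (needed for instance tap/tun networking) and
    # /dev/kvm (hardware-accelerated virtualisation). All of them are skipped
    # on metal deployments or non-LXC container technologies.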

    - name: Add nbd devices to the compute
      shell: |
        for i in /dev/nbd*;do
          lxc-device -n {{ container_name }} add $i $i
        done
      failed_when: false
      register: device_add
      changed_when: >
        'added' in device_add.stdout.lower()
      delegate_to: "{{ physical_host }}"
      when:
        - container_tech | default('lxc') == 'lxc'
        - "'nova_compute' in group_names"
        - "not is_metal | bool"
      tags:
        - always

    - name: Add net/tun device to the compute
      command: |
        lxc-device -n {{ container_name }} add /dev/net/tun /dev/net/tun
      delegate_to: "{{ physical_host }}"
      when:
        - container_tech | default('lxc') == 'lxc'
        - "'nova_compute' in group_names"
        - "not is_metal | bool"
      tags:
        - always

    - name: Check if kvm device exists
      stat:
        path: /dev/kvm
      delegate_to: "{{ physical_host }}"
      register: kvm_device
      when:
        - container_tech | default('lxc') == 'lxc'
        - "'nova_compute' in group_names"
        - "not is_metal | bool"
      tags:
        - always

    - name: Add kvm device to the compute
      command: |
        lxc-device -n {{ container_name }} add /dev/kvm /dev/kvm
      delegate_to: "{{ physical_host }}"
      register: device_add
      failed_when: false
      changed_when: >
        'added' in device_add.stdout.lower()
      when:
        - container_tech | default('lxc') == 'lxc'
        - "'nova_compute' in group_names"
        - "not is_metal | bool"
        - "'ischr' in kvm_device.stat and kvm_device.stat.ischr | bool"
      tags:
        - always
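
    # NOTE: The kvm passthrough above is guarded by the stat result, so hosts
    # without a /dev/kvm character device (no hardware virtualisation) simply
    # skip it; "changed_when" inspects the lxc-device output so repeated runs
    # stay idempotent.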

  roles:
    - role: "os_nova"
      nova_management_address: "{{ container_address }}"
      nova_cinder_rbd_inuse: "{{ hostvars['localhost']['nova_cinder_rbd_inuse'] | default(False) }}"

    - role: "system_crontab_coordination"
      tags:
        - crontab

  post_tasks:
    # Now that container changes are done, we can set the load balancer
    # back end for this container to available again.
    - include_tasks: ../common-tasks/haproxy-endpoint-manage.yml
      vars:
        haproxy_backend: "{{ backend_name }}-back"
        haproxy_state: enabled
      loop_control:
        loop_var: backend_name
      when:
        - "backend_name in group_names"
        - "groups[backend_name] | length > 1"
      with_items:
        - "nova_api_metadata"
        - "nova_api_os_compute"
        - "nova_console"
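
    # Once the backend is enabled again, the batch defined by "serial" is
    # complete and Ansible moves on to the next batch of nova hosts.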