openstack-ansible-os_nova/tasks/main.yml
Commit 363162da23 by Jesse Pretorius: Allow tags to be used for MQ tasks
The use of 'include_tasks' with a loop of variables makes it
impossible to use tags to scope a playbook run to only the
MQ tasks.

The use-case where this is important is when the rabbitmq
containers are destroyed and rebuilt in order to resolve
an issue with them, and the user wishes to quickly recreate
all the vhosts/users.

Ansible's 'include_tasks' is a dynamic inclusion, and tags
applied to a dynamic inclusion are not inherited by the
included tasks, so tag-scoped runs skip them. The nice thing
about dynamic inclusions is that they completely skip all
tasks when the condition does not apply, cutting down deploy
time. However, given the use-case above, it is better to
accept the extra deployment time.
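
A minimal sketch of the difference, using the task file and
tag names introduced by this patch:

  # Dynamic: the tag applies only to the include statement
  # itself, so a run scoped with --tags common-mq skips the
  # included tasks.
  - include_tasks: mq_setup.yml
    tags:
      - common-mq

  # Static: the imported tasks inherit the tag and can be
  # targeted with --tags common-mq.
  - import_tasks: mq_setup.yml
    tags:
      - common-mq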

This patch changes the dynamic inclusion to a static one,
adds a 'common-mq' tag to cover all MQ implementations,
and re-implements the 'common-rabbitmq' tag for the tasks
that relate to RabbitMQ specifically.
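
With this in place, a deployer can recreate just the MQ
vhosts/users with a tag-scoped run, for example (playbook
name per the usual OpenStack-Ansible convention, assumed
here):

  openstack-ansible os-nova-install.yml --tags common-mq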

It also implements conditionals for each task set so that
the rpc/notify tasks can be skipped if a vhost/user is not
required for that purpose (e.g. swift does not use RPC, and
most roles do not use notifications by default).
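
For example, the notify vhost/user for this role is only set
up when telemetry or designate integration is enabled:

  _oslomsg_configure_notify: "{{ (nova_ceilometer_enabled | bool) or (nova_designate_enabled | bool) }}"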

Depends-On: https://review.openstack.org/588191
Change-Id: I69430899e381f515ee304e9623feb972e5400639
2018-08-07 14:52:39 +01:00


---
# Copyright 2014, Rackspace US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

- name: Gather variables for each operating system
  include_vars: "{{ item }}"
  with_first_found:
    - "{{ ansible_distribution | lower }}-{{ ansible_distribution_version | lower }}.yml"
    - "{{ ansible_distribution | lower }}-{{ ansible_distribution_major_version | lower }}.yml"
    - "{{ ansible_os_family | lower }}-{{ ansible_distribution_major_version | lower }}.yml"
    - "{{ ansible_distribution | lower }}.yml"
    - "{{ ansible_os_family | lower }}.yml"
  tags:
    - always

- name: Fail when virt type is unsupported
  fail:
    msg: "Unsupported Virt Type Provided {{ nova_supported_virt_types }}"
  when:
    - nova_virt_type is defined
    - nova_virt_type not in nova_supported_virt_types
  tags:
    - always

- name: Fail if TCP and TLS are both enabled
  fail:
    msg: |
      TCP and TLS connectivity are currently enabled for libvirtd. This
      combination prevents libvirtd from starting properly since this role
      does not generate TLS certificates for libvirtd at this time.

      To enable TCP connectivity without TLS, set the following variables:

        nova_libvirtd_listen_tcp: 1
        nova_libvirtd_listen_tls: 0

      Please note that this configuration does not encrypt communication with
      libvirtd.
  when:
    - nova_libvirtd_listen_tcp == 1
    - nova_libvirtd_listen_tls == 1
  tags:
    - always

- name: Fail if service was deployed using a different installation method
  fail:
    msg: "Switching installation methods for OpenStack services is not supported"
  when:
    - ansible_local is defined
    - ansible_local.openstack_ansible is defined
    - ansible_local.openstack_ansible.nova is defined
    - ansible_local.openstack_ansible.nova.install_method is defined
    - ansible_local.openstack_ansible.nova.install_method != nova_install_method

- name: Gather variables for installation method
  include_vars: "{{ nova_install_method }}_install.yml"
  tags:
    - always

- include_tasks: nova_virt_detect.yml
  tags:
    - always
    - nova-config

- include_tasks: nova_pre_install.yml
  tags:
    - nova-install

- include_tasks: nova_install.yml
  tags:
    - nova-install

- name: Refresh local facts
  setup:
    filter: ansible_local
    gather_subset: "!all"
  tags:
    - nova-config

- include_tasks: nova_post_install.yml
  tags:
    - nova-config

- include_tasks: nova_service_setup.yml
  when:
    - "nova_services['nova-conductor']['group'] in group_names"
    - "inventory_hostname == ((groups['nova_conductor'] | intersect(ansible_play_hosts)) | list)[0]"
  tags:
    - nova-config
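
# mq_setup.yml is imported statically (import_tasks) so that the
# 'common-mq' tag is inherited by the MQ tasks and a run can be
# scoped to just the MQ vhost/user setup.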
- import_tasks: mq_setup.yml
  when:
    - "nova_services['nova-conductor']['group'] in group_names"
    - "inventory_hostname == ((groups[nova_services['nova-conductor']['group']] | intersect(ansible_play_hosts)) | list)[0]"
  vars:
    _oslomsg_rpc_setup_host: "{{ nova_oslomsg_rpc_setup_host }}"
    _oslomsg_rpc_userid: "{{ nova_oslomsg_rpc_userid }}"
    _oslomsg_rpc_password: "{{ nova_oslomsg_rpc_password }}"
    _oslomsg_rpc_vhost: "{{ nova_oslomsg_rpc_vhost }}"
    _oslomsg_rpc_transport: "{{ nova_oslomsg_rpc_transport }}"
    _oslomsg_notify_setup_host: "{{ nova_oslomsg_notify_setup_host }}"
    _oslomsg_notify_userid: "{{ nova_oslomsg_notify_userid }}"
    _oslomsg_notify_password: "{{ nova_oslomsg_notify_password }}"
    _oslomsg_notify_vhost: "{{ nova_oslomsg_notify_vhost }}"
    _oslomsg_notify_transport: "{{ nova_oslomsg_notify_transport }}"
    _oslomsg_configure_notify: "{{ (nova_ceilometer_enabled | bool) or (nova_designate_enabled | bool) }}"
  tags:
    - common-mq
    - nova-config

- include_tasks: nova_db_setup.yml
  when:
    - "nova_services['nova-conductor']['group'] in group_names"
    - "inventory_hostname == ((groups['nova_conductor'] | intersect(ansible_play_hosts)) | list)[0]"
  tags:
    - nova-config

- include_tasks: nova_uwsgi.yml
  tags:
    - nova-config

- name: Run the systemd service role
  include_role:
    name: systemd_service
    private: true
  vars:
    systemd_user_name: "{{ nova_system_user_name }}"
    systemd_group_name: "{{ nova_system_group_name }}"
    systemd_tempd_prefix: openstack
    systemd_slice_name: nova
    systemd_lock_path: /var/lock/nova
    systemd_CPUAccounting: true
    systemd_BlockIOAccounting: true
    systemd_MemoryAccounting: true
    systemd_TasksAccounting: true
    systemd_services:
      - service_name: "{{ service_var.service_name }}"
        enabled: yes
        state: started
        execstarts: "{{ service_var.execstarts }}"
        execreloads: "{{ service_var.execreloads | default([]) }}"
        config_overrides: "{{ service_var.init_config_overrides }}"
  with_items: "{{ filtered_nova_services }}"
  loop_control:
    loop_var: service_var
  tags:
    - nova-config
    - systemd-service

- include_tasks: nova_compute.yml
  when:
    - "nova_services['nova-compute']['group'] in group_names"
  tags:
    - nova-compute

- name: Include ceph_client role
  include_role:
    name: ceph_client
  vars:
    openstack_service_system_user: "{{ nova_system_user_name }}"
    openstack_service_venv_bin: "{{ nova_bin }}"
  when:
    - "nova_services['nova-compute']['group'] in group_names"
    - "(nova_libvirt_images_rbd_pool is defined) or
       (nova_cinder_rbd_inuse | bool)"
  tags:
    - ceph

- name: Flush handlers
  meta: flush_handlers

# We delegate this back to the conductor because that is
# where we want to isolate the clouds.yaml configuration,
# rather than have it implemented on all compute nodes.
- import_tasks: nova_compute_wait.yml
  delegate_to: "{{ first_conductor }}"
  when:
    - "nova_services['nova-compute']['group'] in group_names"
    - "nova_discover_hosts_in_cells_interval | int < 1"
  vars:
    first_conductor: "{{ groups[nova_services['nova-conductor']['group']][0] }}"
    compute_host_to_wait_for: "{{ ansible_nodename }}"
  tags:
    - nova-config

# We have to delegate this back to the conductor
# because the compute hosts do not have access to
# the database connection string and therefore
# cannot run nova-manage.
- import_tasks: nova_db_post_setup.yml
  delegate_to: "{{ first_conductor }}"
  run_once: true
  when:
    - "nova_services['nova-compute']['group'] in group_names"
  vars:
    first_conductor: "{{ groups[nova_services['nova-conductor']['group']][0] }}"
  tags:
    - nova-config