openstack-ansible/playbooks/os-cinder-install.yml
Kevin Carter 3e84d1e36d Remove the "max_fail_percentage" option
This option can cause silent failures which are confusing and hard to
track down. While the intention of this option was to allow large scale
deployments to succeed in cases where a single node fails due to
transient issues, it has produced more confusion than it has solved.
This change removes the option from all production playbooks.
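
For reference, the removed option sat at the play level, along these
lines (illustrative values, not the exact lines removed):

    - name: Install cinder API services
      hosts: cinder_api
      max_fail_percentage: 20
      ...

With such a setting, Ansible only aborts the play once more than 20% of
the hosts in a batch have failed; failures below that threshold are
tolerated and the run continues, which is what made them easy to miss.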

Change-Id: I1dcbbf5bc8cc66f11dd8ddc22d2a177c5c0f31f1
Signed-off-by: Kevin Carter <kevin.carter@rackspace.com>
(cherry picked from commit c2743f5cca)
Signed-off-by: Kevin Carter <kevin.carter@rackspace.com>
2018-03-01 09:38:13 +00:00

---
# Copyright 2014, Rackspace US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

- name: Prepare MQ/DB services
  hosts: cinder_all
  gather_facts: no
  user: root
  environment: "{{ deployment_environment_variables | default({}) }}"
  tags:
    - cinder
  tasks:
    - name: Configure rabbitmq vhost/user
      include: common-tasks/rabbitmq-vhost-user.yml
      vars:
        user: "{{ cinder_rabbitmq_userid }}"
        password: "{{ cinder_rabbitmq_password }}"
        vhost: "{{ cinder_rabbitmq_vhost }}"
        _rabbitmq_host_group: "{{ cinder_rabbitmq_host_group }}"
      when:
        - "groups[cinder_rabbitmq_host_group] | length > 0"
      run_once: yes

    - name: Configure rabbitmq vhost/user (telemetry)
      include: common-tasks/rabbitmq-vhost-user.yml
      vars:
        user: "{{ cinder_rabbitmq_telemetry_userid }}"
        password: "{{ cinder_rabbitmq_telemetry_password }}"
        vhost: "{{ cinder_rabbitmq_telemetry_vhost }}"
        _rabbitmq_host_group: "{{ cinder_rabbitmq_telemetry_host_group }}"
      when:
        - "cinder_ceilometer_enabled | bool"
        - "groups[cinder_rabbitmq_telemetry_host_group] is defined"
        - "groups[cinder_rabbitmq_telemetry_host_group] | length > 0"
        - "groups[cinder_rabbitmq_telemetry_host_group] != groups[cinder_rabbitmq_host_group]"
      run_once: yes

    - name: Configure MySQL user
      include: common-tasks/mysql-db-user.yml
      vars:
        user_name: "{{ cinder_galera_user }}"
        password: "{{ cinder_container_mysql_password }}"
        login_host: "{{ cinder_galera_address }}"
        db_name: "{{ cinder_galera_database }}"
      run_once: yes
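
# A note on the rollout pattern below: each install play is serialised via
# a list value such as ['1', '100%'], which tells Ansible to run the first
# batch against a single canary host and, if that succeeds, the remaining
# hosts in one batch. The host patterns use exclusions
# (e.g. "cinder_volume:!cinder_scheduler:!cinder_api") so that hosts running
# several cinder services are only processed by the first play that
# matches them.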
- name: Install cinder scheduler services
  include: common-playbooks/cinder.yml
  vars:
    cinder_hosts: "cinder_scheduler:!cinder_api"
    cinder_serial: "{{ cinder_scheduler_serial | default(['1', '100%']) }}"

- name: Install cinder volume services
  include: common-playbooks/cinder.yml
  vars:
    cinder_hosts: "cinder_volume:!cinder_scheduler:!cinder_api"
    cinder_serial: "{{ cinder_backend_serial | default(['1', '100%']) }}"

- name: Install cinder backup services
  include: common-playbooks/cinder.yml
  vars:
    cinder_hosts: "cinder_backup:!cinder_volume:!cinder_scheduler:!cinder_api"
    cinder_serial: "{{ cinder_backend_serial | default(['1', '100%']) }}"

- name: Install cinder API services
  include: common-playbooks/cinder.yml
  vars:
    cinder_hosts: "cinder_api"
    cinder_serial: "{{ cinder_api_serial | default(['1', '100%']) }}"

# These facts are set against the deployment host to ensure that
# they are fast to access. This is done in preference to setting
# them against each target as the hostvars extraction will take
# a long time if executed against a large inventory.
- name: Refresh local facts after all software changes are made
  hosts: cinder_all
  gather_facts: no
  user: root
  environment: "{{ deployment_environment_variables | default({}) }}"
  tags:
    - cinder
  tasks:
    - name: refresh local facts
      setup:
        filter: ansible_local
        gather_subset: "!all"
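
    # The setup call above deliberately limits gathering: "gather_subset:
    # !all" skips the expensive hardware/network fact collection, and
    # "filter: ansible_local" keeps only the local facts read from
    # /etc/ansible/facts.d on each host, which is where this playbook
    # expects the cinder deployment state to be recorded.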

    # This variable contains the values of the local fact set for the cinder
    # venv tag for all hosts in the 'cinder_all' host group.
    - name: Gather software version list
      set_fact:
        cinder_all_software_versions: "{{ (groups['cinder_all'] | map('extract', hostvars, ['ansible_local', 'openstack_ansible', 'cinder', 'venv_tag'])) | list }}"
      delegate_to: localhost
      run_once: yes
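
    # As an illustration (the tag values here are hypothetical), with three
    # cinder hosts that have all been deployed with the same venv tag, the
    # extraction above yields:
    #   cinder_all_software_versions: ['16.0.5', '16.0.5', '16.0.5']
    # A host on which the cinder software has not yet been deployed
    # contributes an undefined element to the list instead.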

    # This variable outputs a boolean value which is True when
    # cinder_all_software_versions contains a list of defined
    # values. If they are not defined, it means that not all
    # hosts have their software deployed yet.
    - name: Set software deployed fact
      set_fact:
        cinder_all_software_deployed: "{{ (cinder_all_software_versions | select('defined')) | list == cinder_all_software_versions }}"
      delegate_to: localhost
      run_once: yes
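
    # Continuing the hypothetical example: for ['16.0.5', undefined,
    # '16.0.5'] the select('defined') filter drops the undefined element,
    # the filtered list no longer equals the original, and
    # cinder_all_software_deployed evaluates to False.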

    # This variable outputs a boolean when all the values in
    # cinder_all_software_versions are the same and the software
    # has been deployed to all hosts in the group.
    - name: Set software updated fact
      set_fact:
        cinder_all_software_updated: "{{ ((cinder_all_software_versions | unique) | length == 1) and (cinder_all_software_deployed | bool) }}"
      delegate_to: localhost
      run_once: yes
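
    # With the same hypothetical values: ['16.0.5', '16.0.5'] | unique
    # collapses to a single element, so the fact is True, whereas a mixed
    # list such as ['16.0.4', '16.0.5'] keeps two elements and the fact is
    # False, which stops the restart plays below from acting mid-upgrade.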

- name: Restart cinder agents to ensure new RPC object version is used
  hosts: cinder_backup,cinder_volume,cinder_scheduler
  gather_facts: no
  serial: "{{ cinder_backend_serial | default(['1', '100%']) }}"
  user: root
  environment: "{{ deployment_environment_variables | default({}) }}"
  tags:
    - cinder
  tasks:
    - name: Execute cinder service reload
      include: common-tasks/restart-service.yml
      vars:
        service_name: "{{ item }}"
        service_action: "reloaded"
        service_fact: "cinder"
      with_items:
        - "cinder-scheduler"
        - "cinder-backup"
        - "cinder-volume"
      when:
        - "cinder_all_software_updated | bool"
        - "ansible_local['openstack_ansible']['cinder']['need_service_restart'] | bool"

- name: Restart cinder API to ensure new RPC object version is used
  hosts: cinder_api
  gather_facts: no
  serial: "{{ cinder_api_serial | default(['1', '100%']) }}"
  user: root
  environment: "{{ deployment_environment_variables | default({}) }}"
  tags:
    - cinder
  tasks:
    # In order to ensure that the service restart does not
    # cause an unexpected outage, we drain the load balancer
    # back end for this container.
    - include: common-tasks/haproxy-endpoint-manage.yml
      vars:
        haproxy_state: disabled
      when:
        - "cinder_all_software_updated | bool"
        - "ansible_local['openstack_ansible']['cinder']['need_service_restart'] | bool"
        - "groups['cinder_api'] | length > 1"

    - name: Execute cinder service restart
      include: common-tasks/restart-service.yml
      vars:
        service_name: "cinder-api"
        service_action: "restarted"
        service_fact: "cinder"
      when:
        - "cinder_all_software_updated | bool"
        - "ansible_local['openstack_ansible']['cinder']['need_service_restart'] | bool"

    # Now that service restart is done, we can set
    # the load balancer back end for this container
    # to available again.
    - include: common-tasks/haproxy-endpoint-manage.yml
      vars:
        haproxy_state: enabled
      when: "groups['cinder_api'] | length > 1"

- name: Perform online database migrations
  hosts: cinder_api[0]
  gather_facts: no
  user: root
  environment: "{{ deployment_environment_variables | default({}) }}"
  tags:
    - cinder
  tasks:
    - name: Perform online data migrations
      command: "{{ cinder_bin }}/cinder-manage db online_data_migrations"
      become: yes
      become_user: "{{ cinder_system_user_name }}"
      when:
        - "cinder_all_software_updated | bool"
        - "ansible_local['openstack_ansible']['cinder']['need_online_data_migrations'] | bool"
      changed_when: false
      register: data_migrations

    - name: Disable the online migrations requirement
      ini_file:
        dest: "/etc/ansible/facts.d/openstack_ansible.fact"
        section: cinder
        option: need_online_data_migrations
        value: False
      when:
        - data_migrations | succeeded
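
    # After this task runs, the local fact file on the host contains a
    # stanza along these lines (an illustrative rendering of the ini_file
    # result):
    #   [cinder]
    #   need_online_data_migrations = False
    # so subsequent runs of this playbook skip the migration command until
    # something sets the flag back to true, presumably the cinder role
    # during the next upgrade.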