b878370a0b
Currently the playbooks do not allow Ceph to be configured as a backend for Cinder, Glance or Nova. This commit adds a new role called ceph_client to do the required configuration of the hosts and updates the service roles to include the required configuration file changes. This commit requires that a Ceph cluster already exists and does not make any changes to that cluster. ceph_client role, run on the OpenStack service hosts - configures the Ceph apt repo - installs any required Ceph dependencies - copies the ceph.conf file and appropriate keyring file to /etc/ceph - creates the necessary libvirt secrets os_glance role glance-api.conf will set the following variables for Ceph: - [DEFAULT]/show_image_direct_url - [glance_store]/stores - [glance_store]/rbd_store_pool - [glance_store]/rbd_store_user - [glance_store]/rbd_store_ceph_conf - [glance_store]/rbd_store_chunk_size os_nova role nova.conf will set the following variables for Ceph: - [libvirt]/rbd_user - [libvirt]/rbd_secret_uuid - [libvirt]/images_type - [libvirt]/images_rbd_pool - [libvirt]/images_rbd_ceph_conf - [libvirt]/inject_password - [libvirt]/inject_key - [libvirt]/inject_partition - [libvirt]/live_migration_flag os_cinder is not updated because ceph is defined as a backend and that is generated from a dictionary of the config, for an example backend config, see etc/openstack_deploy/openstack_user_config.yml.example pw-token-gen.py is updated so that variables ending in uuid are assigned a UUID. DocImpact Implements: blueprint ceph-block-devices Closes-Bug: #1455238 Change-Id: Ie484ce0bbb93adc53c30be32f291aa5058b20028
115 lines
3.8 KiB
YAML
---
# Copyright 2014, Rackspace US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Install and configure Nova across all nova hosts. Pre-tasks prepare LXC
# containers (device passthrough, log directories) before the service roles run.
- name: Installation and setup of Nova
  hosts: nova_all
  max_fail_percentage: 20
  user: root
  pre_tasks:
    # Order the rabbitmq server list deterministically per container so each
    # host prefers a different broker, spreading connection load.
    - name: Sort the rabbitmq servers
      dist_sort:
        value_to_lookup: "{{ container_name }}"
        ref_list: "{{ groups['nova_all'] }}"
        src_list: "{{ rabbitmq_servers }}"
      register: servers
    - name: Set rabbitmq servers
      set_fact:
        rabbitmq_servers: "{{ servers.sorted_list }}"
    # Pass the host's nbd devices into the compute container (containerized
    # computes only) so instances can use nbd-backed storage.
    - name: Add nbd devices to the compute
      shell: |
        for i in /dev/nbd*;do
          lxc-device -n {{ container_name }} add $i $i
        done
      failed_when: false
      register: device_add
      changed_when: >
        'added' in device_add.stdout.lower()
      delegate_to: "{{ physical_host }}"
      when: >
        inventory_hostname in groups['nova_compute'] and
        (is_metal == false or is_metal == "False")
      tags:
        - nova-kvm
        - nova-kvm-container-devices
    - name: Add net/tun device to the compute
      shell: |
        lxc-device -n {{ container_name }} add /dev/net/tun /dev/net/tun
      delegate_to: "{{ physical_host }}"
      when: >
        inventory_hostname in groups['nova_compute'] and
        (is_metal == false or is_metal == "False")
      tags:
        - nova-kvm
        - nova-kvm-container-devices
    # /dev/kvm is only needed when the virt driver is kvm.
    - name: Add kvm device to the compute
      shell: |
        lxc-device -n {{ container_name }} add /dev/kvm /dev/kvm
      delegate_to: "{{ physical_host }}"
      register: device_add
      failed_when: false
      changed_when: >
        'added' in device_add.stdout.lower()
      when: >
        inventory_hostname in groups['nova_compute'] and
        (is_metal == false or is_metal == "False") and
        nova_virt_type == 'kvm'
      tags:
        - nova-kvm
        - nova-kvm-container-devices
    # On metal hosts, keep logs under /openstack/log and symlink /var/log/nova
    # to it for aggregation.
    - name: Create log dir
      file:
        path: "{{ item.path }}"
        state: directory
      with_items:
        - { path: "/openstack/log/{{ inventory_hostname }}-nova" }
      when: is_metal == true or is_metal == "True"
      tags:
        - nova-logs
        - nova-log-dirs
    - name: Create log aggregation links
      file:
        src: "{{ item.src }}"
        dest: "{{ item.dest }}"
        state: "{{ item.state }}"
        force: "yes"
      with_items:
        - { src: "/openstack/log/{{ inventory_hostname }}-nova", dest: "/var/log/nova", state: "link" }
      when: is_metal == true or is_metal == "True"
      tags:
        - nova-logs
  roles:
    - { role: "os_nova", tags: [ "os-nova" ] }
    # ceph_client configures the hosts for an existing Ceph cluster (repo,
    # dependencies, ceph.conf/keyrings, libvirt secrets) — it does not deploy Ceph.
    - role: "ceph_client"
      openstack_service_system_user: "{{ nova_system_user_name }}"
      tags:
        - "nova-ceph-client"
        - "ceph-client"
    - { role: "openstack_openrc", tags: [ "openstack-openrc" ] }
    - role: "rsyslog_client"
      rsyslog_client_log_dir: "/var/log/nova"
      rsyslog_client_config_name: "99-nova-rsyslog-client.conf"
      tags:
        - "nova-rsyslog-client"
        - "rsyslog-client"
    - role: "system_crontab_coordination"
      tags:
        - "system-crontab-coordination"
  vars:
    nova_galera_address: "{{ internal_lb_vip_address }}"
    ansible_hostname: "{{ container_name }}"
    ansible_ssh_host: "{{ container_address }}"
    is_metal: "{{ properties.is_metal|default(false) }}"
    glance_host: "{{ internal_lb_vip_address }}"