Automate setting up a cache tier

Introduces a new flag, ceph_enable_cache, to bootstrap cache devices and set up a cache tier on top of the Ceph pools

DocImpact
Partially-Implements: blueprint ceph-improvements

Change-Id: I09b5a0d5c61b3465237e5f01dc10120725561cd3
Sam Yaple 2015-10-18 20:34:17 +00:00
parent 1d60be3770
commit b27880aa77
11 changed files with 258 additions and 32 deletions


@@ -173,6 +173,12 @@ cinder_volume_driver: "{{ 'ceph' if enable_ceph | bool else 'lvm' }}"
###################
# Ceph options
###################
# Ceph can be set up with a cache tier to improve performance. To use the cache
# you must provide disks separate from those used for the OSDs
ceph_enable_cache: "no"
# Valid options are [ forward, none, writeback ]
ceph_cache_mode: "writeback"
# Using erasure-coded pools requires setting up a cache tier
# Valid options are [ erasure, replicated ]
ceph_pool_type: "replicated"
@@ -181,3 +187,7 @@ ceph_cinder_pool_name: "volumes"
ceph_cinder_backup_pool_name: "backups"
ceph_glance_pool_name: "images"
ceph_nova_pool_name: "vms"
ceph_erasure_profile: "k=4 m=2 ruleset-failure-domain=host"
ceph_rule: "default host {{ 'indep' if ceph_pool_type == 'erasure' else 'firstn' }}"
ceph_cache_rule: "cache host firstn"
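
For reference, with the defaults above, the tasks added later in this change expand to roughly the following Ceph CLI calls, run inside the ceph_mon container (a sketch of the generated commands, not part of the diff):

    # Erasure profile (only created when ceph_pool_type is "erasure")
    docker exec ceph_mon ceph osd erasure-code-profile set erasure-profile k=4 m=2 ruleset-failure-domain=host
    # CRUSH rules built from ceph_rule and ceph_cache_rule
    docker exec ceph_mon ceph osd crush rule create-simple disks default host firstn
    docker exec ceph_mon ceph osd crush rule create-simple cache cache host firstn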


@@ -5,6 +5,12 @@
register: osds_bootstrap
when: inventory_hostname in groups['ceph-osd']
- name: Looking up disks to bootstrap for Ceph cache
find_disks:
partition_name: 'KOLLA_CEPH_OSD_CACHE_BOOTSTRAP'
register: osds_cache_bootstrap
when: inventory_hostname in groups['ceph-osd']
- name: Bootstrapping Ceph OSDs
docker:
tty: True
@@ -47,3 +53,47 @@
state: absent
with_indexed_items: osds_bootstrap['disks']|default([])
when: inventory_hostname in groups['ceph-osd']
- name: Bootstrapping Ceph Cache OSDs
docker:
tty: True
net: host
pull: "{{ docker_pull_policy }}"
restart_policy: "no"
state: reloaded
registry: "{{ docker_registry }}"
username: "{{ docker_registry_username }}"
password: "{{ docker_registry_password }}"
insecure_registry: "{{ docker_insecure_registry }}"
privileged: True
name: "bootstrap_osd_cache_{{ item.0 }}"
image: "{{ ceph_osd_image_full }}"
volumes:
- "{{ node_config_directory }}/ceph-osd/:{{ container_config_directory }}/:ro"
- "/dev/:/dev/"
env:
KOLLA_BOOTSTRAP:
KOLLA_CONFIG_STRATEGY: "{{ config_strategy }}"
CEPH_CACHE:
OSD_DEV: "{{ item.1.device }}"
OSD_INITIAL_WEIGHT: "{{ osd_initial_weight }}"
with_indexed_items: osds_cache_bootstrap['disks']|default([])
when: inventory_hostname in groups['ceph-osd']
# https://github.com/ansible/ansible-modules-core/pull/1031
- name: Waiting for bootstrap containers to exit
command: docker wait "bootstrap_osd_cache_{{ item.0 }}"
register: bootstrap_result
run_once: True
failed_when: bootstrap_result.stdout != "0"
with_indexed_items: osds_cache_bootstrap['disks']|default([])
when: inventory_hostname in groups['ceph-osd']
- name: Cleaning up bootstrap containers
docker:
tty: True
name: "bootstrap_osd_cache_{{ item.0 }}"
image: "{{ ceph_osd_image_full }}"
state: absent
with_indexed_items: osds_cache_bootstrap['disks']|default([])
when: inventory_hostname in groups['ceph-osd']
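
The cache bootstrap task finds its devices by GPT partition name, mirroring the convention used for the regular OSD bootstrap disks. A hedged preparation sketch (the device /dev/sdb is a placeholder; adapt to your hardware):

    # Label a spare disk so find_disks reports it as a cache device
    parted /dev/sdb -s -- mklabel gpt mkpart KOLLA_CEPH_OSD_CACHE_BOOTSTRAP 1 -1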


@@ -0,0 +1,80 @@
---
# TODO(SamYaple): Improve failed_when and changed_when tests if possible
- name: Creating ceph erasure profile
command: docker exec ceph_mon ceph osd erasure-code-profile set erasure-profile {{ ceph_erasure_profile }}
delegate_to: "{{ groups['ceph-mon'][0] }}"
changed_when: False
failed_when: False
run_once: True
when: pool_type == "erasure"
- name: Creating ceph ruleset
command: docker exec ceph_mon ceph osd crush rule create-erasure disks erasure-profile
delegate_to: "{{ groups['ceph-mon'][0] }}"
changed_when: False
failed_when: False
run_once: True
when: pool_type == "erasure"
- name: Creating ceph ruleset
command: docker exec ceph_mon ceph osd crush rule create-simple disks {{ ceph_rule }}
delegate_to: "{{ groups['ceph-mon'][0] }}"
changed_when: False
failed_when: False
run_once: True
when: pool_type == "replicated"
- name: Creating ceph pool
command: docker exec ceph_mon ceph osd pool create {{ pool_name }} 128 128 {{ pool_type }} {{ 'erasure-profile' if pool_type == 'erasure' else '' }} disks
delegate_to: "{{ groups['ceph-mon'][0] }}"
changed_when: False
failed_when: False
run_once: True
- name: Creating ceph ruleset for cache
command: docker exec ceph_mon ceph osd crush rule create-simple cache {{ ceph_cache_rule }}
delegate_to: "{{ groups['ceph-mon'][0] }}"
changed_when: False
failed_when: False
run_once: True
when: "{{ ceph_enable_cache | bool }}"
- name: Creating ceph pool for cache
command: docker exec ceph_mon ceph osd pool create {{ pool_name }}-cache 128 128 replicated cache
delegate_to: "{{ groups['ceph-mon'][0] }}"
changed_when: False
failed_when: False
run_once: True
when: "{{ ceph_enable_cache | bool }}"
- name: Adding cache to pool
command: docker exec ceph_mon ceph osd tier add {{ pool_name }} {{ pool_name }}-cache
delegate_to: "{{ groups['ceph-mon'][0] }}"
changed_when: False
failed_when: False
run_once: True
when: "{{ ceph_enable_cache | bool }}"
- name: Setting cache-mode
command: docker exec ceph_mon ceph osd tier cache-mode {{ pool_name }}-cache {{ cache_mode }}
delegate_to: "{{ groups['ceph-mon'][0] }}"
changed_when: False
failed_when: False
run_once: True
when: "{{ ceph_enable_cache | bool }}"
- name: Setting cache overlay for pool
command: docker exec ceph_mon ceph osd tier set-overlay {{ pool_name }} {{ pool_name }}-cache
delegate_to: "{{ groups['ceph-mon'][0] }}"
changed_when: False
failed_when: False
run_once: True
when: "{{ ceph_enable_cache | bool }}"
- name: Setting cache hit_set_type
command: docker exec ceph_mon ceph osd pool set {{ pool_name }}-cache hit_set_type bloom
delegate_to: "{{ groups['ceph-mon'][0] }}"
changed_when: False
failed_when: False
run_once: True
when: "{{ ceph_enable_cache | bool }}"


@@ -1,6 +1,24 @@
---
project_name: "cinder"
####################
# Ceph
####################
ceph_cinder_pool_type: "{{ ceph_pool_type }}"
ceph_cinder_cache_mode: "{{ ceph_cache_mode }}"
ceph_cinder_backup_pool_type: "{{ ceph_pool_type }}"
ceph_cinder_backup_cache_mode: "{{ ceph_cache_mode }}"
# Due to Ansible issues with include, you cannot override these variables. Please
# override the variables they reference instead.
cinder_pool_name: "{{ ceph_cinder_pool_name }}"
cinder_pool_type: "{{ ceph_cinder_pool_type }}"
cinder_cache_mode: "{{ ceph_cinder_cache_mode }}"
cinder_backup_pool_name: "{{ ceph_cinder_backup_pool_name }}"
cinder_backup_pool_type: "{{ ceph_cinder_backup_pool_type }}"
cinder_backup_cache_mode: "{{ ceph_cinder_backup_cache_mode }}"
####################
# Database
####################


@@ -17,21 +17,17 @@
- "cinder-backup"
when: inventory_hostname in groups['cinder-volume']
# TODO(SamYaple): Improve failed_when and changed_when tests
- name: Creating ceph pool for cinder
command: docker exec ceph_mon ceph osd pool create {{ ceph_cinder_pool_name }} 128 {{ ceph_pool_type }}
delegate_to: "{{ groups['ceph-mon'][0] }}"
changed_when: False
failed_when: False
run_once: True
- include: ../../ceph_pools.yml
vars:
pool_name: "{{ cinder_pool_name }}"
pool_type: "{{ cinder_pool_type }}"
cache_mode: "{{ cinder_cache_mode }}"
# TODO(SamYaple): Improve failed_when and changed_when tests
- name: Creating ceph pool for cinder-backup
command: docker exec ceph_mon ceph osd pool create {{ ceph_cinder_backup_pool_name }} 128 {{ ceph_pool_type }}
delegate_to: "{{ groups['ceph-mon'][0] }}"
changed_when: False
failed_when: False
run_once: True
- include: ../../ceph_pools.yml
vars:
pool_name: "{{ cinder_backup_pool_name }}"
pool_type: "{{ cinder_backup_pool_type }}"
cache_mode: "{{ cinder_backup_cache_mode }}"
# TODO(SamYaple): Improve failed_when and changed_when tests
- name: Pulling cephx keyring for cinder
@@ -40,6 +36,16 @@
delegate_to: "{{ groups['ceph-mon'][0] }}"
changed_when: False
run_once: True
when: not ceph_enable_cache | bool
# TODO(SamYaple): Improve failed_when and changed_when tests
- name: Pulling cephx keyring for cinder
command: docker exec ceph_mon ceph auth get-or-create client.cinder mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool={{ ceph_cinder_pool_name }}, allow rwx pool={{ ceph_cinder_pool_name }}-cache, allow rwx pool={{ ceph_nova_pool_name }}, allow rwx pool={{ ceph_nova_pool_name }}-cache, allow rx pool={{ ceph_glance_pool_name }}, allow rx pool={{ ceph_glance_pool_name }}-cache'
register: cephx_key_cinder
delegate_to: "{{ groups['ceph-mon'][0] }}"
changed_when: False
run_once: True
when: ceph_enable_cache | bool
# TODO(SamYaple): Improve failed_when and changed_when tests
- name: Pulling cephx keyring for cinder-backup
@@ -48,6 +54,16 @@
delegate_to: "{{ groups['ceph-mon'][0] }}"
changed_when: False
run_once: True
when: not ceph_enable_cache | bool
# TODO(SamYaple): Improve failed_when and changed_when tests
- name: Pulling cephx keyring for cinder-backup
command: docker exec ceph_mon ceph auth get-or-create client.cinder-backup mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool={{ ceph_cinder_backup_pool_name }}, allow rwx pool={{ ceph_cinder_backup_pool_name }}-cache'
register: cephx_key_cinder_backup
delegate_to: "{{ groups['ceph-mon'][0] }}"
changed_when: False
run_once: True
when: ceph_enable_cache | bool
- name: Pushing cephx keyring
copy:
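
One way to check the result by hand (not part of this change; a verification sketch):

    # Show the cinder key and its caps, including the -cache pools when enabled
    docker exec ceph_mon ceph auth get client.cinder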


@@ -1,6 +1,19 @@
---
project_name: "glance"
####################
# Ceph
####################
ceph_glance_pool_type: "{{ ceph_pool_type }}"
ceph_glance_cache_mode: "{{ ceph_cache_mode }}"
# Due to Ansible issues with include, you cannot override these variables. Please
# override the variables they reference instead.
glance_pool_name: "{{ ceph_glance_pool_name }}"
glance_pool_type: "{{ ceph_glance_pool_type }}"
glance_cache_mode: "{{ ceph_glance_cache_mode }}"
####################
# Database
####################


@@ -11,13 +11,11 @@
dest: "{{ node_config_directory }}/glance-api/ceph.conf"
when: inventory_hostname in groups['glance-api']
# TODO(SamYaple): Improve failed_when and changed_when tests
- name: Creating ceph pool for glance
command: docker exec ceph_mon ceph osd pool create {{ ceph_glance_pool_name }} 128 {{ ceph_pool_type }}
delegate_to: "{{ groups['ceph-mon'][0] }}"
changed_when: False
failed_when: False
run_once: True
- include: ../../ceph_pools.yml
vars:
pool_name: "{{ glance_pool_name }}"
pool_type: "{{ glance_pool_type }}"
cache_mode: "{{ glance_cache_mode }}"
# TODO(SamYaple): Improve failed_when and changed_when tests
- name: Pulling cephx keyring
@@ -26,6 +24,16 @@
delegate_to: "{{ groups['ceph-mon'][0] }}"
changed_when: False
run_once: True
when: not ceph_enable_cache | bool
# TODO(SamYaple): Improve failed_when and changed_when tests
- name: Pulling cephx keyring
command: docker exec ceph_mon ceph auth get-or-create client.glance mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool={{ ceph_glance_pool_name }}, allow rwx pool={{ ceph_glance_pool_name }}-cache'
register: cephx_key
delegate_to: "{{ groups['ceph-mon'][0] }}"
changed_when: False
run_once: True
when: ceph_enable_cache | bool
- name: Pushing cephx keyring
copy:


@@ -1,6 +1,19 @@
---
project_name: "nova"
####################
# Ceph
####################
ceph_nova_pool_type: "{{ ceph_pool_type }}"
ceph_nova_cache_mode: "{{ ceph_cache_mode }}"
# Due to Ansible issues with include, you cannot override these variables. Please
# override the variables they reference instead.
nova_pool_name: "{{ ceph_nova_pool_name }}"
nova_pool_type: "{{ ceph_nova_pool_type }}"
nova_cache_mode: "{{ ceph_nova_cache_mode }}"
####################
# Database
####################


@@ -17,21 +17,29 @@
- "nova-libvirt"
when: inventory_hostname in groups['compute']
# TODO(SamYaple): Improve failed_when and changed_when tests
- name: Creating ceph pool for vms
command: docker exec ceph_mon ceph osd pool create {{ ceph_nova_pool_name }} 128 {{ ceph_pool_type }}
delegate_to: "{{ groups['ceph-mon'][0] }}"
changed_when: False
failed_when: False
run_once: True
- include: ../../ceph_pools.yml
vars:
pool_name: "{{ nova_pool_name }}"
pool_type: "{{ nova_pool_type }}"
cache_mode: "{{ nova_cache_mode }}"
# TODO(SamYaple): Improve failed_when and changed_when tests
- name: Pulling cephx keyring for nova
command: docker exec ceph_mon ceph auth get-or-create client.nova mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool={{ ceph_volumes_pool_name }}, allow rwx pool={{ ceph_nova_pool_name }}, allow rx pool={{ ceph_glance_pool_name }}'
command: docker exec ceph_mon ceph auth get-or-create client.nova mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool={{ ceph_cinder_pool_name }}, allow rwx pool={{ ceph_nova_pool_name }}, allow rx pool={{ ceph_glance_pool_name }}'
register: cephx_key
delegate_to: "{{ groups['ceph-mon'][0] }}"
changed_when: False
run_once: True
when: not ceph_enable_cache | bool
# TODO(SamYaple): Improve failed_when and changed_when tests
- name: Pulling cephx keyring for nova
command: docker exec ceph_mon ceph auth get-or-create client.nova mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool={{ ceph_cinder_pool_name }}, allow rwx pool={{ ceph_cinder_pool_name }}-cache, allow rwx pool={{ ceph_nova_pool_name }}, allow rwx pool={{ ceph_nova_pool_name }}-cache, allow rx pool={{ ceph_glance_pool_name }}, allow rx pool={{ ceph_glance_pool_name }}-cache'
register: cephx_key
delegate_to: "{{ groups['ceph-mon'][0] }}"
changed_when: False
run_once: True
when: ceph_enable_cache | bool
# TODO(SamYaple): Improve failed_when and changed_when tests
- name: Pulling cephx keyring for libvirt


@@ -26,13 +26,17 @@ if [[ "${!KOLLA_BOOTSTRAP[@]}" ]]; then
ceph auth add "osd.${OSD_ID}" osd 'allow *' mon 'allow profile osd' -i "${OSD_DIR}/keyring"
umount "${OSD_PARTITION}"
if [[ "${!CEPH_CACHE[@]}" ]]; then
CEPH_ROOT_NAME=cache
fi
# These commands only need to be run once per host but are safe to run
# repeatedly. This can be improved later or if any problems arise.
ceph osd crush add-bucket "$(hostname)" host
ceph osd crush move "$(hostname)" root=default
ceph osd crush add-bucket "$(hostname)${CEPH_ROOT_NAME:+-${CEPH_ROOT_NAME}}" host
ceph osd crush move "$(hostname)${CEPH_ROOT_NAME:+-${CEPH_ROOT_NAME}}" root=${CEPH_ROOT_NAME:-default}
# Adding osd to crush map
ceph osd crush add "${OSD_ID}" "${OSD_INITIAL_WEIGHT}" host="$(hostname)"
ceph osd crush add "${OSD_ID}" "${OSD_INITIAL_WEIGHT}" host="$(hostname)${CEPH_ROOT_NAME:+-${CEPH_ROOT_NAME}}"
exit 0
fi
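
The ${CEPH_ROOT_NAME:+-${CEPH_ROOT_NAME}} expansion appends "-cache" to the host bucket name only when CEPH_CACHE is set (which sets CEPH_ROOT_NAME=cache), keeping cache OSDs under a separate CRUSH root. An illustration of the parameter expansion (plain bash, not from the diff):

    CEPH_ROOT_NAME=cache
    echo "host1${CEPH_ROOT_NAME:+-${CEPH_ROOT_NAME}}"   # prints: host1-cache
    unset CEPH_ROOT_NAME
    echo "host1${CEPH_ROOT_NAME:+-${CEPH_ROOT_NAME}}"   # prints: host1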


@@ -89,6 +89,12 @@ neutron_external_interface: "eth1"
###################
# Ceph options
###################
# Ceph can be set up with a cache tier to improve performance. To use the cache
# you must provide disks separate from those used for the OSDs
# ceph_enable_cache: "no"
# Valid options are [ forward, none, writeback ]
# ceph_cache_mode: writeback
# Using erasure-coded pools requires setting up a cache tier
# Valid options are [ erasure, replicated ]
# ceph_pool_type: "replicated"
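
To enable the feature, a deployment would uncomment and adjust these values in globals.yml, for example (a sketch; erasure pools are shown because they require the cache tier):

    ceph_enable_cache: "yes"
    ceph_cache_mode: "writeback"
    ceph_pool_type: "erasure"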