kolla-ansible/ansible/roles/ceph/tasks/bootstrap_osds.yml

---
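# Run the custom find_disks module inside the kolla_toolbox container to
# discover disks labelled with the OSD bootstrap partition-name prefix.
# The ansible-in-docker output is plain text, so changed/failed status is
# parsed out of stdout rather than taken from the task result.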
- name: Looking up disks to bootstrap for Ceph OSDs
  become: true
  command: docker exec -t kolla_toolbox sudo -E ansible localhost
    -m find_disks
    -a "partition_name={{ partition_name_osd_bootstrap }} match_mode='prefix' use_udev={{ kolla_ceph_use_udev }}"
  register: osd_lookup
  changed_when: osd_lookup.stdout.find('localhost | SUCCESS => ') != -1 and (osd_lookup.stdout.split('localhost | SUCCESS => ')[1]|from_json).changed
  failed_when: osd_lookup.stdout.split()[2] != 'SUCCESS'
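
# The toolbox output embeds JSON after 'localhost | SUCCESS => '; extract the
# disks list from it and store it as a fact for the tasks below.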
- name: Parsing disk info for Ceph OSDs
  set_fact:
    osds_bootstrap: "{{ (osd_lookup.stdout.split('localhost | SUCCESS => ')[1]|from_json).disks|from_json }}"
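
# Repeat the lookup for disks labelled with the cache-tier bootstrap prefix.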
- name: Looking up disks to bootstrap for Ceph Cache OSDs
  become: true
  command: docker exec -t kolla_toolbox sudo -E ansible localhost
    -m find_disks
    -a "partition_name={{ partition_name_cache_bootstrap }} match_mode='prefix' use_udev={{ kolla_ceph_use_udev }}"
  register: osd_cache_lookup
  changed_when: osd_cache_lookup.stdout.find('localhost | SUCCESS => ') != -1 and (osd_cache_lookup.stdout.split('localhost | SUCCESS => ')[1]|from_json).changed
  failed_when: osd_cache_lookup.stdout.split()[2] != 'SUCCESS'
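
# Extract the cache-tier disk list into its own fact.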
- name: Parsing disk info for Ceph Cache OSDs
  set_fact:
    osds_cache_bootstrap: "{{ (osd_cache_lookup.stdout.split('localhost | SUCCESS => ')[1]|from_json).disks|from_json }}"
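
# Safety check: if a data disk with a co-located journal already carries more
# than one partition, pause for operator confirmation before it is wiped.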
- name: Prompting before wiping existing partitions
  pause:
    prompt: |
      WARNING: It seems {{ item.device }} is marked to be wiped and partitioned for Ceph data and
      a co-located journal, but appears to contain other existing partitions (>1).
      If you are sure you want this disk to be *wiped* for use with Ceph, press enter.
      Otherwise, press Ctrl-C, then 'A'. (You can disable this check by setting
      ceph_osd_wipe_disk: 'yes-i-really-really-mean-it' within globals.yml)
  with_items: "{{ osds_bootstrap|default([]) }}"
  when:
    - not item.external_journal | bool
    # Skip devices absent from Ansible facts (e.g. loopback devices); there is
    # nothing to warn the user about in that case.
    - item.device.split('/')[2] in ansible_devices
    - ansible_devices[item.device.split('/')[2]].partitions|count > 1
    - ceph_osd_wipe_disk != "yes-i-really-really-mean-it"
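
# Run a one-shot bootstrap container per discovered disk. The environment
# below tells the ceph-osd image how to partition and prepare the device:
# the OSD_*/JOURNAL_* variables cover the filestore layout, and the OSD_BS_*
# variables cover the bluestore block/WAL/DB layout.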
- name: Bootstrapping Ceph OSDs
  become: true
  kolla_docker:
    action: "start_container"
    common_options: "{{ docker_common_options }}"
    detach: False
    environment:
      KOLLA_BOOTSTRAP:
      KOLLA_CONFIG_STRATEGY: "{{ config_strategy }}"
      OSD_DEV: "{{ item.1.device }}"
      OSD_PARTITION: "{{ item.1.partition }}"
      OSD_PARTITION_NUM: "{{ item.1.partition_num }}"
      JOURNAL_DEV: "{{ item.1.journal_device }}"
      JOURNAL_PARTITION: "{{ item.1.journal }}"
      JOURNAL_PARTITION_NUM: "{{ item.1.journal_num }}"
      USE_EXTERNAL_JOURNAL: "{{ item.1.external_journal | bool }}"
      OSD_FILESYSTEM: "{{ ceph_osd_filesystem }}"
      OSD_INITIAL_WEIGHT: "{{ osd_initial_weight }}"
      HOSTNAME: "{{ ceph_osd_hostname }}"
      OSD_STORETYPE: "{{ ceph_osd_store_type }}"
      OSD_BS_DEV: "{{ item.1.device | default('') }}"
      OSD_BS_LABEL: "{{ item.1.partition_label | default('') }}"
      OSD_BS_PARTNUM: "{{ item.1.partition_num | default('') }}"
      OSD_BS_BLK_DEV: "{{ item.1.bs_blk_device | default('') }}"
      OSD_BS_BLK_LABEL: "{{ item.1.bs_blk_label | default('') }}"
      OSD_BS_BLK_PARTNUM: "{{ item.1.bs_blk_partition_num | default('') }}"
      OSD_BS_WAL_DEV: "{{ item.1.bs_wal_device | default('') }}"
      OSD_BS_WAL_LABEL: "{{ item.1.bs_wal_label | default('') }}"
      OSD_BS_WAL_PARTNUM: "{{ item.1.bs_wal_partition_num | default('') }}"
      OSD_BS_DB_DEV: "{{ item.1.bs_db_device | default('') }}"
      OSD_BS_DB_LABEL: "{{ item.1.bs_db_label | default('') }}"
      OSD_BS_DB_PARTNUM: "{{ item.1.bs_db_partition_num | default('') }}"
    image: "{{ ceph_osd_image_full }}"
    labels:
      BOOTSTRAP:
    name: "bootstrap_osd_{{ item.0 }}"
    privileged: True
    restart_policy: no
    volumes:
      - "{{ node_config_directory }}/ceph-osd/:{{ container_config_directory }}/:ro"
      - "/etc/localtime:/etc/localtime:ro"
      - "/dev/:/dev/"
      - "kolla_logs:/var/log/kolla/"
  with_indexed_items: "{{ osds_bootstrap|default([]) }}"
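
# Same confirmation prompt as above, applied to the cache-tier disks.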
- name: Prompting before wiping existing partitions
  pause:
    prompt: |
      WARNING: It seems {{ item.device }} is marked to be wiped and partitioned for Ceph data and
      a co-located journal, but appears to contain other existing partitions (>1).
      If you are sure you want this disk to be *wiped* for use with Ceph, press enter.
      Otherwise, press Ctrl-C, then 'A'. (You can disable this check by setting
      ceph_osd_wipe_disk: 'yes-i-really-really-mean-it' within globals.yml)
  with_items: "{{ osds_cache_bootstrap|default([]) }}"
  when:
    - not item.external_journal | bool
    # Mirror the guard on the data-disk prompt above: skip devices absent from
    # Ansible facts (e.g. loopbacks), since the next condition would otherwise
    # fail on them.
    - item.device.split('/')[2] in ansible_devices
    - ansible_devices[item.device.split('/')[2]].partitions|count > 1
    - ceph_osd_wipe_disk != "yes-i-really-really-mean-it"
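
# Bootstrap the cache-tier OSDs. Identical to the task above except for the
# CEPH_CACHE environment flag and the container name.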
- name: Bootstrapping Ceph Cache OSDs
  become: true
  kolla_docker:
    action: "start_container"
    common_options: "{{ docker_common_options }}"
    detach: False
    environment:
      KOLLA_BOOTSTRAP:
      KOLLA_CONFIG_STRATEGY: "{{ config_strategy }}"
      CEPH_CACHE:
      OSD_DEV: "{{ item.1.device }}"
      OSD_PARTITION: "{{ item.1.partition }}"
      OSD_PARTITION_NUM: "{{ item.1.partition_num }}"
      JOURNAL_DEV: "{{ item.1.journal_device }}"
      JOURNAL_PARTITION: "{{ item.1.journal }}"
      JOURNAL_PARTITION_NUM: "{{ item.1.journal_num }}"
      USE_EXTERNAL_JOURNAL: "{{ item.1.external_journal | bool }}"
      OSD_FILESYSTEM: "{{ ceph_osd_filesystem }}"
      OSD_INITIAL_WEIGHT: "{{ osd_initial_weight }}"
      HOSTNAME: "{{ ceph_osd_hostname }}"
      OSD_STORETYPE: "{{ ceph_osd_store_type }}"
      OSD_BS_DEV: "{{ item.1.device | default('') }}"
      OSD_BS_LABEL: "{{ item.1.partition_label | default('') }}"
      OSD_BS_PARTNUM: "{{ item.1.partition_num | default('') }}"
      OSD_BS_BLK_DEV: "{{ item.1.bs_blk_device | default('') }}"
      OSD_BS_BLK_LABEL: "{{ item.1.bs_blk_label | default('') }}"
      OSD_BS_BLK_PARTNUM: "{{ item.1.bs_blk_partition_num | default('') }}"
      OSD_BS_WAL_DEV: "{{ item.1.bs_wal_device | default('') }}"
      OSD_BS_WAL_LABEL: "{{ item.1.bs_wal_label | default('') }}"
      OSD_BS_WAL_PARTNUM: "{{ item.1.bs_wal_partition_num | default('') }}"
      OSD_BS_DB_DEV: "{{ item.1.bs_db_device | default('') }}"
      OSD_BS_DB_LABEL: "{{ item.1.bs_db_label | default('') }}"
      OSD_BS_DB_PARTNUM: "{{ item.1.bs_db_partition_num | default('') }}"
    image: "{{ ceph_osd_image_full }}"
    labels:
      BOOTSTRAP:
    name: "bootstrap_osd_cache_{{ item.0 }}"
    privileged: True
    restart_policy: no
    volumes:
      - "{{ node_config_directory }}/ceph-osd/:{{ container_config_directory }}/:ro"
      - "/etc/localtime:/etc/localtime:ro"
      - "/dev/:/dev/"
      - "kolla_logs:/var/log/kolla/"
  with_indexed_items: "{{ osds_cache_bootstrap|default([]) }}"