add tag for ceph + storage node

parent 435298b792
commit 80f350b8f0

ansible/ceph-block-devices.yml (new file, 9 lines)
@@ -0,0 +1,9 @@
+---
+- name: Ensure Ceph disks are tagged
+  hosts: overcloud
+  tags:
+    - kolla-ceph
+  roles:
+    - role: stackhpc.parted-1-1
+    - role: kolla-ceph
+      when: kolla_enable_ceph | bool
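
For illustration only, not part of this commit: the play above only does useful
work when Ceph is enabled and the target hosts have Ceph disks defined. A
minimal sketch of the variables it ultimately consumes, assuming the usual
kayobe configuration layout; the file locations and device names are examples,
not defaults:

    # e.g. etc/kayobe/kolla.yml
    kolla_enable_ceph: true

    # e.g. etc/kayobe/storage.yml
    storage_ceph_disks:
      - osd: /dev/sdb        # OSD with an external journal on /dev/sdc
        journal: /dev/sdc
      - osd: /dev/sdd        # OSD with a co-located journal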

@@ -101,6 +101,16 @@ compute_lvm_group_data_lv_docker_volumes_size: 75%VG
 # Filesystem for docker volumes LVM backing volume. ext4 allows for shrinking.
 compute_lvm_group_data_lv_docker_volumes_fs: ext4
 
+###############################################################################
+# Compute node Ceph configuration.
+
+# List of Ceph disks.
+# The format is a list of dicts, e.g.:
+# - { osd: "/dev/sdb", journal: "/dev/sdc" }
+# - { osd: "/dev/sdd" }
+# The journal key is optional.
+compute_ceph_disks: []
+
 ###############################################################################
 # Compute node sysctl configuration.
 

@@ -111,6 +111,16 @@ controller_lvm_group_data_lv_docker_volumes_size: 75%VG
 # Filesystem for docker volumes LVM backing volume. ext4 allows for shrinking.
 controller_lvm_group_data_lv_docker_volumes_fs: ext4
 
+###############################################################################
+# Controller node Ceph configuration.
+
+# List of Ceph disks.
+# The format is a list of dicts, e.g.:
+# - { osd: "/dev/sdb", journal: "/dev/sdc" }
+# - { osd: "/dev/sdd" }
+# The journal key is optional.
+controller_ceph_disks: []
+
 ###############################################################################
 # Controller node sysctl configuration.
 

@@ -223,6 +223,10 @@ kolla_overcloud_inventory_custom_services:
 # concatenation of the top level, component, and service inventories.
 kolla_overcloud_inventory_custom:
 
+# List of groups mapped to the kolla storage group.
+kolla_overcloud_inventory_storage_groups:
+  - "storage"
+
 # Dict mapping from kolla-ansible groups to kayobe groups and variables. Each
 # item is a dict with the following items:
 # * groups: A list of kayobe ansible groups to map to this kolla-ansible group.

@@ -241,6 +245,9 @@ kolla_overcloud_inventory_top_level_group_map:
   monitoring:
     groups:
       - monitoring
+  storage:
+    groups:
+      - "{{ kolla_overcloud_inventory_storage_groups }}"
 
 # List of names of top level kolla-ansible groups. Any of these groups which
 # have no hosts mapped to them will be provided with an empty group definition.
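
For illustration only, not part of this commit: with the defaults above, only
the kayobe ``storage`` group is mapped into the kolla-ansible ``storage``
group. A hypothetical override that also maps controllers into it could look
like this:

    kolla_overcloud_inventory_storage_groups:
      - "storage"
      - "controllers"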

ansible/group_vars/all/storage (new file, 126 lines)
@@ -0,0 +1,126 @@
+---
+###############################################################################
+# Storage node configuration.
+
+# User with which to access the storage hosts via SSH during bootstrap, in
+# order to set up the Kayobe user account.
+storage_bootstrap_user: "{{ lookup('env', 'USER') }}"
+
+###############################################################################
+# Storage network interface configuration.
+
+# List of networks to which storage nodes are attached.
+storage_network_interfaces: >
+  {{ (storage_default_network_interfaces +
+      storage_extra_network_interfaces) | unique | list }}
+
+# List of default networks to which storage nodes are attached.
+storage_default_network_interfaces: >
+  {{ [provision_oc_net_name,
+      internal_net_name,
+      storage_mgmt_net_name,
+      storage_net_name] | unique | list }}
+
+# List of extra networks to which storage nodes are attached.
+storage_extra_network_interfaces: []
+
+###############################################################################
+# Storage node BIOS configuration.
+
+# Dict of storage BIOS options. Format is same as that used by stackhpc.drac
+# role.
+storage_bios_config: "{{ storage_bios_config_default | combine(storage_bios_config_extra) }}"
+
+# Dict of default storage BIOS options. Format is same as that used by
+# stackhpc.drac role.
+storage_bios_config_default: {}
+
+# Dict of additional storage BIOS options. Format is same as that used by
+# stackhpc.drac role.
+storage_bios_config_extra: {}
+
+###############################################################################
+# Storage node RAID configuration.
+
+# List of storage RAID volumes. Format is same as that used by stackhpc.drac
+# role.
+storage_raid_config: "{{ storage_raid_config_default + storage_raid_config_extra }}"
+
+# List of default storage RAID volumes. Format is same as that used by
+# stackhpc.drac role.
+storage_raid_config_default: []
+
+# List of additional storage RAID volumes. Format is same as that used by
+# stackhpc.drac role.
+storage_raid_config_extra: []
+
+###############################################################################
+# Storage node LVM configuration.
+
+# List of storage volume groups. See mrlesmithjr.manage-lvm role for
+# format.
+storage_lvm_groups: "{{ storage_lvm_groups_default + storage_lvm_groups_extra }}"
+
+# Default list of storage volume groups. See mrlesmithjr.manage-lvm role for
+# format.
+storage_lvm_groups_default:
+  - "{{ storage_lvm_group_data }}"
+
+# Additional list of storage volume groups. See mrlesmithjr.manage-lvm role
+# for format.
+storage_lvm_groups_extra: []
+
+# Storage LVM volume group for data. See mrlesmithjr.manage-lvm role for
+# format.
+storage_lvm_group_data:
+  vgname: data
+  disks: "{{ storage_lvm_group_data_disks | join(',') }}"
+  create: True
+  lvnames: "{{ storage_lvm_group_data_lvs }}"
+
+# List of disks for use by storage LVM data volume group. Default to an
+# invalid value to require configuration.
+storage_lvm_group_data_disks:
+  - changeme
+
+# List of LVM logical volumes for the data volume group.
+storage_lvm_group_data_lvs:
+  - "{{ storage_lvm_group_data_lv_docker_volumes }}"
+
+# Docker volumes LVM backing volume.
+storage_lvm_group_data_lv_docker_volumes:
+  lvname: docker-volumes
+  size: "{{ storage_lvm_group_data_lv_docker_volumes_size }}"
+  create: True
+  filesystem: "{{ storage_lvm_group_data_lv_docker_volumes_fs }}"
+  mount: True
+  mntp: /var/lib/docker/volumes
+
+# Size of docker volumes LVM backing volume.
+storage_lvm_group_data_lv_docker_volumes_size: 75%VG
+
+# Filesystem for docker volumes LVM backing volume. ext4 allows for shrinking.
+storage_lvm_group_data_lv_docker_volumes_fs: ext4
+
+###############################################################################
+# Storage node Ceph configuration.
+
+# List of Ceph disks.
+# The format is a list of dicts, e.g.:
+# - { osd: "/dev/sdb", journal: "/dev/sdc" }
+# - { osd: "/dev/sdd" }
+# The journal key is optional.
+storage_ceph_disks: []
+
+###############################################################################
+# Storage node sysctl configuration.
+
+# Dict of sysctl parameters to set.
+storage_sysctl_parameters: {}
+
+###############################################################################
+# Storage node user configuration.
+
+# List of users to create. This should be in a format accepted by the
+# singleplatform-eng.users role.
+storage_users: "{{ users_default }}"
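
For illustration only, not part of this commit: ``storage_lvm_group_data_disks``
deliberately defaults to the invalid value ``changeme``, so a deployment that
keeps the default data volume group on storage hosts must override it. A
hypothetical ``etc/kayobe/storage.yml`` override (device names are examples):

    storage_lvm_group_data_disks:
      - /dev/sde
      - /dev/sdf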

ansible/group_vars/compute/ceph (new file, 6 lines)
@@ -0,0 +1,6 @@
+---
+###############################################################################
+# Compute node Ceph configuration.
+
+# List of Ceph disks.
+ceph_disks: "{{ compute_ceph_disks }}"

ansible/group_vars/controllers/ceph (new file, 6 lines)
@@ -0,0 +1,6 @@
+---
+###############################################################################
+# Controller node Ceph configuration.
+
+# List of Ceph disks.
+ceph_disks: "{{ controller_ceph_disks }}"

ansible/group_vars/storage/ansible-user (new file, 7 lines)
@@ -0,0 +1,7 @@
+---
+# User with which to access the storage hosts via SSH.
+ansible_user: "{{ kayobe_ansible_user }}"
+
+# User with which to access the storage hosts before the kayobe_ansible_user
+# account has been created.
+bootstrap_user: "{{ storage_bootstrap_user }}"

ansible/group_vars/storage/bios (new file, 7 lines)
@@ -0,0 +1,7 @@
+---
+###############################################################################
+# Storage node BIOS configuration.
+
+# Dict of storage node BIOS options. Format is same as that used by
+# stackhpc.drac role.
+bios_config: "{{ storage_bios_config }}"

ansible/group_vars/storage/ceph (new file, 6 lines)
@@ -0,0 +1,6 @@
+---
+###############################################################################
+# Storage node Ceph configuration.
+
+# List of Ceph disks.
+ceph_disks: "{{ storage_ceph_disks }}"

ansible/group_vars/storage/lvm (new file, 6 lines)
@@ -0,0 +1,6 @@
+---
+###############################################################################
+# Storage node LVM configuration.
+
+# List of LVM volume groups.
+lvm_groups: "{{ storage_lvm_groups }}"

ansible/group_vars/storage/network (new file, 6 lines)
@@ -0,0 +1,6 @@
+---
+###############################################################################
+# Network interface attachments.
+
+# List of networks to which these nodes are attached.
+network_interfaces: "{{ storage_network_interfaces | unique | list }}"

ansible/group_vars/storage/raid (new file, 7 lines)
@@ -0,0 +1,7 @@
+---
+###############################################################################
+# Storage node RAID configuration.
+
+# List of storage node RAID volumes. Format is same as that used by
+# stackhpc.drac role.
+raid_config: "{{ storage_raid_config }}"

ansible/group_vars/storage/sysctl (new file, 3 lines)
@@ -0,0 +1,3 @@
+---
+# Dict of sysctl parameters to set.
+sysctl_parameters: "{{ storage_sysctl_parameters }}"

ansible/group_vars/storage/users (new file, 4 lines)
@@ -0,0 +1,4 @@
+---
+# List of users to create. This should be in a format accepted by the
+# singleplatform-eng.users role.
+users: "{{ storage_users }}"

ansible/roles/kolla-ceph/defaults/main.yml (new file, 4 lines)
@@ -0,0 +1,4 @@
+---
+
+# List of Ceph disks.
+ceph_disks: []

ansible/roles/kolla-ceph/tasks/config.yml (new file, 86 lines)
@@ -0,0 +1,86 @@
+---
+# (ktibi) The parted_1_1 module can be removed once kayobe supports Ansible 2.4.
+
+- name: Ensure required packages are installed
+  yum:
+    name: parted
+    state: installed
+  become: True
+  when: ceph_disks | length > 0
+
+- name: Check the presence of a partition on the OSD disks
+  become: True
+  parted_1_1:
+    device: "{{ item.osd }}"
+  with_items: "{{ ceph_disks }}"
+  register: "disk_osd_info"
+
+- name: Check the presence of a partition on the journal disks
+  become: True
+  parted_1_1:
+    device: "{{ item.journal }}"
+  with_items: "{{ ceph_disks }}"
+  register: "disk_journal_info"
+  when:
+    - item.journal is defined
+
+- name: Fail if the Ceph OSD disks already have a partition
+  fail:
+    msg: >
+      The physical disk {{ item.item }} already has a partition.
+      Ensure that each disk in 'ceph_disks' does not have any partitions.
+  with_items: "{{ disk_osd_info.results }}"
+  when:
+    - item.partitions | length > 0
+    - not item.partitions.0.name.startswith('KOLLA_CEPH')
+  loop_control:
+    label: "{{ item.item }}"
+
+- name: Fail if the Ceph journal disks already have a partition
+  fail:
+    msg: >
+      The physical disk {{ item.item }} already has a partition.
+      Ensure that each disk in 'ceph_disks' does not have any partitions.
+  with_items: "{{ disk_journal_info.results }}"
+  when:
+    - not item | skipped
+    - item.partitions | length > 0
+    - not item.partitions.0.name.startswith('KOLLA_CEPH')
+  loop_control:
+    label: "{{ item.item }}"
+
+- name: Create tag partition for Ceph OSD
+  become: True
+  parted_1_1:
+    device: "{{ item.item.osd }}"
+    number: 1
+    label: gpt
+    name: "{{ part_label }}"
+    state: present
+  with_items: "{{ disk_osd_info.results }}"
+  when: item.partitions | length == 0
+  loop_control:
+    label: "{{ item.item }}"
+  vars:
+    part_label: "{% if item.item.journal is defined %}{{ part_label_with_journal }}{% else %}KOLLA_CEPH_OSD_BOOTSTRAP{% endif %}"
+    part_label_with_journal: "KOLLA_CEPH_OSD_BOOTSTRAP_{{ osd_id }}"
+    osd_id: "{{ item.item.osd | basename }}{{ ansible_hostname }}"
+
+- name: Create tag partition for Ceph external journal
+  become: True
+  parted_1_1:
+    device: "{{ item.item.journal }}"
+    number: 1
+    label: gpt
+    name: "{{ part_label }}"
+    state: present
+  with_items: "{{ disk_journal_info.results }}"
+  when:
+    - not item | skipped
+    - item.partitions | length == 0
+  loop_control:
+    label: "{{ item.item }}"
+  vars:
+    part_label: "KOLLA_CEPH_OSD_BOOTSTRAP_{{ osd_id }}_J"
+    osd_id: "{{ item.item.osd | basename }}{{ ansible_hostname }}"
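
A sketch of the outcome, for illustration only (the hostname ``storage0`` and
the device names are assumptions, not part of this commit): given the
``ceph_disks`` entry below, the role creates a single GPT partition on each
device, labelled so that kolla-ansible's Ceph OSD bootstrap can recognise it.

    ceph_disks:
      - osd: /dev/sdb
        journal: /dev/sdc
    # Expected result (sketch):
    #   /dev/sdb1 labelled KOLLA_CEPH_OSD_BOOTSTRAP_sdbstorage0
    #   /dev/sdc1 labelled KOLLA_CEPH_OSD_BOOTSTRAP_sdbstorage0_J
    # A disk without a journal entry is labelled KOLLA_CEPH_OSD_BOOTSTRAP.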

ansible/roles/kolla-ceph/tasks/main.yml (new file, 4 lines)
@@ -0,0 +1,4 @@
+---
+- include: config.yml
+  tags:
+    - config

ansible/roles/kolla-ceph/tests/main.yml (new file, 12 lines)
@@ -0,0 +1,12 @@
+---
+
+- include: test-defaults.yml
+
+- hosts: localhost
+  connection: local
+  tasks:
+    - name: Fail if any tests failed
+      fail:
+        msg: >
+          Test failures: {{ test_failures }}
+      when: test_failures is defined

ansible/roles/kolla-ceph/tests/test-defaults.yml (new file, 64 lines)
@@ -0,0 +1,64 @@
+---
+- hosts: localhost
+  connection: local
+  tasks:
+    - name: Allocate a temporary file for a fake OSD
+      tempfile:
+      register: tempfile
+
+    - name: Allocate a fake OSD file
+      command: fallocate -l "{{ device_size }} {{ tempfile.path }}"
+
+    - name: Find a free loopback device
+      command: losetup -f
+      register: loopback_result
+
+    - name: Create a loopback device for the fake OSD file
+      command: losetup "{{ loopback_result.stdout }} {{ tempfile.path }}"
+      become: True
+
+    - block:
+        - name: Import parted role
+          include_role:
+            name: stackhpc.parted-1-1
+
+        - name: Test the kolla-ceph role
+          include_role:
+            name: ../../kolla-ceph
+          vars:
+            ceph_disks:
+              - osd: "{{ loopback_result.stdout }}"
+
+        - name: Get name of fake partition
+          parted_kayobe:
+            device: "{{ loopback_result.stdout }}"
+          register: "disk_osd_info"
+
+        - name: Validate the number of partitions
+          assert:
+            that: disk_osd_info.partitions | length == 1
+            msg: >
+              The number of partitions is not correct.
+
+        - name: Validate that the OSD tag is present
+          assert:
+            that: "disk_osd_info.partitions.0.name == 'KOLLA_CEPH_OSD_BOOTSTRAP'"
+            msg: >
+              The partition name is not correct.
+
+      always:
+        - name: Detach the loopback device
+          command: losetup -d {{ loopback_result.stdout }}
+          become: True
+
+        - name: Remove the fake OSD file
+          file:
+            name: "{{ tempfile.path }}"
+            state: absent
+
+      rescue:
+        - name: Flag that a failure occurred
+          set_fact:
+            test_failures: "{{ test_failures | default(0) | int + 1 }}"
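
Note, for illustration only: the test play references a ``device_size``
variable that is not defined in the files shown in this diff, so it presumably
has to be supplied when the tests are invoked. A hypothetical extra-vars
snippet for a local run:

    device_size: 1GiB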

@@ -43,6 +43,9 @@ Features
   this variable is ``{{ virtualenv_path }}/kolla-ansible``.
 * Adds tags to plays to support more fine grained configuration using the
   ``--tags`` argument.
+* Adds support for deployment of storage hosts. These hosts should be added to
+  the ``[storage]`` group.
+* Adds support for the tagging of Ceph disks.
 
 Upgrade Notes
 -------------

@@ -94,6 +97,9 @@ Upgrade Notes
   connecting via SSH, due to a timeout in NSS. The workaround employed here is
   to remove this bogus entry from the image using virt-customize, if it exists.
   See https://bugs.centos.org/view.php?id=14369.
+* Adds a group ``storage``, which is used to deploy nodes running cinder-volume,
+  LVM or ceph-osd. If you want to add these services to the compute or control
+  groups, you need to override ``kolla_overcloud_inventory_storage_groups``.
 
 Kayobe 3.0.0
 ============

@@ -83,6 +83,16 @@
 # Filesystem for docker volumes LVM backing volume. ext4 allows for shrinking.
 #compute_lvm_group_data_lv_docker_volumes_fs:
 
+###############################################################################
+# Compute node Ceph configuration.
+
+# List of Ceph disks.
+# The format is a list of dicts, e.g.:
+# - { osd: "/dev/sdb", journal: "/dev/sdc" }
+# - { osd: "/dev/sdd" }
+# The journal key is optional.
+#compute_ceph_disks:
+
 ###############################################################################
 # Compute node sysctl configuration.
 
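
For illustration only, not part of this commit: an operator enabling OSDs on
hypervisors would uncomment and set the new variable in this file; the device
name below is an example.

    compute_ceph_disks:
      - osd: /dev/sdd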

@@ -86,6 +86,16 @@
 # Filesystem for docker volumes LVM backing volume. ext4 allows for shrinking.
 #controller_lvm_group_data_lv_docker_volumes_fs:
 
+###############################################################################
+# Controller node Ceph configuration.
+
+# List of Ceph disks.
+# The format is a list of dicts, e.g.:
+# - { osd: "/dev/sdb", journal: "/dev/sdc" }
+# - { osd: "/dev/sdd" }
+# The journal key is optional.
+#controller_ceph_disks:
+
 ###############################################################################
 # Controller node sysctl configuration.
 

etc/kayobe/storage.yml (new file, 111 lines)
@@ -0,0 +1,111 @@
+---
+###############################################################################
+# Storage node configuration.
+
+# User with which to access the storage hosts via SSH during bootstrap, in
+# order to set up the Kayobe user account.
+#storage_bootstrap_user:
+
+###############################################################################
+# Network interface attachments.
+
+# List of networks to which storage nodes are attached.
+#storage_network_interfaces:
+
+# List of default networks to which storage nodes are attached.
+#storage_default_network_interfaces:
+
+# List of extra networks to which storage nodes are attached.
+#storage_extra_network_interfaces:
+
+###############################################################################
+# Storage node BIOS configuration.
+
+# Dict of storage BIOS options. Format is same as that used by stackhpc.drac
+# role.
+#storage_bios_config:
+
+# Dict of default storage BIOS options. Format is same as that used by
+# stackhpc.drac role.
+#storage_bios_config_default:
+
+# Dict of additional storage BIOS options. Format is same as that used by
+# stackhpc.drac role.
+#storage_bios_config_extra:
+
+###############################################################################
+# Storage node RAID configuration.
+
+# List of storage RAID volumes. Format is same as that used by stackhpc.drac
+# role.
+#storage_raid_config:
+
+# List of default storage RAID volumes. Format is same as that used by
+# stackhpc.drac role.
+#storage_raid_config_default:
+
+# List of additional storage RAID volumes. Format is same as that used by
+# stackhpc.drac role.
+#storage_raid_config_extra:
+
+###############################################################################
+# Storage node LVM configuration.
+
+# List of storage volume groups. See mrlesmithjr.manage-lvm role for
+# format.
+#storage_lvm_groups:
+
+# Default list of storage volume groups. See mrlesmithjr.manage-lvm role for
+# format.
+#storage_lvm_groups_default:
+
+# Additional list of storage volume groups. See mrlesmithjr.manage-lvm role
+# for format.
+#storage_lvm_groups_extra:
+
+# Storage LVM volume group for data. See mrlesmithjr.manage-lvm role for
+# format.
+#storage_lvm_group_data:
+
+# List of disks for use by storage LVM data volume group. Default to an
+# invalid value to require configuration.
+#storage_lvm_group_data_disks:
+
+# List of LVM logical volumes for the data volume group.
+#storage_lvm_group_data_lvs:
+
+# Docker volumes LVM backing volume.
+#storage_lvm_group_data_lv_docker_volumes:
+
+# Size of docker volumes LVM backing volume.
+#storage_lvm_group_data_lv_docker_volumes_size:
+
+# Filesystem for docker volumes LVM backing volume. ext4 allows for shrinking.
+#storage_lvm_group_data_lv_docker_volumes_fs:
+
+###############################################################################
+# Storage node Ceph configuration.
+
+# List of Ceph disks.
+# The format is a list of dicts, e.g.:
+# - { osd: "/dev/sdb", journal: "/dev/sdc" }
+# - { osd: "/dev/sdd" }
+# The journal key is optional.
+#storage_ceph_disks:
+
+###############################################################################
+# Storage node sysctl configuration.
+
+# Dict of sysctl parameters to set.
+#storage_sysctl_parameters:
+
+###############################################################################
+# Storage node user configuration.
+
+# List of users to create. This should be in a format accepted by the
+# singleplatform-eng.users role.
+#storage_users:
+
+###############################################################################
+# Dummy variable to allow Ansible to accept this file.
+workaround_ansible_issue_8743: yes
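
For illustration only, not part of this commit: a hypothetical filled-in
fragment of this file combining the sysctl and network sections; the parameter
value and the extra network name are examples, not defaults.

    storage_sysctl_parameters:
      vm.swappiness: 10

    storage_extra_network_interfaces:
      - ceph_storage_net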

@@ -716,7 +716,7 @@ class OvercloudHostConfigure(KollaAnsibleMixin, KayobeAnsibleMixin, VaultMixin,
 
         # Further kayobe playbooks.
         playbooks = _build_playbook_list(
-            "kolla-target-venv", "kolla-host", "docker")
+            "kolla-target-venv", "kolla-host", "docker", "ceph-block-devices")
         self.run_kayobe_playbooks(parsed_args, playbooks, limit="overcloud")
 
 

@@ -482,6 +482,7 @@ class TestCase(unittest.TestCase):
                 "ansible/kolla-target-venv.yml",
                 "ansible/kolla-host.yml",
                 "ansible/docker.yml",
+                "ansible/ceph-block-devices.yml",
             ],
             limit="overcloud",
         ),

@@ -8,6 +8,7 @@
 - src: https://github.com/stackhpc/ansible-users
   version: append
   name: singleplatform-eng.users
+- src: stackhpc.parted-1-1
 - src: stackhpc.drac
 - src: stackhpc.drac-facts
 - src: stackhpc.grafana-conf