diff --git a/ansible/ceph-block-devices.yml b/ansible/ceph-block-devices.yml new file mode 100644 index 000000000..4912f605c --- /dev/null +++ b/ansible/ceph-block-devices.yml @@ -0,0 +1,9 @@ +--- +- name: Ensure Ceph disk are tagged + hosts: overcloud + tags: + - kolla-ceph + roles: + - role: stackhpc.parted-1-1 + - role: kolla-ceph + when: kolla_enable_ceph | bool diff --git a/ansible/disable-cloud-init.yml b/ansible/disable-cloud-init.yml new file mode 100644 index 000000000..222f3bf1d --- /dev/null +++ b/ansible/disable-cloud-init.yml @@ -0,0 +1,12 @@ +--- +# Cloud-init ‘s searches for network configuration in order of +# increasing precedence; each item overriding the previous. +# In some cases cloud-init reconfigure automatically network interface +# and cause some issues in network configuration +- name: Disable Cloud-init service + hosts: overcloud + tags: + - disable-cloud-init + roles: + - role: disable-cloud-init + when: disable_cloud_init | bool diff --git a/ansible/docker.yml b/ansible/docker.yml index 4b6c8219a..76b179b70 100644 --- a/ansible/docker.yml +++ b/ansible/docker.yml @@ -3,6 +3,8 @@ hosts: docker tags: - docker + vars: + - docker_upper_constraints_file: "{{ kolla_upper_constraints_file }}" roles: - role: docker docker_daemon_mtu: "{{ public_net_name | net_mtu | default }}" diff --git a/ansible/group_vars/all/compute b/ansible/group_vars/all/compute index 0c3878052..3366351e8 100644 --- a/ansible/group_vars/all/compute +++ b/ansible/group_vars/all/compute @@ -101,6 +101,16 @@ compute_lvm_group_data_lv_docker_volumes_size: 75%VG # Filesystem for docker volumes LVM backing volume. ext4 allows for shrinking. compute_lvm_group_data_lv_docker_volumes_fs: ext4 +############################################################################### +# Compute node Ceph configuration. + +# List of Ceph disks. +# The format is a list of dict like : +# - { osd: "/dev/sdb", journal: "/dev/sdc" } +# - { osd: "/dev/sdd" } +# Journal variable is not mandatory. 
+compute_ceph_disks: [] + ############################################################################### # Compute node sysctl configuration. diff --git a/ansible/group_vars/all/controllers b/ansible/group_vars/all/controllers index 44550de6f..53d5f7f2e 100644 --- a/ansible/group_vars/all/controllers +++ b/ansible/group_vars/all/controllers @@ -111,6 +111,16 @@ controller_lvm_group_data_lv_docker_volumes_size: 75%VG # Filesystem for docker volumes LVM backing volume. ext4 allows for shrinking. controller_lvm_group_data_lv_docker_volumes_fs: ext4 +############################################################################### +# Controller node Ceph configuration. + +# List of Ceph disks. +# The format is a list of dict like : +# - { osd: "/dev/sdb", journal: "/dev/sdc" } +# - { osd: "/dev/sdd" } +# Journal variable is not mandatory. +controller_ceph_disks: [] + ############################################################################### # Controller node sysctl configuration. diff --git a/ansible/group_vars/all/kolla b/ansible/group_vars/all/kolla index f9fa93948..a89d0307a 100644 --- a/ansible/group_vars/all/kolla +++ b/ansible/group_vars/all/kolla @@ -223,6 +223,10 @@ kolla_overcloud_inventory_custom_services: # concatenation of the top level, component, and service inventories. kolla_overcloud_inventory_custom: +# List of groups mapped to kolla storage group. +kolla_overcloud_inventory_storage_groups: + - "storage" + # Dict mapping from kolla-ansible groups to kayobe groups and variables. Each # item is a dict with the following items: # * groups: A list of kayobe ansible groups to map to this kolla-ansible group. @@ -241,6 +245,9 @@ kolla_overcloud_inventory_top_level_group_map: monitoring: groups: - monitoring + storage: + groups: + - "{{ kolla_overcloud_inventory_storage_groups }}" # List of names of top level kolla-ansible groups. Any of these groups which # have no hosts mapped to them will be provided with an empty group definition. 
@@ -271,6 +278,9 @@ kolla_external_fqdn_cert: # Whether debug logging is enabled. kolla_openstack_logging_debug: "False" +# Upper constraints file for the stable/pike branch of Kolla +kolla_upper_constraints_file: "https://raw.githubusercontent.com/openstack/requirements/stable/pike/upper-constraints.txt" + ############################################################################### # Kolla feature flag configuration. diff --git a/ansible/group_vars/all/overcloud b/ansible/group_vars/all/overcloud index 5c3cff206..c374a6ff1 100644 --- a/ansible/group_vars/all/overcloud +++ b/ansible/group_vars/all/overcloud @@ -16,6 +16,9 @@ overcloud_groups: > # should not be added to the inventory. overcloud_group_hosts_map: {} +# To prevent some network issues you can choose to disable cloud-init +disable_cloud_init: False + ############################################################################### # Overcloud host image configuration. diff --git a/ansible/group_vars/all/storage b/ansible/group_vars/all/storage new file mode 100644 index 000000000..6a60113f8 --- /dev/null +++ b/ansible/group_vars/all/storage @@ -0,0 +1,126 @@ +--- +############################################################################### +# Storage node configuration. + +# User with which to access the storages via SSH during bootstrap, in order +# to setup the Kayobe user account. +storage_bootstrap_user: "{{ lookup('env', 'USER') }}" + +############################################################################### +# Storage network interface configuration. + +# List of networks to which storage nodes are attached. +storage_network_interfaces: > + {{ (storage_default_network_interfaces + + storage_extra_network_interfaces) | unique | list }} + +# List of default networks to which storage nodes are attached. 
+storage_default_network_interfaces: > + {{ [provision_oc_net_name, + internal_net_name, + storage_mgmt_net_name, + storage_net_name] | unique | list }} + +# List of extra networks to which storage nodes are attached. +storage_extra_network_interfaces: [] + +############################################################################### +# Storage node BIOS configuration. + +# Dict of storage BIOS options. Format is same as that used by stackhpc.drac +# role. +storage_bios_config: "{{ storage_bios_config_default | combine(storage_bios_config_extra) }}" + +# Dict of default storage BIOS options. Format is same as that used by +# stackhpc.drac role. +storage_bios_config_default: {} + +# Dict of additional storage BIOS options. Format is same as that used by +# stackhpc.drac role. +storage_bios_config_extra: {} + +############################################################################### +# Storage node RAID configuration. + +# List of storage RAID volumes. Format is same as that used by stackhpc.drac +# role. +storage_raid_config: "{{ storage_raid_config_default + storage_raid_config_extra }}" + +# List of default storage RAID volumes. Format is same as that used by +# stackhpc.drac role. +storage_raid_config_default: [] + +# List of additional storage RAID volumes. Format is same as that used by +# stackhpc.drac role. +storage_raid_config_extra: [] + +############################################################################### +# Storage node LVM configuration. + +# List of storage volume groups. See mrlesmithjr.manage-lvm role for +# format. +storage_lvm_groups: "{{ storage_lvm_groups_default + storage_lvm_groups_extra }}" + +# Default list of storage volume groups. See mrlesmithjr.manage-lvm role for +# format. +storage_lvm_groups_default: + - "{{ storage_lvm_group_data }}" + +# Additional list of storage volume groups. See mrlesmithjr.manage-lvm role +# for format. +storage_lvm_groups_extra: [] + +# Storage LVM volume group for data. 
See mrlesmithjr.manage-lvm role for +# format. +storage_lvm_group_data: + vgname: data + disks: "{{ storage_lvm_group_data_disks | join(',') }}" + create: True + lvnames: "{{ storage_lvm_group_data_lvs }}" + +# List of disks for use by storage LVM data volume group. Default to an +# invalid value to require configuration. +storage_lvm_group_data_disks: + - changeme + +# List of LVM logical volumes for the data volume group. +storage_lvm_group_data_lvs: + - "{{ storage_lvm_group_data_lv_docker_volumes }}" + +# Docker volumes LVM backing volume. +storage_lvm_group_data_lv_docker_volumes: + lvname: docker-volumes + size: "{{ storage_lvm_group_data_lv_docker_volumes_size }}" + create: True + filesystem: "{{ storage_lvm_group_data_lv_docker_volumes_fs }}" + mount: True + mntp: /var/lib/docker/volumes + +# Size of docker volumes LVM backing volume. +storage_lvm_group_data_lv_docker_volumes_size: 75%VG + +# Filesystem for docker volumes LVM backing volume. ext4 allows for shrinking. +storage_lvm_group_data_lv_docker_volumes_fs: ext4 + +############################################################################### +# Storage node Ceph configuration. + +# List of Ceph disks. +# The format is a list of dict like : +# - { osd: "/dev/sdb", journal: "/dev/sdc" } +# - { osd: "/dev/sdd" } +# Journal variable is not mandatory. +storage_ceph_disks: [] + +############################################################################### +# Storage node sysctl configuration. + +# Dict of sysctl parameters to set. +storage_sysctl_parameters: {} + +############################################################################### +# Storage node user configuration. + +# List of users to create. This should be in a format accepted by the +# singleplatform-eng.users role. 
+storage_users: "{{ users_default }}" diff --git a/ansible/group_vars/compute/ceph b/ansible/group_vars/compute/ceph new file mode 100644 index 000000000..9f8e30b00 --- /dev/null +++ b/ansible/group_vars/compute/ceph @@ -0,0 +1,6 @@ +--- +############################################################################### +# Compute node Ceph configuration. + +# List of Ceph disks. +ceph_disks: "{{ compute_ceph_disks }}" diff --git a/ansible/group_vars/controllers/ceph b/ansible/group_vars/controllers/ceph new file mode 100644 index 000000000..d2c4eefdd --- /dev/null +++ b/ansible/group_vars/controllers/ceph @@ -0,0 +1,6 @@ +--- +############################################################################### +# Controller node Ceph configuration. + +# List of Ceph disks. +ceph_disks: "{{ controller_ceph_disks }}" diff --git a/ansible/group_vars/storage/ansible-user b/ansible/group_vars/storage/ansible-user new file mode 100644 index 000000000..c2c4ac3e6 --- /dev/null +++ b/ansible/group_vars/storage/ansible-user @@ -0,0 +1,7 @@ +--- +# User with which to access the storages via SSH. +ansible_user: "{{ kayobe_ansible_user }}" + +# User with which to access the storages before the kayobe_ansible_user +# account has been created. +bootstrap_user: "{{ storage_bootstrap_user }}" diff --git a/ansible/group_vars/storage/bios b/ansible/group_vars/storage/bios new file mode 100644 index 000000000..8c07ab5ae --- /dev/null +++ b/ansible/group_vars/storage/bios @@ -0,0 +1,7 @@ +--- +############################################################################### +# Storage node BIOS configuration. + +# Dict of storage node BIOS options. Format is same as that used by +# stackhpc.drac role. 
+bios_config: "{{ storage_bios_config }}" diff --git a/ansible/group_vars/storage/ceph b/ansible/group_vars/storage/ceph new file mode 100644 index 000000000..dca397fa9 --- /dev/null +++ b/ansible/group_vars/storage/ceph @@ -0,0 +1,6 @@ +--- +############################################################################### +# Storage node Ceph configuration. + +# List of Ceph disks. +ceph_disks: "{{ storage_ceph_disks }}" diff --git a/ansible/group_vars/storage/lvm b/ansible/group_vars/storage/lvm new file mode 100644 index 000000000..4ccee0185 --- /dev/null +++ b/ansible/group_vars/storage/lvm @@ -0,0 +1,6 @@ +--- +############################################################################### +# Storage node LVM configuration. + +# List of LVM volume groups. +lvm_groups: "{{ storage_lvm_groups }}" diff --git a/ansible/group_vars/storage/network b/ansible/group_vars/storage/network new file mode 100644 index 000000000..46051fd9e --- /dev/null +++ b/ansible/group_vars/storage/network @@ -0,0 +1,6 @@ +--- +############################################################################### +# Network interface attachments. + +# List of networks to which these nodes are attached. +network_interfaces: "{{ storage_network_interfaces | unique | list }}" diff --git a/ansible/group_vars/storage/raid b/ansible/group_vars/storage/raid new file mode 100644 index 000000000..ba80a5372 --- /dev/null +++ b/ansible/group_vars/storage/raid @@ -0,0 +1,7 @@ +--- +############################################################################### +# Storage node RAID configuration. + +# List of storage node RAID volumes. Format is same as that used by +# stackhpc.drac role. +raid_config: "{{ storage_raid_config }}" diff --git a/ansible/group_vars/storage/sysctl b/ansible/group_vars/storage/sysctl new file mode 100644 index 000000000..3bae3f238 --- /dev/null +++ b/ansible/group_vars/storage/sysctl @@ -0,0 +1,3 @@ +--- +# Dict of sysctl parameters to set. 
+sysctl_parameters: "{{ storage_sysctl_parameters }}" diff --git a/ansible/group_vars/storage/users b/ansible/group_vars/storage/users new file mode 100644 index 000000000..b366f79d9 --- /dev/null +++ b/ansible/group_vars/storage/users @@ -0,0 +1,4 @@ +--- +# List of users to create. This should be in a format accepted by the +# singleplatform-eng.users role. +users: "{{ storage_users }}" diff --git a/ansible/overcloud-docker-sdk-upgrade.yml b/ansible/overcloud-docker-sdk-upgrade.yml index e63ace8ac..046a11af1 100644 --- a/ansible/overcloud-docker-sdk-upgrade.yml +++ b/ansible/overcloud-docker-sdk-upgrade.yml @@ -26,5 +26,6 @@ pip: name: docker state: latest + extra_args: "{% if kolla_upper_constraints_file %}-c {{ kolla_upper_constraints_file }}{% endif %}" virtualenv: "{{ virtualenv is defined | ternary(virtualenv, omit) }}" become: "{{ virtualenv is not defined }}" diff --git a/ansible/roles/disable-cloud-init/handlers/main.yml b/ansible/roles/disable-cloud-init/handlers/main.yml new file mode 100644 index 000000000..3fc34e8ea --- /dev/null +++ b/ansible/roles/disable-cloud-init/handlers/main.yml @@ -0,0 +1,7 @@ +--- +- name: restart cloud-init daemon + systemd: + name: cloud-init + state: restarted + daemon_reload: yes + become: True diff --git a/ansible/roles/disable-cloud-init/tasks/main.yml b/ansible/roles/disable-cloud-init/tasks/main.yml new file mode 100644 index 000000000..89c23f198 --- /dev/null +++ b/ansible/roles/disable-cloud-init/tasks/main.yml @@ -0,0 +1,9 @@ +--- +- name: Disable cloud init service + file: + path: /etc/cloud/cloud-init.disabled + state: touch + mode: "u=rw,g=r,o=r" + notify: + - restart cloud-init daemon + become: True diff --git a/ansible/roles/docker/defaults/main.yml b/ansible/roles/docker/defaults/main.yml index bf7b7a3fb..c7c920db9 100644 --- a/ansible/roles/docker/defaults/main.yml +++ b/ansible/roles/docker/defaults/main.yml @@ -33,3 +33,7 @@ docker_registry_ca: # MTU to pass through to containers not using net=host 
docker_daemon_mtu: 1500 + +# Upper constraints file which is passed to pip when installing packages +# into a venv. +docker_upper_constraints_file: diff --git a/ansible/roles/docker/tasks/main.yml b/ansible/roles/docker/tasks/main.yml index f053932fc..49b13a741 100644 --- a/ansible/roles/docker/tasks/main.yml +++ b/ansible/roles/docker/tasks/main.yml @@ -18,6 +18,7 @@ pip: name: docker state: latest + extra_args: "{% if docker_upper_constraints_file %}-c {{ docker_upper_constraints_file }}{% endif %}" virtualenv: "{{ virtualenv is defined | ternary(virtualenv, omit) }}" become: "{{ virtualenv is not defined }}" diff --git a/ansible/roles/kolla-ansible/defaults/main.yml b/ansible/roles/kolla-ansible/defaults/main.yml index f0db8379a..4d2496188 100644 --- a/ansible/roles/kolla-ansible/defaults/main.yml +++ b/ansible/roles/kolla-ansible/defaults/main.yml @@ -19,6 +19,10 @@ kolla_ansible_venv: "{{ ansible_env['PWD'] }}/kolla-venv" # remotely on the target nodes. If None, no virtualenv will be used. kolla_ansible_target_venv: +# Upper constraints file which is passed to pip when installing packages +# into the kolla-ansible venv. +kolla_upper_constraints_file: + # Password to use to encrypt the passwords.yml file. 
kolla_ansible_vault_password: diff --git a/ansible/roles/kolla-ansible/tasks/install.yml b/ansible/roles/kolla-ansible/tasks/install.yml index 59bdaf2b2..63b06e461 100644 --- a/ansible/roles/kolla-ansible/tasks/install.yml +++ b/ansible/roles/kolla-ansible/tasks/install.yml @@ -58,6 +58,7 @@ pip: requirements: "{{ kolla_ansible_venv }}/requirements.txt" state: present + extra_args: "{% if kolla_upper_constraints_file %}-c {{ kolla_upper_constraints_file }}{% endif %}" virtualenv: "{{ kolla_ansible_venv }}" # This is a workaround for the lack of a python package for libselinux-python diff --git a/ansible/roles/kolla-ceph/defaults/main.yml b/ansible/roles/kolla-ceph/defaults/main.yml new file mode 100644 index 000000000..83c5eee90 --- /dev/null +++ b/ansible/roles/kolla-ceph/defaults/main.yml @@ -0,0 +1,4 @@ +--- + +# List of Ceph disks. +ceph_disks: [] diff --git a/ansible/roles/kolla-ceph/tasks/config.yml b/ansible/roles/kolla-ceph/tasks/config.yml new file mode 100644 index 000000000..0a542b30e --- /dev/null +++ b/ansible/roles/kolla-ceph/tasks/config.yml @@ -0,0 +1,86 @@ +--- +# (ktibi) Need to remove parted_1_1 module when kayobe will support ansible 2.4 + +- name: Ensure required packages are installed + package: + name: parted + state: installed + become: True + when: ceph_disks | length > 0 + +- name: Check the presence of a partition on the OSD disks + become: True + parted_1_1: + device: "{{ item.osd }}" + with_items: "{{ ceph_disks }}" + register: "disk_osd_info" + +- name: Check the presence of a partition on the journal disks + become: True + parted_1_1: + device: "{{ item.journal }}" + with_items: "{{ ceph_disks }}" + register: "disk_journal_info" + when: + - item.journal is defined + +- name: Fail if the Ceph OSD disks have already a partition + fail: + msg: > + The physical disk {{ item.item }} already has a partition. + Ensure that each disk in 'ceph_disks' does not have any partitions. 
+ with_items: "{{ disk_osd_info.results }}" + when: + - item.partitions | length > 0 + - not item.partitions.0.name.startswith('KOLLA_CEPH') + loop_control: + label: "{{item.item}}" + +- name: Fail if the Ceph journal disks have already a partition + fail: + msg: > + The physical disk {{ item.item }} already has a partition. + Ensure that each disk in 'ceph_disks' does not have any partitions. + with_items: "{{ disk_journal_info.results }}" + when: + - not item | skipped + - item.partitions | length > 0 + - not item.partitions.0.name.startswith('KOLLA_CEPH') + loop_control: + label: "{{item.item}}" + +- name: Create tag partition for Ceph OSD + become: True + parted_1_1: + device: "{{ item.item.osd }}" + number: 1 + label: gpt + name: "{{ part_label }}" + state: present + with_items: "{{ disk_osd_info.results }}" + when: item.partitions | length == 0 + loop_control: + label: "{{item.item}}" + vars: + part_label: "{% if item.item.journal is defined %}{{ part_label_with_journal }}{% else %}KOLLA_CEPH_OSD_BOOTSTRAP{% endif %}" + part_label_with_journal: "KOLLA_CEPH_OSD_BOOTSTRAP_{{ (osd_id | hash('md5'))[:9] }}" + osd_id: "{{ item.item.osd | basename }}{{ ansible_hostname }}" + +- name: Create tag partition for Ceph external journal + become: True + parted_1_1: + device: "{{ item.item.journal }}" + number: 1 + label: gpt + name: "{{ part_label }}" + state: present + with_items: "{{ disk_journal_info.results }}" + when: + - not item | skipped + - item.partitions | length == 0 + loop_control: + label: "{{item.item}}" + vars: + part_label: "KOLLA_CEPH_OSD_BOOTSTRAP_{{ (osd_id | hash('md5'))[:9] }}_J" + osd_id: "{{ item.item.osd | basename }}{{ ansible_hostname }}" + diff --git a/ansible/roles/kolla-ceph/tasks/main.yml b/ansible/roles/kolla-ceph/tasks/main.yml new file mode 100644 index 000000000..0079f60ce --- /dev/null +++ b/ansible/roles/kolla-ceph/tasks/main.yml @@ -0,0 +1,4 @@ +--- +- include: config.yml + tags: + - config diff --git 
a/ansible/roles/kolla-ceph/tests/main.yml b/ansible/roles/kolla-ceph/tests/main.yml new file mode 100644 index 000000000..97e8ab4f2 --- /dev/null +++ b/ansible/roles/kolla-ceph/tests/main.yml @@ -0,0 +1,14 @@ +--- +- include: test-no-journal.yml +- include: test-journal.yml +- include: test-bootstrapped-journal.yml +- include: test-data-journal.yml + +- hosts: localhost + connection: local + tasks: + - name: Fail if any tests failed + fail: + msg: > + Test failures: {{ test_failures }} + when: test_failures is defined diff --git a/ansible/roles/kolla-ceph/tests/test-bootstrapped-journal.yml b/ansible/roles/kolla-ceph/tests/test-bootstrapped-journal.yml new file mode 100644 index 000000000..a32fd1771 --- /dev/null +++ b/ansible/roles/kolla-ceph/tests/test-bootstrapped-journal.yml @@ -0,0 +1,118 @@ +--- +# Test case with an OSD and external journal that have already been tagged by +# kayobe with the kolla-ansible bootstrap label, but have not yet been +# converted to use the in-use label. 
+ +- hosts: localhost + connection: local + tasks: + - name: Allocate a temporary file for a fake OSD + tempfile: + register: osd_tempfile + + - name: Allocate a temporary file for a fake journal + tempfile: + register: journal_tempfile + + - name: Allocate a fake OSD file + command: fallocate -l 10M {{ osd_tempfile.path }} + + - name: Allocate a fake journal file + command: fallocate -l 10M {{ journal_tempfile.path }} + + - name: Create tag partition for the fake OSD + become: True + parted_1_1: + device: "{{ osd_tempfile.path }}" + number: 1 + label: gpt + name: "{{ part_label }}" + state: present + vars: + part_label: "KOLLA_CEPH_OSD_BOOTSTRAP_{{ (osd_id | hash('md5'))[:9] }}" + osd_id: "{{ osd_tempfile.path | basename }}{{ ansible_hostname }}" + + - name: Create tag partition for the fake journal + become: True + parted_1_1: + device: "{{ journal_tempfile.path }}" + number: 1 + label: gpt + name: "{{ part_label }}" + state: present + vars: + part_label: "KOLLA_CEPH_OSD_BOOTSTRAP_{{ (osd_id | hash('md5'))[:9] }}_J" + osd_id: "{{ osd_tempfile.path | basename }}{{ ansible_hostname }}" + + - block: + - name: Import parted role + include_role: + name: ../../stackhpc.parted-1-1 + + - name: Test the kolla-ceph role + include_role: + name: ../../kolla-ceph + vars: + ceph_disks: + - osd: "{{ osd_tempfile.path }}" + journal: "{{ journal_tempfile.path }}" + + - name: Get name of fake OSD partition + parted_1_1: + device: "{{ osd_tempfile.path }}" + register: "disk_osd_info" + become: True + + - name: Validate number of OSD partitions + assert: + that: disk_osd_info.partitions | length == 1 + msg: > + Number of OSD partitions is not correct. Expected 1, + actual {{ disk_osd_info.partitions | length }} + + - name: Validate OSD tag is present + assert: + that: "disk_osd_info.partitions.0.name == expected" + msg: > + Name of OSD partition is not correct. Expected {{ expected }}, + actual {{ disk_osd_info.partitions.0.name }}. 
+ vars: + expected: "{{ 'KOLLA_CEPH_OSD_BOOTSTRAP_' ~ ((osd_tempfile.path | basename ~ ansible_hostname) | hash('md5'))[:9] }}" + + - name: Get name of fake journal partition + parted_1_1: + device: "{{ journal_tempfile.path }}" + register: "disk_journal_info" + become: True + + - name: Validate number of journal partitions + assert: + that: disk_journal_info.partitions | length == 1 + msg: > + Number of journal partitions is not correct. Expected 1, + actual {{ disk_journal_info.partitions | length }} + + - name: Validate journal tag is present + assert: + that: "disk_journal_info.partitions.0.name == expected" + msg: > + Name of journal partition is not correct. Expected {{ expected }}, + actual {{ disk_journal_info.partitions.0.name }}. + vars: + expected: "{{ 'KOLLA_CEPH_OSD_BOOTSTRAP_' ~ (( osd_tempfile.path | basename ~ ansible_hostname) | hash('md5'))[:9] ~ '_J' }}" + + always: + - name: Remove the fake OSD file + file: + name: "{{ osd_tempfile.path }}" + state: absent + + - name: Remove the fake journal file + file: + name: "{{ journal_tempfile.path }}" + state: absent + + rescue: + - name: Flag that a failure occurred + set_fact: + test_failures: "{{ test_failures | default(0) | int + 1 }}" diff --git a/ansible/roles/kolla-ceph/tests/test-data-journal.yml b/ansible/roles/kolla-ceph/tests/test-data-journal.yml new file mode 100644 index 000000000..6fdf489bf --- /dev/null +++ b/ansible/roles/kolla-ceph/tests/test-data-journal.yml @@ -0,0 +1,117 @@ +--- +# Test case with an OSD and external journal that have been converted by +# kolla-ansible to use the in-use label. 
+ +- hosts: localhost + connection: local + tasks: + - name: Allocate a temporary file for a fake OSD + tempfile: + register: osd_tempfile + + - name: Allocate a temporary file for a fake journal + tempfile: + register: journal_tempfile + + - name: Allocate a fake OSD file + command: fallocate -l 10M {{ osd_tempfile.path }} + + - name: Allocate a fake journal file + command: fallocate -l 10M {{ journal_tempfile.path }} + + - name: Create tag partition for the fake OSD + become: True + parted_1_1: + device: "{{ osd_tempfile.path }}" + number: 1 + label: gpt + name: "{{ part_label }}" + state: present + vars: + part_label: "KOLLA_CEPH_DATA_{{ (osd_id | hash('md5'))[:9]}}" + osd_id: "{{ (osd_tempfile.path | basename ~ ansible_hostname) }}" + + - name: Create tag partition for the fake journal + become: True + parted_1_1: + device: "{{ journal_tempfile.path }}" + number: 1 + label: gpt + name: "{{ part_label }}" + state: present + vars: + part_label: "KOLLA_CEPH_DATA_{{ (osd_id | hash('md5'))[:9] }}_J" + osd_id: "{{ (osd_tempfile.path | basename ~ ansible_hostname) }}" + + - block: + - name: Import parted role + include_role: + name: ../../stackhpc.parted-1-1 + + - name: Test the kolla-ceph role + include_role: + name: ../../kolla-ceph + vars: + ceph_disks: + - osd: "{{ osd_tempfile.path }}" + journal: "{{ journal_tempfile.path }}" + + - name: Get name of fake OSD partition + parted_1_1: + device: "{{ osd_tempfile.path }}" + register: "disk_osd_info" + become: True + + - name: Validate number of OSD partitions + assert: + that: disk_osd_info.partitions | length == 1 + msg: > + Number of OSD partitions is not correct. Expected 1, + actual {{ disk_osd_info.partitions | length }} + + - name: Validate OSD tag is present + assert: + that: "disk_osd_info.partitions.0.name == expected" + msg: > + Name of OSD partition is not correct. Expected {{ expected }}, + actual {{ disk_osd_info.partitions.0.name }}. 
+ vars: + expected: "{{ 'KOLLA_CEPH_DATA_' ~ ((osd_tempfile.path | basename ~ ansible_hostname)| hash('md5'))[:9] }}" + + - name: Get name of fake journal partition + parted_1_1: + device: "{{ journal_tempfile.path }}" + register: "disk_journal_info" + become: True + + - name: Validate number of journal partitions + assert: + that: disk_journal_info.partitions | length == 1 + msg: > + Number of journal partitions is not correct. Expected 1, + actual {{ disk_journal_info.partitions | length }} + + - name: Validate journal tag is present + assert: + that: "disk_journal_info.partitions.0.name == expected" + msg: > + Name of journal partition is not correct. Expected {{ expected }}, + actual {{ disk_journal_info.partitions.0.name }}. + vars: + expected: "{{ 'KOLLA_CEPH_DATA_' ~ ((osd_tempfile.path | basename ~ ansible_hostname)| hash('md5'))[:9] ~ '_J' }}" + + always: + - name: Remove the fake OSD file + file: + name: "{{ osd_tempfile.path }}" + state: absent + + - name: Remove the fake journal file + file: + name: "{{ journal_tempfile.path }}" + state: absent + + rescue: + - name: Flag that a failure occurred + set_fact: + test_failures: "{{ test_failures | default(0) | int + 1 }}" diff --git a/ansible/roles/kolla-ceph/tests/test-journal.yml b/ansible/roles/kolla-ceph/tests/test-journal.yml new file mode 100644 index 000000000..850a805fc --- /dev/null +++ b/ansible/roles/kolla-ceph/tests/test-journal.yml @@ -0,0 +1,93 @@ +--- +# Test case with an OSD and external journal that have not yet been tagged by +# kayobe with the kolla-ansible bootstrap label. 
+ +- hosts: localhost + connection: local + tasks: + - name: Allocate a temporary file for a fake OSD + tempfile: + register: osd_tempfile + + - name: Allocate a temporary file for a fake journal + tempfile: + register: journal_tempfile + + - name: Allocate a fake OSD file + command: fallocate -l 10M {{ osd_tempfile.path }} + + - name: Allocate a fake journal file + command: fallocate -l 10M {{ journal_tempfile.path }} + + - block: + - name: Import parted role + include_role: + name: ../../stackhpc.parted-1-1 + + - name: Test the kolla-ceph role + include_role: + name: ../../kolla-ceph + vars: + ceph_disks: + - osd: "{{ osd_tempfile.path }}" + journal: "{{ journal_tempfile.path }}" + + - name: Get name of fake OSD partition + parted_1_1: + device: "{{ osd_tempfile.path }}" + register: "disk_osd_info" + become: True + + - name: Validate number of OSD partitions + assert: + that: disk_osd_info.partitions | length == 1 + msg: > + Number of OSD partitions is not correct. Expected 1, + actual {{ disk_osd_info.partitions | length }} + + - name: Validate OSD tag is present + assert: + that: "disk_osd_info.partitions.0.name == expected" + msg: > + Name of OSD partition is not correct. Expected {{ expected }}, + actual {{ disk_osd_info.partitions.0.name }}. + vars: + expected: "{{ 'KOLLA_CEPH_OSD_BOOTSTRAP_' ~ ((osd_tempfile.path | basename ~ ansible_hostname)| hash('md5'))[:9] }}" + + - name: Get name of fake journal partition + parted_1_1: + device: "{{ journal_tempfile.path }}" + register: "disk_journal_info" + become: True + + - name: Validate number of journal partitions + assert: + that: disk_journal_info.partitions | length == 1 + msg: > + Number of journal partitions is not correct. Expected 1, + actual {{ disk_journal_info.partitions | length }} + + - name: Validate journal tag is present + assert: + that: "disk_journal_info.partitions.0.name == expected" + msg: > + Name of journal partition is not correct. 
Expected {{ expected }}, + actual {{ disk_journal_info.partitions.0.name }}. + vars: + expected: "{{ 'KOLLA_CEPH_OSD_BOOTSTRAP_' ~ ((osd_tempfile.path | basename ~ ansible_hostname)| hash('md5'))[:9] ~ '_J' }}" + + always: + - name: Remove the fake OSD file + file: + name: "{{ osd_tempfile.path }}" + state: absent + + - name: Remove the fake journal file + file: + name: "{{ journal_tempfile.path }}" + state: absent + + rescue: + - name: Flag that a failure occurred + set_fact: + test_failures: "{{ test_failures | default(0) | int + 1 }}" diff --git a/ansible/roles/kolla-ceph/tests/test-no-journal.yml b/ansible/roles/kolla-ceph/tests/test-no-journal.yml new file mode 100644 index 000000000..8c0dec3f8 --- /dev/null +++ b/ansible/roles/kolla-ceph/tests/test-no-journal.yml @@ -0,0 +1,54 @@ +--- +# Test case with an OSD and no external journal that has not yet been tagged by +# kayobe with the kolla-ansible bootstrap label. + +- hosts: localhost + connection: local + tasks: + - name: Allocate a temporary file for a fake OSD + tempfile: + register: tempfile + + - name: Allocate a fake OSD file + command: fallocate -l 10M {{ tempfile.path }} + + - block: + - name: Import parted role + include_role: + name: ../../stackhpc.parted-1-1 + + - name: Test the kolla-ceph role + include_role: + name: ../../kolla-ceph + vars: + ceph_disks: + - osd: "{{ tempfile.path }}" + + - name: Get name of fake partition + parted_1_1: + device: "{{ tempfile.path }}" + register: "disk_osd_info" + become: True + + - name: Validate number of partition + assert: + that: disk_osd_info.partitions | length == 1 + msg: > + Number of partition is not correct. + + - name: Validate OSD tag is present + assert: + that: "disk_osd_info.partitions.0.name == 'KOLLA_CEPH_OSD_BOOTSTRAP'" + msg: > + Name of partition is not correct. 
+ + always: + - name: Remove the fake OSD file + file: + name: "{{ tempfile.path }}" + state: absent + + rescue: + - name: Flag that a failure occurred + set_fact: + test_failures: "{{ test_failures | default(0) | int + 1 }}" diff --git a/ansible/roles/kolla/defaults/main.yml b/ansible/roles/kolla/defaults/main.yml index 82adc1402..3d6bf0a27 100644 --- a/ansible/roles/kolla/defaults/main.yml +++ b/ansible/roles/kolla/defaults/main.yml @@ -15,6 +15,10 @@ kolla_source_version: # Virtualenv directory where Kolla will be installed. kolla_venv: "{{ ansible_env['PWD'] }}/kolla-venv" +# Upper constraints file which is passed to pip when installing packages +# into the kolla venv. +kolla_upper_constraints_file: + # Directory where Kolla config files will be installed. kolla_build_config_path: diff --git a/ansible/roles/kolla/tasks/install.yml b/ansible/roles/kolla/tasks/install.yml index 8095c786d..103dce49c 100644 --- a/ansible/roles/kolla/tasks/install.yml +++ b/ansible/roles/kolla/tasks/install.yml @@ -51,18 +51,22 @@ with_items: - { name: pip } +- name: Ensure Python package docker-py is absent + # In version 2.0.0, docker renamed the docker-py python package to docker. + # Kolla requires the docker package rather than the docker-py package. + pip: + name: docker-py + state: absent + virtualenv: "{{ kolla_venv }}" + - name: Ensure required Python packages are installed pip: name: "{{ item.name }}" version: "{{ item.version | default(omit) }}" state: "{{ item.state | default('present') }}" virtualenv: "{{ kolla_venv }}" + extra_args: "{% if kolla_upper_constraints_file %}-c {{ kolla_upper_constraints_file }}{% endif %}" with_items: - # In version 2.0.0, docker renamed the docker-py python package to docker. - # Kolla requires the docker package rather than the docker-py package. - - name: docker-py - state: absent - - name: docker # Intall Kolla from source. 
     - name: "{{ kolla_source_path }}"
       install: "{{ kolla_ctl_install_type == 'source' }}"
@@ -70,7 +74,4 @@
     - name: "kolla"
       version: "{{ kolla_openstack_release }}"
       install: "{{ kolla_ctl_install_type == 'binary' }}"
-    # Required for kolla-genpwd.
-    - name: PyYAML
-      version: "3.12"
   when: item.install | default(True) | bool
diff --git a/doc/source/release-notes.rst b/doc/source/release-notes.rst
index 74228aea6..fbecf0378 100644
--- a/doc/source/release-notes.rst
+++ b/doc/source/release-notes.rst
@@ -43,6 +43,9 @@ Features
   this variable is ``{{ virtualenv_path }}/kolla-ansible``.
 * Adds tags to plays to support more fine grained configuration using the
   ``--tags`` argument.
+* Adds support for deployment of storage hosts. These hosts should be added to
+  the ``[storage]`` group.
+* Adds support for the tagging of ceph disks.
 
 Upgrade Notes
 -------------
@@ -94,6 +97,9 @@ Upgrade Notes
   connecting via SSH, due to a timeout in NSS. The workaround employed here is
   to remove this bogus entry from the image using virt-customize, if it
   exists. See https://bugs.centos.org/view.php?id=14369.
+* Adds a group ``storage``, which is used to deploy nodes with cinder-volume,
+  LVM or ceph-osd. To add these services to the compute or controllers group,
+  override ``kolla_overcloud_inventory_storage_groups``.
 
 Kayobe 3.0.0
 ============
diff --git a/etc/kayobe/compute.yml b/etc/kayobe/compute.yml
index 18fa9a878..8d97cec16 100644
--- a/etc/kayobe/compute.yml
+++ b/etc/kayobe/compute.yml
@@ -83,6 +83,16 @@
 # Filesystem for docker volumes LVM backing volume. ext4 allows for shrinking.
 #compute_lvm_group_data_lv_docker_volumes_fs:
 
+###############################################################################
+# Compute node Ceph configuration.
+
+# List of Ceph disks.
+# The format is a list of dict like :
+# - { osd: "/dev/sdb", journal: "/dev/sdc" }
+# - { osd: "/dev/sdd" }
+# Journal variable is not mandatory.
+#compute_ceph_disks: + ############################################################################### # Compute node sysctl configuration. diff --git a/etc/kayobe/controllers.yml b/etc/kayobe/controllers.yml index ed412727f..0af11ebdc 100644 --- a/etc/kayobe/controllers.yml +++ b/etc/kayobe/controllers.yml @@ -86,6 +86,16 @@ # Filesystem for docker volumes LVM backing volume. ext4 allows for shrinking. #controller_lvm_group_data_lv_docker_volumes_fs: +############################################################################### +# Controller node Ceph configuration. + +# List of Ceph disks. +# The format is a list of dict like : +# - { osd: "/dev/sdb", journal: "/dev/sdc" } +# - { osd: "/dev/sdd" } +# Journal variable is not mandatory. +#controller_ceph_disks: + ############################################################################### # Controller node sysctl configuration. diff --git a/etc/kayobe/overcloud.yml b/etc/kayobe/overcloud.yml index 4b35737a7..5c4828ad4 100644 --- a/etc/kayobe/overcloud.yml +++ b/etc/kayobe/overcloud.yml @@ -13,6 +13,9 @@ # should not be added to the inventory. #overcloud_group_hosts_map: +# To prevent some network issues you can choose to disable cloud-init +#disable_cloud_init: + ############################################################################### # Overcloud host image configuration. diff --git a/etc/kayobe/storage.yml b/etc/kayobe/storage.yml new file mode 100644 index 000000000..e1e1795cc --- /dev/null +++ b/etc/kayobe/storage.yml @@ -0,0 +1,111 @@ +--- +############################################################################### +# Storage node configuration. + +# User with which to access the storages via SSH during bootstrap, in order +# to setup the Kayobe user account. +#storage_bootstrap_user: + +############################################################################### +# Network interface attachments. + +# List of networks to which storage nodes are attached. 
+#storage_network_interfaces: + +# List of default networks to which storage nodes are attached. +#storage_default_network_interfaces: + +# List of extra networks to which storage nodes are attached. +#storage_extra_network_interfaces: + +############################################################################### +# Storage node BIOS configuration. + +# Dict of storage BIOS options. Format is same as that used by stackhpc.drac +# role. +#storage_bios_config: + +# Dict of default storage BIOS options. Format is same as that used by +# stackhpc.drac role. +#storage_bios_config_default: + +# Dict of additional storage BIOS options. Format is same as that used by +# stackhpc.drac role. +#storage_bios_config_extra: + +############################################################################### +# Storage node RAID configuration. + +# List of storage RAID volumes. Format is same as that used by stackhpc.drac +# role. +#storage_raid_config: + +# List of default storage RAID volumes. Format is same as that used by +# stackhpc.drac role. +#storage_raid_config_default: + +# List of additional storage RAID volumes. Format is same as that used by +# stackhpc.drac role. +#storage_raid_config_extra: + +############################################################################### +# Storage node LVM configuration. + +# List of storage volume groups. See mrlesmithjr.manage-lvm role for +# format. +#storage_lvm_groups: + +# Default list of storage volume groups. See mrlesmithjr.manage-lvm role for +# format. +#storage_lvm_groups_default: + +# Additional list of storage volume groups. See mrlesmithjr.manage-lvm role +# for format. +#storage_lvm_groups_extra: + +# Storage LVM volume group for data. See mrlesmithjr.manage-lvm role for +# format. +#storage_lvm_group_data: + +# List of disks for use by storage LVM data volume group. Default to an +# invalid value to require configuration. 
+#storage_lvm_group_data_disks: + +# List of LVM logical volumes for the data volume group. +#storage_lvm_group_data_lvs: + +# Docker volumes LVM backing volume. +#storage_lvm_group_data_lv_docker_volumes: + +# Size of docker volumes LVM backing volume. +#storage_lvm_group_data_lv_docker_volumes_size: + +# Filesystem for docker volumes LVM backing volume. ext4 allows for shrinking. +#storage_lvm_group_data_lv_docker_volumes_fs: + +############################################################################### +# Storage node Ceph configuration. + +# List of Ceph disks. +# The format is a list of dict like : +# - { osd: "/dev/sdb", journal: "/dev/sdc" } +# - { osd: "/dev/sdd" } +# Journal variable is not mandatory. +#storage_ceph_disks: + +############################################################################### +# Storage node sysctl configuration. + +# Dict of sysctl parameters to set. +#storage_sysctl_parameters: + +############################################################################### +# Storage node user configuration. + +# List of users to create. This should be in a format accepted by the +# singleplatform-eng.users role. +#storage_users: + +############################################################################### +# Dummy variable to allow Ansible to accept this file. 
+workaround_ansible_issue_8743: yes diff --git a/kayobe/cli/commands.py b/kayobe/cli/commands.py index fb281147f..1e1c7056d 100644 --- a/kayobe/cli/commands.py +++ b/kayobe/cli/commands.py @@ -691,7 +691,7 @@ class OvercloudHostConfigure(KollaAnsibleMixin, KayobeAnsibleMixin, VaultMixin, playbooks += _build_playbook_list("wipe-disks") playbooks += _build_playbook_list( "users", "yum", "dev-tools", "disable-selinux", "network", - "sysctl", "disable-glean", "ntp", "lvm") + "sysctl", "disable-glean", "disable-cloud-init", "ntp", "lvm") self.run_kayobe_playbooks(parsed_args, playbooks, limit="overcloud") playbooks = _build_playbook_list("kolla-ansible") self.run_kayobe_playbooks(parsed_args, playbooks, tags="config") @@ -716,7 +716,7 @@ class OvercloudHostConfigure(KollaAnsibleMixin, KayobeAnsibleMixin, VaultMixin, # Further kayobe playbooks. playbooks = _build_playbook_list( - "kolla-target-venv", "kolla-host", "docker") + "kolla-target-venv", "kolla-host", "docker", "ceph-block-devices") self.run_kayobe_playbooks(parsed_args, playbooks, limit="overcloud") diff --git a/kayobe/tests/unit/cli/test_commands.py b/kayobe/tests/unit/cli/test_commands.py index 84aa64e1d..161fe3f00 100644 --- a/kayobe/tests/unit/cli/test_commands.py +++ b/kayobe/tests/unit/cli/test_commands.py @@ -466,6 +466,7 @@ class TestCase(unittest.TestCase): "ansible/network.yml", "ansible/sysctl.yml", "ansible/disable-glean.yml", + "ansible/disable-cloud-init.yml", "ansible/ntp.yml", "ansible/lvm.yml", ], @@ -482,6 +483,7 @@ class TestCase(unittest.TestCase): "ansible/kolla-target-venv.yml", "ansible/kolla-host.yml", "ansible/docker.yml", + "ansible/ceph-block-devices.yml", ], limit="overcloud", ), diff --git a/requirements.yml b/requirements.yml index 0cc06d688..c0ae60f73 100644 --- a/requirements.yml +++ b/requirements.yml @@ -8,6 +8,7 @@ - src: https://github.com/stackhpc/ansible-users version: append name: singleplatform-eng.users +- src: stackhpc.parted-1-1 - src: stackhpc.drac - src: 
stackhpc.drac-facts
 - src: stackhpc.grafana-conf
diff --git a/tools/test-ansible.sh b/tools/test-ansible.sh
new file mode 100755
index 000000000..e54dc56fc
--- /dev/null
+++ b/tools/test-ansible.sh
@@ -0,0 +1,17 @@
+#!/bin/bash
+
+# Run ansible tests. Any arguments passed to this script will be passed onto
+# ansible-playbook.
+
+set -e
+
+failed=0
+for playbook in ansible/roles/*/tests/main.yml; do
+    if ! ansible-playbook --connection=local "$playbook" "$@"; then
+        failed=$((failed + 1))
+    fi
+done
+if [[ $failed -ne 0 ]]; then
+    echo "Failed $failed test cases"
+    exit 1
+fi
diff --git a/tox.ini b/tox.ini
index 267fca970..342b202d2 100644
--- a/tox.ini
+++ b/tox.ini
@@ -23,11 +23,7 @@ commands =
     flake8 {posargs} kayobe
     # Check the *.rst files
     # We use a thin wrapper around doc8 currently, which has support for sphinx
-    # directives. We install sphinx 1.5.x because versions prior to this
-    # (installed due to upper constraints) automatically import all
-    # sphinx.directive.* modules when any one of those modules is imported, and
-    # importing sphinx.directive.other breaks docutils parsing.
-    pip install -U sphinx<1.6
+    # directives.
     {toxinidir}/tools/sphinx8 README.rst CONTRIBUTING.rst doc/source --ignore D001
 
 [testenv:venv]
@@ -46,11 +42,11 @@ usedevelop = True
 sitepackages = True
 install_command = pip install -c{env:UPPER_CONSTRAINTS_FILE:https://git.openstack.org/cgit/openstack/requirements/plain/upper-constraints.txt?h=stable/pike} {opts} {packages}
 commands =
-    bash -c \
-        "ansible-playbook \
-        --connection=local \
-        {toxinidir}/ansible/roles/*/tests/main.yml \
-        {posargs}"
+    # Install ansible role dependencies from Galaxy.
+    ansible-galaxy install \
+        -r {toxinidir}/requirements.yml \
+        -p {toxinidir}/ansible/roles
+    {toxinidir}/tools/test-ansible.sh {posargs}
 
 [testenv:molecule]
 install_command = pip install -c{env:UPPER_CONSTRAINTS_FILE:https://git.openstack.org/cgit/openstack/requirements/plain/upper-constraints.txt?h=stable/pike} {opts} {packages}