CI: Move from ceph-ansible to cephadm

Change-Id: I81a4f8f8b8faa7559740531bb16d8aec7fc23f9b
This commit is contained in:
Michał Nasiadka 2021-01-19 11:27:59 +01:00
parent a8981a79aa
commit 65a16a08e2
16 changed files with 234 additions and 131 deletions

View File

@ -0,0 +1,29 @@
---
# Defaults for the cephadm CI role.

# Upstream Ceph Octopus package repositories used to install the cephadm
# bootstrap tool.
cephadm_ceph_apt_repo: "deb http://download.ceph.com/debian-octopus/ {{ ansible_distribution_release }} main"
cephadm_ceph_rpm_repos:
  - shortname: "ceph"
    name: "Ceph {{ ansible_architecture }} repository"
    url: "http://download.ceph.com/rpm-octopus/el8/{{ ansible_architecture }}/"
  - shortname: "ceph-noarch"
    name: "Ceph noarch repository"
    url: "http://download.ceph.com/rpm-octopus/el8/noarch/"
cephadm_ceph_rpm_gpg_key: "http://download.ceph.com/keys/release.gpg"

# OSD specs in "<inventory host>:<block device>" form, passed verbatim to
# "ceph orch daemon add osd".
cephadm_ceph_osd_devices:
  - "primary:/dev/cephvg/cephlv"
  - "secondary1:/dev/cephvg/cephlv"
  - "secondary2:/dev/cephvg/cephlv"

# Pools created for the OpenStack services. NOTE: "backups" (plural) must
# match the "pool=backups" capability granted to client.cinder-backup below —
# the previous "backup" (singular) would create a pool the capability never
# references.
cephadm_ceph_pools:
  - backups
  - images
  - vms
  - volumes

# Arguments passed verbatim to "ceph auth get-or-create": entity name,
# capabilities, and the output keyring path inside the cephadm shell.
cephadm_ceph_users:
  - client.glance mon 'profile rbd' osd 'profile rbd pool=images' mgr 'profile rbd pool=images' -o /var/run/ceph/ceph.client.glance.keyring
  - client.cinder mon 'profile rbd' osd 'profile rbd pool=volumes, profile rbd pool=vms, profile rbd-read-only pool=images' mgr 'profile rbd pool=volumes, profile rbd pool=vms' -o /var/run/ceph/ceph.client.cinder.keyring
  - client.cinder-backup mon 'profile rbd' osd 'profile rbd pool=backups' mgr 'profile rbd pool=backups' -o /var/run/ceph/ceph.client.cinder-backup.keyring
  - client.nova mon 'profile rbd' osd 'profile rbd pool=volumes, profile rbd pool=vms, profile rbd-read-only pool=images' mgr 'profile rbd pool=volumes, profile rbd pool=vms' -o /var/run/ceph/ceph.client.nova.keyring

View File

@ -0,0 +1,113 @@
---
# Bootstrap a Ceph cluster with cephadm and prepare the pools and keyrings
# consumed by the OpenStack services that kolla-ansible deploys.

- name: Install distro-specific Ceph package repositories
  include_tasks: "pkg_{{ ansible_os_family | lower }}.yml"

- name: Install cephadm
  become: true
  package:
    name: cephadm
    state: present

- name: Ensure /etc/ceph exists
  become: true
  file:
    path: /etc/ceph
    state: directory

- name: Generate ssh key for cephadm
  become: true
  openssh_keypair:
    path: "/etc/ceph/cephadm.id"
    size: 4096
    comment: "cephadm"
  register: cephadm_ssh_key

- name: Save public key
  become: true
  copy:
    content: "{{ cephadm_ssh_key.public_key }}"
    dest: /etc/ceph/cephadm.pub

# cephadm manages remote hosts over ssh as root, so its public key must be
# authorized on every host in the inventory.
- name: Copy cephadm public key to all hosts
  become: true
  authorized_key:
    user: root
    state: present
    key: "{{ cephadm_ssh_key.public_key }}"
  with_inventory_hostnames:
    - all
  delegate_to: "{{ item }}"

- name: Bootstrap cephadm
  vars:
    mon_ip: "{{ hostvars[inventory_hostname]['ansible_'+api_interface_name].ipv4.address }}"
  become: true
  command:
    cmd: >
      cephadm bootstrap
      --ssh-private-key=/etc/ceph/cephadm.id
      --ssh-public-key=/etc/ceph/cephadm.pub
      --skip-monitoring-stack
      --skip-dashboard
      --skip-firewalld
      --mon-ip={{ mon_ip }}
  register: cephadm_bootstrap_output

# The fsid is only reported on stdout by "cephadm bootstrap"; extract it for
# use in paths and ceph.conf below.
- name: Get ceph fsid
  vars:
    regexp: 'Cluster fsid: (.*)'
  set_fact:
    ceph_fsid: "{{ cephadm_bootstrap_output.stdout | regex_search(regexp,'\\1') | first }}"

# NOTE(review): the spec is written to /var/run/ceph/<fsid>/ on the host but
# applied as /var/run/ceph/cluster.yml — this assumes "cephadm shell"
# bind-mounts the per-fsid directory at /var/run/ceph inside the container;
# confirm against the cephadm version in use.
- name: Template out cluster spec
  become: true
  template:
    src: templates/cephadm.yml.j2
    dest: "/var/run/ceph/{{ ceph_fsid }}/cluster.yml"

- name: Apply cluster spec
  become: true
  command:
    cmd: >
      cephadm shell --
      ceph orch apply -i /var/run/ceph/cluster.yml

- name: Add osds
  become: true
  command:
    cmd: >
      cephadm shell --
      ceph orch daemon add osd {{ item }}
  loop: "{{ cephadm_ceph_osd_devices }}"

- name: Create and initialise pools for OpenStack services
  become: true
  command:
    cmd: >
      cephadm shell --
      ceph osd pool create {{ item }}
  with_items: "{{ cephadm_ceph_pools }}"

- name: Create users for OpenStack services
  become: true
  command:
    cmd: >
      cephadm shell --
      ceph auth get-or-create {{ item }}
  with_items: "{{ cephadm_ceph_users }}"

# TODO(mnasiadka): Fix merge_configs to support tabs
- name: Generate ceph.conf without tabs
  become: true
  vars:
    ceph_conf_fixed: |
      [global]
      fsid = {{ ceph_fsid }}
      mon_host = {% for host in groups['all'] %} {{ hostvars[host]['ansible_'+api_interface_name].ipv4.address }} {% if not loop.last %},{% endif %} {% endfor %}
  copy:
    content: "{{ ceph_conf_fixed }}"
    dest: /etc/ceph/ceph.conf

- name: Check ceph health
  become: true
  command:
    cmd: cephadm shell -- ceph health detail

View File

@ -0,0 +1,13 @@
---
# Debian/Ubuntu: configure the upstream Ceph apt repository so the cephadm
# package can be installed.
# NOTE(review): the repo is signed with the Ceph release key; presumably apt
# trust for it is established elsewhere — confirm.

- name: Ensure apt sources list directory exists
  become: true
  file:
    path: /etc/apt/sources.list.d
    state: directory
    recurse: yes

- name: Enable Ceph apt repository
  become: true
  apt_repository:
    repo: "{{ cephadm_ceph_apt_repo }}"
    filename: ceph

View File

@ -0,0 +1,16 @@
---
# RHEL/CentOS: configure the upstream Ceph yum repositories so the cephadm
# package can be installed.

- name: Ensure yum repos directory exists
  become: true
  file:
    path: /etc/yum.repos.d/
    state: directory
    recurse: yes

- name: Enable Ceph base yum repository
  become: true
  yum_repository:
    name: "{{ item.shortname }}"
    description: "{{ item.name }}"
    baseurl: "{{ item.url }}"
    # Without gpgcheck the configured gpgkey is not enforced; enable it so
    # packages are actually verified against the Ceph release key.
    gpgcheck: true
    gpgkey: "{{ cephadm_ceph_rpm_gpg_key }}"
  loop: "{{ cephadm_ceph_rpm_repos }}"

View File

@ -0,0 +1,21 @@
{# Ceph orchestrator cluster spec consumed by "ceph orch apply -i":
   one "host" document per inventory host, then service placement specs
   keyed on the labels assigned to those hosts. #}
{% for host in groups['all'] %}
---
service_type: host
hostname: {{ host }}
labels:
  - mon
  - mgr
  - osd
{% endfor %}
---
service_type: mon
placement:
  label: "mon"
---
service_type: mgr
placement:
  label: "mgr"
{# Crash-reporting daemon runs everywhere regardless of labels. #}
---
service_type: crash
placement:
  host_pattern: "*"

View File

@ -23,7 +23,6 @@ function check_config {
-not -path /etc/kolla \
-not -regex .*-openrc.sh \
-not -name globals.yml \
-not -name ceph-ansible.yml \
-not -name header \
-not -name inventory \
-not -name ceph-inventory \

View File

@ -1,29 +0,0 @@
#!/bin/bash
set -o xtrace
set -o errexit
# Enable unbuffered output for Ansible in Jenkins.
export PYTHONUNBUFFERED=1
function setup_ceph_ansible {
# Prepare virtualenv for ceph-ansible deployment
python3 -m venv --system-site-packages ~/ceph-venv
# NOTE(mgoddard): We need a recent pip to install the latest cryptography
# library. See https://github.com/pyca/cryptography/issues/5753
~/ceph-venv/bin/pip install -I 'pip>=19.1.1'
~/ceph-venv/bin/pip install -Ir requirements.txt
~/ceph-venv/bin/pip install -IU selinux
}
function deploy_ceph_ansible {
RAW_INVENTORY=/etc/kolla/ceph-inventory
. ~/ceph-venv/bin/activate
cp site-container.yml.sample site-container.yml
ansible-playbook -i ${RAW_INVENTORY} -e @/etc/kolla/ceph-ansible.yml -vvv site-container.yml --skip-tags=with_pkg &> /tmp/logs/ansible/deploy-ceph
}
setup_ceph_ansible
deploy_ceph_ansible

View File

@ -78,15 +78,10 @@ copy_logs() {
# docker related information
(docker info && docker images && docker ps -a && docker network ls && docker inspect $(docker ps -aq)) > ${LOG_DIR}/system_logs/docker-info.txt
# ceph-ansible related logs
# cephadm related logs
mkdir -p ${LOG_DIR}/ceph
for container in $(docker ps --filter name=ceph-mon --format "{{.Names}}"); do
docker exec ${container} ceph --connect-timeout 5 -s > ${LOG_DIR}/ceph/ceph_s.txt
# NOTE(yoctozepto): osd df removed on purpose to avoid CI POST_FAILURE due to a possible hang:
# as of ceph mimic it hangs when MON is operational but MGR not
# its usefulness is mediocre and having POST_FAILUREs is bad
docker exec ${container} ceph --connect-timeout 5 osd tree > ${LOG_DIR}/ceph/ceph_osd_tree.txt
done
sudo cephadm shell -- ceph --connect-timeout 5 -s > ${LOG_DIR}/ceph/ceph_s.txt
sudo cephadm shell -- ceph --connect-timeout 5 osd tree > ${LOG_DIR}/ceph/ceph_osd_tree.txt
# bifrost related logs
if [[ $(docker ps --filter name=bifrost_deploy --format "{{.Names}}") ]]; then

View File

@ -17,7 +17,6 @@
kolla_ansible_src_dir: "{{ ansible_env.PWD }}/src/{{ zuul.project.canonical_hostname }}/openstack/kolla-ansible"
kolla_ansible_local_src_dir: "{{ zuul.executor.work_root }}/src/{{ zuul.project.canonical_hostname }}/openstack/kolla-ansible"
infra_dockerhub_mirror: "http://{{ zuul_site_mirror_fqdn }}:8082/"
ceph_ansible_src_dir: "{{ ansible_env.PWD }}/src/github.com/ceph/ceph-ansible"
need_build_image: false
build_image_tag: "change_{{ zuul.change | default('none') }}"
openstack_core_enabled: "{{ openstack_core_enabled }}"
@ -42,11 +41,10 @@
- name: Prepare disks for a storage service
script: "setup_disks.sh {{ disk_type }}"
when: scenario in ['ceph-ansible', 'zun', 'swift']
when: scenario in ['cephadm', 'zun', 'swift']
become: true
vars:
disk_type: "{{ ceph_storetype if scenario in ['ceph-ansible'] else scenario }}"
ceph_storetype: "{{ hostvars[inventory_hostname].get('ceph_osd_storetype') }}"
disk_type: "{{ 'ceph-lvm' if scenario in ['cephadm'] else scenario }}"
- hosts: primary
any_errors_fatal: true
@ -168,14 +166,6 @@
- src: "tests/templates/ironic-overrides.j2"
dest: /etc/kolla/config/ironic.conf
when: "{{ scenario == 'ironic' }}"
# Ceph-Ansible inventory
- src: "tests/templates/ceph-inventory.j2"
dest: /etc/kolla/ceph-inventory
when: "{{ scenario == 'ceph-ansible' }}"
# ceph-ansible.yml
- src: "tests/templates/ceph-ansible.j2"
dest: /etc/kolla/ceph-ansible.yml
when: "{{ scenario == 'ceph-ansible' }}"
when: item.when | default(true)
@ -314,47 +304,41 @@
# ready to deploy the control plane services. Control flow now depends on
# the scenario being exercised.
# Deploy ceph-ansible on ceph-ansible scenarios
# Deploy cephadm on cephadm scenarios
- block:
- name: Run deploy-ceph-ansible.sh script
script:
cmd: deploy-ceph-ansible.sh
executable: /bin/bash
chdir: "{{ ceph_ansible_src_dir }}"
environment:
BASE_DISTRO: "{{ base_distro }}"
- import_role:
name: cephadm
- name: Ensure required kolla config directories exist
file:
state: directory
name: "/etc/kolla/config/{{ item.name }}"
mode: 0777
with_items: "{{ ceph_ansible_services }}"
mode: 0755
with_items: "{{ cephadm_kolla_ceph_services }}"
- name: copy ceph.conf to enabled services
copy:
remote_src: True
src: "/etc/ceph/ceph.conf"
dest: "/etc/kolla/config/{{ item.name }}/ceph.conf"
remote_src: True
with_items: "{{ ceph_ansible_services }}"
with_items: "{{ cephadm_kolla_ceph_services }}"
- name: copy keyrings to enabled services
copy:
remote_src: True
src: "/etc/ceph/{{ item.keyring }}"
src: "/var/run/ceph/{{ ceph_fsid }}/{{ item.keyring }}"
dest: "/etc/kolla/config/{{ item.name }}/{{ item.keyring }}"
with_items: "{{ ceph_ansible_services }}"
with_items: "{{ cephadm_kolla_ceph_services }}"
become: True
vars:
ceph_ansible_services:
- { name: 'cinder/cinder-volume', keyring: "ceph.client.cinder.keyring" }
- { name: 'cinder/cinder-backup', keyring: "ceph.client.cinder.keyring" }
- { name: 'cinder/cinder-backup', keyring: "ceph.client.cinder-backup.keyring" }
- { name: 'glance', keyring: "ceph.client.glance.keyring" }
- { name: 'nova', keyring: "ceph.client.nova.keyring" }
- { name: 'nova', keyring: "ceph.client.cinder.keyring" }
when: scenario == "ceph-ansible"
cephadm_kolla_ceph_services:
- { name: "cinder/cinder-volume", keyring: "ceph.client.cinder.keyring" }
- { name: "cinder/cinder-backup", keyring: "ceph.client.cinder.keyring" }
- { name: "cinder/cinder-backup", keyring: "ceph.client.cinder-backup.keyring" }
- { name: "glance", keyring: "ceph.client.glance.keyring" }
- { name: "nova", keyring: "ceph.client.nova.keyring" }
- { name: "nova", keyring: "ceph.client.cinder.keyring" }
when: scenario == "cephadm"
# Deploy control plane. For upgrade jobs this is the previous release.
- block:

View File

@ -48,7 +48,7 @@ function prepare_images {
GATE_IMAGES="bifrost"
fi
if [[ $SCENARIO == "ceph-ansible" ]]; then
if [[ $SCENARIO == "cephadm" ]]; then
GATE_IMAGES+=",^cinder"
fi

View File

@ -1,36 +0,0 @@
# ceph-ansible group vars
ceph_stable_release: "nautilus"
monitor_interface: "{{ api_interface_name }}"
radosgw_interface: "{{ api_interface_name }}"
public_network: "{{ api_network_prefix }}0/{{ api_network_prefix_length }}"
configure_firewall: false
docker: true
containerized_deployment: true
container_binary: "docker"
docker_pull_timeout: "600s"
ceph_docker_image_tag: "latest-octopus"
dashboard_enabled: false
openstack_config: true
{% raw %}
openstack_pools:
- "{{ openstack_glance_pool }}"
- "{{ openstack_cinder_pool }}"
- "{{ openstack_cinder_backup_pool }}"
- "{{ openstack_nova_pool }}"
openstack_keys:
- { name: client.glance, caps: { mon: "profile rbd", osd: "profile rbd pool=volumes, profile rbd pool={{ openstack_glance_pool.name }}"}, mode: "0600" }
- { name: client.cinder, caps: { mon: "profile rbd", osd: "profile rbd pool={{ openstack_cinder_pool.name }}, profile rbd pool={{ openstack_nova_pool.name }}, profile rbd pool={{ openstack_glance_pool.name }}"}, mode: "0600" }
- { name: client.cinder-backup, caps: { mon: "profile rbd", osd: "profile rbd pool={{ openstack_cinder_backup_pool.name }}"}, mode: "0600" }
- { name: client.nova, caps: { mon: "profile rbd", osd: "profile rbd pool={{ openstack_nova_pool.name }}, profile rbd pool={{ openstack_cinder_pool.name }}, profile rbd pool={{ openstack_glance_pool.name }}"}, mode: "0600" }
{% endraw %}
# osds
lvm_volumes:
- data: cephlv
data_vg: cephvg

View File

@ -111,7 +111,9 @@ enable_memcached: "no"
enable_rabbitmq: "no"
{% endif %}
{% if scenario == "ceph-ansible" %}
{% if scenario == "cephadm" %}
# Disable chrony - cephadm requires chronyd.service running
enable_chrony: "no"
# kolla-ansible vars
enable_cinder: "yes"
# External Ceph

View File

@ -11,7 +11,7 @@ function test_smoke {
openstack --debug compute service list
openstack --debug network agent list
openstack --debug orchestration service list
if [[ $SCENARIO == "ceph-ansible" ]] || [[ $SCENARIO == "zun" ]]; then
if [[ $SCENARIO == "cephadm" ]] || [[ $SCENARIO == "zun" ]]; then
openstack --debug volume service list
fi
}
@ -28,7 +28,7 @@ function test_instance_boot {
fi
echo "SUCCESS: Server creation"
if [[ $SCENARIO == "ceph-ansible" ]] || [[ $SCENARIO == "zun" ]]; then
if [[ $SCENARIO == "cephadm" ]] || [[ $SCENARIO == "zun" ]]; then
echo "TESTING: Cinder volume attachment"
openstack volume create --size 2 test_volume
attempt=1

View File

@ -116,15 +116,11 @@
scenario: swift
- job:
name: kolla-ansible-ceph-ansible-base
name: kolla-ansible-cephadm-base
parent: kolla-ansible-base
voting: false
vars:
scenario: ceph-ansible
ceph_osd_storetype: ceph-lvm
required-projects:
- name: github.com/ceph/ceph-ansible
override-checkout: v5.0.0
scenario: cephadm
- job:
name: kolla-ansible-magnum-base

View File

@ -87,8 +87,8 @@
install_type: binary
- job:
name: kolla-ansible-centos8-source-ceph-ansible
parent: kolla-ansible-ceph-ansible-base
name: kolla-ansible-centos8-source-cephadm
parent: kolla-ansible-cephadm-base
nodeset: kolla-ansible-centos8-multi
timeout: 9000
vars:
@ -96,8 +96,8 @@
install_type: source
- job:
name: kolla-ansible-ubuntu-source-ceph-ansible
parent: kolla-ansible-ceph-ansible-base
name: kolla-ansible-ubuntu-source-cephadm
parent: kolla-ansible-cephadm-base
nodeset: kolla-ansible-focal-multi
timeout: 9000
vars:
@ -160,8 +160,8 @@
tls_enabled: true
- job:
name: kolla-ansible-centos8-source-upgrade-ceph-ansible
parent: kolla-ansible-ceph-ansible-base
name: kolla-ansible-centos8-source-upgrade-cephadm
parent: kolla-ansible-cephadm-base
nodeset: kolla-ansible-centos8-multi
timeout: 9000
vars:
@ -170,8 +170,8 @@
is_upgrade: yes
- job:
name: kolla-ansible-ubuntu-source-upgrade-ceph-ansible
parent: kolla-ansible-ceph-ansible-base
name: kolla-ansible-ubuntu-source-upgrade-cephadm
parent: kolla-ansible-cephadm-base
nodeset: kolla-ansible-focal-multi
timeout: 9000
vars:

View File

@ -38,10 +38,6 @@
- kolla-ansible-centos8-source-cells
- kolla-ansible-centos8-source-mariadb
- kolla-ansible-ubuntu-source-mariadb
- kolla-ansible-centos8-source-ceph-ansible
- kolla-ansible-ubuntu-source-ceph-ansible
- kolla-ansible-centos8-source-upgrade-ceph-ansible
- kolla-ansible-ubuntu-source-upgrade-ceph-ansible
- kolla-ansible-centos8-source-linuxbridge
- kolla-ansible-ubuntu-source-linuxbridge
- kolla-ansible-centos8-source-ovn
@ -49,6 +45,10 @@
- kolla-ansible-centos8-source-prometheus-efk
- kolla-ansible-ubuntu-source-prometheus-efk
- kolla-ansible-centos8-source-monasca
- kolla-ansible-centos8-source-cephadm
- kolla-ansible-ubuntu-source-cephadm
- kolla-ansible-centos8-source-upgrade-cephadm
- kolla-ansible-ubuntu-source-upgrade-cephadm
check-arm64:
jobs:
- kolla-ansible-debian-source-aarch64