diff --git a/devstack/lib/cephadm b/devstack/lib/cephadm
new file mode 100755
index 0000000..d24ce19
--- /dev/null
+++ b/devstack/lib/cephadm
@@ -0,0 +1,638 @@
+#!/bin/bash
+#
+# lib/cephadm
+# Functions to control the configuration
+# and operation of the **Ceph** storage service
+# when deployed using the cephadm tool
+
+# ``stack.sh`` calls the entry points in this order:
+#
+# - pre_install_ceph
+# - install_ceph
+# - configure_ceph
+# - init_ceph
+# - cleanup_ceph    # unstack || clean
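+#
+# A minimal local.conf snippet to exercise this deployment path might look
+# like the following (illustrative only; the plugin URL is an assumption and
+# not part of this change):
+#
+#   [[local|localrc]]
+#   enable_plugin devstack-plugin-ceph https://opendev.org/openstack/devstack-plugin-ceph
+#   CEPHADM_DEPLOY=True
+#   ENABLE_CEPH_GLANCE=True
+#   ENABLE_CEPH_CINDER=True
+#   ENABLE_CEPH_NOVA=True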
+
+# Save trace setting
+XTRACE=$(set +o | grep xtrace)
+set +o xtrace
+
+# GENERIC CEPHADM INTERNAL OPTIONS, DO NOT EDIT
+CEPH_PUB_KEY="/etc/ceph/ceph.pub"
+CONFIG="/etc/ceph/ceph.conf"
+BOOTSTRAP_CONFIG="$HOME/bootstrap_ceph.conf"
+KEYRING="/etc/ceph/ceph.client.admin.keyring"
+REQUIREMENTS=("jq" "lvm" "python3")
+TARGET_BIN=/usr/bin
+
+# DEFAULT OPTIONS
+ATTEMPTS=30
+CONTAINER_IMAGE=${CONTAINER_IMAGE:-'quay.io/ceph/ceph:v16.2.7'}
+DEVICES=()
+FSID=$(uuidgen)
+KEY_EXPORT_DIR="/etc/ceph"
+KEYS=("client.openstack") # at least the client.openstack default key should be created
+MIN_OSDS=1
+SERVICES=()
+SLEEP=5
+CEPHADM_DEV_OSD=${CEPHADM_DEV_OSD:-"True"}
+CEPH_LOOPBACK_DISK_SIZE=${CEPH_LOOPBACK_DISK_SIZE:-8G}
+TARGET_DEV_OSD_DIR=${TARGET_DEV_OSD_DIR:-"/var/lib"}
+
+# POOLS
+DEFAULT_PG_NUM=${DEFAULT_PG_NUM:-8}
+DEFAULT_PGP_NUM=${DEFAULT_PGP_NUM:-8}
+
+# NFS OPTIONS
+FSNAME=${FSNAME:-'cephfs'}
+NFS_PORT=12345
+CEPHFS_CLIENT=0
+CEPHFS_CLIENT_NAME="client.manila"
+CEPHFS_CLIENT_LOG="/var/log/ceph-$CEPHFS_CLIENT_NAME.log"
+CEPHFS_MULTIPLE_FILESYSTEMS=${CEPHFS_MULTIPLE_FILESYSTEMS:-False}
+
+# RGW OPTIONS
+RGW_PORT=8080
+
+# CLIENT CONFIG
+CEPH_CLIENT_CONFIG=$HOME/ceph_client.conf
+CEPH_CONF_DIR=${CEPH_CONF_DIR:-/etc/ceph}
+# The resulting client config referenced by the other clients
+CEPH_CONF_FILE=${CEPH_CONF_FILE:-$CEPH_CONF_DIR/ceph.conf}
+
+# LOG(s) and EXPORTED CONFIG FILES
+EXPORT=$HOME/ceph_export.yml
+RBD_CLIENT_LOG=/var/log/ceph/qemu-guest-\$pid.log
+
+# MANILA DEFAULTS
+MANILA_CEPH_USER=${MANILA_CEPH_USER:-manila}
+
+# GLANCE DEFAULTS
+GLANCE_CEPH_USER=${GLANCE_CEPH_USER:-glance}
+GLANCE_CEPH_POOL=${GLANCE_CEPH_POOL:-images}
+GLANCE_RGW_BACKEND=${GLANCE_RGW_BACKEND:-False}
+
+# Cinder DEFAULTS
+CINDER_CEPH_POOL=${CINDER_CEPH_POOL:-volumes}
+CINDER_CEPH_USER=${CINDER_CEPH_USER:-cinder}
+CINDER_CEPH_UUID=${CINDER_CEPH_UUID:-$(uuidgen)}
+
+# Cinder Backup DEFAULTS
+CINDER_BAK_CEPH_POOL=${CINDER_BAK_CEPH_POOL:-backups}
+CINDER_BAK_CEPH_USER=${CINDER_BAK_CEPH_USER:-cinder-bak}
+
+# Nova DEFAULTS
+NOVA_CEPH_POOL=${NOVA_CEPH_POOL:-vms}
+
+[ -z "$SUDO" ] && SUDO=sudo
+
+## Admin
+
+# Admin: enable debug mode
+function set_debug {
+    if [ "$DEBUG" -eq 1 ]; then
+        echo "[CEPHADM] Enabling Debug mode"
+        $SUDO "$CEPHADM" shell --fsid $FSID --config $CONFIG \
+            --keyring $KEYRING -- ceph config set mgr mgr/cephadm/log_to_cluster_level debug
+        echo "[CEPHADM] See debug logs running: ceph -W cephadm --watch-debug"
+    fi
+}
+
+# Admin: check ceph cluster status
+function check_cluster_status {
+    $SUDO "$CEPHADM" shell --fsid $FSID --config $CONFIG \
+        --keyring $KEYRING -- ceph -s -f json-pretty
+}
+
+# Admin: export ceph cluster config spec
+function export_spec {
+    $SUDO "$CEPHADM" shell --fsid $FSID --config $CONFIG \
+        --keyring $KEYRING -- ceph orch ls --export > "$EXPORT"
+    echo "Ceph cluster config exported: $EXPORT"
+}
+
+## Pre-install
+
+# Pre-install ceph: check dependencies are available
+function prereq {
+    for cmd in "${REQUIREMENTS[@]}"; do
+        if ! command -v "$cmd" &> /dev/null; then
+            echo "Command $cmd not found"
+            exit 1;
+        fi
+    done
+}
+
+# Pre-install ceph: install podman
+function _install_podman {
+    # FIXME(vkmc) Check required for Ubuntu 20.04 LTS (current CI node)
+    # Remove when our CI is pushed to the next LTS version
+    if [[ $os_CODENAME =~ (focal) ]]; then
+        echo "deb https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/xUbuntu_20.04/ /" \
+            | sudo tee /etc/apt/sources.list.d/devel:kubic:libcontainers:stable.list
+        curl -L "https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/xUbuntu_20.04/Release.key" \
+            | sudo apt-key add -
+        sudo apt-get update
+        sudo apt-get -y upgrade
+    fi
+    install_package podman
+}
+
+# Pre-install ceph: install required dependencies
+function install_deps {
+    install_package jq ceph-common
+    _install_podman
+    if python3_enabled; then
+        install_package python3-cephfs python3-prettytable python3-rados python3-rbd python3-requests
+    fi
+}
+
+# Pre-install ceph: get cephadm binary
+function get_cephadm {
+    curl -O https://raw.githubusercontent.com/ceph/ceph/pacific/src/cephadm/cephadm
+    $SUDO mv cephadm $TARGET_BIN
+    $SUDO chmod +x $TARGET_BIN/cephadm
+    echo "[GET CEPHADM] cephadm is now available"
+
+    if [ -z "$CEPHADM" ]; then
+        CEPHADM=${TARGET_BIN}/cephadm
+    fi
+}
+
+# Pre-install ceph: bootstrap config
+function bootstrap_config {
+    cat <<EOF > "$BOOTSTRAP_CONFIG"
+[global]
+log to file = true
+osd crush chooseleaf type = 0
+osd_pool_default_pg_num = 8
+osd_pool_default_pgp_num = 8
+osd_pool_default_size = 1
+[mon]
+mon_warn_on_pool_no_redundancy = False
+EOF
+}
+
+## Install
+
+# Install ceph: run cephadm bootstrap
+function start_ceph {
+    cluster=$(sudo cephadm ls | jq '.[]' | jq 'select(.name | test("^mon*")).fsid')
+    if [ -z "$cluster" ]; then
+        $SUDO $CEPHADM --image "$CONTAINER_IMAGE" \
+            bootstrap \
+            --fsid $FSID \
+            --config "$BOOTSTRAP_CONFIG" \
+            --output-config $CONFIG \
+            --output-keyring $KEYRING \
+            --output-pub-ssh-key $CEPH_PUB_KEY \
+            --allow-overwrite \
+            --allow-fqdn-hostname \
+            --skip-monitoring-stack \
+            --skip-dashboard \
+            --single-host-defaults \
+            --skip-firewalld \
+            --skip-mon-network \
+            --mon-ip "$HOST_IP"
+
+        test -e $CONFIG
+        test -e $KEYRING
+
+        if [ "$CEPHADM_DEV_OSD" == 'True' ]; then
+            create_osd_dev
+        fi
+        # Wait for the cephadm backend to be operational
+        # and add osds via drivegroups
+        sleep "$SLEEP"
+        add_osds
+    fi
+}
+
+# Install ceph: create a loopback device to be used as osd
+function create_osd_dev {
+    sudo dd if=/dev/zero of=$TARGET_DEV_OSD_DIR/ceph-osd.img bs=1 count=0 seek="$CEPH_LOOPBACK_DISK_SIZE"
+    osd_dev=$(sudo losetup -f --show $TARGET_DEV_OSD_DIR/ceph-osd.img)
+    sudo pvcreate $osd_dev
+    sudo vgcreate ceph_vg $osd_dev
+    sudo lvcreate -n ceph_lv_data -l +100%FREE ceph_vg
+    DEVICES+=("/dev/ceph_vg/ceph_lv_data")
+}
+
+# Cleanup ceph: delete the osd file and release the loopback device
+function delete_osd_dev {
+    if [ $(sudo lvs --noheadings -o lv_path -S lv_name=ceph_lv_data) ]; then
+        sudo lvremove --force /dev/ceph_vg/ceph_lv_data
+        sudo vgremove --force ceph_vg
+        osd_dev=$(sudo losetup -j $TARGET_DEV_OSD_DIR/ceph-osd.img -l -n -O NAME)
+        sudo pvremove --force $osd_dev
+        sudo losetup -d $osd_dev
+        sudo rm -f $TARGET_DEV_OSD_DIR/ceph-osd.img
+        sudo partprobe
+        DEVICES=()
+    fi
+}
+
+# Install ceph: add osds
+function add_osds {
+    # let's add some osds
+    if [ -z "$DEVICES" ]; then
+        echo "Using ALL available devices"
+        $SUDO $CEPHADM shell ceph orch apply osd --all-available-devices
+    else
+        for item in "${DEVICES[@]}"; do
+            echo "Creating osd $item on node $HOSTNAME"
+            $SUDO "$CEPHADM" shell --fsid $FSID --config $CONFIG \
+                --keyring $KEYRING -- ceph orch daemon add osd "$HOSTNAME:$item"
+        done
+    fi
+
+    while [ "$ATTEMPTS" -ne 0 ]; do
+        num_osds=$($SUDO $CEPHADM shell --fsid $FSID --config $CONFIG \
+            --keyring $KEYRING -- ceph -s -f json | jq '.osdmap | .num_up_osds')
+        if [ "$num_osds" -ge "$MIN_OSDS" ]; then
+            break;
+        fi
+        ATTEMPTS=$(("$ATTEMPTS" - 1))
+        sleep 1
+    done
+    echo "[CEPHADM] OSD(s) deployed: $num_osds"
+
+    # [ "$num_osds" -lt "$MIN_OSDS" ] && exit 255
+}
+
+# Install ceph: create and enable pools
+function add_pools {
+
+    [ "${#POOLS[@]}" -eq 0 ] && return;
+
+    for pool in "${POOLS[@]}"; do
+        $SUDO "$CEPHADM" shell --fsid $FSID --config $CONFIG \
+            --keyring $KEYRING -- ceph osd pool create "$pool" "$DEFAULT_PG_NUM" \
+            "$DEFAULT_PGP_NUM" replicated --autoscale-mode on
+
+        # set the application to the pool (which also means rbd init the pool)
+        $SUDO "$CEPHADM" shell --fsid $FSID --config $CONFIG \
+            --keyring $KEYRING -- ceph osd pool application enable "$pool" rbd
+    done
+}
+
+# Utility: build caps according to the generated pools
+function build_caps {
+    local CAPS=""
+    for pool in "${POOLS[@]}"; do
+        caps="allow rwx pool="$pool
+        CAPS+=$caps,
+    done
+    echo "${CAPS::-1}"
+}
+
+# Install ceph: create a keyring
+function _create_key {
+    local name=$1
+    local caps
+    local osd_caps
+
+    if [ "${#POOLS[@]}" -eq 0 ]; then
+        osd_caps="allow *"
+    else
+        caps=$(build_caps)
+        osd_caps="allow class-read object_prefix rbd_children, $caps"
+    fi
+
+    $SUDO "$CEPHADM" shell -v "$KEY_EXPORT_DIR:$KEY_EXPORT_DIR" --fsid $FSID --config $CONFIG \
+        --keyring $KEYRING -- ceph auth get-or-create "$name" mgr "allow rw" mon "allow r" osd "$osd_caps" \
+        -o "$KEY_EXPORT_DIR/ceph.$name.keyring"
+
+    $SUDO chown ${STACK_USER}:$(id -g -n $(whoami)) \
+        ${CEPH_CONF_DIR}/ceph.$name.keyring
+}
+
+# Install ceph: create one or more keyrings
+function create_keys {
+    for key_name in "${KEYS[@]}"; do
+        echo "Creating key $key_name"
+        _create_key "$key_name"
+    done
+}
+
+# Install ceph: add MDS
+function cephfs_config {
+    # Two pools are generated by this action
+    # - cephfs.$FSNAME.data
+    # - cephfs.$FSNAME.meta
+    # and the mds daemon is deployed
+    $SUDO "$CEPHADM" shell --fsid $FSID --config $CONFIG \
+        --keyring $KEYRING -- ceph fs volume create "$FSNAME"
+}
+
+# Install ceph: add NFS
+function ceph_nfs_config {
+    # (fpantano) TODO: Build an ingress daemon on top of this
+    echo "[CEPHADM] Deploy nfs.$FSNAME backend"
+    $SUDO "$CEPHADM" shell --fsid $FSID --config $CONFIG \
+        --keyring $KEYRING -- ceph orch apply nfs \
+        "$FSNAME" --placement="$HOSTNAME" --port $NFS_PORT
+}
+
+# RGW pre config
+function configure_ceph_embedded_rgw {
+
+    # keystone endpoint for radosgw
+    _create_swift_endpoint
+
+    # Create radosgw service user with admin privileges
+    create_service_user "radosgw" "admin"
+
+    if [ "$CEPH_RGW_KEYSTONE_SSL" = "True" ]; then
+        # radosgw needs to access keystone's revocation list
+        sudo mkdir -p ${dest}/nss
+        sudo openssl x509 -in /etc/keystone/ssl/certs/ca.pem -pubkey | \
+            sudo certutil -d ${dest}/nss -A -n ca -t "TCu,Cu,Tuw"
+
+        sudo openssl x509 -in /etc/keystone/ssl/certs/signing_cert.pem -pubkey | \
+            sudo certutil -A -d ${dest}/nss -n signing_cert -t "P,P,P"
+    fi
+}
+
+# General Ceph utility to set config keys within the mgr
+function set_config_key {
+    local section=$1
+    local key=$2
+    local value=$3
+    $SUDO "$CEPHADM" shell --fsid $FSID --config $CONFIG \
+        ceph config set ${section} ${key} "${value}"
+}
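+
+# Example usage (illustrative): pushing a single RGW option through the mgr,
+# equivalent to running `ceph config set global rgw_keystone_url <url>`
+# inside a cephadm shell:
+#
+#   set_config_key "global" "rgw_keystone_url" "$KEYSTONE_SERVICE_URI"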
+
+# RGW config keys: no iniset anymore, everything is pushed as mgr key/value
+function configure_rgw_ceph_section {
+
+    # RGW KEYSTONE KEYS
+    declare -A RGW_CONFIG_KEYS
+
+    RGW_CONFIG_KEYS=(['rgw_keystone_api_version']=3
+        ['rgw_keystone_url']="$KEYSTONE_SERVICE_URI"
+        ['rgw_keystone_accepted_roles']="member, _member_, Member, admin"
+        ['rgw_keystone_accepted_admin_roles']="ResellerAdmin"
+        ['rgw_keystone_admin_domain']="$SERVICE_DOMAIN_NAME"
+        ['rgw_keystone_admin_project']="$SERVICE_PROJECT_NAME"
+        ['rgw_keystone_admin_user']="radosgw"
+        ['rgw_s3_auth_use_keystone']="true"
+        ['rgw_keystone_admin_password']="$SERVICE_PASSWORD"
+        ['rgw_keystone_verify_ssl']="false"
+    )
+
+    for k in "${!RGW_CONFIG_KEYS[@]}"; do
+        set_config_key "global" "${k}" "${RGW_CONFIG_KEYS[$k]}"
+    done
+}
+
+# Install ceph: add RGW
+function start_ceph_embedded_rgw {
+    $SUDO "$CEPHADM" shell --fsid $FSID --config $CONFIG \
+        --keyring $KEYRING -- ceph orch apply rgw default default default \
+        "--placement=$HOSTNAME count:1" --port "$RGW_PORT"
+}
+
+# Configure cephfs and ceph_nfs
+function configure_ceph_manila {
+    # Deploy mds and configure cephfs
+    cephfs_config
+    # Deploy and configure ganesha
+    [ "$MANILA_CEPH_DRIVER" == 'cephfsnfs' ] && ceph_nfs_config
+    # Add manila keys to the list
+    KEYS+=('client.manila')
+}
+
+# Install ceph: services deployment
+function enable_services {
+    for item in "${SERVICES[@]}"; do
+        case "$item" in
+            cephfs|CEPHFS)
+                echo "[CEPHADM] Config cephfs volume on node $HOSTNAME"
+                cephfs_config
+                CEPHFS_CLIENT=1
+                ;;
+            nfs|NFS)
+                echo "[CEPHADM] Deploying NFS on node $HOSTNAME"
+                ceph_nfs_config
+                CEPHFS_CLIENT=1
+                ;;
+            rgw|RGW)
+                echo "[CEPHADM] Deploying RGW on node $HOSTNAME"
+                start_ceph_embedded_rgw
+                ;;
+        esac
+    done
+}
+
+# Install ceph: client config
+function client_config {
+    echo "Dump the minimal ceph.conf"
+    cp $CONFIG "$CEPH_CLIENT_CONFIG"
+
+    cat >> "$CEPH_CLIENT_CONFIG" <<-EOF
+[client.libvirt]
+admin socket = /var/run/ceph/\$cluster-\$type.\$id.\$pid.\$cctid.asok
+log file = $RBD_CLIENT_LOG
+EOF
+
+    if [ "$CEPHFS_CLIENT" -eq 1 ]; then
+        cat >> "$CEPH_CLIENT_CONFIG" <<-EOF
+[$CEPHFS_CLIENT_NAME]
+client mount uid = 0
+client mount gid = 0
+log file = $CEPHFS_CLIENT_LOG
+admin socket = /var/run/ceph/\$cluster-\$type.\$id.\$pid.\$cctid.asok
+keyring = $KEY_EXPORT_DIR/ceph.$CEPHFS_CLIENT_NAME.keyring
+EOF
+        echo "Client config exported: $CEPH_CLIENT_CONFIG"
+    fi
+
+    # Nova resolves the keyring using the pattern $cluster.conf
+    # For this reason we need to override the content of the
+    # generated (minimal) ceph.conf with the client part.
+    $SUDO cp $CEPH_CLIENT_CONFIG $CEPH_CONF_FILE
+}
+
+## Remove ceph
+
+# Remove ceph: remove cluster and zap osds
+function stop_ceph {
+    if ! [ -x "$CEPHADM" ]; then
+        get_cephadm
+        CEPHADM=${TARGET_BIN}/cephadm
+    fi
+    CLUSTER_FSID=$(sudo cephadm ls | jq '.[]' | jq 'select(.name | test("^mon*")).fsid')
+    if [ -n "$CLUSTER_FSID" ]; then
+        sudo cephadm rm-cluster --zap-osds --fsid "$CLUSTER_FSID" --force
+        echo "[CEPHADM] Cluster deleted"
+    fi
+}
+
+## devstack-plugin-ceph functions
+
+function pre_install_ceph {
+    # Check dependencies for the service.
+    install_deps
+    prereq
+}
+
+function install_ceph {
+    # Install the service.
+    bootstrap_config
+    get_cephadm
+    start_ceph
+}
+
+function config_glance {
+    if [[ "$GLANCE_RGW_BACKEND" = "True" && "$ENABLE_CEPH_RGW" = "True" ]]; then
+        # common glance accounts for swift
+        create_service_user "glance-swift" "ResellerAdmin"
+        iniset $GLANCE_SWIFT_STORE_CONF ref1 user $SERVICE_PROJECT_NAME:glance-swift
+
+        AUTH_URL=$KEYSTONE_SERVICE_URI/v$CEPH_RGW_IDENTITY_API_VERSION
+
+        iniset $GLANCE_SWIFT_STORE_CONF ref1 key $SERVICE_PASSWORD
+        iniset $GLANCE_SWIFT_STORE_CONF ref1 auth_address $AUTH_URL
+        iniset $GLANCE_SWIFT_STORE_CONF ref1 user_domain_name $SERVICE_DOMAIN_NAME
+        iniset $GLANCE_SWIFT_STORE_CONF ref1 project_domain_name $SERVICE_DOMAIN_NAME
+        iniset $GLANCE_SWIFT_STORE_CONF ref1 auth_version $CEPH_RGW_IDENTITY_API_VERSION
+
+        iniset $GLANCE_API_CONF glance_store default_store swift
+        iniset $GLANCE_API_CONF glance_store swift_store_create_container_on_put True
+
+        iniset $GLANCE_API_CONF glance_store swift_store_config_file $GLANCE_SWIFT_STORE_CONF
+        iniset $GLANCE_API_CONF glance_store default_swift_reference ref1
+        iniset $GLANCE_API_CONF glance_store stores "file, http, swift"
+    else
+        iniset $GLANCE_API_CONF DEFAULT show_multiple_locations True
+        iniset $GLANCE_API_CONF glance_store default_store rbd
+        iniset $GLANCE_API_CONF glance_store stores "file, http, rbd"
+        iniset $GLANCE_API_CONF glance_store rbd_store_ceph_conf $CEPH_CONF_FILE
+        iniset $GLANCE_API_CONF glance_store rbd_store_user $GLANCE_CEPH_USER
+        iniset $GLANCE_API_CONF glance_store rbd_store_pool $GLANCE_CEPH_POOL
+    fi
+}
+
+function config_nova {
+    iniset $NOVA_CONF libvirt rbd_user ${CINDER_CEPH_USER}
+    iniset $NOVA_CONF libvirt rbd_secret_uuid ${CINDER_CEPH_UUID}
+    iniset $NOVA_CONF libvirt inject_key false
+    iniset $NOVA_CONF libvirt inject_partition -2
+    iniset $NOVA_CONF libvirt disk_cachemodes "network=writeback"
+    iniset $NOVA_CONF libvirt images_type rbd
+    iniset $NOVA_CONF libvirt images_rbd_pool ${NOVA_CEPH_POOL}
+    iniset $NOVA_CONF libvirt images_rbd_ceph_conf ${CEPH_CONF_FILE}
+}
+
+# import_libvirt_secret_ceph() - Imports Cinder user key into libvirt
+# so it can connect to the Ceph cluster while attaching a Cinder block device
+function import_libvirt_secret_ceph {
+
+    [ "$ENABLE_CEPH_NOVA" == "False" ] && return;
+
+    NOVA_VIRSH_SECRET=$($SUDO cat ${CEPH_CONF_DIR}/ceph.client.${CINDER_CEPH_USER}.keyring | awk '/key/ {print $3}')
+    cat <<EOF | sudo tee secret.xml>/dev/null
+<secret ephemeral='false' private='false'>
+   <uuid>${CINDER_CEPH_UUID}</uuid>
+   <usage type='ceph'>
+      <name>client.${CINDER_CEPH_USER} secret</name>
+   </usage>
+</secret>
+EOF
+    $SUDO virsh secret-define --file secret.xml # 2>/dev/null
+    $SUDO virsh secret-set-value --secret ${CINDER_CEPH_UUID} \
+        --base64 ${NOVA_VIRSH_SECRET} # 2>/dev/null
+
+    $SUDO rm -f secret.xml
+}
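+
+# A quick way to verify the secret was imported (illustrative only):
+#
+#   sudo virsh secret-list
+#   sudo virsh secret-get-value ${CINDER_CEPH_UUID}
+#
+# The second command should print the same base64 key stored in
+# ${CEPH_CONF_DIR}/ceph.client.${CINDER_CEPH_USER}.keyring.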
+
+# _undefine_virsh_secret() - Undefine Cinder key secret from libvirt
+function _undefine_virsh_secret {
+    local virsh_uuid
+    virsh_uuid=$($SUDO virsh secret-list | awk '/^ ?[0-9a-z]/ { print $1 }')
+    echo $virsh_uuid
+    $SUDO virsh secret-undefine ${virsh_uuid} &>/dev/null
+}
+
+function configure_ceph {
+    [ "$ENABLE_CEPH_MANILA" == "True" ] && { SERVICES+=('cephfs'); KEYS+=('client.manila'); }
+    [ "$MANILA_CEPH_DRIVER" == "cephfsnfs" ] && SERVICES+=('nfs')
+    [ "$ENABLE_CEPH_GLANCE" == "True" ] && {
+        POOLS+=($GLANCE_CEPH_POOL);
+        KEYS+=('client.glance');
+        config_glance;
+    }
+
+    [ "$ENABLE_CEPH_CINDER" == "True" ] && {
+        POOLS+=($CINDER_CEPH_POOL);
+        KEYS+=('client.cinder');
+    }
+
+    [ "$ENABLE_CEPH_C_BAK" == "True" ] && {
+        POOLS+=($CINDER_BAK_CEPH_POOL);
+        KEYS+=('client.cinder-bak');
+    }
+
+    [ "$ENABLE_CEPH_NOVA" == "True" ] && {
+        POOLS+=($NOVA_CEPH_POOL);
+        KEYS+=('client.cinder');
+        config_nova;
+    }
+
+    enable_services
+    add_pools
+    create_keys
+    client_config
+    import_libvirt_secret_ceph
+    [ "$DISABLE_CEPHADM" == "True" ] && disable_cephadm
+}
+
+# Hack: remove this function at some point
+function configure_ceph_manila {
+    # noop
+    :
+}
+
+function cleanup_ceph {
+    # Cleanup the service.
+    stop_ceph
+    delete_osd_dev
+    # purge ceph config file and keys
+    $SUDO rm -f ${CEPH_CONF_DIR}/*
+    _undefine_virsh_secret
+}
+
+function disable_cephadm {
+    $SUDO "$CEPHADM" shell --fsid $FSID --config $CONFIG \
+        --keyring $KEYRING -- ceph orch set backend
+
+    $SUDO "$CEPHADM" shell --fsid $FSID --config $CONFIG \
+        --keyring $KEYRING -- ceph mgr module disable cephadm
+}
+
+# is_ceph_enabled_for_service() - checks whether the OpenStack service
+# specified as an argument is enabled with Ceph as its storage backend.
+function is_ceph_enabled_for_service {
+    local config config_name enabled service
+    enabled=1
+    service=$1
+    # Construct the global variable ENABLE_CEPH_.* corresponding to a
+    # $service.
+    config_name=ENABLE_CEPH_$(echo $service | \
+        tr '[:lower:]' '[:upper:]' | tr '-' '_')
+    config=$(eval echo "\$$config_name")
+
+    if (is_service_enabled $service) && [[ $config == 'True' ]]; then
+        enabled=0
+    fi
+    return $enabled
+}
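+
+# Example (illustrative): with ENABLE_CEPH_GLANCE=True and the glance
+# services enabled, `is_ceph_enabled_for_service glance` returns 0 (success),
+# so it can be used directly in conditionals, as plugin.sh does:
+#
+#   if is_ceph_enabled_for_service glance; then
+#       echo_summary "Configuring Glance for Ceph"
+#   fi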
+
+# Restore xtrace
+$XTRACE
+
+# Tell emacs to use shell-script-mode
+## Local variables:
+## mode: shell-script
+## End:
diff --git a/devstack/override-defaults b/devstack/override-defaults
index 880daef..18afcd6 100644
--- a/devstack/override-defaults
+++ b/devstack/override-defaults
@@ -18,3 +18,5 @@ if [[ $ENABLE_CEPH_CINDER == "True" ]]; then
         CINDER_ENABLED_BACKENDS=${CINDER_ENABLED_BACKENDS:-ceph}
     fi
 fi
+
+CEPHADM_DEPLOY=$(trueorfalse False CEPHADM_DEPLOY)
diff --git a/devstack/plugin.sh b/devstack/plugin.sh
index c5fd4cf..c6063f1 100644
--- a/devstack/plugin.sh
+++ b/devstack/plugin.sh
@@ -2,93 +2,115 @@
 if [[ "$1" == "source" ]]; then
     # Initial source
-    source $TOP_DIR/lib/ceph
+    if [[ "$CEPHADM_DEPLOY" = "True" ]]; then
+        source $TOP_DIR/lib/cephadm
+    else
+        source $TOP_DIR/lib/ceph
+    fi
 elif [[ "$1" == "stack" && "$2" == "pre-install" ]]; then
     if [[ "$ENABLE_CEPH_RGW" = "True" ]] && (is_service_enabled swift); then
         die $LINENO \
         "You cannot activate both Swift and Ceph Rados Gateway, \
         please disable Swift or set ENABLE_CEPH_RGW=False"
     fi
-    echo_summary "Installing Ceph"
-    check_os_support_ceph
-    if [ "$REMOTE_CEPH" = "False" ]; then
-        if [ "$CEPH_CONTAINERIZED" = "True" ]; then
-            echo_summary "Configuring and initializing Ceph"
-            deploy_containerized_ceph
-        else
-            install_ceph
-            echo_summary "Configuring Ceph"
-            configure_ceph
-            # NOTE (leseb): we do everything here
-            # because we need to have Ceph started before the main
-            # OpenStack components.
-            # Ceph OSD must start here otherwise we can't upload any images.
-            echo_summary "Initializing Ceph"
-            start_ceph
-        fi
+    if [[ "$CEPHADM_DEPLOY" = "True" ]]; then
+        # Set up system services
+        echo_summary "[cephadm] Configuring system services ceph"
+        pre_install_ceph
     else
-        install_ceph_remote
+        echo_summary "Installing Ceph"
+        check_os_support_ceph
+        if [ "$REMOTE_CEPH" = "False" ]; then
+            if [ "$CEPH_CONTAINERIZED" = "True" ]; then
+                echo_summary "Configuring and initializing Ceph"
+                deploy_containerized_ceph
+            else
+                install_ceph
+                echo_summary "Configuring Ceph"
+                configure_ceph
+                # NOTE (leseb): we do everything here
+                # because we need to have Ceph started before the main
+                # OpenStack components.
+                # Ceph OSD must start here otherwise we can't upload any images.
+                echo_summary "Initializing Ceph"
+                start_ceph
+            fi
+        else
+            install_ceph_remote
+        fi
     fi
 elif [[ "$1" == "stack" && "$2" == "install" ]]; then
-    # FIXME(melwitt): This is a hack to get around a namespacing issue with
-    # Paste and PasteDeploy. For stable/queens, we use the Pike UCA packages
-    # and the Ceph packages in the Pike UCA are pulling in python-paste and
-    # python-pastedeploy packages. The python-pastedeploy package satisfies the
-    # upper-constraints but python-paste does not, so devstack pip installs a
-    # newer version of it, while python-pastedeploy remains. The mismatch
-    # between the install path of paste and paste.deploy causes Keystone to
-    # fail to start, with "ImportError: cannot import name deploy."
-    if [[ "$TARGET_BRANCH" == stable/queens || "$TARGET_BRANCH" == master ]]; then
-        pip_install -U --force PasteDeploy
+    if [[ "$CEPHADM_DEPLOY" = "True" ]]; then
+        # Perform installation of service source
+        echo_summary "[cephadm] Installing ceph"
+        install_ceph
+    else
+        # FIXME(melwitt): This is a hack to get around a namespacing issue with
+        # Paste and PasteDeploy. For stable/queens, we use the Pike UCA packages
+        # and the Ceph packages in the Pike UCA are pulling in python-paste and
+        # python-pastedeploy packages. The python-pastedeploy package satisfies the
+        # upper-constraints but python-paste does not, so devstack pip installs a
+        # newer version of it, while python-pastedeploy remains. The mismatch
+        # between the install path of paste and paste.deploy causes Keystone to
+        # fail to start, with "ImportError: cannot import name deploy."
+        if [[ "$TARGET_BRANCH" == stable/queens || "$TARGET_BRANCH" == master ]]; then
+            pip_install -U --force PasteDeploy
+        fi
     fi
 elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then
-    if is_ceph_enabled_for_service glance; then
-        echo_summary "Configuring Glance for Ceph"
-        configure_ceph_glance
-    fi
-    if is_ceph_enabled_for_service nova; then
-        echo_summary "Configuring Nova for Ceph"
-        configure_ceph_nova
-    fi
-    if is_ceph_enabled_for_service cinder; then
-        echo_summary "Configuring Cinder for Ceph"
-        configure_ceph_cinder
-    fi
-    if is_ceph_enabled_for_service nova; then
-        # NOTE (leseb): the part below is a requirement
-        # to attach Ceph block devices
-        echo_summary "Configuring libvirt secret"
-        import_libvirt_secret_ceph
-    fi
-    if is_ceph_enabled_for_service manila; then
-        echo_summary "Configuring Manila for Ceph"
-        configure_ceph_manila
-    fi
-
-    if [ "$REMOTE_CEPH" = "False" ]; then
+    if [[ "$CEPHADM_DEPLOY" = "True" ]]; then
+        # Configure after the other layer 1 and 2 services have been configured
+        echo_summary "[cephadm] Configuring additional Ceph services"
+        configure_ceph
+    else
         if is_ceph_enabled_for_service glance; then
             echo_summary "Configuring Glance for Ceph"
-            configure_ceph_embedded_glance
+            configure_ceph_glance
         fi
         if is_ceph_enabled_for_service nova; then
             echo_summary "Configuring Nova for Ceph"
-            configure_ceph_embedded_nova
+            configure_ceph_nova
         fi
         if is_ceph_enabled_for_service cinder; then
             echo_summary "Configuring Cinder for Ceph"
-            configure_ceph_embedded_cinder
+            configure_ceph_cinder
+        fi
+        if is_ceph_enabled_for_service nova; then
+            # NOTE (leseb): the part below is a requirement
+            # to attach Ceph block devices
+            echo_summary "Configuring libvirt secret"
+            import_libvirt_secret_ceph
         fi
         if is_ceph_enabled_for_service manila; then
             echo_summary "Configuring Manila for Ceph"
-            configure_ceph_embedded_manila
+            configure_ceph_manila
         fi
-        if [ "$ENABLE_CEPH_RGW" = "True" ]; then
-            echo_summary "Configuring Rados Gateway with Keystone for Swift"
-            configure_ceph_embedded_rgw
-            if [ "$CEPH_CONTAINERIZED" = "False" ]; then
-                start_ceph_embedded_rgw
-            else
-                _configure_ceph_rgw_container
+
+        if [ "$REMOTE_CEPH" = "False" ]; then
+            if is_ceph_enabled_for_service glance; then
+                echo_summary "Configuring Glance for Ceph"
+                configure_ceph_embedded_glance
+            fi
+            if is_ceph_enabled_for_service nova; then
+                echo_summary "Configuring Nova for Ceph"
+                configure_ceph_embedded_nova
+            fi
+            if is_ceph_enabled_for_service cinder; then
+                echo_summary "Configuring Cinder for Ceph"
+                configure_ceph_embedded_cinder
+            fi
+            if is_ceph_enabled_for_service manila; then
+                echo_summary "Configuring Manila for Ceph"
+                configure_ceph_embedded_manila
+            fi
+            if [ "$ENABLE_CEPH_RGW" = "True" ]; then
+                echo_summary "Configuring Rados Gateway with Keystone for Swift"
+                configure_ceph_embedded_rgw
+                if [ "$CEPH_CONTAINERIZED" = "False" ]; then
+                    start_ceph_embedded_rgw
+                else
+                    _configure_ceph_rgw_container
+                fi
             fi
         fi
     fi
@@ -123,24 +145,32 @@
 
 if [[ "$1" == "unstack" ]]; then
-    if [ "$CEPH_CONTAINERIZED" = "False" ]; then
-        if [ "$REMOTE_CEPH" = "True" ]; then
-            cleanup_ceph_remote
-        else
-            stop_ceph
-            cleanup_ceph_embedded
-        fi
+    if [[ "$CEPHADM_DEPLOY" = "True" ]]; then
+        cleanup_ceph
     else
-        cleanup_containerized_ceph
+        if [ "$CEPH_CONTAINERIZED" = "False" ]; then
+            if [ "$REMOTE_CEPH" = "True" ]; then
+                cleanup_ceph_remote
+            else
+                stop_ceph
+                cleanup_ceph_embedded
+            fi
+        else
+            cleanup_containerized_ceph
+        fi
+        cleanup_ceph_general
     fi
-    cleanup_ceph_general
 fi
 
 if [[ "$1" == "clean" ]]; then
-    if [ "$REMOTE_CEPH" = "True" ]; then
-        cleanup_ceph_remote
+    if [[ "$CEPHADM_DEPLOY" = "True" ]]; then
+        cleanup_ceph
     else
-        cleanup_ceph_embedded
+        if [ "$REMOTE_CEPH" = "True" ]; then
+            cleanup_ceph_remote
+        else
+            cleanup_ceph_embedded
+        fi
+        cleanup_ceph_general
     fi
-    cleanup_ceph_general
 fi
diff --git a/devstack/settings b/devstack/settings
index 4ad1107..f4722c7 100644
--- a/devstack/settings
+++ b/devstack/settings
@@ -23,8 +23,13 @@ CEPH_LOOPBACK_DISK_SIZE=${CEPH_LOOPBACK_DISK_SIZE:-$VOLUME_BACKING_FILE_SIZE}
 # Disable manage/unmanage snapshot tests on Tempest
 TEMPEST_VOLUME_MANAGE_SNAPSHOT=False
 
-# Source plugin's lib/ceph
-source $CEPH_PLUGIN_DIR/lib/ceph
+# Source plugin's lib/cephadm or lib/ceph
+# depending on chosen deployment method
+if [[ "$CEPHADM_DEPLOY" = "True" ]]; then
+    source $CEPH_PLUGIN_DIR/lib/cephadm
+else
+    source $CEPH_PLUGIN_DIR/lib/ceph
+fi
 
 # Set Manila related global variables used by Manila's DevStack plugin.
 if (is_ceph_enabled_for_service manila); then