diff --git a/devstack/lib/cephadm b/devstack/lib/cephadm
new file mode 100644
index 0000000..3ceb508
--- /dev/null
+++ b/devstack/lib/cephadm
@@ -0,0 +1,409 @@
+#!/bin/bash
+#
+# lib/cephadm
+# Functions to control the configuration
+# and operation of the **Ceph** storage service
+# when deployed using the cephadm tool
+
+# ``stack.sh`` calls the entry points in this order:
+#
+# - pre_install_ceph
+# - install_ceph
+# - configure_ceph
+# - init_ceph
+# - shutdown_ceph
+# - cleanup_ceph
+
+# Save trace setting
+XTRACE=$(set +o | grep xtrace)
+set +o xtrace
+
+# GENERIC CEPHADM INTERNAL OPTIONS, DO NOT EDIT
+CEPH_PUB_KEY="/etc/ceph/ceph.pub"
+CONFIG="/etc/ceph/ceph.conf"
+BOOTSTRAP_CONFIG="$HOME/bootstrap_ceph.conf"
+KEYRING="/etc/ceph/ceph.client.admin.keyring"
+REQUIREMENTS=("jq" "lvm" "python3")
+TARGET_BIN=/usr/bin
+
+# DEFAULT OPTIONS
+ATTEMPTS=30
+CONTAINER_IMAGE=${CONTAINER_IMAGE:-'quay.io/ceph/ceph:v16.2.7'}
+DEVICES=()
+FSID="4b5c8c0a-ff60-454b-a1b4-9747aa737d19"
+KEYS=("client.openstack") # at least the client.openstack default key should be created
+MIN_OSDS=1
+SERVICES=("NFS")
+SLEEP=5
+
+# POOLS
+# E.G. POOLS[test]='rbd'
+declare -A POOLS
+
+# NFS OPTIONS
+FSNAME=${FSNAME:-'cephfs'}
+NFS_PORT=12345
+NFS_CLIENT=0
+NFS_CLIENT_NAME="client.manila"
+NFS_CLIENT_LOG="/var/log/ceph-$NFS_CLIENT_NAME.log"
+
+# RGW OPTIONS
+RGW_PORT=8080
+
+# CLIENT CONFIG
+CLIENT_CONFIG=$HOME/ceph_client.conf
+EXPORT=$HOME/ceph_export.yml
+RBD_CLIENT_LOG=/var/log/ceph/qemu-guest-\$pid.log
+
+[ -z "$SUDO" ] && SUDO=sudo
+
+## Admin
+
+# Admin: enable debug mode
+function set_debug {
+    if [ "$DEBUG" -eq 1 ]; then
+        echo "[CEPHADM] Enabling Debug mode"
+        $SUDO "$CEPHADM" shell --fsid $FSID --config $CONFIG \
+            --keyring $KEYRING -- ceph config set mgr mgr/cephadm/log_to_cluster_level debug
+        echo "[CEPHADM] See debug logs running: ceph -W cephadm --watch-debug"
+    fi
+}
+
+# Admin: check ceph cluster status
+function check_cluster_status {
+    $SUDO "$CEPHADM" shell --fsid $FSID --config $CONFIG \
+        --keyring $KEYRING -- ceph -s -f json-pretty
+}
+
+# Admin: export ceph cluster config spec
+function export_spec {
+    $SUDO "$CEPHADM" shell --fsid $FSID --config $CONFIG \
+        --keyring $KEYRING -- ceph orch ls --export > "$EXPORT"
+    echo "Ceph cluster config exported: $EXPORT"
+}
+
+## Pre-install
+
+# Pre-install ceph: check dependencies are available
+function prereq {
+    for cmd in "${REQUIREMENTS[@]}"; do
+        if ! command -v "$cmd" &> /dev/null; then
+            echo "Command $cmd not found"
+            exit 1;
+        fi
+    done
+}
+
+# Pre-install ceph: show cluster status
+function preview {
+    echo "---------"
+    echo "SERVICES"
+    for daemon in "${SERVICES[@]}"; do
+        echo "* $daemon"
+    done
+
+    echo "---------"
+    echo "POOLS"
+    for key in "${!POOLS[@]}"; do
+        echo "* $key:${POOLS[$key]}";
+    done
+
+    echo "---------"
+    echo "KEYS"
+    for kname in "${KEYS[@]}"; do
+        echo "* $kname";
+    done
+
+    echo "---------"
+    echo "DEVICES"
+    for dev in "${DEVICES[@]}"; do
+        echo "* $dev"
+    done
+    [ -z "$DEVICES" ] && echo "Using ALL available devices"
+
+    echo "---------"
+    echo IP Address: "$HOST_IP"
+    echo "---------"
+    echo "Container Image: $CONTAINER_IMAGE"
+    echo "---------"
+}
+
+# Pre-install ceph: get cephadm binary
+function get_cephadm {
+    curl -O https://raw.githubusercontent.com/ceph/ceph/pacific/src/cephadm/cephadm
+    $SUDO mv cephadm $TARGET_BIN
+    $SUDO chmod +x $TARGET_BIN/cephadm
+    echo "[GET CEPHADM] cephadm is now available"
+
+    if [ -z "$CEPHADM" ]; then
+        CEPHADM=${TARGET_BIN}/cephadm
+    fi
+}
+
+# Pre-install ceph: bootstrap config
+function bootstrap_config {
+    cat <<EOF > "$BOOTSTRAP_CONFIG"
+[global]
+log to file = true
+osd crush chooseleaf type = 0
+osd_pool_default_pg_num = 8
+osd_pool_default_pgp_num = 8
+osd_pool_default_size = 1
+[mon]
+mon_warn_on_insecure_global_id_reclaim_allowed = False
+mon_warn_on_pool_no_redundancy = False
+EOF
+}
+
+## Install
+
+# Install ceph: run cephadm bootstrap
+function start_ceph {
+    cluster=$(sudo cephadm ls | jq '.[]' | jq 'select(.name | test("^mon*")).fsid')
+    if [ -z "$cluster" ]; then
+        $SUDO $CEPHADM --image "$CONTAINER_IMAGE" \
+            bootstrap \
+            --fsid $FSID \
+            --config "$BOOTSTRAP_CONFIG" \
+            --output-config $CONFIG \
+            --output-keyring $KEYRING \
+            --output-pub-ssh-key $CEPH_PUB_KEY \
+            --allow-overwrite \
+            --allow-fqdn-hostname \
+            --skip-monitoring-stack \
+            --skip-dashboard \
+            --single-host-defaults \
+            --skip-firewalld \
+            --mon-ip "$HOST_IP"
+
+        test -e $CONFIG
+        test -e $KEYRING
+
+        # Wait for the cephadm backend to be operational
+        sleep "$SLEEP"
+    fi
+}
+
+# Install ceph: create a loopback device to be used as osd
+function dd_ceph {
+    free_device=$(losetup -f)
+    sudo dd if=/dev/zero of=/var/lib/ceph-osd.img bs=1 count=0 seek=7G
+    sudo losetup $free_device /var/lib/ceph-osd.img
+    sudo pvcreate $free_device
+    sudo vgcreate ceph_vg $free_device
+    sudo lvcreate -n ceph_lv_data -l +100%FREE ceph_vg
+    DEVICES+=("ceph_lv_data")
+}
+
+# Install ceph: add osds
+function add_osds {
+    # let's add some osds
+    if [ -z "$DEVICES" ]; then
+        echo "Using ALL available devices"
+        $SUDO $CEPHADM shell ceph orch apply osd --all-available-devices
+    else
+        for item in "${DEVICES[@]}"; do
+            echo "Creating osd $item on node $HOSTNAME"
+            $SUDO "$CEPHADM" shell --fsid $FSID --config $CONFIG \
+                --keyring $KEYRING -- ceph orch daemon add osd "$HOSTNAME:$item"
+        done
+    fi
+
+    while [ "$ATTEMPTS" -ne 0 ]; do
+        num_osds=$($SUDO $CEPHADM shell --fsid $FSID --config $CONFIG \
+            --keyring $KEYRING -- ceph -s -f json | jq '.osdmap | .num_up_osds')
+        if [ "$num_osds" -ge "$MIN_OSDS" ]; then
+            break;
+        fi
+        ATTEMPTS=$(("$ATTEMPTS" - 1))
+        sleep 1
+    done
+    echo "[CEPHADM] OSD(s) deployed: $num_osds"
+
+    # [ "$num_osds" -lt "$MIN_OSDS" ] && exit 255
+}
+
+# Install ceph: create and enable pools
+function add_pools {
+    # Pools are tied to their application, therefore the function
+    # iterates over the associative array that defines this relationship
+    # e.g. { 'volumes': 'rbd', 'manila_data': 'cephfs' }
+
+    [ "${#POOLS[@]}" -eq 0 ] && return;
+
+    for pool in "${!POOLS[@]}"; do
+        $SUDO "$CEPHADM" shell --fsid $FSID --config $CONFIG \
+            --keyring $KEYRING -- ceph osd pool create "$pool" "$DEFAULT_PG_NUM" \
+            "$DEFAULT_PGP_NUM" replicated --autoscale-mode on
+
+        # set the application to the pool (which also means rbd init the pool)
+        $SUDO "$CEPHADM" shell --fsid $FSID --config $CONFIG \
+            --keyring $KEYRING -- ceph osd pool application enable "$pool" "${POOLS[$pool]}"
+    done
+}
+
+# Install ceph: create a keyring
+function _create_key {
+    local name=$1
+    local caps
+    local osd_caps
+
+    if [ "${#POOLS[@]}" -eq 0 ]; then
+        osd_caps="allow *"
+    else
+        caps=$(build_caps)
+        osd_caps="allow class-read object_prefix rbd_children, $caps"
+    fi
+
+    $SUDO "$CEPHADM" shell -v "$KEY_EXPORT_DIR:$KEY_EXPORT_DIR" --fsid $FSID --config $CONFIG \
+        --keyring $KEYRING -- ceph auth get-or-create "$name" mon "allow r" osd "$osd_caps" \
+        -o "$KEY_EXPORT_DIR/$name.keyring"
+}
+
+# Install ceph: create one or more keyrings
+function create_keys {
+    for key_name in "${KEYS[@]}"; do
+        echo "Creating key $key_name"
+        _create_key "$key_name"
+    done
+}
+
+# Install ceph: add MDS
+function mds {
+    # Two pools are generated by this action
+    # - cephfs.$FSNAME.data
+    # - cephfs.$FSNAME.meta
+    $SUDO "$CEPHADM" shell --fsid $FSID --config $CONFIG \
+        --keyring $KEYRING -- ceph orch apply mds "$FSNAME" \
+        --placement="$HOSTNAME"
+    $SUDO "$CEPHADM" shell --fsid $FSID --config $CONFIG \
+        --keyring $KEYRING -- ceph fs volume create "$FSNAME" \
+        --placement="$HOSTNAME"
+}
+
+# Install ceph: add NFS
+function nfs {
+    echo "[CEPHADM] Deploy nfs.$FSNAME backend"
+    $SUDO "$CEPHADM" shell --fsid $FSID --config $CONFIG \
+        --keyring $KEYRING -- ceph orch apply nfs \
+        "$FSNAME" --placement="$HOSTNAME" --port $NFS_PORT
+}
+
+# Install ceph: add RGW
+function rgw {
+    $SUDO "$CEPHADM" shell --fsid $FSID --config $CONFIG \
+        --keyring $KEYRING -- ceph orch apply rgw default default default \
+        "--placement=$HOSTNAME count:1" --port "$RGW_PORT"
+}
+
+# Install ceph: services deployment
+function enable_services {
+    for item in "${SERVICES[@]}"; do
+        case "$item" in
+            mds|MDS)
+                echo "Deploying MDS on node $HOSTNAME"
+                mds
+                ;;
+            nfs|NFS)
+                echo "Deploying NFS on node $HOSTNAME"
+                nfs
+                NFS_CLIENT=1
+                ;;
+            rgw|RGW)
+                echo "Deploying RGW on node $HOSTNAME"
+                rgw
+                ;;
+        esac
+    done
+}
+
+# Install ceph: client config
+function client_config {
+    echo "Dump the minimal ceph.conf"
+    cp $CONFIG "$CLIENT_CONFIG"
+
+    cat >> "$CLIENT_CONFIG" <<-EOF
+[client.libvirt]
+admin socket = /var/run/ceph/\$cluster-\$type.\$id.\$pid.\$cctid.asok
+log file = $RBD_CLIENT_LOG
+EOF
+
+    if [ "$NFS_CLIENT" -eq 1 ]; then
+        cat >> "$CLIENT_CONFIG" <<-EOF
+[$NFS_CLIENT_NAME]
+client mount uid = 0
+client mount gid = 0
+log file = $NFS_CLIENT_LOG
+admin socket = /var/run/ceph/\$cluster-\$type.\$id.\$pid.\$cctid.asok
+keyring = $KEY_EXPORT_DIR/$NFS_CLIENT_NAME.keyring
+EOF
+        echo "Client config exported: $CLIENT_CONFIG"
+    fi
+}
+
+## Remove ceph
+
+# Remove ceph: remove cluster and zap osds
+function stop_ceph {
+    if ! [ -x "$CEPHADM" ]; then
+        get_cephadm
+        CEPHADM=${TARGET_BIN}/cephadm
+    fi
+    cluster=$(sudo cephadm ls | jq '.[]' | jq 'select(.name | test("^mon*")).fsid')
+    if [ -n "$cluster" ]; then
+        sudo cephadm rm-cluster --zap-osds --fsid "$FSID" --force
+        echo "[CEPHADM] Cluster deleted"
+    fi
+}
+
+## devstack-plugin-ceph functions
+
+function pre_install_ceph {
+    # Check dependencies for the service.
+    prereq
+}
+
+function install_ceph {
+    # Install the service.
+    bootstrap_config
+    get_cephadm
+}
+
+function configure_ceph {
+    # Configure the service.
+    # noop
+    :
+}
+
+function init_ceph {
+    # Initialize and start the service.
+    preview
+    start_ceph
+    # set_debug
+    dd_ceph
+    add_osds
+    add_pools
+    create_keys
+    enable_services
+    check_cluster_status
+    client_config
+}
+
+function shutdown_ceph {
+    # Shut the service down.
+    stop_ceph
+}
+
+function cleanup_ceph {
+    # Cleanup the service.
+    # noop
+    :
+}
+
+# Restore xtrace
+$XTRACE
+
+# Tell emacs to use shell-script-mode
+## Local variables:
+## mode: shell-script
+## End:
diff --git a/devstack/plugin.sh b/devstack/plugin.sh
index c5fd4cf..91642fe 100644
--- a/devstack/plugin.sh
+++ b/devstack/plugin.sh
@@ -2,96 +2,124 @@
 if [[ "$1" == "source" ]]; then
     # Initial source
-    source $TOP_DIR/lib/ceph
+    if [ "$CEPHADM_DEPLOY" = "True" ]; then
+        source $TOP_DIR/lib/cephadm
+    else
+        source $TOP_DIR/lib/ceph
+    fi
 elif [[ "$1" == "stack" && "$2" == "pre-install" ]]; then
     if [[ "$ENABLE_CEPH_RGW" = "True" ]] && (is_service_enabled swift); then
         die $LINENO \
         "You cannot activate both Swift and Ceph Rados Gateway, \
         please disable Swift or set ENABLE_CEPH_RGW=False"
     fi
-    echo_summary "Installing Ceph"
-    check_os_support_ceph
-    if [ "$REMOTE_CEPH" = "False" ]; then
-        if [ "$CEPH_CONTAINERIZED" = "True" ]; then
-            echo_summary "Configuring and initializing Ceph"
-            deploy_containerized_ceph
-        else
-            install_ceph
-            echo_summary "Configuring Ceph"
-            configure_ceph
-            # NOTE (leseb): we do everything here
-            # because we need to have Ceph started before the main
-            # OpenStack components.
-            # Ceph OSD must start here otherwise we can't upload any images.
-            echo_summary "Initializing Ceph"
-            start_ceph
-        fi
+    if [ "$CEPHADM_DEPLOY" = "True" ]; then
+        # Set up system services
+        echo_summary "[cephadm] Configuring system services ceph"
+        pre_install_ceph
     else
-        install_ceph_remote
+        echo_summary "Installing Ceph"
+        check_os_support_ceph
+        if [ "$REMOTE_CEPH" = "False" ]; then
+            if [ "$CEPH_CONTAINERIZED" = "True" ]; then
+                echo_summary "Configuring and initializing Ceph"
+                deploy_containerized_ceph
+            else
+                install_ceph
+                echo_summary "Configuring Ceph"
+                configure_ceph
+                # NOTE (leseb): we do everything here
+                # because we need to have Ceph started before the main
+                # OpenStack components.
+                # Ceph OSD must start here otherwise we can't upload any images.
+                echo_summary "Initializing Ceph"
+                start_ceph
+            fi
+        else
+            install_ceph_remote
+        fi
     fi
 elif [[ "$1" == "stack" && "$2" == "install" ]]; then
-    # FIXME(melwitt): This is a hack to get around a namespacing issue with
-    # Paste and PasteDeploy. For stable/queens, we use the Pike UCA packages
-    # and the Ceph packages in the Pike UCA are pulling in python-paste and
-    # python-pastedeploy packages. The python-pastedeploy package satisfies the
-    # upper-constraints but python-paste does not, so devstack pip installs a
-    # newer version of it, while python-pastedeploy remains. The mismatch
-    # between the install path of paste and paste.deploy causes Keystone to
-    # fail to start, with "ImportError: cannot import name deploy."
-    if [[ "$TARGET_BRANCH" == stable/queens || "$TARGET_BRANCH" == master ]]; then
-        pip_install -U --force PasteDeploy
+    if [ "$CEPHADM_DEPLOY" = "True" ]; then
+        # Perform installation of service source
+        echo_summary "[cephadm] Installing ceph"
+        install_ceph
+    else
+        # FIXME(melwitt): This is a hack to get around a namespacing issue with
+        # Paste and PasteDeploy. For stable/queens, we use the Pike UCA packages
+        # and the Ceph packages in the Pike UCA are pulling in python-paste and
+        # python-pastedeploy packages. The python-pastedeploy package satisfies the
+        # upper-constraints but python-paste does not, so devstack pip installs a
+        # newer version of it, while python-pastedeploy remains. The mismatch
+        # between the install path of paste and paste.deploy causes Keystone to
+        # fail to start, with "ImportError: cannot import name deploy."
+        if [[ "$TARGET_BRANCH" == stable/queens || "$TARGET_BRANCH" == master ]]; then
+            pip_install -U --force PasteDeploy
+        fi
     fi
 elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then
-    if is_ceph_enabled_for_service glance; then
-        echo_summary "Configuring Glance for Ceph"
-        configure_ceph_glance
-    fi
-    if is_ceph_enabled_for_service nova; then
-        echo_summary "Configuring Nova for Ceph"
-        configure_ceph_nova
-    fi
-    if is_ceph_enabled_for_service cinder; then
-        echo_summary "Configuring Cinder for Ceph"
-        configure_ceph_cinder
-    fi
-    if is_ceph_enabled_for_service nova; then
-        # NOTE (leseb): the part below is a requirement
-        # to attach Ceph block devices
-        echo_summary "Configuring libvirt secret"
-        import_libvirt_secret_ceph
-    fi
-    if is_ceph_enabled_for_service manila; then
-        echo_summary "Configuring Manila for Ceph"
-        configure_ceph_manila
-    fi
-
-    if [ "$REMOTE_CEPH" = "False" ]; then
+    if [ "$CEPHADM_DEPLOY" = "True" ]; then
+        # Configure after the other layer 1 and 2 services have been configured
+        echo_summary "[cephadm] Configuring ceph"
+        configure_ceph
+    else
         if is_ceph_enabled_for_service glance; then
             echo_summary "Configuring Glance for Ceph"
-            configure_ceph_embedded_glance
+            configure_ceph_glance
         fi
         if is_ceph_enabled_for_service nova; then
             echo_summary "Configuring Nova for Ceph"
-            configure_ceph_embedded_nova
+            configure_ceph_nova
         fi
         if is_ceph_enabled_for_service cinder; then
            echo_summary "Configuring Cinder for Ceph"
-            configure_ceph_embedded_cinder
+            configure_ceph_cinder
+        fi
+        if is_ceph_enabled_for_service nova; then
+            # NOTE (leseb): the part below is a requirement
+            # to attach Ceph block devices
+            echo_summary "Configuring libvirt secret"
+            import_libvirt_secret_ceph
         fi
         if is_ceph_enabled_for_service manila; then
             echo_summary "Configuring Manila for Ceph"
-            configure_ceph_embedded_manila
+            configure_ceph_manila
         fi
-        if [ "$ENABLE_CEPH_RGW" = "True" ]; then
-            echo_summary "Configuring Rados Gateway with Keystone for Swift"
-            configure_ceph_embedded_rgw
-            if [ "$CEPH_CONTAINERIZED" = "False" ]; then
-                start_ceph_embedded_rgw
-            else
-                _configure_ceph_rgw_container
+
+        if [ "$REMOTE_CEPH" = "False" ]; then
+            if is_ceph_enabled_for_service glance; then
+                echo_summary "Configuring Glance for Ceph"
+                configure_ceph_embedded_glance
+            fi
+            if is_ceph_enabled_for_service nova; then
+                echo_summary "Configuring Nova for Ceph"
+                configure_ceph_embedded_nova
+            fi
+            if is_ceph_enabled_for_service cinder; then
+                echo_summary "Configuring Cinder for Ceph"
+                configure_ceph_embedded_cinder
+            fi
+            if is_ceph_enabled_for_service manila; then
+                echo_summary "Configuring Manila for Ceph"
+                configure_ceph_embedded_manila
+            fi
+            if [ "$ENABLE_CEPH_RGW" = "True" ]; then
+                echo_summary "Configuring Rados Gateway with Keystone for Swift"
+                configure_ceph_embedded_rgw
+                if [ "$CEPH_CONTAINERIZED" = "False" ]; then
+                    start_ceph_embedded_rgw
+                else
+                    _configure_ceph_rgw_container
+                fi
             fi
         fi
     fi
+elif [[ "$1" == "stack" && "$2" == "extra" ]]; then
+    if [ "$CEPHADM_DEPLOY" = "True" ]; then
+        # Initialize and start the ceph service
+        echo_summary "[cephadm] Initializing ceph"
+        init_ceph
+    fi
 elif [[ "$1" == "stack" && "$2" == "test-config" ]]; then
     if is_service_enabled tempest; then
         iniset $TEMPEST_CONFIG compute-feature-enabled swap_volume False
@@ -123,24 +151,32 @@
 fi
 
 if [[ "$1" == "unstack" ]]; then
-    if [ "$CEPH_CONTAINERIZED" = "False" ]; then
-        if [ "$REMOTE_CEPH" = "True" ]; then
-            cleanup_ceph_remote
-        else
-            stop_ceph
-            cleanup_ceph_embedded
-        fi
+    if [ "$CEPHADM_DEPLOY" = "True" ]; then
+        shutdown_ceph
     else
-        cleanup_containerized_ceph
+        if [ "$CEPH_CONTAINERIZED" = "False" ]; then
+            if [ "$REMOTE_CEPH" = "True" ]; then
+                cleanup_ceph_remote
+            else
+                stop_ceph
+                cleanup_ceph_embedded
+            fi
+        else
+            cleanup_containerized_ceph
+        fi
+        cleanup_ceph_general
     fi
-    cleanup_ceph_general
 fi
 
 if [[ "$1" == "clean" ]]; then
-    if [ "$REMOTE_CEPH" = "True" ]; then
-        cleanup_ceph_remote
+    if [ "$CEPHADM_DEPLOY" = "True" ]; then
+        cleanup_ceph
     else
-        cleanup_ceph_embedded
+        if [ "$REMOTE_CEPH" = "True" ]; then
+            cleanup_ceph_remote
+        else
+            cleanup_ceph_embedded
+        fi
+        cleanup_ceph_general
     fi
-    cleanup_ceph_general
 fi
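
Usage note (not part of the patch): with this change applied, the cephadm code path is opt-in and is gated by the CEPHADM_DEPLOY flag introduced in plugin.sh. A minimal local.conf sketch to exercise it, assuming the plugin is consumed as devstack-plugin-ceph from opendev.org; the plugin URL/branch and any extra per-service toggles are environment-specific assumptions, not mandated by this diff:

    [[local|localrc]]
    # Enable the plugin (adjust URL/branch for your environment)
    enable_plugin devstack-plugin-ceph https://opendev.org/openstack/devstack-plugin-ceph
    # Route devstack through lib/cephadm instead of lib/ceph
    CEPHADM_DEPLOY=True
    # Keep Swift disabled if Ceph RGW will be enabled, per the pre-install check above
    ENABLE_CEPH_RGW=False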
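Verification sketch: once stack.sh has run the "extra" phase, the cluster deployed by lib/cephadm can be inspected through the same cephadm shell wrapper the library uses. The FSID, config, and keyring paths below are the defaults hard-coded in lib/cephadm; adjust them if you override those options:

    # Cluster health, using the defaults from lib/cephadm
    sudo /usr/bin/cephadm shell --fsid 4b5c8c0a-ff60-454b-a1b4-9747aa737d19 \
        --config /etc/ceph/ceph.conf \
        --keyring /etc/ceph/ceph.client.admin.keyring -- ceph -s
    # Orchestrator view of the deployed services (mon, mgr, osd, plus mds/nfs/rgw when enabled)
    sudo /usr/bin/cephadm shell -- ceph orch ls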