Deploy with cephadm

Add the option to deploy the Ceph cluster
with the cephadm tool.
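
For reference, a minimal local.conf sketch that exercises the new code path
(illustrative only: the plugin URL is the usual opendev location and is not
part of this change; CEPHADM_DEPLOY is the toggle added here, the other
values simply restate defaults from lib/cephadm):

    [[local|localrc]]
    enable_plugin devstack-plugin-ceph https://opendev.org/openstack/devstack-plugin-ceph
    # New toggle: defaults to False, which keeps the classic lib/ceph path
    CEPHADM_DEPLOY=True
    # Existing knobs honoured by lib/cephadm (defaults shown)
    CONTAINER_IMAGE=quay.io/ceph/ceph:v16.2.7
    CEPH_LOOPBACK_DISK_SIZE=8G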

Co-Authored-By: Francesco Pantano <fpantano@redhat.com>

Change-Id: Id2a704b136b9e47b7b88ef586282cb5d0f754cf1
Victoria Martinez de la Cruz 2022-01-26 15:13:11 +00:00 committed by Francesco Pantano
parent b0418e177f
commit 144a00c118
4 changed files with 744 additions and 79 deletions

devstack/lib/cephadm (new executable file, 628 lines)

@@ -0,0 +1,628 @@
#!/bin/bash
#
# lib/cephadm
# Functions to control the configuration
# and operation of the **Ceph** storage service
# when deployed using the cephadm tool.
#
# ``stack.sh`` calls the entry points in this order:
#
# - pre_install_ceph
# - install_ceph
# - configure_ceph
# - init_ceph
# - cleanup_ceph # unstack || clean
# Save trace setting
XTRACE=$(set +o | grep xtrace)
set +o xtrace
# GENERIC CEPHADM INTERNAL OPTIONS, DO NOT EDIT
CEPH_PUB_KEY="/etc/ceph/ceph.pub"
CONFIG="/etc/ceph/ceph.conf"
BOOTSTRAP_CONFIG="$HOME/bootstrap_ceph.conf"
KEYRING="/etc/ceph/ceph.client.admin.keyring"
REQUIREMENTS=("jq" "lvm" "python3")
TARGET_BIN=/usr/bin
# DEFAULT OPTIONS
ATTEMPTS=30
CONTAINER_IMAGE=${CONTAINER_IMAGE:-'quay.io/ceph/ceph:v16.2.7'}
DEVICES=()
FSID=$(uuidgen)
KEY_EXPORT_DIR="/etc/ceph"
KEYS=("client.openstack") # at least the client.openstack default key should be created
MIN_OSDS=1
SERVICES=()
SLEEP=5
CEPHADM_DEV_OSD=${CEPHADM_DEV_OSD:-"True"}
CEPH_LOOPBACK_DISK_SIZE=${CEPH_LOOPBACK_DISK_SIZE:-8G}
# POOLS
DEFAULT_PG_NUM=${DEFAULT_PG_NUM:-8}
DEFAULT_PGP_NUM=${DEFAULT_PGP_NUM:-8}
# NFS OPTIONS
FSNAME=${FSNAME:-'cephfs'}
NFS_PORT=12345
CEPHFS_CLIENT=0
CEPHFS_CLIENT_NAME="client.manila"
CEPHFS_CLIENT_LOG="/var/log/ceph-$CEPHFS_CLIENT_NAME.log"
CEPHFS_MULTIPLE_FILESYSTEMS=${CEPHFS_MULTIPLE_FILESYSTEMS:-False}
# RGW OPTIONS
RGW_PORT=8080
# CLIENT CONFIG
CEPH_CLIENT_CONFIG=$HOME/ceph_client.conf
CEPH_CONF_DIR=${CEPH_CONF_DIR:-/etc/ceph}
# The resulting client config referenced by the other clients
CEPH_CONF_FILE=${CEPH_CONF_FILE:-$CEPH_CONF_DIR/ceph.conf}
# LOG(s) and EXPORTED CONFIG FILES
EXPORT=$HOME/ceph_export.yml
RBD_CLIENT_LOG=/var/log/ceph/qemu-guest-\$pid.log
# MANILA DEFAULTS
MANILA_CEPH_USER=${MANILA_CEPH_USER:-manila}
# GLANCE DEFAULTS
GLANCE_CEPH_USER=${GLANCE_CEPH_USER:-glance}
GLANCE_CEPH_POOL=${GLANCE_CEPH_POOL:-images}
GLANCE_RGW_BACKEND=${GLANCE_RGW_BACKEND:-False}
# Cinder DEFAULTS
CINDER_CEPH_POOL=${CINDER_CEPH_POOL:-volumes}
CINDER_CEPH_USER=${CINDER_CEPH_USER:-cinder}
CINDER_CEPH_UUID=${CINDER_CEPH_UUID:-$(uuidgen)}
# Cinder Backup DEFAULTS
CINDER_BAK_CEPH_POOL=${CINDER_BAK_CEPH_POOL:-backups}
CINDER_BAK_CEPH_USER=${CINDER_BAK_CEPH_USER:-cinder-bak}
# Nova DEFAULTS
NOVA_CEPH_POOL=${NOVA_CEPH_POOL:-vms}
[ -z "$SUDO" ] && SUDO=sudo
## Admin
# Admin: enable debug mode
function set_debug {
if [ "$DEBUG" -eq 1 ]; then
echo "[CEPHADM] Enabling Debug mode"
$SUDO "$CEPHADM" shell --fsid $FSID --config $CONFIG \
--keyring $KEYRING -- ceph config set mgr mgr/cephadm/log_to_cluster_level debug
echo "[CEPHADM] See debug logs running: ceph -W cephadm --watch-debug"
fi
}
# Admin: check ceph cluster status
function check_cluster_status {
$SUDO "$CEPHADM" shell --fsid $FSID --config $CONFIG \
--keyring $KEYRING -- ceph -s -f json-pretty
}
# Admin: export ceph cluster config spec
function export_spec {
$SUDO "$CEPHADM" shell --fsid $FSID --config $CONFIG \
--keyring $KEYRING -- ceph orch ls --export > "$EXPORT"
echo "Ceph cluster config exported: $EXPORT"
}
## Pre-install
# Pre-install ceph: check that dependencies are available
function prereq {
for cmd in "${REQUIREMENTS[@]}"; do
if ! command -v "$cmd" &> /dev/null; then
echo "Command $cmd not found"
exit 1;
fi
done
}
# Pre-install ceph: install podman
function _install_podman {
# FIXME(vkmc) Check required for Ubuntu 20.04 LTS (current CI node)
# Remove when our CI is pushed to the next LTS version
if [[ $os_CODENAME =~ (focal) ]]; then
echo "deb https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/xUbuntu_20.04/ /" \
| sudo tee /etc/apt/sources.list.d/devel:kubic:libcontainers:stable.list
curl -L "https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/xUbuntu_20.04/Release.key" \
| sudo apt-key add -
sudo apt-get update
sudo apt-get -y upgrade
fi
install_package podman
}
# Pre-install ceph: install required dependencies
function install_deps {
install_package jq ceph-common
_install_podman
if python3_enabled; then
install_package python3-cephfs python3-prettytable python3-rados python3-rbd python3-requests
fi
}
# Pre-install ceph: get cephadm binary
function get_cephadm {
curl -O https://raw.githubusercontent.com/ceph/ceph/pacific/src/cephadm/cephadm
$SUDO mv cephadm $TARGET_BIN
$SUDO chmod +x $TARGET_BIN/cephadm
echo "[GET CEPHADM] cephadm is now available"
if [ -z "$CEPHADM" ]; then
CEPHADM=${TARGET_BIN}/cephadm
fi
}
# Pre-install ceph: bootstrap config
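# The generated config keeps a single-host cluster healthy: one data replica
# (osd_pool_default_size = 1), CRUSH failure domain at the OSD level
# (chooseleaf type 0) and the no-redundancy pool warning silenced.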
function bootstrap_config {
cat <<EOF > "$BOOTSTRAP_CONFIG"
[global]
log to file = true
osd crush chooseleaf type = 0
osd_pool_default_pg_num = 8
osd_pool_default_pgp_num = 8
osd_pool_default_size = 1
[mon]
mon_warn_on_pool_no_redundancy = False
EOF
}
## Install
# Install ceph: run cephadm bootstrap
function start_ceph {
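# A mon daemon listed by 'cephadm ls' means a cluster has already been
# bootstrapped on this host, so the whole block is skipped (idempotent).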
cluster=$(sudo cephadm ls | jq '.[]' | jq 'select(.name | test("^mon*")).fsid')
if [ -z "$cluster" ]; then
$SUDO $CEPHADM --image "$CONTAINER_IMAGE" \
bootstrap \
--fsid $FSID \
--config "$BOOTSTRAP_CONFIG" \
--output-config $CONFIG \
--output-keyring $KEYRING \
--output-pub-ssh-key $CEPH_PUB_KEY \
--allow-overwrite \
--allow-fqdn-hostname \
--skip-monitoring-stack \
--skip-dashboard \
--single-host-defaults \
--skip-firewalld \
--skip-mon-network \
--mon-ip "$HOST_IP"
test -e $CONFIG
test -e $KEYRING
if [ "$CEPHADM_DEV_OSD" == 'True' ]; then
create_osd_dev
fi
# Wait for the cephadm backend to be operational
# and add osds via drivegroups
sleep "$SLEEP"
add_osds
fi
}
# Install ceph: create a loopback device to be used as osd
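# The chain is: sparse file -> loop device (losetup) -> LVM PV -> VG ceph_vg
# -> LV ceph_lv_data, which add_osds then hands to cephadm as the OSD device;
# delete_osd_dev tears the same chain down in reverse.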
function create_osd_dev {
sudo dd if=/dev/zero of=/var/lib/ceph-osd.img bs=1 count=0 seek="$CEPH_LOOPBACK_DISK_SIZE"
osd_dev=$(sudo losetup -f --show /var/lib/ceph-osd.img)
sudo pvcreate $osd_dev
sudo vgcreate ceph_vg $osd_dev
sudo lvcreate -n ceph_lv_data -l +100%FREE ceph_vg
DEVICES+=("/dev/ceph_vg/ceph_lv_data")
}
# cleanup ceph: delete the osd file and release the loopback device
function delete_osd_dev {
if [ -n "$(sudo lvs --noheadings -o lv_path -S lv_name=ceph_lv_data)" ]; then
sudo lvremove --force /dev/ceph_vg/ceph_lv_data
sudo vgremove --force ceph_vg
osd_dev=$(sudo losetup -j /var/lib/ceph-osd.img -l -n -O NAME)
sudo pvremove --force $osd_dev
sudo losetup -d $osd_dev
sudo rm -f /var/lib/ceph-osd.img
sudo partprobe
DEVICES=()
fi
}
# Install ceph: add osds
function add_osds {
# let's add some osds
if [ -z "$DEVICES" ]; then
echo "Using ALL available devices"
$SUDO $CEPHADM shell ceph orch apply osd --all-available-devices
else
for item in "${DEVICES[@]}"; do
echo "Creating osd $item on node $HOSTNAME"
$SUDO "$CEPHADM" shell --fsid $FSID --config $CONFIG \
--keyring $KEYRING -- ceph orch daemon add osd "$HOSTNAME:$item"
done
fi
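# Poll the cluster status once per second until at least MIN_OSDS OSDs
# report up, or until ATTEMPTS is exhausted.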
while [ "$ATTEMPTS" -ne 0 ]; do
num_osds=$($SUDO $CEPHADM shell --fsid $FSID --config $CONFIG \
--keyring $KEYRING -- ceph -s -f json | jq '.osdmap | .num_up_osds')
if [ "$num_osds" -ge "$MIN_OSDS" ]; then
break;
fi
ATTEMPTS=$(("$ATTEMPTS" - 1))
sleep 1
done
echo "[CEPHADM] OSD(s) deployed: $num_osds"
# [ "$num_osds" -lt "$MIN_OSDS" ] && exit 255
}
# Install ceph: create and enable pools
function add_pools {
[ "${#POOLS[@]}" -eq 0 ] && return;
for pool in "${POOLS[@]}"; do
$SUDO "$CEPHADM" shell --fsid $FSID --config $CONFIG \
--keyring $KEYRING -- ceph osd pool create "$pool" "$DEFAULT_PG_NUM" \
"$DEFAULT_PGP_NUM" replicated --autoscale-mode on
# set the application to the pool (which also means rbd init the pool)
$SUDO "$CEPHADM" shell --fsid $FSID --config $CONFIG \
--keyring $KEYRING -- ceph osd pool application enable "$pool" rbd
done
}
# Utility: build caps according to the generated pools
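# e.g. POOLS=(images volumes) yields "allow rwx pool=images,allow rwx pool=volumes"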
function build_caps {
local CAPS=""
for pool in "${POOLS[@]}"; do
caps="allow rwx pool="$pool
CAPS+=$caps,
done
echo "${CAPS::-1}"
}
# Install ceph: create a keyring
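# The keyring is written into KEY_EXPORT_DIR, which is bind mounted into the
# cephadm shell container (-v), so clients on the host can read
# /etc/ceph/ceph.<name>.keyring directly.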
function _create_key {
local name=$1
local caps
local osd_caps
if [ "${#POOLS[@]}" -eq 0 ]; then
osd_caps="allow *"
else
caps=$(build_caps)
osd_caps="allow class-read object_prefix rbd_children, $caps"
fi
$SUDO "$CEPHADM" shell -v "$KEY_EXPORT_DIR:$KEY_EXPORT_DIR" --fsid $FSID --config $CONFIG \
--keyring $KEYRING -- ceph auth get-or-create "$name" mgr "allow rw" mon "allow r" osd "$osd_caps" \
-o "$KEY_EXPORT_DIR/ceph.$name.keyring"
$SUDO chown ${STACK_USER}:$(id -g -n $(whoami)) \
${CEPH_CONF_DIR}/ceph.$name.keyring
}
# Install ceph: create one or more keyrings
function create_keys {
for key_name in "${KEYS[@]}"; do
echo "Creating key $key_name"
_create_key "$key_name"
done
}
# Install ceph: add MDS
function cephfs_config {
# Two pools are generated by this action
# - $FSNAME.FSNAME.data
# - $FSNAME.FSNAME.meta
# and the mds daemon is deployed
$SUDO "$CEPHADM" shell --fsid $FSID --config $CONFIG \
--keyring $KEYRING -- ceph fs volume create "$FSNAME"
}
# Install ceph: add NFS
function ceph_nfs_config {
# (fpantano) TODO: Build an ingress daemon on top of this
echo "[CEPHADM] Deploy nfs.$FSNAME backend"
$SUDO "$CEPHADM" shell --fsid $FSID --config $CONFIG \
--keyring $KEYRING -- ceph orch apply nfs \
"$FSNAME" --placement="$HOSTNAME" --port $NFS_PORT
}
# RGW pre config
function configure_ceph_embedded_rgw {
# keystone endpoint for radosgw
_create_swift_endpoint
# Create radosgw service user with admin privileges
create_service_user "radosgw" "admin"
if [ "$CEPH_RGW_KEYSTONE_SSL" = "True" ]; then
# radosgw needs to access keystone's revocation list
sudo mkdir -p ${dest}/nss
sudo openssl x509 -in /etc/keystone/ssl/certs/ca.pem -pubkey | \
sudo certutil -d ${dest}/nss -A -n ca -t "TCu,Cu,Tuw"
sudo openssl x509 -in /etc/keystone/ssl/certs/signing_cert.pem -pubkey | \
sudo certutil -A -d ${dest}/nss -n signing_cert -t "P,P,P"
fi
}
# General Ceph utility to set config keys within the mgr
function set_config_key {
local section=$1
local key=$2
local value=$3
$SUDO "$CEPHADM" shell --fsid $FSID --config $CONFIG \
ceph config set ${section} ${key} ${value}
}
# RGW config keys: no iniset anymore, everything is pushed as mgr key/value
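# Each key below is applied with 'ceph config set global <key> <value>' via
# set_config_key, so radosgw reads it from the cluster config store rather
# than from a local ceph.conf section.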
function configure_rgw_ceph_section {
# RGW KEYSTONE KEYS
declare -A RGW_CONFIG_KEYS
RGW_CONFIG_KEYS=(['rgw_keystone_api_version']=3
['rgw_keystone_url']="$KEYSTONE_SERVICE_URI"
['rgw_keystone_accepted_roles']="member, _member_, Member, admin"
['rgw_keystone_accepted_admin_roles']="ResellerAdmin"
['rgw_keystone_admin_domain']="$SERVICE_DOMAIN_NAME"
['rgw_keystone_admin_project']="$SERVICE_PROJECT_NAME"
['rgw_keystone_admin_user']="radosgw"
['rgw_s3_auth_use_keystone']="true"
['rgw_keystone_admin_password']="$SERVICE_PASSWORD"
['rgw_keystone_verify_ssl']="false"
)
for k in ${!RGW_CONFIG_KEYS[@]}; do
set_config_key "global" ${k} ${RGW_CONFIG_KEYS[$k]}
done
}
# Install ceph: add RGW
function start_ceph_embedded_rgw {
$SUDO "$CEPHADM" shell --fsid $FSID --config $CONFIG \
--keyring $KEYRING -- ceph orch apply rgw default default default \
"--placement=$HOSTNAME count:1" --port "$RGW_PORT"
}
# Configure cephfs and ceph_nfs
function configure_ceph_manila {
# Deploy mds and configure cephfs
cephfs_config
# Deploy and configure ganesha
[ $MANILA_CEPH_DRIVER == 'cephfsnfs' ] && ceph_nfs_config
# Add manila keys to the list
KEYS+=('client.manila')
}
# Install ceph: services deployment
function enable_services {
for item in "${SERVICES[@]}"; do
case "$item" in
cephfs|CEPHFS)
echo "[CEPHADM] Config cephfs volume on node $HOSTNAME"
cephfs_config
CEPHFS_CLIENT=1
;;
nfs|NFS)
echo "[CEPHADM] Deploying NFS on node $HOSTNAME"
ceph_nfs_config
CEPHFS_CLIENT=1
;;
rgw|RGW)
echo "[CEPHADM] Deploying RGW on node $HOSTNAME"
rgw
;;
esac
done
}
# Install ceph: client config
function client_config {
echo "Dump the minimal ceph.conf"
cp $CONFIG "$CEPH_CLIENT_CONFIG"
cat >> "$CEPH_CLIENT_CONFIG" <<-EOF
[client.libvirt]
admin socket = /var/run/ceph/\$cluster-\$type.\$id.\$pid.\$cctid.asok
log file = $RBD_CLIENT_LOG
EOF
if [ "$CEPHFS_CLIENT" -eq 1 ]; then
cat >> "$CEPH_CLIENT_CONFIG" <<-EOF
[$CEPHFS_CLIENT_NAME]
client mount uid = 0
client mount gid = 0
log file = $CEPHFS_CLIENT_LOG
admin socket = /var/run/ceph/\$cluster-\$type.\$id.\$pid.\$cctid.asok
keyring = $KEY_EXPORT_DIR/ceph.$CEPHFS_CLIENT_NAME.keyring
EOF
echo "Client config exported: $CEPH_CLIENT_CONFIG"
fi
# Nova resolves the keyring using the pattern $cluster.conf
# For this reason we need to override the content of the
# generated (minimal) ceph.conf with the client part.
$SUDO cp $CEPH_CLIENT_CONFIG $CEPH_CONF_FILE
}
## Remove ceph
# Remove ceph: remove cluster and zap osds
function stop_ceph {
if ! [ -x "$CEPHADM" ]; then
get_cephadm
CEPHADM=${TARGET_BIN}/cephadm
fi
cluster=$(sudo cephadm ls | jq '.[]' | jq 'select(.name | test("^mon*")).fsid')
if [ -n "$cluster" ]; then
sudo cephadm rm-cluster --zap-osds --fsid "$FSID" --force
echo "[CEPHADM] Cluster deleted"
fi
}
## devstack-plugin-ceph functions
function pre_install_ceph {
# Check dependencies for the service.
install_deps
prereq
}
function install_ceph {
# Install the service.
bootstrap_config
get_cephadm
start_ceph
}
function config_glance {
if [[ "$GLANCE_RGW_BACKEND" = "True" && "$ENABLE_CEPH_RGW" = "True" ]]; then
# common glance accounts for swift
create_service_user "glance-swift" "ResellerAdmin"
iniset $GLANCE_SWIFT_STORE_CONF ref1 user $SERVICE_PROJECT_NAME:glance-swift
AUTH_URL=$KEYSTONE_SERVICE_URI/v$CEPH_RGW_IDENTITY_API_VERSION
iniset $GLANCE_SWIFT_STORE_CONF ref1 key $SERVICE_PASSWORD
iniset $GLANCE_SWIFT_STORE_CONF ref1 auth_address $AUTH_URL
iniset $GLANCE_SWIFT_STORE_CONF ref1 user_domain_name $SERVICE_DOMAIN_NAME
iniset $GLANCE_SWIFT_STORE_CONF ref1 project_domain_name $SERVICE_DOMAIN_NAME
iniset $GLANCE_SWIFT_STORE_CONF ref1 auth_version $CEPH_RGW_IDENTITY_API_VERSION
iniset $GLANCE_API_CONF glance_store default_store swift
iniset $GLANCE_API_CONF glance_store swift_store_create_container_on_put True
iniset $GLANCE_API_CONF glance_store swift_store_config_file $GLANCE_SWIFT_STORE_CONF
iniset $GLANCE_API_CONF glance_store default_swift_reference ref1
iniset $GLANCE_API_CONF glance_store stores "file, http, swift"
else
iniset $GLANCE_API_CONF DEFAULT show_multiple_locations True
iniset $GLANCE_API_CONF glance_store default_store rbd
iniset $GLANCE_API_CONF glance_store stores "file, http, rbd"
iniset $GLANCE_API_CONF glance_store rbd_store_ceph_conf $CEPH_CONF_FILE
iniset $GLANCE_API_CONF glance_store rbd_store_user $GLANCE_CEPH_USER
iniset $GLANCE_API_CONF glance_store rbd_store_pool $GLANCE_CEPH_POOL
fi
}
function config_nova {
iniset $NOVA_CONF libvirt rbd_user ${CINDER_CEPH_USER}
iniset $NOVA_CONF libvirt rbd_secret_uuid ${CINDER_CEPH_UUID}
iniset $NOVA_CONF libvirt inject_key false
iniset $NOVA_CONF libvirt inject_partition -2
iniset $NOVA_CONF libvirt disk_cachemodes "network=writeback"
iniset $NOVA_CONF libvirt images_type rbd
iniset $NOVA_CONF libvirt images_rbd_pool ${NOVA_CEPH_POOL}
iniset $NOVA_CONF libvirt images_rbd_ceph_conf ${CEPH_CONF_FILE}
}
# import_libvirt_secret_ceph() - Imports Cinder user key into libvirt
# so it can connect to the Ceph cluster while attaching a Cinder block device
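# The secret UUID is CINDER_CEPH_UUID, the same value config_nova writes to
# nova.conf as libvirt/rbd_secret_uuid; its value is the cinder user's key
# extracted from the exported ceph.client.${CINDER_CEPH_USER}.keyring.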
function import_libvirt_secret_ceph {
[ "$ENABLE_CEPH_NOVA" == "False" ] && return;
NOVA_VIRSH_SECRET=$($SUDO cat ${CEPH_CONF_DIR}/ceph.client.${CINDER_CEPH_USER}.keyring | awk '/key/ {print $3}')
cat <<EOF | sudo tee secret.xml>/dev/null
<secret ephemeral='no' private='no'>
<uuid>${CINDER_CEPH_UUID}</uuid>
<usage type='ceph'>
<name>client.${CINDER_CEPH_USER} secret</name>
</usage>
</secret>
EOF
$SUDO virsh secret-define --file secret.xml # 2>/dev/null
$SUDO virsh secret-set-value --secret ${CINDER_CEPH_UUID} \
--base64 ${NOVA_VIRSH_SECRET} # 2>/dev/null
$SUDO rm -f secret.xml
}
# _undefine_virsh_secret() - Undefine Cinder key secret from libvirt
function _undefine_virsh_secret {
local virsh_uuid
virsh_uuid=$($SUDO virsh secret-list | awk '/^ ?[0-9a-z]/ { print $1 }')
echo $virsh_uuid
$SUDO virsh secret-undefine ${virsh_uuid} &>/dev/null
}
function configure_ceph {
[ "$ENABLE_CEPH_MANILA" == "True" ] && { SERVICES+=('cephfs'); KEYS+=('client.manila'); }
[ "$MANILA_CEPH_DRIVER" == "cephfsnfs" ] && SERVICES+=('nfs')
[ "$ENABLE_CEPH_GLANCE" == "True" ] && {
POOLS+=($GLANCE_CEPH_POOL);
KEYS+=('client.glance');
config_glance;
}
[ "$ENABLE_CEPH_CINDER" == "True" ] && {
POOLS+=($CINDER_CEPH_POOL);
KEYS+=('client.cinder');
}
[ "$ENABLE_CEPH_C_BAK" == "True" ] && {
POOLS+=($CINDER_BAK_CEPH_POOL);
KEYS+=('client.cinder-bak')
}
[ "$ENABLE_CEPH_NOVA" == "True" ] && {
POOLS+=($NOVA_CEPH_POOL);
KEYS+=('client.cinder');
config_nova
}
enable_services
add_pools
create_keys
client_config
import_libvirt_secret_ceph
}
# Hack: remove this function at some point
function configure_ceph_manila {
# noop
:
}
function cleanup_ceph {
# Cleanup the service.
stop_ceph
delete_osd_dev
# purge ceph config file and keys
$SUDO rm -f ${CEPH_CONF_DIR}/*
_undefine_virsh_secret
}
# is_ceph_enabled_for_service() - checks whether the OpenStack service
# specified as an argument is enabled with Ceph as its storage backend.
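# Shell convention: returns 0 (success) only when the service is enabled in
# devstack and the matching ENABLE_CEPH_<SERVICE> variable is True,
# e.g. ENABLE_CEPH_GLANCE for glance.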
function is_ceph_enabled_for_service {
local config config_name enabled service
enabled=1
service=$1
# Construct the global variable ENABLE_CEPH_.* corresponding to a
# $service.
config_name=ENABLE_CEPH_$(echo $service | \
tr '[:lower:]' '[:upper:]' | tr '-' '_')
config=$(eval echo "\$$config_name")
if (is_service_enabled $service) && [[ $config == 'True' ]]; then
enabled=0
fi
return $enabled
}
# Restore xtrace
$XTRACE
# Tell emacs to use shell-script-mode
## Local variables:
## mode: shell-script
## End:

@@ -18,3 +18,5 @@ if [[ $ENABLE_CEPH_CINDER == "True" ]]; then
         CINDER_ENABLED_BACKENDS=${CINDER_ENABLED_BACKENDS:-ceph}
     fi
 fi
+
+CEPHADM_DEPLOY=$(trueorfalse False CEPHADM_DEPLOY)

@@ -2,93 +2,115 @@
 if [[ "$1" == "source" ]]; then
     # Initial source
-    source $TOP_DIR/lib/ceph
+    if [[ "$CEPHADM_DEPLOY" = "True" ]]; then
+        source $TOP_DIR/lib/cephadm
+    else
+        source $TOP_DIR/lib/ceph
+    fi
 elif [[ "$1" == "stack" && "$2" == "pre-install" ]]; then
     if [[ "$ENABLE_CEPH_RGW" = "True" ]] && (is_service_enabled swift); then
         die $LINENO \
         "You cannot activate both Swift and Ceph Rados Gateway, \
         please disable Swift or set ENABLE_CEPH_RGW=False"
     fi
-    echo_summary "Installing Ceph"
-    check_os_support_ceph
-    if [ "$REMOTE_CEPH" = "False" ]; then
-        if [ "$CEPH_CONTAINERIZED" = "True" ]; then
-            echo_summary "Configuring and initializing Ceph"
-            deploy_containerized_ceph
-        else
-            install_ceph
-            echo_summary "Configuring Ceph"
-            configure_ceph
-            # NOTE (leseb): we do everything here
-            # because we need to have Ceph started before the main
-            # OpenStack components.
-            # Ceph OSD must start here otherwise we can't upload any images.
-            echo_summary "Initializing Ceph"
-            start_ceph
-        fi
-    else
-        install_ceph_remote
-    fi
+    if [[ "$CEPHADM_DEPLOY" = "True" ]]; then
+        # Set up system services
+        echo_summary "[cephadm] Configuring system services ceph"
+        pre_install_ceph
+    else
+        echo_summary "Installing Ceph"
+        check_os_support_ceph
+        if [ "$REMOTE_CEPH" = "False" ]; then
+            if [ "$CEPH_CONTAINERIZED" = "True" ]; then
+                echo_summary "Configuring and initializing Ceph"
+                deploy_containerized_ceph
+            else
+                install_ceph
+                echo_summary "Configuring Ceph"
+                configure_ceph
+                # NOTE (leseb): we do everything here
+                # because we need to have Ceph started before the main
+                # OpenStack components.
+                # Ceph OSD must start here otherwise we can't upload any images.
+                echo_summary "Initializing Ceph"
+                start_ceph
+            fi
+        else
+            install_ceph_remote
+        fi
+    fi
 elif [[ "$1" == "stack" && "$2" == "install" ]]; then
-    # FIXME(melwitt): This is a hack to get around a namespacing issue with
-    # Paste and PasteDeploy. For stable/queens, we use the Pike UCA packages
-    # and the Ceph packages in the Pike UCA are pulling in python-paste and
-    # python-pastedeploy packages. The python-pastedeploy package satisfies the
-    # upper-constraints but python-paste does not, so devstack pip installs a
-    # newer version of it, while python-pastedeploy remains. The mismatch
-    # between the install path of paste and paste.deploy causes Keystone to
-    # fail to start, with "ImportError: cannot import name deploy."
-    if [[ "$TARGET_BRANCH" == stable/queens || "$TARGET_BRANCH" == master ]]; then
-        pip_install -U --force PasteDeploy
-    fi
+    if [[ "$CEPHADM_DEPLOY" = "True" ]]; then
+        # Perform installation of service source
+        echo_summary "[cephadm] Installing ceph"
+        install_ceph
+    else
+        # FIXME(melwitt): This is a hack to get around a namespacing issue with
+        # Paste and PasteDeploy. For stable/queens, we use the Pike UCA packages
+        # and the Ceph packages in the Pike UCA are pulling in python-paste and
+        # python-pastedeploy packages. The python-pastedeploy package satisfies the
+        # upper-constraints but python-paste does not, so devstack pip installs a
+        # newer version of it, while python-pastedeploy remains. The mismatch
+        # between the install path of paste and paste.deploy causes Keystone to
+        # fail to start, with "ImportError: cannot import name deploy."
+        if [[ "$TARGET_BRANCH" == stable/queens || "$TARGET_BRANCH" == master ]]; then
+            pip_install -U --force PasteDeploy
+        fi
+    fi
 elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then
-    if is_ceph_enabled_for_service glance; then
-        echo_summary "Configuring Glance for Ceph"
-        configure_ceph_glance
-    fi
-    if is_ceph_enabled_for_service nova; then
-        echo_summary "Configuring Nova for Ceph"
-        configure_ceph_nova
-    fi
-    if is_ceph_enabled_for_service cinder; then
-        echo_summary "Configuring Cinder for Ceph"
-        configure_ceph_cinder
-    fi
-    if is_ceph_enabled_for_service nova; then
-        # NOTE (leseb): the part below is a requirement
-        # to attach Ceph block devices
-        echo_summary "Configuring libvirt secret"
-        import_libvirt_secret_ceph
-    fi
-    if is_ceph_enabled_for_service manila; then
-        echo_summary "Configuring Manila for Ceph"
-        configure_ceph_manila
-    fi
-    if [ "$REMOTE_CEPH" = "False" ]; then
-        if is_ceph_enabled_for_service glance; then
-            echo_summary "Configuring Glance for Ceph"
-            configure_ceph_embedded_glance
-        fi
-        if is_ceph_enabled_for_service nova; then
-            echo_summary "Configuring Nova for Ceph"
-            configure_ceph_embedded_nova
-        fi
-        if is_ceph_enabled_for_service cinder; then
-            echo_summary "Configuring Cinder for Ceph"
-            configure_ceph_embedded_cinder
-        fi
-        if is_ceph_enabled_for_service manila; then
-            echo_summary "Configuring Manila for Ceph"
-            configure_ceph_embedded_manila
-        fi
-        if [ "$ENABLE_CEPH_RGW" = "True" ]; then
-            echo_summary "Configuring Rados Gateway with Keystone for Swift"
-            configure_ceph_embedded_rgw
-            if [ "$CEPH_CONTAINERIZED" = "False" ]; then
-                start_ceph_embedded_rgw
-            else
-                _configure_ceph_rgw_container
-            fi
-        fi
-    fi
+    if [[ "$CEPHADM_DEPLOY" = "True" ]]; then
+        # Configure after the other layer 1 and 2 services have been configured
+        echo_summary "[cephadm] Configuring additional Ceph services"
+        configure_ceph
+    else
+        if is_ceph_enabled_for_service glance; then
+            echo_summary "Configuring Glance for Ceph"
+            configure_ceph_glance
+        fi
+        if is_ceph_enabled_for_service nova; then
+            echo_summary "Configuring Nova for Ceph"
+            configure_ceph_nova
+        fi
+        if is_ceph_enabled_for_service cinder; then
+            echo_summary "Configuring Cinder for Ceph"
+            configure_ceph_cinder
+        fi
+        if is_ceph_enabled_for_service nova; then
+            # NOTE (leseb): the part below is a requirement
+            # to attach Ceph block devices
+            echo_summary "Configuring libvirt secret"
+            import_libvirt_secret_ceph
+        fi
+        if is_ceph_enabled_for_service manila; then
+            echo_summary "Configuring Manila for Ceph"
+            configure_ceph_manila
+        fi
+        if [ "$REMOTE_CEPH" = "False" ]; then
+            if is_ceph_enabled_for_service glance; then
+                echo_summary "Configuring Glance for Ceph"
+                configure_ceph_embedded_glance
+            fi
+            if is_ceph_enabled_for_service nova; then
+                echo_summary "Configuring Nova for Ceph"
+                configure_ceph_embedded_nova
+            fi
+            if is_ceph_enabled_for_service cinder; then
+                echo_summary "Configuring Cinder for Ceph"
+                configure_ceph_embedded_cinder
+            fi
+            if is_ceph_enabled_for_service manila; then
+                echo_summary "Configuring Manila for Ceph"
+                configure_ceph_embedded_manila
+            fi
+            if [ "$ENABLE_CEPH_RGW" = "True" ]; then
+                echo_summary "Configuring Rados Gateway with Keystone for Swift"
+                configure_ceph_embedded_rgw
+                if [ "$CEPH_CONTAINERIZED" = "False" ]; then
+                    start_ceph_embedded_rgw
+                else
+                    _configure_ceph_rgw_container
+                fi
+            fi
+        fi
+    fi
@@ -123,24 +145,32 @@ fi
 if [[ "$1" == "unstack" ]]; then
-    if [ "$CEPH_CONTAINERIZED" = "False" ]; then
-        if [ "$REMOTE_CEPH" = "True" ]; then
-            cleanup_ceph_remote
-        else
-            stop_ceph
-            cleanup_ceph_embedded
-        fi
-    else
-        cleanup_containerized_ceph
-    fi
-    cleanup_ceph_general
+    if [[ "$CEPHADM_DEPLOY" = "True" ]]; then
+        cleanup_ceph
+    else
+        if [ "$CEPH_CONTAINERIZED" = "False" ]; then
+            if [ "$REMOTE_CEPH" = "True" ]; then
+                cleanup_ceph_remote
+            else
+                stop_ceph
+                cleanup_ceph_embedded
+            fi
+        else
+            cleanup_containerized_ceph
+        fi
+        cleanup_ceph_general
+    fi
 fi
 
 if [[ "$1" == "clean" ]]; then
-    if [ "$REMOTE_CEPH" = "True" ]; then
-        cleanup_ceph_remote
-    else
-        cleanup_ceph_embedded
-    fi
-    cleanup_ceph_general
+    if [[ "$CEPHADM_DEPLOY" = "True" ]]; then
+        cleanup_ceph
+    else
+        if [ "$REMOTE_CEPH" = "True" ]; then
+            cleanup_ceph_remote
+        else
+            cleanup_ceph_embedded
+        fi
+        cleanup_ceph_general
+    fi
 fi

@@ -23,8 +23,13 @@ CEPH_LOOPBACK_DISK_SIZE=${CEPH_LOOPBACK_DISK_SIZE:-$VOLUME_BACKING_FILE_SIZE}
 # Disable manage/unmanage snapshot tests on Tempest
 TEMPEST_VOLUME_MANAGE_SNAPSHOT=False
 
-# Source plugin's lib/ceph
-source $CEPH_PLUGIN_DIR/lib/ceph
+# Source plugin's lib/cephadm or lib/ceph
+# depending on chosen deployment method
+if [[ "$CEPHADM_DEPLOY" = "True" ]]; then
+    source $CEPH_PLUGIN_DIR/lib/cephadm
+else
+    source $CEPH_PLUGIN_DIR/lib/ceph
+fi
 
 # Set Manila related global variables used by Manila's DevStack plugin.
 if (is_ceph_enabled_for_service manila); then