Deploy with cephadm
Add the option to deploy the Ceph cluster with the cephadm tool.

Co-Authored-By: Francesco Pantano <fpantano@redhat.com>
Change-Id: Id2a704b136b9e47b7b88ef586282cb5d0f754cf1
@ -0,0 +1,501 @@
#!/bin/bash
#
# lib/cephadm
# Functions to control the configuration
# and operation of the **Ceph** storage service
# when deployed using the cephadm tool

# ``stack.sh`` calls the entry points in this order:
#
# - pre_install_ceph
# - install_ceph
# - configure_ceph
# - init_ceph
# - cleanup_ceph # unstack || clean

# Save trace setting
XTRACE=$(set +o | grep xtrace)
set +o xtrace

# GENERIC CEPHADM INTERNAL OPTIONS, DO NOT EDIT
CEPH_PUB_KEY="/etc/ceph/ceph.pub"
CONFIG="/etc/ceph/ceph.conf"
BOOTSTRAP_CONFIG="$HOME/bootstrap_ceph.conf"
KEYRING="/etc/ceph/ceph.client.admin.keyring"
REQUIREMENTS=("jq" "lvm" "python3")
TARGET_BIN=/usr/bin

# DEFAULT OPTIONS
ATTEMPTS=30
CONTAINER_IMAGE=${CONTAINER_IMAGE:-'quay.io/ceph/ceph:v16.2.7'}
DEVICES=()
FSID="4b5c8c0a-ff60-454b-a1b4-9747aa737d19"
KEY_EXPORT_DIR="/etc/ceph"
KEYS=("client.openstack") # at least the client.openstack default key should be created
MIN_OSDS=1
SERVICES=()
SLEEP=5
CEPHADM_DEV_OSD=${CEPHADM_DEV_OSD:-"True"}
CEPH_LOOPBACK_DISK_SIZE=${CEPH_LOOPBACK_DISK_SIZE:-8G}
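
# The ${VAR:-default} options above can be overridden from the DevStack
# environment (e.g. local.conf) before this file is sourced. An illustrative
# override (values are examples, not recommendations):
#
#   CONTAINER_IMAGE='quay.io/ceph/ceph:v16.2.7'
#   CEPH_LOOPBACK_DISK_SIZE=16G
#   CEPHADM_DEV_OSD=False   # skip the loopback OSD and use all available devices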

# POOLS
# E.G. POOLS[test]='rbd'
declare -A POOLS

# NFS OPTIONS
FSNAME=${FSNAME:-'cephfs'}
NFS_PORT=12345
NFS_CLIENT=0
NFS_CLIENT_NAME="client.manila"
NFS_CLIENT_LOG="/var/log/ceph-$NFS_CLIENT_NAME.log"

# RGW OPTIONS
RGW_PORT=8080

# CLIENT CONFIG
CLIENT_CONFIG=$HOME/ceph_client.conf
EXPORT=$HOME/ceph_export.yml
RBD_CLIENT_LOG=/var/log/ceph/qemu-guest-\$pid.log

[ -z "$SUDO" ] && SUDO=sudo

## Admin

# Admin: enable debug mode
function set_debug {
    if [ "$DEBUG" -eq 1 ]; then
        echo "[CEPHADM] Enabling Debug mode"
        $SUDO "$CEPHADM" shell --fsid $FSID --config $CONFIG \
            --keyring $KEYRING -- ceph config set mgr mgr/cephadm/log_to_cluster_level debug
        echo "[CEPHADM] See debug logs running: ceph -W cephadm --watch-debug"
    fi
}

# Admin: check ceph cluster status
function check_cluster_status {
    $SUDO "$CEPHADM" shell --fsid $FSID --config $CONFIG \
        --keyring $KEYRING -- ceph -s -f json-pretty
}

# Admin: export ceph cluster config spec
function export_spec {
    $SUDO "$CEPHADM" shell --fsid $FSID --config $CONFIG \
        --keyring $KEYRING -- ceph orch ls --export > "$EXPORT"
    echo "Ceph cluster config exported: $EXPORT"
}
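
# The exported spec is plain YAML, so it can later be fed back to the
# orchestrator to recreate the same service layout. A sketch (assuming the
# file is made visible inside the cephadm shell, e.g. via a -v bind mount):
#
#   $SUDO "$CEPHADM" shell -v "$HOME:$HOME" --fsid $FSID --config $CONFIG \
#       --keyring $KEYRING -- ceph orch apply -i "$EXPORT"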

## Pre-install

# Pre-install ceph: check dependencies are available
function prereq {
    for cmd in "${REQUIREMENTS[@]}"; do
        if ! command -v "$cmd" &> /dev/null; then
            echo "Command $cmd not found"
            exit 1;
        fi
    done
}

# Pre-install ceph: install required dependencies
function install_deps {
    install_package jq
    # install_package podman
}

# Pre-install ceph: preview the planned deployment
function preview {
    echo "---------"
    echo "SERVICES"
    for daemon in "${SERVICES[@]}"; do
        echo "* $daemon"
    done

    echo "---------"
    echo "POOLS"
    for key in "${!POOLS[@]}"; do
        echo "* $key:${POOLS[$key]}";
    done

    echo "---------"
    echo "KEYS"
    for kname in "${KEYS[@]}"; do
        echo "* $kname";
    done

    echo "---------"
    echo "DEVICES"
    for dev in "${DEVICES[@]}"; do
        echo "* $dev"
    done
    [ "${#DEVICES[@]}" -eq 0 ] && echo "Using ALL available devices"

    echo "---------"
    echo IP Address: "$HOST_IP"
    echo "---------"
    echo "Container Image: $CONTAINER_IMAGE"
    echo "---------"
}

# Pre-install ceph: get cephadm binary
function get_cephadm {
    curl -O https://raw.githubusercontent.com/ceph/ceph/pacific/src/cephadm/cephadm
    $SUDO mv cephadm $TARGET_BIN
    $SUDO chmod +x $TARGET_BIN/cephadm
    echo "[GET CEPHADM] cephadm is now available"

    if [ -z "$CEPHADM" ]; then
        CEPHADM=${TARGET_BIN}/cephadm
    fi
}
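
# A cheap sanity check that the downloaded binary works (it only lists
# local daemons and needs no bootstrapped cluster):
#
#   $SUDO "$CEPHADM" ls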

# Pre-install ceph: bootstrap config
function bootstrap_config {
    cat <<EOF > "$BOOTSTRAP_CONFIG"
[global]
log to file = true
osd crush chooseleaf type = 0
osd_pool_default_pg_num = 8
osd_pool_default_pgp_num = 8
osd_pool_default_size = 1
[mon]
mon_warn_on_insecure_global_id_reclaim_allowed = False
mon_warn_on_pool_no_redundancy = False
EOF
}

## Install

# Install ceph: run cephadm bootstrap
function start_ceph {
    cluster=$(sudo cephadm ls | jq '.[]' | jq 'select(.name | test("^mon*")).fsid')
    if [ -z "$cluster" ]; then
        $SUDO $CEPHADM --image "$CONTAINER_IMAGE" \
            bootstrap \
            --fsid $FSID \
            --config "$BOOTSTRAP_CONFIG" \
            --output-config $CONFIG \
            --output-keyring $KEYRING \
            --output-pub-ssh-key $CEPH_PUB_KEY \
            --allow-overwrite \
            --allow-fqdn-hostname \
            --skip-monitoring-stack \
            --skip-dashboard \
            --single-host-defaults \
            --skip-firewalld \
            --mon-ip "$HOST_IP"

        test -e $CONFIG
        test -e $KEYRING

        if [ "$CEPHADM_DEV_OSD" == 'True' ]; then
            create_osd_dev
        fi
        # Wait for the cephadm backend to become operational,
        # then add the osds via drivegroups
        sleep "$SLEEP"
        add_osds
    fi
}

# Install ceph: create a loopback device to be used as osd
function create_osd_dev {
    sudo dd if=/dev/zero of=/var/lib/ceph-osd.img bs=1 count=0 seek="$CEPH_LOOPBACK_DISK_SIZE"
    osd_dev=$(sudo losetup -f --show /var/lib/ceph-osd.img)
    sudo pvcreate $osd_dev
    sudo vgcreate ceph_vg $osd_dev
    sudo lvcreate -n ceph_lv_data -l +100%FREE ceph_vg
    DEVICES+=("/dev/ceph_vg/ceph_lv_data")
}
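
# The resulting backing store can be inspected by hand (illustrative):
#
#   sudo losetup -j /var/lib/ceph-osd.img   # the loop device behind the image
#   sudo lvs ceph_vg                        # the ceph_lv_data logical volume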

# Cleanup ceph: delete the osd file and release the loopback device
function delete_osd_dev {
    if [ -n "$(sudo lvs --noheadings -o lv_path -S lv_name=ceph_lv_data)" ]; then
        sudo lvremove --force /dev/ceph_vg/ceph_lv_data
        sudo vgremove --force ceph_vg
        osd_dev=$(sudo losetup -j /var/lib/ceph-osd.img -l -n -O NAME)
        sudo pvremove --force $osd_dev
        sudo losetup -d $osd_dev
        sudo rm -f /var/lib/ceph-osd.img
        sudo partprobe
        DEVICES=()
    fi
}

# Install ceph: add osds
function add_osds {
    # let's add some osds
    if [ "${#DEVICES[@]}" -eq 0 ]; then
        echo "Using ALL available devices"
        $SUDO $CEPHADM shell ceph orch apply osd --all-available-devices
    else
        for item in "${DEVICES[@]}"; do
            echo "Creating osd $item on node $HOSTNAME"
            $SUDO "$CEPHADM" shell --fsid $FSID --config $CONFIG \
                --keyring $KEYRING -- ceph orch daemon add osd "$HOSTNAME:$item"
        done
    fi

    while [ "$ATTEMPTS" -ne 0 ]; do
        num_osds=$($SUDO $CEPHADM shell --fsid $FSID --config $CONFIG \
            --keyring $KEYRING -- ceph -s -f json | jq '.osdmap | .num_up_osds')
        if [ "$num_osds" -ge "$MIN_OSDS" ]; then
            break;
        fi
        ATTEMPTS=$(("$ATTEMPTS" - 1))
        sleep 1
    done
    echo "[CEPHADM] OSD(s) deployed: $num_osds"

    # [ "$num_osds" -lt "$MIN_OSDS" ] && exit 255
}
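
# After the wait loop returns, the OSD layout can be verified with the usual
# orchestrator queries, for example:
#
#   $SUDO "$CEPHADM" shell --fsid $FSID --config $CONFIG \
#       --keyring $KEYRING -- ceph osd tree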

# Install ceph: create and enable pools
function add_pools {
    # Pools are tied to their application, therefore the function
    # iterates over the associative array that defines this relationship
    # e.g. { 'volumes': 'rbd', 'manila_data': 'cephfs' }

    [ "${#POOLS[@]}" -eq 0 ] && return;

    for pool in "${!POOLS[@]}"; do
        $SUDO "$CEPHADM" shell --fsid $FSID --config $CONFIG \
            --keyring $KEYRING -- ceph osd pool create "$pool" "$DEFAULT_PG_NUM" \
            "$DEFAULT_PGP_NUM" replicated --autoscale-mode on

        # set the application to the pool (which also means rbd init the pool)
        $SUDO "$CEPHADM" shell --fsid $FSID --config $CONFIG \
            --keyring $KEYRING -- ceph osd pool application enable "$pool" "${POOLS[$pool]}"
    done
}
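
# Example usage (illustrative names): register the pools, then create and
# tag them in one pass:
#
#   POOLS['volumes']='rbd'
#   POOLS['manila_data']='cephfs'
#   add_pools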

# Install ceph: create a keyring
function _create_key {
    local name=$1
    local caps
    local osd_caps

    if [ "${#POOLS[@]}" -eq 0 ]; then
        osd_caps="allow *"
    else
        caps=$(build_caps)
        osd_caps="allow class-read object_prefix rbd_children, $caps"
    fi

    $SUDO "$CEPHADM" shell -v "$KEY_EXPORT_DIR:$KEY_EXPORT_DIR" --fsid $FSID --config $CONFIG \
        --keyring $KEYRING -- ceph auth get-or-create "$name" mon "allow r" osd "$osd_caps" \
        -o "$KEY_EXPORT_DIR/$name.keyring"
}
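
# build_caps is not defined in this file; a minimal sketch of the helper it
# relies on (an assumption, not the plugin's actual implementation) would
# build an osd cap string granting rwx on every registered pool:
#
#   function build_caps {
#       local caps=""
#       for pool in "${!POOLS[@]}"; do
#           caps+="allow rwx pool=$pool, "
#       done
#       echo "${caps%, }"
#   }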

# Install ceph: create one or more keyrings
function create_keys {
    for key_name in "${KEYS[@]}"; do
        echo "Creating key $key_name"
        _create_key "$key_name"
    done
}
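
# For example, queueing an extra key before create_keys runs exports one
# more keyring under $KEY_EXPORT_DIR (illustrative):
#
#   KEYS+=('client.glance')
#   create_keys   # also writes /etc/ceph/client.glance.keyring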

# Install ceph: add MDS
function cephfs_config {
    # Two pools are generated by this action:
    # - cephfs.$FSNAME.data
    # - cephfs.$FSNAME.meta
    $SUDO "$CEPHADM" shell --fsid $FSID --config $CONFIG \
        --keyring $KEYRING -- ceph fs volume create "$FSNAME"
    if [[ ${CEPHFS_MULTIPLE_FILESYSTEMS} == 'True' ]]; then
        $SUDO "$CEPHADM" shell --fsid $FSID --config $CONFIG \
            --keyring $KEYRING -- ceph fs flag set enable_multiple true \
            --yes-i-really-mean-it
        # Enable snapshots in CephFS.
        $SUDO "$CEPHADM" shell --fsid $FSID --config $CONFIG \
            --keyring $KEYRING -- ceph fs set $FSNAME allow_new_snaps true \
            --yes-i-really-mean-it
    fi
}

# Install ceph: add NFS
function ceph_nfs_config {
    # (fpantano) TODO: Build an ingress daemon on top of this
    echo "[CEPHADM] Deploy nfs.$FSNAME backend"
    $SUDO "$CEPHADM" shell --fsid $FSID --config $CONFIG \
        --keyring $KEYRING -- ceph orch apply nfs \
        "$FSNAME" --placement="$HOSTNAME" --port $NFS_PORT
}

# RGW pre config
function configure_ceph_embedded_rgw {

    # keystone endpoint for radosgw
    _create_swift_endpoint

    # Create radosgw service user with admin privileges
    create_service_user "radosgw" "admin"

    if [ "$CEPH_RGW_KEYSTONE_SSL" = "True" ]; then
        # radosgw needs to access keystone's revocation list
        sudo mkdir -p ${dest}/nss
        sudo openssl x509 -in /etc/keystone/ssl/certs/ca.pem -pubkey | \
            sudo certutil -d ${dest}/nss -A -n ca -t "TCu,Cu,Tuw"

        sudo openssl x509 -in /etc/keystone/ssl/certs/signing_cert.pem -pubkey | \
            sudo certutil -A -d ${dest}/nss -n signing_cert -t "P,P,P"
    fi
}

# General Ceph utility to set config keys within the mgr
function set_config_key {
    local section=$1
    local key=$2
    local value=$3
    $SUDO "$CEPHADM" shell --fsid $FSID --config $CONFIG \
        --keyring $KEYRING -- ceph config set ${section} ${key} ${value}
}
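
# Usage sketch (option taken from the RGW map below):
#
#   set_config_key "global" "rgw_keystone_verify_ssl" "false"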

# RGW config keys: no iniset anymore, everything is pushed as mgr key/value
function configure_rgw_ceph_section {

    # RGW KEYSTONE KEYS
    declare -A RGW_CONFIG_KEYS

    RGW_CONFIG_KEYS=(['rgw_keystone_api_version']=3
        ['rgw_keystone_url']="$KEYSTONE_SERVICE_URI"
        ['rgw_keystone_accepted_roles']="member, _member_, Member, admin"
        ['rgw_keystone_accepted_admin_roles']="ResellerAdmin"
        ['rgw_keystone_admin_domain']="$SERVICE_DOMAIN_NAME"
        ['rgw_keystone_admin_project']="$SERVICE_PROJECT_NAME"
        ['rgw_keystone_admin_user']="radosgw"
        ['rgw_s3_auth_use_keystone']="true"
        ['rgw_keystone_admin_password']="$SERVICE_PASSWORD"
        ['rgw_keystone_verify_ssl']="false"
    )

    for k in "${!RGW_CONFIG_KEYS[@]}"; do
        set_config_key "global" "${k}" "${RGW_CONFIG_KEYS[$k]}"
    done
}

# Install ceph: add RGW
function start_ceph_embedded_rgw {
    $SUDO "$CEPHADM" shell --fsid $FSID --config $CONFIG \
        --keyring $KEYRING -- ceph orch apply rgw default default default \
        "--placement=$HOSTNAME count:1" --port "$RGW_PORT"
}

# Configure cephfs and ceph_nfs
function configure_ceph_manila {
    # Deploy mds and configure cephfs
    cephfs_config
    # Deploy and configure ganesha
    [ "$MANILA_CEPH_DRIVER" == 'cephfsnfs' ] && ceph_nfs_config
    # Add manila keys to the list
    KEYS+=('client.manila')
}

# Install ceph: services deployment
function enable_services {
    for item in "${SERVICES[@]}"; do
        case "$item" in
            cephfs|CEPHFS)
                echo "[CEPHADM] Config cephfs volume on node $HOSTNAME"
                cephfs_config
                ;;
            nfs|NFS)
                echo "[CEPHADM] Deploying NFS on node $HOSTNAME"
                ceph_nfs_config
                NFS_CLIENT=1
                ;;
            rgw|RGW)
                echo "[CEPHADM] Deploying RGW on node $HOSTNAME"
                start_ceph_embedded_rgw
                ;;
        esac
    done
}
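
# Typical flow (illustrative): queue the services, then deploy them all:
#
#   SERVICES+=('cephfs' 'nfs')
#   enable_services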

# Install ceph: client config
function client_config {
    echo "Dump the minimal ceph.conf"
    cp $CONFIG "$CLIENT_CONFIG"

    cat >> "$CLIENT_CONFIG" <<-EOF
[client.libvirt]
admin socket = /var/run/ceph/\$cluster-\$type.\$id.\$pid.\$cctid.asok
log file = $RBD_CLIENT_LOG
EOF

    if [ "$NFS_CLIENT" -eq 1 ]; then
        cat >> "$CLIENT_CONFIG" <<-EOF
[$NFS_CLIENT_NAME]
client mount uid = 0
client mount gid = 0
log file = $NFS_CLIENT_LOG
admin socket = /var/run/ceph/\$cluster-\$type.\$id.\$pid.\$cctid.asok
keyring = $KEY_EXPORT_DIR/$NFS_CLIENT_NAME.keyring
EOF
        echo "Client config exported: $CLIENT_CONFIG"
    fi
}

## Remove ceph

# Remove ceph: remove cluster and zap osds
function stop_ceph {
    if ! [ -x "$CEPHADM" ]; then
        get_cephadm
        CEPHADM=${TARGET_BIN}/cephadm
    fi
    cluster=$(sudo cephadm ls | jq '.[]' | jq 'select(.name | test("^mon*")).fsid')
    if [ -n "$cluster" ]; then
        sudo cephadm rm-cluster --zap-osds --fsid "$FSID" --force
        echo "[CEPHADM] Cluster deleted"
    fi
}

## devstack-plugin-ceph functions

function pre_install_ceph {
    # Check dependencies for the service.
    install_deps
    prereq
}

function install_ceph {
    # Install the service.
    bootstrap_config
    get_cephadm
}

function configure_ceph {
    [ "$ENABLE_CEPH_MANILA" == "True" ] && SERVICES+=('cephfs')
    [ "$MANILA_CEPH_DRIVER" == "cephfsnfs" ] && SERVICES+=('nfs')
    enable_services
}

function init_ceph {
    # Initialize and start the service.
    # preview
    # start_ceph
    # set_debug
    # create_osd_dev
    # add_osds
    add_pools
    create_keys
    # TODO: we need to follow the plugin.sh code, so I'm not sure
    # we can still use enable_services (at least for now)
    # enable_services
    check_cluster_status
    client_config
}

function cleanup_ceph {
    # Cleanup the service.
    stop_ceph
    delete_osd_dev
}

# Restore xtrace
$XTRACE

# Tell emacs to use shell-script-mode
## Local variables:
## mode: shell-script
## End:
@ -18,3 +18,5 @@ if [[ $ENABLE_CEPH_CINDER == "True" ]]; then
        CINDER_ENABLED_BACKENDS=${CINDER_ENABLED_BACKENDS:-ceph}
    fi
fi

CEPHADM_DEPLOY=$(trueorfalse False CEPHADM_DEPLOY)
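
With that toggle in place, a cephadm-based cluster can be requested from
local.conf (the plugin URL below is the usual upstream location):

    enable_plugin devstack-plugin-ceph https://opendev.org/openstack/devstack-plugin-ceph
    CEPHADM_DEPLOY=True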

@ -2,93 +2,115 @@

if [[ "$1" == "source" ]]; then
    # Initial source
    if [[ "$CEPHADM_DEPLOY" = "True" ]]; then
        source $TOP_DIR/lib/cephadm
    else
        source $TOP_DIR/lib/ceph
    fi
elif [[ "$1" == "stack" && "$2" == "pre-install" ]]; then
    if [[ "$ENABLE_CEPH_RGW" = "True" ]] && (is_service_enabled swift); then
        die $LINENO \
        "You cannot activate both Swift and Ceph Rados Gateway, \
        please disable Swift or set ENABLE_CEPH_RGW=False"
    fi
    if [[ "$CEPHADM_DEPLOY" = "True" ]]; then
        # Set up system services
        echo_summary "[cephadm] Configuring system services ceph"
        pre_install_ceph
    else
        echo_summary "Installing Ceph"
        check_os_support_ceph
        if [ "$REMOTE_CEPH" = "False" ]; then
            if [ "$CEPH_CONTAINERIZED" = "True" ]; then
                echo_summary "Configuring and initializing Ceph"
                deploy_containerized_ceph
            else
                install_ceph
                echo_summary "Configuring Ceph"
                configure_ceph
                # NOTE (leseb): we do everything here
                # because we need to have Ceph started before the main
                # OpenStack components.
                # Ceph OSD must start here otherwise we can't upload any images.
                echo_summary "Initializing Ceph"
                start_ceph
            fi
        else
            install_ceph_remote
        fi
    fi
elif [[ "$1" == "stack" && "$2" == "install" ]]; then
    if [[ "$CEPHADM_DEPLOY" = "True" ]]; then
        # Perform installation of service source
        echo_summary "[cephadm] Installing ceph"
        install_ceph
    else
        # FIXME(melwitt): This is a hack to get around a namespacing issue with
        # Paste and PasteDeploy. For stable/queens, we use the Pike UCA packages
        # and the Ceph packages in the Pike UCA are pulling in python-paste and
        # python-pastedeploy packages. The python-pastedeploy package satisfies the
        # upper-constraints but python-paste does not, so devstack pip installs a
        # newer version of it, while python-pastedeploy remains. The mismatch
        # between the install path of paste and paste.deploy causes Keystone to
        # fail to start, with "ImportError: cannot import name deploy."
        if [[ "$TARGET_BRANCH" == stable/queens || "$TARGET_BRANCH" == master ]]; then
            pip_install -U --force PasteDeploy
        fi
    fi
elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then
    if is_ceph_enabled_for_service glance; then
        echo_summary "Configuring Glance for Ceph"
        configure_ceph_glance
    fi
    if is_ceph_enabled_for_service nova; then
        echo_summary "Configuring Nova for Ceph"
        configure_ceph_nova
    fi
    if is_ceph_enabled_for_service cinder; then
        echo_summary "Configuring Cinder for Ceph"
        configure_ceph_cinder
    fi
    if is_ceph_enabled_for_service nova; then
        # NOTE (leseb): the part below is a requirement
        # to attach Ceph block devices
        echo_summary "Configuring libvirt secret"
        import_libvirt_secret_ceph
    fi
    if is_ceph_enabled_for_service manila; then
        echo_summary "Configuring Manila for Ceph"
        configure_ceph_manila
    fi

    if [ "$REMOTE_CEPH" = "False" ]; then
        if [[ "$CEPHADM_DEPLOY" = "True" ]]; then
            # Configure after the other layer 1 and 2 services have been configured
            echo_summary "[cephadm] Configuring additional Ceph services"
            configure_ceph
        else
            if is_ceph_enabled_for_service glance; then
                echo_summary "Configuring Glance for Ceph"
                configure_ceph_embedded_glance
            fi
            if is_ceph_enabled_for_service nova; then
                echo_summary "Configuring Nova for Ceph"
                configure_ceph_embedded_nova
            fi
            if is_ceph_enabled_for_service cinder; then
                echo_summary "Configuring Cinder for Ceph"
                configure_ceph_embedded_cinder
            fi
            if is_ceph_enabled_for_service manila; then
                echo_summary "Configuring Manila for Ceph"
                configure_ceph_embedded_manila
            fi
            if [ "$ENABLE_CEPH_RGW" = "True" ]; then
                echo_summary "Configuring Rados Gateway with Keystone for Swift"
                configure_ceph_embedded_rgw
                if [ "$CEPH_CONTAINERIZED" = "False" ]; then
                    start_ceph_embedded_rgw
                else
                    _configure_ceph_rgw_container
                fi
            fi
        fi
    fi
@ -123,24 +145,32 @@ fi

if [[ "$1" == "unstack" ]]; then
    if [[ "$CEPHADM_DEPLOY" = "True" ]]; then
        cleanup_ceph
    else
        if [ "$CEPH_CONTAINERIZED" = "False" ]; then
            if [ "$REMOTE_CEPH" = "True" ]; then
                cleanup_ceph_remote
            else
                stop_ceph
                cleanup_ceph_embedded
            fi
        else
            cleanup_containerized_ceph
        fi
        cleanup_ceph_general
    fi
fi

if [[ "$1" == "clean" ]]; then
    if [[ "$CEPHADM_DEPLOY" = "True" ]]; then
        cleanup_ceph
    else
        if [ "$REMOTE_CEPH" = "True" ]; then
            cleanup_ceph_remote
        else
            cleanup_ceph_embedded
        fi
        cleanup_ceph_general
    fi
fi
@ -23,8 +23,13 @@ CEPH_LOOPBACK_DISK_SIZE=${CEPH_LOOPBACK_DISK_SIZE:-$VOLUME_BACKING_FILE_SIZE}
# Disable manage/unmanage snapshot tests on Tempest
TEMPEST_VOLUME_MANAGE_SNAPSHOT=False

# Source plugin's lib/cephadm or lib/ceph
# depending on chosen deployment method
if [[ "$CEPHADM_DEPLOY" = "True" ]]; then
    source $CEPH_PLUGIN_DIR/lib/cephadm
else
    source $CEPH_PLUGIN_DIR/lib/ceph
fi

# Set Manila related global variables used by Manila's DevStack plugin.
if (is_ceph_enabled_for_service manila); then