Ability to use a remote Ceph cluster

Sometimes we want to run benchmarks on virtual machines backed by a Ceph
cluster. The first idea that comes to mind is to use DevStack to quickly
get an OpenStack environment up and running, but how do we configure
DevStack to use such a remote cluster?

Thanks to this commit it is now possible to use an already existing Ceph
cluster. In this case DevStack only needs two things (the keyring path
can be overridden, see the snippet after this list):

* the location of the Ceph configuration file (by default DevStack looks
for /etc/ceph/ceph.conf)
* the admin key of the remote Ceph cluster (by default DevStack looks
for /etc/ceph/ceph.client.admin.keyring)

DevStack will then create the necessary pools, users and keys and plug
the OpenStack services into the cluster as usual. During the unstack
phase, the pools, users and keys created by DevStack are deleted from
the remote cluster, while the local Ceph files and the ceph-common
package are removed from the DevStack host.
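
Concretely, the remote cleanup boils down to commands of the following
shape for each enabled service (see cleanup_ceph_remote in the diff
below; the pool and user names here are only illustrative placeholders):

    # sketch: remove the Glance pool and its user from the remote cluster
    sudo ceph osd pool delete images images --yes-i-really-really-mean-it
    sudo ceph auth del client.glance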

To enable this mode, simply add REMOTE_CEPH=True to your localrc file.
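
For example, a minimal localrc for this mode could look like the
following sketch (the keyring override is optional and only shown here
to illustrate a non-default location):

    # use an already running Ceph cluster instead of deploying one
    REMOTE_CEPH=True
    # only needed if the admin keyring is not at /etc/ceph/ceph.client.admin.keyring
    REMOTE_CEPH_ADMIN_KEY_PATH=/opt/ceph/ceph.client.admin.keyring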

Change-Id: I1a4b6fd676d50b6a41a09e7beba9b11f8d1478f7
Signed-off-by: Sébastien Han <sebastien.han@enovance.com>

@@ -6,14 +6,19 @@ if is_service_enabled ceph; then
source $TOP_DIR/lib/ceph
elif [[ "$1" == "stack" && "$2" == "pre-install" ]]; then
echo_summary "Installing Ceph"
install_ceph
echo_summary "Configuring Ceph"
configure_ceph
# NOTE (leseb): Do everything here because we need to have Ceph started before the main
# OpenStack components. Ceph OSD must start here otherwise we can't upload any images.
echo_summary "Initializing Ceph"
init_ceph
start_ceph
check_os_support_ceph
if [ "$REMOTE_CEPH" = "False" ]; then
install_ceph
echo_summary "Configuring Ceph"
configure_ceph
# NOTE (leseb): Do everything here because we need to have Ceph started before the main
# OpenStack components. Ceph OSD must start here otherwise we can't upload any images.
echo_summary "Initializing Ceph"
init_ceph
start_ceph
else
install_ceph_remote
fi
elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then
if is_service_enabled glance; then
echo_summary "Configuring Glance for Ceph"
@@ -32,14 +37,39 @@ if is_service_enabled ceph; then
echo_summary "Configuring libvirt secret"
import_libvirt_secret_ceph
fi
if [ "$REMOTE_CEPH" = "False" ]; then
if is_service_enabled glance; then
echo_summary "Configuring Glance for Ceph"
configure_ceph_embedded_glance
fi
if is_service_enabled nova; then
echo_summary "Configuring Nova for Ceph"
configure_ceph_embedded_nova
fi
if is_service_enabled cinder; then
echo_summary "Configuring Cinder for Ceph"
configure_ceph_embedded_cinder
fi
fi
fi
if [[ "$1" == "unstack" ]]; then
stop_ceph
cleanup_ceph
if [ "$REMOTE_CEPH" = "True" ]; then
cleanup_ceph_remote
else
cleanup_ceph_embedded
stop_ceph
fi
cleanup_ceph_general
fi
if [[ "$1" == "clean" ]]; then
cleanup_ceph
if [ "$REMOTE_CEPH" = "True" ]; then
cleanup_ceph_remote
else
cleanup_ceph_embedded
fi
cleanup_ceph_general
fi
fi

lib/ceph

@@ -68,6 +68,11 @@ CINDER_CEPH_UUID=${CINDER_CEPH_UUID:-$(uuidgen)}
CEPH_REPLICAS=${CEPH_REPLICAS:-1}
CEPH_REPLICAS_SEQ=$(seq ${CEPH_REPLICAS})
# Connect to an existing Ceph cluster
REMOTE_CEPH=$(trueorfalse False $REMOTE_CEPH)
REMOTE_CEPH_ADMIN_KEY_PATH=${REMOTE_CEPH_ADMIN_KEY_PATH:-$CEPH_CONF_DIR/ceph.client.admin.keyring}
# Functions
# ------------
@@ -92,29 +97,69 @@ EOF
sudo rm -f secret.xml
}
# undefine_virsh_secret() - Undefine Cinder key secret from libvirt
function undefine_virsh_secret {
if is_service_enabled cinder || is_service_enabled nova; then
local virsh_uuid=$(sudo virsh secret-list | awk '/^ ?[0-9a-z]/ { print $1 }')
sudo virsh secret-undefine ${virsh_uuid} >/dev/null 2>&1
fi
}
# check_os_support_ceph() - Check if the operating system provides a decent version of Ceph
function check_os_support_ceph {
if [[ ! ${DISTRO} =~ (trusty|f20|f21) ]]; then
echo "WARNING: your distro $DISTRO does not provide (at least) the Firefly release. Please use Ubuntu Trusty or Fedora 20 (and higher)"
if [[ "$FORCE_CEPH_INSTALL" != "yes" ]]; then
die $LINENO "If you wish to install Ceph on this distribution anyway run with FORCE_CEPH_INSTALL=yes"
fi
NO_UPDATE_REPOS=False
fi
}
# cleanup_ceph() - Remove residual data files, anything left over from previous
# runs that a clean run would need to clean up
function cleanup_ceph {
function cleanup_ceph_remote {
# do a proper cleanup from here to avoid leftover on the remote Ceph cluster
if is_service_enabled glance; then
sudo ceph osd pool delete $GLANCE_CEPH_POOL $GLANCE_CEPH_POOL --yes-i-really-really-mean-it > /dev/null 2>&1
sudo ceph auth del client.$GLANCE_CEPH_USER > /dev/null 2>&1
fi
if is_service_enabled cinder; then
sudo ceph osd pool delete $CINDER_CEPH_POOL $CINDER_CEPH_POOL --yes-i-really-really-mean-it > /dev/null 2>&1
sudo ceph auth del client.$CINDER_CEPH_USER > /dev/null 2>&1
fi
if is_service_enabled c-bak; then
sudo ceph osd pool delete $CINDER_BAK_CEPH_POOL $CINDER_BAK_CEPH_POOL --yes-i-really-really-mean-it > /dev/null 2>&1
sudo ceph auth del client.$CINDER_BAK_CEPH_USER > /dev/null 2>&1
fi
if is_service_enabled nova; then
iniset $NOVA_CONF libvirt rbd_secret_uuid ""
sudo ceph osd pool delete $NOVA_CEPH_POOL $NOVA_CEPH_POOL --yes-i-really-really-mean-it > /dev/null 2>&1
fi
}
function cleanup_ceph_embedded {
sudo pkill -f ceph-mon
sudo pkill -f ceph-osd
sudo rm -rf ${CEPH_DATA_DIR}/*/*
sudo rm -rf ${CEPH_CONF_DIR}/*
if egrep -q ${CEPH_DATA_DIR} /proc/mounts; then
sudo umount ${CEPH_DATA_DIR}
fi
if [[ -e ${CEPH_DISK_IMAGE} ]]; then
sudo rm -f ${CEPH_DISK_IMAGE}
fi
uninstall_package ceph ceph-common python-ceph libcephfs1 > /dev/null 2>&1
if is_service_enabled cinder || is_service_enabled nova; then
local virsh_uuid=$(sudo virsh secret-list | awk '/^ ?[0-9a-z]/ { print $1 }')
sudo virsh secret-undefine ${virsh_uuid} >/dev/null 2>&1
fi
if is_service_enabled nova; then
iniset $NOVA_CONF libvirt rbd_secret_uuid ""
fi
}
function cleanup_ceph_general {
undefine_virsh_secret
uninstall_package ceph ceph-common python-ceph libcephfs1 > /dev/null 2>&1
# purge ceph config file and keys
sudo rm -rf ${CEPH_CONF_DIR}/*
}
# configure_ceph() - Set config files, create data dirs, etc
function configure_ceph {
local count=0
@@ -130,7 +175,7 @@ function configure_ceph {
sudo mkdir /var/lib/ceph/mon/ceph-$(hostname)
# create a default ceph configuration file
sudo tee -a ${CEPH_CONF_FILE} > /dev/null <<EOF
sudo tee ${CEPH_CONF_FILE} > /dev/null <<EOF
[global]
fsid = ${CEPH_FSID}
mon_initial_members = $(hostname)
@@ -203,14 +248,17 @@ EOF
done
}
# configure_ceph_glance() - Glance config needs to come after Glance is set up
function configure_ceph_glance {
function configure_ceph_embedded_glance {
# configure Glance service options, ceph pool, ceph user and ceph key
sudo ceph -c ${CEPH_CONF_FILE} osd pool create ${GLANCE_CEPH_POOL} ${GLANCE_CEPH_POOL_PG} ${GLANCE_CEPH_POOL_PGP}
sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${GLANCE_CEPH_POOL} size ${CEPH_REPLICAS}
if [[ $CEPH_REPLICAS -ne 1 ]]; then
sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${GLANCE_CEPH_POOL} crush_ruleset ${RULE_ID}
fi
}
# configure_ceph_glance() - Glance config needs to come after Glance is set up
function configure_ceph_glance {
sudo ceph -c ${CEPH_CONF_FILE} osd pool create ${GLANCE_CEPH_POOL} ${GLANCE_CEPH_POOL_PG} ${GLANCE_CEPH_POOL_PGP}
sudo ceph -c ${CEPH_CONF_FILE} auth get-or-create client.${GLANCE_CEPH_USER} mon "allow r" osd "allow class-read object_prefix rbd_children, allow rwx pool=${GLANCE_CEPH_POOL}" | sudo tee ${CEPH_CONF_DIR}/ceph.client.${GLANCE_CEPH_USER}.keyring
sudo chown ${STACK_USER}:$(id -g -n $whoami) ${CEPH_CONF_DIR}/ceph.client.${GLANCE_CEPH_USER}.keyring
@@ -225,14 +273,17 @@ function configure_ceph_glance {
iniset $GLANCE_API_CONF glance_store rbd_store_pool $GLANCE_CEPH_POOL
}
# configure_ceph_nova() - Nova config needs to come after Nova is set up
function configure_ceph_nova {
function configure_ceph_embedded_nova {
# configure Nova service options, ceph pool, ceph user and ceph key
sudo ceph -c ${CEPH_CONF_FILE} osd pool create ${NOVA_CEPH_POOL} ${NOVA_CEPH_POOL_PG} ${NOVA_CEPH_POOL_PGP}
sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${NOVA_CEPH_POOL} size ${CEPH_REPLICAS}
if [[ $CEPH_REPLICAS -ne 1 ]]; then
sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${NOVA_CEPH_POOL} crush_ruleset ${RULE_ID}
fi
}
# configure_ceph_nova() - Nova config needs to come after Nova is set up
function configure_ceph_nova {
sudo ceph -c ${CEPH_CONF_FILE} osd pool create ${NOVA_CEPH_POOL} ${NOVA_CEPH_POOL_PG} ${NOVA_CEPH_POOL_PGP}
iniset $NOVA_CONF libvirt rbd_user ${CINDER_CEPH_USER}
iniset $NOVA_CONF libvirt rbd_secret_uuid ${CINDER_CEPH_UUID}
iniset $NOVA_CONF libvirt inject_key false
@@ -248,15 +299,17 @@ function configure_ceph_nova {
fi
}
# configure_ceph_cinder() - Cinder config needs to come after Cinder is set up
function configure_ceph_cinder {
function configure_ceph_embedded_cinder {
# Configure Cinder service options, ceph pool, ceph user and ceph key
sudo ceph -c ${CEPH_CONF_FILE} osd pool create ${CINDER_CEPH_POOL} ${CINDER_CEPH_POOL_PG} ${CINDER_CEPH_POOL_PGP}
sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${CINDER_CEPH_POOL} size ${CEPH_REPLICAS}
if [[ $CEPH_REPLICAS -ne 1 ]]; then
sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${CINDER_CEPH_POOL} crush_ruleset ${RULE_ID}
fi
}
# configure_ceph_cinder() - Cinder config needs to come after Cinder is set up
function configure_ceph_cinder {
sudo ceph -c ${CEPH_CONF_FILE} osd pool create ${CINDER_CEPH_POOL} ${CINDER_CEPH_POOL_PG} ${CINDER_CEPH_POOL_PGP}
sudo ceph -c ${CEPH_CONF_FILE} auth get-or-create client.${CINDER_CEPH_USER} mon "allow r" osd "allow class-read object_prefix rbd_children, allow rwx pool=${CINDER_CEPH_POOL}, allow rwx pool=${NOVA_CEPH_POOL},allow rx pool=${GLANCE_CEPH_POOL}" | sudo tee ${CEPH_CONF_DIR}/ceph.client.${CINDER_CEPH_USER}.keyring
sudo chown ${STACK_USER}:$(id -g -n $whoami) ${CEPH_CONF_DIR}/ceph.client.${CINDER_CEPH_USER}.keyring
}
@@ -270,15 +323,12 @@ function init_ceph {
}
# install_ceph() - Collect source and prepare
function install_ceph_remote {
install_package ceph-common
}
function install_ceph {
# NOTE(dtroyer): At some point it'll be easier to test for unsupported distros,
# leveraging the list in stack.sh
if [[ ${os_CODENAME} =~ trusty ]] || [[ ${os_CODENAME} =~ SchrödingersCat ]] || [[ ${os_CODENAME} =~ Heisenbug ]]; then
NO_UPDATE_REPOS=False
install_package ceph
else
exit_distro_not_supported "Ceph since your distro doesn't provide (at least) the Firefly release. Please use Ubuntu Trusty or Fedora 19/20"
fi
install_package ceph
}
# start_ceph() - Start running processes, including screen


@@ -52,11 +52,13 @@ function configure_cinder_backend_ceph {
iniset $CINDER_CONF DEFAULT glance_api_version 2
if is_service_enabled c-bak; then
# Configure Cinder backup service options, ceph pool, ceph user and ceph key
sudo ceph -c ${CEPH_CONF_FILE} osd pool create ${CINDER_BAK_CEPH_POOL} ${CINDER_BAK_CEPH_POOL_PG} ${CINDER_BAK_CEPH_POOL_PGP}
sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${CINDER_BAK_CEPH_POOL} size ${CEPH_REPLICAS}
if [[ $CEPH_REPLICAS -ne 1 ]]; then
sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${CINDER_BAK_CEPH_POOL} crush_ruleset ${RULE_ID}
if [ "$REMOTE_CEPH" = "False" ]; then
# Configure Cinder backup service options, ceph pool, ceph user and ceph key
sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${CINDER_BAK_CEPH_POOL} size ${CEPH_REPLICAS}
if [[ $CEPH_REPLICAS -ne 1 ]]; then
sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${CINDER_BAK_CEPH_POOL} crush_ruleset ${RULE_ID}
fi
fi
sudo ceph -c ${CEPH_CONF_FILE} auth get-or-create client.${CINDER_BAK_CEPH_USER} mon "allow r" osd "allow class-read object_prefix rbd_children, allow rwx pool=${CINDER_BAK_CEPH_POOL}" | sudo tee ${CEPH_CONF_DIR}/ceph.client.${CINDER_BAK_CEPH_USER}.keyring
sudo chown $(whoami):$(whoami) ${CEPH_CONF_DIR}/ceph.client.${CINDER_BAK_CEPH_USER}.keyring