[WIP][DNM] Remote Ceph with cephadm

Add podman as a pre-install dependency.
Add a REMOTE_CEPH capability to the CEPHADM deployment.

This is part of an effort to get multinode Ceph with Manila testing
running locally, and hopefully in CI at a future date.
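
For a local multinode run, the node that consumes the remote cluster only
needs the plugin enabled and pointed at the Ceph node. A minimal localrc
sketch for that consumer node (the CEPH_IP value is illustrative):

    enable_plugin devstack-plugin-ceph https://opendev.org/openstack/devstack-plugin-ceph
    CEPHADM_DEPLOY=True
    REMOTE_CEPH=True
    # Address of the node where cephadm bootstrapped the cluster (example value)
    CEPH_IP=192.168.121.10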

Change-Id: I84249ae268dfe00a112c67e5170b679acb318a25
Ashley Rodriguez 2023-03-07 14:54:34 +00:00
parent 41b6a8c227
commit 64e0635584
5 changed files with 65 additions and 42 deletions


@@ -130,11 +130,17 @@
    timeout: 10800
    voting: false
    vars:
      configure_swap_size: 8192
      tempest_concurrency: 3
      devstack_localrc:
        ENABLE_FILE_INJECTION: false
        ENABLE_VOLUME_MULTIATTACH: true
        TEMPEST_RUN_VALIDATION: false
        CINDER_CEPH_UUID: d531d2d4-3937-429c-b0c2-658fe41e82aa
        DISABLE_CEPHADM_POST_DEPLOY: True
        CEPHADM_DEPLOY: true
        MYSQL_REDUCE_MEMORY: True
        REMOTE_CEPH: False
      devstack_plugins:
        devstack-plugin-ceph: https://opendev.org/openstack/devstack-plugin-ceph
      devstack_services:
@@ -149,8 +155,11 @@
    group-vars:
      subnode:
        devstack_localrc:
          REMOTE_CEPH: true
          REMOTE_CEPH: True
          CEPH_IP: "{{ hostvars['controller']['nodepool']['private_ipv4'] }}"
          CINDER_CEPH_UUID: d531d2d4-3937-429c-b0c2-658fe41e82aa
          DISABLE_CEPHADM_POST_DEPLOY: True
          CEPHADM_DEPLOY: true
- job:
    name: devstack-plugin-ceph-master-tempest


@@ -1,2 +1,3 @@
xfsprogs
dbus-tools
dbus-tools
podman


@@ -26,6 +26,8 @@ CEPH_KEYRING="/etc/ceph/ceph.client.admin.keyring"
TARGET_BIN=/usr/bin
# TOGGLED IN THE CI TO SAVE RESOURCES
DISABLE_CEPHADM_POST_DEPLOY=${DISABLE_CEPHADM_POST_DEPLOY:-False}
# REMOTE CEPH ACCESS: SSH USER AND READINESS TIMEOUT (SECONDS)
SSH_USER=${SSH_USER:-stack}
TIMEOUT=${TIMEOUT:-30}
# DEFAULT OPTIONS
ATTEMPTS=30
@@ -98,7 +100,7 @@ NOVA_CEPH_POOL=${NOVA_CEPH_POOL:-vms}
function set_debug {
if [ "$DEBUG" -eq 1 ]; then
echo "[CEPHADM] Enabling Debug mode"
$SUDO "$CEPHADM" shell --fsid $FSID --config $CEPH_CONFIG \
$CEPHADM shell --fsid $FSID --config $CEPH_CONFIG \
--keyring $CEPH_KEYRING -- ceph config set mgr mgr/cephadm/log_to_cluster_level debug
echo "[CEPHADM] See debug logs running: ceph -W cephadm --watch-debug"
fi
@@ -106,38 +108,20 @@ function set_debug {
# Admin: check ceph cluster status
function check_cluster_status {
$SUDO "$CEPHADM" shell --fsid $FSID --config $CEPH_CONFIG \
$CEPHADM shell --fsid $FSID --config $CEPH_CONFIG \
--keyring $CEPH_KEYRING -- ceph -s -f json-pretty
}
# Admin: export ceph cluster config spec
function export_spec {
$SUDO "$CEPHADM" shell --fsid $FSID --config $CEPH_CONFIG \
$CEPHADM shell --fsid $FSID --config $CEPH_CONFIG \
--keyring $CEPH_KEYRING -- ceph orch ls --export > "$EXPORT"
echo "Ceph cluster config exported: $EXPORT"
}
# Pre-install ceph: install podman
function _install_podman {
# FIXME(vkmc) Check required for Ubuntu 20.04 LTS (current CI node)
# Remove when our CI is pushed to the next LTS version
if ! command -v podman &> /dev/null; then
if [[ $os_CODENAME =~ (focal) ]]; then
echo "deb https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/xUbuntu_20.04/ /" \
| sudo tee /etc/apt/sources.list.d/devel:kubic:libcontainers:stable.list
curl -L "https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/xUbuntu_20.04/Release.key" \
| sudo apt-key add -
sudo apt-get update
sudo apt-get -y upgrade
fi
install_package podman
fi
}
# Pre-install ceph: install required dependencies
function install_deps {
install_package jq ceph-common
_install_podman
install_package python3-cephfs python3-prettytable python3-rados python3-rbd python3-requests
}
@@ -149,7 +133,7 @@ function get_cephadm {
echo "[GET CEPHADM] cephadm is now available"
if [ -z "$CEPHADM" ]; then
CEPHADM=${TARGET_BIN}/cephadm
CEPHADM="${SUDO} ${TARGET_BIN}/cephadm"
fi
}
@@ -176,7 +160,7 @@ EOF
function start_ceph {
cluster=$(sudo cephadm ls | jq '.[]' | jq 'select(.name | test("^mon*")).fsid')
if [ -z "$cluster" ]; then
$SUDO $CEPHADM --image "$CONTAINER_IMAGE" \
$CEPHADM --image "$CONTAINER_IMAGE" \
bootstrap \
--fsid $FSID \
--config "$BOOTSTRAP_CONFIG" \
@@ -234,17 +218,17 @@ function add_osds {
# let's add some osds
if [ -z "$DEVICES" ]; then
echo "Using ALL available devices"
$SUDO $CEPHADM shell ceph orch apply osd --all-available-devices
$CEPHADM shell ceph orch apply osd --all-available-devices
else
for item in "${DEVICES[@]}"; do
echo "Creating osd $item on node $HOSTNAME"
$SUDO "$CEPHADM" shell --fsid $FSID --config $CEPH_CONFIG \
$CEPHADM shell --fsid $FSID --config $CEPH_CONFIG \
--keyring $CEPH_KEYRING -- ceph orch daemon add osd "$HOSTNAME:$item"
done
fi
while [ "$ATTEMPTS" -ne 0 ]; do
num_osds=$($SUDO $CEPHADM shell --fsid $FSID --config $CEPH_CONFIG \
num_osds=$($CEPHADM shell --fsid $FSID --config $CEPH_CONFIG \
--keyring $CEPH_KEYRING -- ceph -s -f json | jq '.osdmap | .num_up_osds')
if [ "$num_osds" -ge "$MIN_OSDS" ]; then
break;
@@ -263,12 +247,12 @@ function add_pools {
[ "${#POOLS[@]}" -eq 0 ] && return;
for pool in "${POOLS[@]}"; do
$SUDO "$CEPHADM" shell --fsid $FSID --config $CEPH_CONFIG \
$CEPHADM shell --fsid $FSID --config $CEPH_CONFIG \
--keyring $CEPH_KEYRING -- ceph osd pool create "$pool" "$DEFAULT_PG_NUM" \
"$DEFAULT_PGP_NUM" replicated --autoscale-mode on
# set the application to the pool (which also means rbd init the pool)
$SUDO "$CEPHADM" shell --fsid $FSID --config $CEPH_CONFIG \
$CEPHADM shell --fsid $FSID --config $CEPH_CONFIG \
--keyring $CEPH_KEYRING -- ceph osd pool application enable "$pool" rbd
done
}
@@ -296,7 +280,7 @@ function _create_key {
osd_caps="allow class-read object_prefix rbd_children, $caps"
fi
$SUDO "$CEPHADM" shell -v "$KEY_EXPORT_DIR:$KEY_EXPORT_DIR" --fsid $FSID --config $CEPH_CONFIG \
$CEPHADM shell -v "$KEY_EXPORT_DIR:$KEY_EXPORT_DIR" --fsid $FSID --config $CEPH_CONFIG \
--keyring $CEPH_KEYRING -- ceph auth get-or-create "$name" mgr "allow rw" mon "allow r" osd "$osd_caps" \
-o "$KEY_EXPORT_DIR/ceph.$name.keyring"
@@ -318,7 +302,7 @@ function cephfs_config {
# - $FSNAME.FSNAME.data
# - $FSNAME.FSNAME.meta
# and the mds daemon is deployed
$SUDO "$CEPHADM" shell --fsid $FSID --config $CEPH_CONFIG \
$CEPHADM shell --fsid $FSID --config $CEPH_CONFIG \
--keyring $CEPH_KEYRING -- ceph fs volume create "$FSNAME"
}
@@ -326,7 +310,7 @@ function cephfs_config {
function ceph_nfs_config {
# (fpantano) TODO: Build an ingress daemon on top of this
echo "[CEPHADM] Deploy nfs.$FSNAME backend"
$SUDO "$CEPHADM" shell --fsid $FSID --config $CEPH_CONFIG \
$CEPHADM shell --fsid $FSID --config $CEPH_CONFIG \
--keyring $CEPH_KEYRING -- ceph orch apply nfs \
"$FSNAME" --placement="$HOSTNAME" --port $NFS_PORT
}
@@ -368,7 +352,7 @@ function set_config_key {
local section=$1
local key=$2
local value=$3
$SUDO "$CEPHADM" shell --fsid $FSID --config $CEPH_CONFIG \
$CEPHADM shell --fsid $FSID --config $CEPH_CONFIG \
ceph config set ${section} ${key} ${value}
}
@@ -407,7 +391,7 @@ function configure_rgw_ceph_section {
function rgw {
configure_ceph_embedded_rgw
$SUDO "$CEPHADM" shell --fsid $FSID --config $CEPH_CONFIG \
$CEPHADM shell --fsid $FSID --config $CEPH_CONFIG \
--keyring $CEPH_KEYRING -- ceph orch apply rgw default default default default \
"--placement=$HOSTNAME count:1" --port "$RGW_PORT"
@@ -490,7 +474,7 @@ function stop_ceph {
if ! [ -x "$CEPHADM" ]; then
get_cephadm
CEPHADM=${TARGET_BIN}/cephadm
CEPHADM="${SUDO} ${TARGET_BIN}/cephadm"
fi
cluster_deleted=0
@@ -523,6 +507,20 @@ function install_ceph {
bootstrap_config
get_cephadm
start_ceph
# Drop a marker so remote consumers can tell the cluster is provisioned
$SUDO tee $CEPH_CONF_DIR/CEPH_CLUSTER_IS_READY.txt > /dev/null <<EOF
Cluster is provisioned.
EOF
}
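# Remote Ceph: wait until the Ceph node reports the cluster is provisioned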
function ceph_is_ready {
echo "Waiting the cluster to be up"
until ssh $SSH_USER@$CEPH_IP ls /etc/ceph/CEPH_CLUSTER_IS_READY.txt &> /dev/null; do
sleep 1
echo -n .
(( TIMEOUT-- ))
[[ "$TIMEOUT" -eq 0 ]] && exit 1
done
echo
}
function config_glance {
@@ -589,7 +587,7 @@ function config_nova {
function set_min_client_version {
if [ ! -z "$CEPH_MIN_CLIENT_VERSION" ]; then
$SUDO "$CEPHADM" shell --fsid $FSID --config $CEPH_CONFIG \
$CEPHADM shell --fsid $FSID --config $CEPH_CONFIG \
--keyring $CEPH_KEYRING -- ceph osd set-require-min-compat-client ${CEPH_MIN_CLIENT_VERSION}
fi
}
@@ -661,6 +659,9 @@ function configure_ceph {
enable_services
add_pools
create_keys
if [[ "$REMOTE_CEPH" = "True" ]]; then
scp -r stack@$CEPH_IP:/etc/ceph /etc/ceph
fi
client_config
import_libvirt_secret_ceph
@@ -677,8 +678,11 @@ function configure_ceph_manila {
function cleanup_ceph {
# Cleanup the service.
stop_ceph
delete_osd_dev
if [[ "$REMOTE_CEPH" == "True" ]]; then
echo "Remote Ceph cluster, skip stop_ceph and delete_osd_dev"
else
stop_ceph
delete_osd_dev
# purge ceph config file and keys
$SUDO rm -f ${CEPH_CONF_DIR}/*
if is_ceph_enabled_for_service nova; then
@@ -687,10 +691,10 @@ function cleanup_ceph {
}
function disable_cephadm {
$SUDO "$CEPHADM" shell --fsid $FSID --config $CEPH_CONFIG \
$CEPHADM shell --fsid $FSID --config $CEPH_CONFIG \
--keyring $CEPH_KEYRING -- ceph orch set backend
$SUDO "$CEPHADM" shell --fsid $FSID --config $CEPH_CONFIG \
$CEPHADM shell --fsid $FSID --config $CEPH_CONFIG \
--keyring $CEPH_KEYRING -- ceph mgr module disable cephadm
}


@@ -20,3 +20,4 @@ if [[ $ENABLE_CEPH_CINDER == "True" ]]; then
fi
CEPHADM_DEPLOY=$(trueorfalse False CEPHADM_DEPLOY)
REMOTE_CEPH=$(trueorfalse False REMOTE_CEPH)


@@ -40,10 +40,12 @@ elif [[ "$1" == "stack" && "$2" == "pre-install" ]]; then
fi
fi
elif [[ "$1" == "stack" && "$2" == "install" ]]; then
if [[ "$CEPHADM_DEPLOY" = "True" ]]; then
if [[ "$CEPHADM_DEPLOY" = "True" && "$REMOTE_CEPH" = "False" ]]; then
# Perform installation of service source
echo_summary "[cephadm] Installing ceph"
install_ceph
elif [[ "$CEPHADM_DEPLOY" = "True" && "$REMOTE_CEPH" = "True" ]]; then
echo "[CEPHADM] Remote Ceph: Skipping install"
else
# FIXME(melwitt): This is a hack to get around a namespacing issue with
# Paste and PasteDeploy. For stable/queens, we use the Pike UCA packages
@@ -59,6 +61,12 @@ elif [[ "$1" == "stack" && "$2" == "install" ]]; then
fi
elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then
if [[ "$CEPHADM_DEPLOY" = "True" ]]; then
if [[ "$REMOTE_CEPH" = "True" ]]; then
export CEPHADM="ssh $SSH_USER@$CEPH_IP sudo ${TARGET_BIN}/cephadm"
ceph_is_ready
else
export CEPHADM="${SUDO} ${TARGET_BIN}/cephadm"
fi
# Configure after the other layer 1 and 2 services have been configured
echo_summary "[cephadm] Configuring additional Ceph services"
configure_ceph
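
With REMOTE_CEPH=True the consumer node is meant to relay every cephadm call to
the Ceph node over SSH instead of running it locally. A rough sketch of the
resulting expansion (the address is illustrative; TARGET_BIN is /usr/bin as set
above):

    # Exported on the consumer node during post-config
    CEPHADM="ssh stack@192.168.121.10 sudo /usr/bin/cephadm"
    # A helper such as check_cluster_status then effectively runs:
    #   ssh stack@192.168.121.10 sudo /usr/bin/cephadm shell --fsid $FSID \
    #       --config $CEPH_CONFIG --keyring $CEPH_KEYRING -- ceph -s -f json-pretty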