Remote Ceph with cephadm

Add podman, ceph-common and jq to the pre-install dependencies.
Add REMOTE_CEPH support to the cephadm deployment.
Stop calling set_min_client_version only when Cinder is enabled; the
minimum client version should be set in any case.
Get the FSID from ceph.conf in /etc/ceph to avoid overriding it
unnecessarily.

Part of an effort to test multinode deployments with cephadm.

Needed-By: I5162815b66d3f3e8cf8c1e246b61b0ea06c1a270
Change-Id: I84249ae268dfe00a112c67e5170b679acb318a25
Author: Ashley Rodriguez 2023-03-07 14:54:34 +00:00
Parent: f493a2b408
Commit: b663a9fb83
6 changed files with 45 additions and 54 deletions
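For readers who want to try the change outside the gate, the sketch below shows roughly how the new variables would be combined in a two-node DevStack setup. The variable names, the plugin URL and the shared CINDER_CEPH_UUID value are taken from the diffs below; the split into a controller and a subnode local.conf is only an illustration, not part of this change.

```bash
# Hypothetical controller local.conf (localrc section): this node
# bootstraps the Ceph cluster itself, so REMOTE_CEPH stays False.
enable_plugin devstack-plugin-ceph https://opendev.org/openstack/devstack-plugin-ceph
CEPHADM_DEPLOY=True
REMOTE_CEPH=False
CINDER_CEPH_UUID=d531d2d4-3937-429c-b0c2-658fe41e82aa

# Hypothetical subnode local.conf (localrc section): reuse the cluster
# deployed on the controller; the UUID must match on both nodes.
enable_plugin devstack-plugin-ceph https://opendev.org/openstack/devstack-plugin-ceph
CEPHADM_DEPLOY=True
REMOTE_CEPH=True
CINDER_CEPH_UUID=d531d2d4-3937-429c-b0c2-658fe41e82aa
```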

@@ -99,18 +99,23 @@
parent: tempest-multinode-full-py3
description: |
Integration tests that runs the ceph device plugin across multiple
nodes on py3.
nodes on py3. The Ceph deployment strategy used by this job is Cephadm.
required-projects:
- openstack/cinder-tempest-plugin
- openstack/devstack-plugin-ceph
timeout: 10800
voting: false
vars:
configure_swap_size: 8192
tempest_concurrency: 3
devstack_localrc:
ENABLE_FILE_INJECTION: false
ENABLE_VOLUME_MULTIATTACH: true
TEMPEST_RUN_VALIDATION: true
USE_BLOCK_MIGRATION_FOR_LIVE_MIGRATION: false
CEPHADM_DEPLOY: True
DISABLE_CEPHADM_POST_DEPLOY: True
MYSQL_REDUCE_MEMORY: True
REMOTE_CEPH: False
CINDER_CEPH_UUID: d531d2d4-3937-429c-b0c2-658fe41e82aa
devstack_plugins:
devstack-plugin-ceph: https://opendev.org/openstack/devstack-plugin-ceph
@@ -126,7 +131,8 @@
group-vars:
subnode:
devstack_localrc:
REMOTE_CEPH: true
REMOTE_CEPH: True
CEPHADM_DEPLOY: True
CINDER_CEPH_UUID: d531d2d4-3937-429c-b0c2-658fe41e82aa
- job:
@@ -140,19 +146,6 @@
devstack_localrc:
TEST_MASTER: true
- job:
name: devstack-plugin-ceph-multinode-tempest-cephadm
parent: devstack-plugin-ceph-multinode-tempest-py3
description: |
Integration tests that runs the ceph device plugin across multiple
nodes on py3.
The ceph deployment strategy used by this job is cephadm.
vars:
devstack_localrc:
USE_BLOCK_MIGRATION_FOR_LIVE_MIGRATION: false
CEPHADM_DEPLOY: true
tempest_concurrency: 1
- project-template:
name: devstack-plugin-ceph-tempest-jobs
description: |
@@ -162,15 +155,13 @@
- devstack-plugin-ceph-tempest-py3
- devstack-plugin-ceph-tempest-ubuntu:
voting: false
- devstack-plugin-ceph-multinode-tempest-py3
- devstack-plugin-ceph-cephfs-native:
irrelevant-files: *irrelevant-files
voting: false
- devstack-plugin-ceph-cephfs-nfs:
irrelevant-files: *irrelevant-files
voting: false
# - devstack-plugin-ceph-multinode-tempest-py3
# - devstack-plugin-ceph-multinode-tempest-cephadm:
# voting: false
# - devstack-plugin-ceph-master-tempest:
# voting: false
gate:

@@ -2,3 +2,5 @@ xfsprogs
qemu-block-extra
catatonit
podman
jq
ceph-common

@@ -1,2 +1,5 @@
xfsprogs
dbus-tools
podman
jq
ceph-common

@@ -31,7 +31,11 @@ DISABLE_CEPHADM_POST_DEPLOY=${DISABLE_CEPHADM_POST_DEPLOY:-False}
ATTEMPTS=30
CONTAINER_IMAGE=${CONTAINER_IMAGE:-'quay.io/ceph/ceph:v17.2.3'}
DEVICES=()
if [[ "$REMOTE_CEPH" = "False" ]]; then
FSID=$(uuidgen)
else
FSID=$(cat $CEPH_CONFIG | grep fsid | awk 'BEGIN { RS = "fsid = "} ; { print $0 }' - )
fi
KEY_EXPORT_DIR="/etc/ceph"
KEYS=("client.openstack") # at least the client.openstack default key should be created
MIN_OSDS=1
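The hunk above is where the new REMOTE_CEPH handling avoids generating a fresh FSID: when a remote cluster is used, the FSID is read back from the ceph.conf that already exists under /etc/ceph. As a rough standalone illustration of the same idea (not the exact pipeline used in the change), assuming a standard ceph.conf with an `fsid = <uuid>` line:

```bash
#!/usr/bin/env bash
# Illustrative sketch: reuse the fsid of an already-deployed cluster
# instead of generating a new one with uuidgen.
CEPH_CONFIG=${CEPH_CONFIG:-/etc/ceph/ceph.conf}

if [[ -f "$CEPH_CONFIG" ]]; then
    # Keep only the value from the "fsid = <uuid>" line.
    FSID=$(awk -F' *= *' '/^fsid/ {print $2; exit}' "$CEPH_CONFIG")
else
    FSID=$(uuidgen)
fi
echo "Using cluster fsid: $FSID"
```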
@@ -117,36 +121,16 @@ function export_spec {
echo "Ceph cluster config exported: $EXPORT"
}
# Pre-install ceph: install podman
function _install_podman {
# FIXME(vkmc) Check required for Ubuntu 20.04 LTS (current CI node)
# Remove when our CI is pushed to the next LTS version
if ! command -v podman &> /dev/null; then
if [[ $os_CODENAME =~ (focal) ]]; then
echo "deb https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/xUbuntu_20.04/ /" \
| sudo tee /etc/apt/sources.list.d/devel:kubic:libcontainers:stable.list
curl -L "https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/xUbuntu_20.04/Release.key" \
| sudo apt-key add -
sudo apt-get update
sudo apt-get -y upgrade
fi
install_package podman
fi
}
# Pre-install ceph: install required dependencies
function install_deps {
install_package jq ceph-common
_install_podman
if [[ "$REMOTE_CEPH" == "False" ]]; then
install_package python3-cephfs python3-prettytable python3-rados python3-rbd python3-requests
fi
}
# Pre-install ceph: get cephadm binary
function get_cephadm {
# NOTE(gouthamr): cephadm binary here is a python executable, and the
# $os_PACKAGE ("rpm") or $os_release (el9) doesn't really matter. There is
# no ubuntu/debian equivalent being published by the ceph community.
curl -O https://download.ceph.com/rpm-${CEPH_RELEASE}/el9/noarch/cephadm
curl -O https://raw.githubusercontent.com/ceph/ceph/"$CEPH_RELEASE"/src/cephadm/cephadm
$SUDO mv cephadm $TARGET_BIN
$SUDO chmod +x $TARGET_BIN/cephadm
echo "[GET CEPHADM] cephadm is now available"
@@ -179,7 +163,7 @@ EOF
function start_ceph {
cluster=$(sudo cephadm ls | jq '.[]' | jq 'select(.name | test("^mon*")).fsid')
if [ -z "$cluster" ]; then
$SUDO $CEPHADM --image "$CONTAINER_IMAGE" \
$SUDO "$CEPHADM" --image "$CONTAINER_IMAGE" \
bootstrap \
--fsid $FSID \
--config "$BOOTSTRAP_CONFIG" \
@@ -237,7 +221,7 @@ function add_osds {
# let's add some osds
if [ -z "$DEVICES" ]; then
echo "Using ALL available devices"
$SUDO $CEPHADM shell ceph orch apply osd --all-available-devices
$SUDO "$CEPHADM" shell ceph orch apply osd --all-available-devices
else
for item in "${DEVICES[@]}"; do
echo "Creating osd $item on node $HOSTNAME"
@@ -247,7 +231,7 @@ function add_osds {
fi
while [ "$ATTEMPTS" -ne 0 ]; do
num_osds=$($SUDO $CEPHADM shell --fsid $FSID --config $CEPH_CONFIG \
num_osds=$($SUDO "$CEPHADM" shell --fsid $FSID --config $CEPH_CONFIG \
--keyring $CEPH_KEYRING -- ceph -s -f json | jq '.osdmap | .num_up_osds')
if [ "$num_osds" -ge "$MIN_OSDS" ]; then
break;
@@ -303,6 +287,7 @@ function _create_key {
--keyring $CEPH_KEYRING -- ceph auth get-or-create "$name" mgr "allow rw" mon "allow r" osd "$osd_caps" \
-o "$KEY_EXPORT_DIR/ceph.$name.keyring"
$SUDO chown ${STACK_USER}:$(id -g -n $whoami) \
${CEPH_CONF_DIR}/ceph.$name.keyring
}
@@ -645,7 +630,6 @@ function configure_ceph {
if is_ceph_enabled_for_service cinder; then
POOLS+=($CINDER_CEPH_POOL)
KEYS+=("client.$CINDER_CEPH_USER")
set_min_client_version
fi
if is_ceph_enabled_for_service c-bak; then
@@ -662,8 +646,10 @@ function configure_ceph {
[ "$ENABLE_CEPH_RGW" == "True" ] && SERVICES+=('rgw')
enable_services
if [[ "$REMOTE_CEPH" = "False" ]]; then
add_pools
create_keys
fi
client_config
import_libvirt_secret_ceph
@@ -680,8 +666,12 @@ function configure_ceph_manila {
function cleanup_ceph {
# Cleanup the service.
if [[ "$REMOTE_CEPH" == "True" ]]; then
echo "Remote Ceph cluster, skipping stop_ceph and delete_osd_dev"
else
stop_ceph
delete_osd_dev
fi
# purge ceph config file and keys
$SUDO rm -f ${CEPH_CONF_DIR}/*
if is_ceph_enabled_for_service nova; then

@@ -20,3 +20,4 @@ if [[ $ENABLE_CEPH_CINDER == "True" ]]; then
fi
CEPHADM_DEPLOY=$(trueorfalse False CEPHADM_DEPLOY)
REMOTE_CEPH=$(trueorfalse False REMOTE_CEPH)
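Both flags are normalized through DevStack's trueorfalse helper (defined in DevStack's functions-common), so values like 1, yes or true in local.conf all collapse to the canonical True. For readers unfamiliar with it, a simplified, illustrative re-implementation is sketched below; the real helper differs in detail.

```bash
# Simplified, illustrative re-implementation of DevStack's trueorfalse:
# "trueorfalse <default> <variable-name>" echoes "True" or "False".
function trueorfalse {
    local default=$1
    local varname=$2
    # Indirect expansion: read the value of the variable named in $2.
    local value=${!varname:-}
    case ${value,,} in          # lower-case the value for comparison
        1|t|true|on|y|yes)  echo True ;;
        0|f|false|off|n|no) echo False ;;
        *)                  echo "$default" ;;
    esac
}

# Example: REMOTE_CEPH=yes in local.conf becomes the canonical "True".
REMOTE_CEPH=yes
REMOTE_CEPH=$(trueorfalse False REMOTE_CEPH)
echo "$REMOTE_CEPH"   # -> True
```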

@@ -40,10 +40,14 @@ elif [[ "$1" == "stack" && "$2" == "pre-install" ]]; then
fi
fi
elif [[ "$1" == "stack" && "$2" == "install" ]]; then
if [[ "$CEPHADM_DEPLOY" = "True" ]]; then
if [[ "$CEPHADM_DEPLOY" = "True" && "$REMOTE_CEPH" = "False" ]]; then
# Perform installation of service source
echo_summary "[cephadm] Installing ceph"
install_ceph
set_min_client_version
elif [[ "$CEPHADM_DEPLOY" = "True" && "$REMOTE_CEPH" = "True" ]]; then
echo "[CEPHADM] Remote Ceph: Skipping install"
get_cephadm
else
# FIXME(melwitt): This is a hack to get around a namespacing issue with
# Paste and PasteDeploy. For stable/queens, we use the Pike UCA packages