Remote Ceph with cephadm

Add podman, ceph-common and jq as part of the pre-install dependencies.
Add REMOTE_CEPH capabilities to the CEPHADM deployment. Call
set_min_client_version unconditionally instead of only when Cinder is
enabled; it should be set in any case. Get the FSID from ceph.conf in
/etc/ceph to avoid an unnecessary override.

Part of an effort to test multinode deployments with cephadm.

Needed-By: I5162815b66d3f3e8cf8c1e246b61b0ea06c1a270
Change-Id: I84249ae268dfe00a112c67e5170b679acb318a25
parent f493a2b408
commit b663a9fb83
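As context for the diff below, a minimal sketch of how the two node roles might be configured; the variable names come from this change, while the [[local|localrc]] layout and the enable_plugin line follow standard devstack conventions and are assumptions, not part of this commit:

    # Controller node: deploy the Ceph cluster locally with cephadm.
    [[local|localrc]]
    enable_plugin devstack-plugin-ceph https://opendev.org/openstack/devstack-plugin-ceph
    CEPHADM_DEPLOY=True
    REMOTE_CEPH=False

    # Subnode: consume the controller's cluster. With REMOTE_CEPH=True the
    # plugin skips install_ceph, fetches only the cephadm binary, and reads
    # the FSID from the existing /etc/ceph/ceph.conf instead of uuidgen.
    [[local|localrc]]
    enable_plugin devstack-plugin-ceph https://opendev.org/openstack/devstack-plugin-ceph
    CEPHADM_DEPLOY=True
    REMOTE_CEPH=True

This mirrors the group-vars in the multinode Zuul job below, where the subnode sets REMOTE_CEPH: True against the controller's cluster.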
.zuul.yaml

@@ -99,18 +99,23 @@
     parent: tempest-multinode-full-py3
     description: |
       Integration tests that runs the ceph device plugin across multiple
-      nodes on py3.
+      nodes on py3. The Ceph deployment strategy used by this job is Cephadm.
     required-projects:
       - openstack/cinder-tempest-plugin
       - openstack/devstack-plugin-ceph
     timeout: 10800
-    voting: false
     vars:
+      configure_swap_size: 8192
+      tempest_concurrency: 3
       devstack_localrc:
         ENABLE_FILE_INJECTION: false
         ENABLE_VOLUME_MULTIATTACH: true
         TEMPEST_RUN_VALIDATION: true
         USE_BLOCK_MIGRATION_FOR_LIVE_MIGRATION: false
+        CEPHADM_DEPLOY: True
+        DISABLE_CEPHADM_POST_DEPLOY: True
+        MYSQL_REDUCE_MEMORY: True
+        REMOTE_CEPH: False
         CINDER_CEPH_UUID: d531d2d4-3937-429c-b0c2-658fe41e82aa
       devstack_plugins:
         devstack-plugin-ceph: https://opendev.org/openstack/devstack-plugin-ceph
@@ -126,7 +131,8 @@
     group-vars:
       subnode:
         devstack_localrc:
-          REMOTE_CEPH: true
+          REMOTE_CEPH: True
+          CEPHADM_DEPLOY: True
           CINDER_CEPH_UUID: d531d2d4-3937-429c-b0c2-658fe41e82aa

 - job:
@@ -140,19 +146,6 @@
         devstack_localrc:
           TEST_MASTER: true

-- job:
-    name: devstack-plugin-ceph-multinode-tempest-cephadm
-    parent: devstack-plugin-ceph-multinode-tempest-py3
-    description: |
-      Integration tests that runs the ceph device plugin across multiple
-      nodes on py3.
-      The ceph deployment strategy used by this job is cephadm.
-    vars:
-      devstack_localrc:
-        USE_BLOCK_MIGRATION_FOR_LIVE_MIGRATION: false
-        CEPHADM_DEPLOY: true
-      tempest_concurrency: 1
-
 - project-template:
     name: devstack-plugin-ceph-tempest-jobs
     description: |
@@ -162,15 +155,13 @@
         - devstack-plugin-ceph-tempest-py3
         - devstack-plugin-ceph-tempest-ubuntu:
            voting: false
+        - devstack-plugin-ceph-multinode-tempest-py3
         - devstack-plugin-ceph-cephfs-native:
            irrelevant-files: *irrelevant-files
            voting: false
         - devstack-plugin-ceph-cephfs-nfs:
            irrelevant-files: *irrelevant-files
            voting: false
-        # - devstack-plugin-ceph-multinode-tempest-py3
-        # - devstack-plugin-ceph-multinode-tempest-cephadm:
-        #     voting: false
         # - devstack-plugin-ceph-master-tempest:
         #     voting: false
     gate:
devstack/files/debs/devstack-plugin-ceph

@@ -2,3 +2,5 @@ xfsprogs
 qemu-block-extra
 catatonit
 podman
+jq
+ceph-common
devstack/files/rpms/devstack-plugin-ceph

@@ -1,2 +1,5 @@
 xfsprogs
 dbus-tools
+podman
+jq
+ceph-common
devstack/lib/cephadm

@@ -31,7 +31,11 @@ DISABLE_CEPHADM_POST_DEPLOY=${DISABLE_CEPHADM_POST_DEPLOY:-False}
 ATTEMPTS=30
 CONTAINER_IMAGE=${CONTAINER_IMAGE:-'quay.io/ceph/ceph:v17.2.3'}
 DEVICES=()
+if [[ "$REMOTE_CEPH" = "False" ]]; then
     FSID=$(uuidgen)
+else
+    FSID=$(cat $CEPH_CONFIG | grep fsid | awk 'BEGIN { RS = "fsid = "} ; { print $0 }' - )
+fi
 KEY_EXPORT_DIR="/etc/ceph"
 KEYS=("client.openstack") # at least the client.openstack default key should be created
 MIN_OSDS=1
@@ -117,36 +121,16 @@ function export_spec {
     echo "Ceph cluster config exported: $EXPORT"
 }

-# Pre-install ceph: install podman
-function _install_podman {
-    # FIXME(vkmc) Check required for Ubuntu 20.04 LTS (current CI node)
-    # Remove when our CI is pushed to the next LTS version
-    if ! command -v podman &> /dev/null; then
-        if [[ $os_CODENAME =~ (focal) ]]; then
-            echo "deb https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/xUbuntu_20.04/ /" \
-                | sudo tee /etc/apt/sources.list.d/devel:kubic:libcontainers:stable.list
-            curl -L "https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/xUbuntu_20.04/Release.key" \
-                | sudo apt-key add -
-            sudo apt-get update
-            sudo apt-get -y upgrade
-        fi
-        install_package podman
-    fi
-}
-
 # Pre-install ceph: install required dependencies
 function install_deps {
-    install_package jq ceph-common
-    _install_podman
-    install_package python3-cephfs python3-prettytable python3-rados python3-rbd python3-requests
+    if [[ "$REMOTE_CEPH" == "False" ]]; then
+        install_package python3-cephfs python3-prettytable python3-rados python3-rbd python3-requests
+    fi
 }

 # Pre-install ceph: get cephadm binary
 function get_cephadm {
-    # NOTE(gouthamr): cephadm binary here is a python executable, and the
-    # $os_PACKAGE ("rpm") or $os_release (el9) doesn't really matter. There is
-    # no ubuntu/debian equivalent being published by the ceph community.
-    curl -O https://download.ceph.com/rpm-${CEPH_RELEASE}/el9/noarch/cephadm
+    curl -O https://raw.githubusercontent.com/ceph/ceph/"$CEPH_RELEASE"/src/cephadm/cephadm
     $SUDO mv cephadm $TARGET_BIN
     $SUDO chmod +x $TARGET_BIN/cephadm
     echo "[GET CEPHADM] cephadm is now available"
@@ -179,7 +163,7 @@ EOF
 function start_ceph {
     cluster=$(sudo cephadm ls | jq '.[]' | jq 'select(.name | test("^mon*")).fsid')
     if [ -z "$cluster" ]; then
-        $SUDO $CEPHADM --image "$CONTAINER_IMAGE" \
+        $SUDO "$CEPHADM" --image "$CONTAINER_IMAGE" \
             bootstrap \
             --fsid $FSID \
             --config "$BOOTSTRAP_CONFIG" \
@@ -237,7 +221,7 @@ function add_osds {
     # let's add some osds
     if [ -z "$DEVICES" ]; then
         echo "Using ALL available devices"
-        $SUDO $CEPHADM shell ceph orch apply osd --all-available-devices
+        $SUDO "$CEPHADM" shell ceph orch apply osd --all-available-devices
     else
         for item in "${DEVICES[@]}"; do
             echo "Creating osd $item on node $HOSTNAME"
@@ -247,7 +231,7 @@ function add_osds {
     fi

     while [ "$ATTEMPTS" -ne 0 ]; do
-        num_osds=$($SUDO $CEPHADM shell --fsid $FSID --config $CEPH_CONFIG \
+        num_osds=$($SUDO "$CEPHADM" shell --fsid $FSID --config $CEPH_CONFIG \
            --keyring $CEPH_KEYRING -- ceph -s -f json | jq '.osdmap | .num_up_osds')
         if [ "$num_osds" -ge "$MIN_OSDS" ]; then
             break;
@@ -303,6 +287,7 @@ function _create_key {
         --keyring $CEPH_KEYRING -- ceph auth get-or-create "$name" mgr "allow rw" mon "allow r" osd "$osd_caps" \
         -o "$KEY_EXPORT_DIR/ceph.$name.keyring"

+
     $SUDO chown ${STACK_USER}:$(id -g -n $whoami) \
         ${CEPH_CONF_DIR}/ceph.$name.keyring
 }
@@ -645,7 +630,6 @@ function configure_ceph {
     if is_ceph_enabled_for_service cinder; then
         POOLS+=($CINDER_CEPH_POOL)
         KEYS+=("client.$CINDER_CEPH_USER")
-        set_min_client_version
     fi

     if is_ceph_enabled_for_service c-bak; then
@@ -662,8 +646,10 @@ function configure_ceph {
     [ "$ENABLE_CEPH_RGW" == "True" ] && SERVICES+=('rgw')

     enable_services
+    if [[ "$REMOTE_CEPH" = "False" ]]; then
         add_pools
         create_keys
+    fi
     client_config
     import_libvirt_secret_ceph

@@ -680,8 +666,12 @@ function configure_ceph_manila {

 function cleanup_ceph {
     # Cleanup the service.
+    if [[ "$REMOTE_CEPH" == "True" ]]; then
+        echo "Remote Ceph cluster, skipping stop_ceph and delete_osd_dev"
+    else
         stop_ceph
         delete_osd_dev
+    fi
     # purge ceph config file and keys
     $SUDO rm -f ${CEPH_CONF_DIR}/*
     if is_ceph_enabled_for_service nova; then
devstack/settings

@@ -20,3 +20,4 @@ if [[ $ENABLE_CEPH_CINDER == "True" ]]; then
 fi

 CEPHADM_DEPLOY=$(trueorfalse False CEPHADM_DEPLOY)
+REMOTE_CEPH=$(trueorfalse False REMOTE_CEPH)
devstack/plugin.sh

@@ -40,10 +40,14 @@ elif [[ "$1" == "stack" && "$2" == "pre-install" ]]; then
         fi
     fi
 elif [[ "$1" == "stack" && "$2" == "install" ]]; then
-    if [[ "$CEPHADM_DEPLOY" = "True" ]]; then
+    if [[ "$CEPHADM_DEPLOY" = "True" && "$REMOTE_CEPH" = "False" ]]; then
         # Perform installation of service source
         echo_summary "[cephadm] Installing ceph"
         install_ceph
+        set_min_client_version
+    elif [[ "$CEPHADM_DEPLOY" = "True" && "$REMOTE_CEPH" = "True" ]]; then
+        echo "[CEPHADM] Remote Ceph: Skipping install"
+        get_cephadm
     else
         # FIXME(melwitt): This is a hack to get around a namespacing issue with
         # Paste and PasteDeploy. For stable/queens, we use the Pike UCA packages
|
Loading…
x
Reference in New Issue
Block a user