[WIP][DNM] Remote Ceph with cephadm
Add podman as a pre-install dependency. Add REMOTE_CEPH support to the CEPHADM deployment. This is part of an effort to get multinode Ceph with Manila testing running locally, and hopefully in CI at a future date.

Change-Id: I84249ae268dfe00a112c67e5170b679acb318a25
parent d1c7a2db8b
commit c8d3168fa2
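
For anyone trying this out locally, the intended usage reconstructed from the changes below is roughly the following local.conf fragment on a node that consumes an already-provisioned cluster; the IP value is a placeholder, not part of this patch:

    [[local|localrc]]
    enable_plugin devstack-plugin-ceph https://opendev.org/openstack/devstack-plugin-ceph
    CEPHADM_DEPLOY=True
    REMOTE_CEPH=True
    CEPH_IP=192.0.2.10   # node where cephadm bootstrapped the cluster
    CINDER_CEPH_UUID=d531d2d4-3937-429c-b0c2-658fe41e82aa   # must match the Ceph node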
.zuul.yaml (108 lines changed)
@@ -36,18 +36,6 @@
         (^tempest\.(api|scenario\.test_encrypted_cinder_volumes|scenario\.test_volume|scenario\.test_shelve_instance)|(^cinder_tempest_plugin))
       tempest_test_blacklist: '{{ ansible_user_dir }}/{{ zuul.projects["opendev.org/openstack/devstack-plugin-ceph"].src_dir }}/tempest_skiplist.txt'
 
-- job:
-    name: devstack-plugin-ceph-tempest-ubuntu
-    parent: devstack-plugin-ceph-tempest-py3-base
-    description: |
-      Integration tests that runs with the ceph devstack plugin using ceph
-      packages from the distro.
-
-      This job enable the multiattach feature enable from stein on.
-    vars:
-      devstack_localrc:
-        ENABLE_VOLUME_MULTIATTACH: true
-
 - job:
     name: devstack-plugin-ceph-tempest-py3
     parent: devstack-plugin-ceph-tempest-py3-base
@@ -59,65 +47,6 @@
         DISABLE_CEPHADM_POST_DEPLOY: True
         CEPHADM_DEPLOY: true
-
-- job:
-    name: devstack-plugin-ceph-compute-local-ephemeral
-    parent: devstack-plugin-ceph-tempest-py3-base
-    description: |
-      Integration tests that runs with the ceph devstack plugin and py3.
-
-      This job does not configure Nova to use rbd for ephemeral storage. It
-      also enables the direct download of images via rbd into the local
-      imagecache for Nova.
-    vars:
-      devstack_local_conf:
-        post-config:
-          $NOVA_CONF:
-            glance:
-              enable_rbd_download: True
-              rbd_user: glance
-              rbd_ceph_conf: /etc/ceph/ceph.conf
-              rbd_pool: images
-            libvirt:
-              images_type: default
-
-- job:
-    name: devstack-plugin-ceph-cephfs-native
-    description: |
-      Runs manila tempest plugin tests with Native CephFS as a manila back
-      end (DHSS=False)
-    parent: manila-tempest-plugin-cephfs-native-cephadm
-
-- job:
-    name: devstack-plugin-ceph-cephfs-nfs
-    description: |
-      Runs manila tempest plugin tests with CephFS via NFS-Ganesha as a manila
-      back end (DHSS=False)
-    parent: manila-tempest-plugin-cephfs-nfs
-    nodeset: devstack-single-node-centos-9-stream
-    vars:
-      # TODO(gouthamr): some tests are disabled due to bugs
-      # IPv6 Tests: https://bugs.launchpad.net/manila/+bug/1998489
-      # snapshot clone fs sync: https://bugs.launchpad.net/manila/+bug/1989273
-      tempest_exclude_regex: "\
-        (^manila_tempest_tests.tests.scenario.*IPv6.*)|\
-        (^manila_tempest_tests.tests.scenario.test_share_basic_ops.TestShareBasicOpsNFS.test_write_data_to_share_created_from_snapshot)"
-      devstack_localrc:
-        MANILA_OPTGROUP_cephfsnfs_cephfs_ganesha_server_ip: "{{ hostvars[inventory_hostname]['nodepool']['private_ipv4'] }}"
-        CEPH_RELEASE: "quincy"
-        MANILA_SETUP_IPV6: false
-        NEUTRON_CREATE_INITIAL_NETWORKS: true
-        IP_VERSION: 4
-
-
-
-- job:
-    name: devstack-plugin-ceph-tempest-fedora-latest
-    parent: devstack-plugin-ceph-tempest-py3
-    description: |
-      Integration tests that runs with the ceph devstack plugin on Fedora.
-    nodeset: devstack-single-node-fedora-latest
-    voting: false
 
 - job:
     name: devstack-plugin-ceph-multinode-tempest-py3
     parent: tempest-multinode-full-py3
@@ -130,11 +59,17 @@
     timeout: 10800
     voting: false
     vars:
       configure_swap_size: 8192
+      tempest_concurrency: 3
       devstack_localrc:
         ENABLE_FILE_INJECTION: false
         ENABLE_VOLUME_MULTIATTACH: true
         TEMPEST_RUN_VALIDATION: false
+        CINDER_CEPH_UUID: d531d2d4-3937-429c-b0c2-658fe41e82aa
+        DISABLE_CEPHADM_POST_DEPLOY: True
+        CEPHADM_DEPLOY: true
+        MYSQL_REDUCE_MEMORY: True
+        REMOTE_CEPH: False
       devstack_plugins:
         devstack-plugin-ceph: https://opendev.org/openstack/devstack-plugin-ceph
       devstack_services:
@@ -149,19 +84,11 @@
     group-vars:
       subnode:
         devstack_localrc:
-          REMOTE_CEPH: true
+          REMOTE_CEPH: True
+          CEPH_IP: "{{ hostvars['controller']['nodepool']['private_ipv4'] }}"
+          CINDER_CEPH_UUID: d531d2d4-3937-429c-b0c2-658fe41e82aa
 
-- job:
-    name: devstack-plugin-ceph-master-tempest
-    parent: devstack-plugin-ceph-tempest-py3-base
-    description: |
-      Integration tests that runs with the ceph devstack plugin
-      using Ceph master as the backend.
-    branches: master
-    vars:
-      devstack_localrc:
-        TEST_MASTER: true
-        DISABLE_CEPHADM_POST_DEPLOY: True
-        CEPHADM_DEPLOY: true
-
 - job:
     name: devstack-plugin-ceph-multinode-tempest-cephadm
@@ -183,20 +110,11 @@
     check:
       jobs:
         - devstack-plugin-ceph-tempest-py3
-        - devstack-plugin-ceph-tempest-ubuntu:
+        - devstack-plugin-ceph-multinode-tempest-py3:
+            voting: false
-        - devstack-plugin-ceph-cephfs-native:
-            irrelevant-files: *irrelevant-files
-        - devstack-plugin-ceph-multinode-tempest-cephadm:
-            voting: false
-        - devstack-plugin-ceph-cephfs-nfs:
-            irrelevant-files: *irrelevant-files
-            voting: false
         # - devstack-plugin-ceph-tempest-fedora-latest
         # - devstack-plugin-ceph-multinode-tempest-py3
         # - devstack-plugin-ceph-multinode-tempest-cephadm:
         #     voting: false
         # - devstack-plugin-ceph-master-tempest:
         #     voting: false
 
     gate:
       jobs:
         - devstack-plugin-ceph-tempest-py3
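
Net effect of the zuul changes for the multinode job: the controller keeps REMOTE_CEPH: False and deploys the cluster, while the subnode gets REMOTE_CEPH: True with CEPH_IP pointing at the controller, so it consumes that cluster over SSH. A condensed per-node view (a summary, not a verbatim file):

    # controller: deploys Ceph via cephadm
    CEPHADM_DEPLOY=true
    REMOTE_CEPH=False
    CINDER_CEPH_UUID=d531d2d4-3937-429c-b0c2-658fe41e82aa

    # subnode: consumes the controller's cluster
    REMOTE_CEPH=True
    CEPH_IP=<controller private IPv4>
    CINDER_CEPH_UUID=d531d2d4-3937-429c-b0c2-658fe41e82aa   # same UUID on both nodes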

@@ -1,2 +1,3 @@
 xfsprogs
 dbus-tools
+podman

@@ -26,6 +26,8 @@ CEPH_KEYRING="/etc/ceph/ceph.client.admin.keyring"
 TARGET_BIN=/usr/bin
 # TOGGLED IN THE CI TO SAVE RESOURCES
 DISABLE_CEPHADM_POST_DEPLOY=${DISABLE_CEPHADM_POST_DEPLOY:-False}
+SSH_USER="stack"
+TIMEOUT=${TIMEOUT:-30}
 
 # DEFAULT OPTIONS
 ATTEMPTS=30
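
The new TIMEOUT knob bounds the readiness wait in ceph_is_ready further down: that loop sleeps one second per attempt, so the default of 30 allows roughly 30 seconds. Assuming a slow remote bootstrap, a larger value could be exported before stacking, e.g.:

    # assumption: TIMEOUT is only consumed by ceph_is_ready (see below)
    export TIMEOUT=600   # wait up to ~10 minutes for the remote cluster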
@@ -98,7 +100,7 @@ NOVA_CEPH_POOL=${NOVA_CEPH_POOL:-vms}
 function set_debug {
     if [ "$DEBUG" -eq 1 ]; then
         echo "[CEPHADM] Enabling Debug mode"
-        $SUDO "$CEPHADM" shell --fsid $FSID --config $CEPH_CONFIG \
+        $CEPHADM shell --fsid $FSID --config $CEPH_CONFIG \
             --keyring $CEPH_KEYRING -- ceph config set mgr mgr/cephadm/log_to_cluster_level debug
         echo "[CEPHADM] See debug logs running: ceph -W cephadm --watch-debug"
     fi
@@ -106,38 +108,20 @@ function set_debug {
 
 # Admin: check ceph cluster status
 function check_cluster_status {
-    $SUDO "$CEPHADM" shell --fsid $FSID --config $CEPH_CONFIG \
+    $CEPHADM shell --fsid $FSID --config $CEPH_CONFIG \
         --keyring $CEPH_KEYRING -- ceph -s -f json-pretty
 }
 
 # Admin: export ceph cluster config spec
 function export_spec {
-    $SUDO "$CEPHADM" shell --fsid $FSID --config $CEPH_CONFIG \
+    $CEPHADM shell --fsid $FSID --config $CEPH_CONFIG \
         --keyring $CEPH_KEYRING -- ceph orch ls --export > "$EXPORT"
     echo "Ceph cluster config exported: $EXPORT"
 }
 
-# Pre-install ceph: install podman
-function _install_podman {
-    # FIXME(vkmc) Check required for Ubuntu 20.04 LTS (current CI node)
-    # Remove when our CI is pushed to the next LTS version
-    if ! command -v podman &> /dev/null; then
-        if [[ $os_CODENAME =~ (focal) ]]; then
-            echo "deb https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/xUbuntu_20.04/ /" \
-                | sudo tee /etc/apt/sources.list.d/devel:kubic:libcontainers:stable.list
-            curl -L "https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/xUbuntu_20.04/Release.key" \
-                | sudo apt-key add -
-            sudo apt-get update
-            sudo apt-get -y upgrade
-        fi
-        install_package podman
-    fi
-}
-
 # Pre-install ceph: install required dependencies
 function install_deps {
     install_package jq ceph-common
-    _install_podman
     install_package python3-cephfs python3-prettytable python3-rados python3-rbd python3-requests
 }
@@ -149,7 +133,7 @@ function get_cephadm {
     echo "[GET CEPHADM] cephadm is now available"
 
     if [ -z "$CEPHADM" ]; then
-        CEPHADM=${TARGET_BIN}/cephadm
+        CEPHADM="${SUDO} ${TARGET_BIN}/cephadm"
     fi
 }
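
This is the pattern behind every $SUDO removal in this file: CEPHADM now carries its own privilege wrapper, and the REMOTE_CEPH path later swaps in an SSH wrapper, so the variable is expanded unquoted and word-splits into the command line. A minimal sketch of the two modes, taken from this change:

    # local mode (get_cephadm / stop_ceph):
    CEPHADM="${SUDO} ${TARGET_BIN}/cephadm"
    # remote mode (post-config hook at the end of this change, REMOTE_CEPH=True):
    CEPHADM="ssh $SSH_USER@$CEPH_IP sudo ${TARGET_BIN}/cephadm"
    # call sites stay identical either way:
    $CEPHADM shell --fsid $FSID --config $CEPH_CONFIG -- ceph -s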
@@ -176,7 +160,7 @@ EOF
 function start_ceph {
     cluster=$(sudo cephadm ls | jq '.[]' | jq 'select(.name | test("^mon*")).fsid')
     if [ -z "$cluster" ]; then
-        $SUDO $CEPHADM --image "$CONTAINER_IMAGE" \
+        $CEPHADM --image "$CONTAINER_IMAGE" \
             bootstrap \
             --fsid $FSID \
             --config "$BOOTSTRAP_CONFIG" \
@@ -234,17 +218,17 @@ function add_osds {
     # let's add some osds
     if [ -z "$DEVICES" ]; then
         echo "Using ALL available devices"
-        $SUDO $CEPHADM shell ceph orch apply osd --all-available-devices
+        $CEPHADM shell ceph orch apply osd --all-available-devices
     else
         for item in "${DEVICES[@]}"; do
            echo "Creating osd $item on node $HOSTNAME"
-            $SUDO "$CEPHADM" shell --fsid $FSID --config $CEPH_CONFIG \
+            $CEPHADM shell --fsid $FSID --config $CEPH_CONFIG \
                 --keyring $CEPH_KEYRING -- ceph orch daemon add osd "$HOSTNAME:$item"
         done
     fi
 
     while [ "$ATTEMPTS" -ne 0 ]; do
-        num_osds=$($SUDO $CEPHADM shell --fsid $FSID --config $CEPH_CONFIG \
+        num_osds=$($CEPHADM shell --fsid $FSID --config $CEPH_CONFIG \
             --keyring $CEPH_KEYRING -- ceph -s -f json | jq '.osdmap | .num_up_osds')
         if [ "$num_osds" -ge "$MIN_OSDS" ]; then
            break;
@@ -263,12 +247,12 @@ function add_pools {
     [ "${#POOLS[@]}" -eq 0 ] && return;
 
     for pool in "${POOLS[@]}"; do
-        $SUDO "$CEPHADM" shell --fsid $FSID --config $CEPH_CONFIG \
+        $CEPHADM shell --fsid $FSID --config $CEPH_CONFIG \
             --keyring $CEPH_KEYRING -- ceph osd pool create "$pool" "$DEFAULT_PG_NUM" \
             "$DEFAULT_PGP_NUM" replicated --autoscale-mode on
 
         # set the application to the pool (which also means rbd init the pool)
-        $SUDO "$CEPHADM" shell --fsid $FSID --config $CEPH_CONFIG \
+        $CEPHADM shell --fsid $FSID --config $CEPH_CONFIG \
             --keyring $CEPH_KEYRING -- ceph osd pool application enable "$pool" rbd
     done
 }
@@ -296,7 +280,7 @@ function _create_key {
         osd_caps="allow class-read object_prefix rbd_children, $caps"
     fi
 
-    $SUDO "$CEPHADM" shell -v "$KEY_EXPORT_DIR:$KEY_EXPORT_DIR" --fsid $FSID --config $CEPH_CONFIG \
+    $CEPHADM shell -v "$KEY_EXPORT_DIR:$KEY_EXPORT_DIR" --fsid $FSID --config $CEPH_CONFIG \
         --keyring $CEPH_KEYRING -- ceph auth get-or-create "$name" mgr "allow rw" mon "allow r" osd "$osd_caps" \
         -o "$KEY_EXPORT_DIR/ceph.$name.keyring"
@@ -318,7 +302,7 @@ function cephfs_config {
     # - $FSNAME.FSNAME.data
     # - $FSNAME.FSNAME.meta
     # and the mds daemon is deployed
-    $SUDO "$CEPHADM" shell --fsid $FSID --config $CEPH_CONFIG \
+    $CEPHADM shell --fsid $FSID --config $CEPH_CONFIG \
         --keyring $CEPH_KEYRING -- ceph fs volume create "$FSNAME"
 }
@@ -326,7 +310,7 @@ function cephfs_config {
 function ceph_nfs_config {
     # (fpantano) TODO: Build an ingress daemon on top of this
     echo "[CEPHADM] Deploy nfs.$FSNAME backend"
-    $SUDO "$CEPHADM" shell --fsid $FSID --config $CEPH_CONFIG \
+    $CEPHADM shell --fsid $FSID --config $CEPH_CONFIG \
         --keyring $CEPH_KEYRING -- ceph orch apply nfs \
         "$FSNAME" --placement="$HOSTNAME" --port $NFS_PORT
 }
@@ -368,7 +352,7 @@ function set_config_key {
     local section=$1
     local key=$2
     local value=$3
-    $SUDO "$CEPHADM" shell --fsid $FSID --config $CEPH_CONFIG \
+    $CEPHADM shell --fsid $FSID --config $CEPH_CONFIG \
         ceph config set ${section} ${key} ${value}
 }
@@ -407,7 +391,7 @@ function configure_rgw_ceph_section {
 function rgw {
     configure_ceph_embedded_rgw
 
-    $SUDO "$CEPHADM" shell --fsid $FSID --config $CEPH_CONFIG \
+    $CEPHADM shell --fsid $FSID --config $CEPH_CONFIG \
         --keyring $CEPH_KEYRING -- ceph orch apply rgw default default default default \
         "--placement=$HOSTNAME count:1" --port "$RGW_PORT"
@@ -490,7 +474,7 @@ function stop_ceph {
 
     if ! [ -x "$CEPHADM" ]; then
         get_cephadm
-        CEPHADM=${TARGET_BIN}/cephadm
+        CEPHADM="${SUDO} ${TARGET_BIN}/cephadm"
     fi
 
     cluster_deleted=0
@@ -523,6 +507,20 @@ function install_ceph {
     bootstrap_config
     get_cephadm
     start_ceph
+    $SUDO cat >> "CEPH_CLUSTER_IS_READY.txt" <<-EOF
+Cluster is provisioned.
+EOF
 }
 
+function ceph_is_ready {
+    echo "Waiting the cluster to be up"
+    until ssh $SSH_USER@$CEPH_IP ls /etc/ceph/CEPH_CLUSTER_IS_READY.txt &> /dev/null; do
+        sleep 1
+        echo -n .
+        (( TIMEOUT-- ))
+        [[ "$TIMEOUT" -eq 0 ]] && exit 1
+    done
+    echo
+}
+
 function config_glance {
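
ceph_is_ready has the consumer node poll the Ceph node over SSH for the marker file dropped by install_ceph; note that the two ends must agree on the marker's location, since it is written with a relative path but polled at /etc/ceph/CEPH_CLUSTER_IS_READY.txt. The loop also assumes the stack user can already reach $CEPH_IP without a password. That setup is outside this patch, but amounts to something like:

    # assumed one-time SSH setup, not part of this change:
    ssh-keygen -t ed25519 -N '' -f ~/.ssh/id_ed25519
    ssh-copy-id stack@$CEPH_IP
    ssh stack@$CEPH_IP true   # must succeed without prompting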
@@ -589,7 +587,7 @@ function config_nova {
 
 function set_min_client_version {
     if [ ! -z "$CEPH_MIN_CLIENT_VERSION" ]; then
-        $SUDO "$CEPHADM" shell --fsid $FSID --config $CEPH_CONFIG \
+        $CEPHADM shell --fsid $FSID --config $CEPH_CONFIG \
             --keyring $CEPH_KEYRING -- ceph osd set-require-min-compat-client ${CEPH_MIN_CLIENT_VERSION}
     fi
 }
@@ -661,6 +659,9 @@ function configure_ceph {
     enable_services
     add_pools
     create_keys
+    if [[ "$REMOTE_CEPH" = "True" ]]; then
+        scp -r stack@$CEPH_IP:/etc/ceph /etc/ceph
+    fi
     client_config
     import_libvirt_secret_ceph
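
With REMOTE_CEPH enabled, the consumer node pulls the cluster's config and keyrings over scp instead of generating them locally. A quick sanity check on that node after this step (assuming the plugin's default pool names) might be:

    sudo ceph -s          # reads the copied /etc/ceph/ceph.conf and admin keyring
    sudo rbd ls volumes   # 'volumes' is the plugin's default Cinder pool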
@@ -677,20 +678,25 @@ function configure_ceph_manila {
 
 function cleanup_ceph {
     # Cleanup the service.
-    stop_ceph
-    delete_osd_dev
+    if [[ "$REMOTE_CEPH" == "True" ]]; then
+        echo "Remote Ceph cluster, skip stop_ceph and delete_osd_dev"
+    else
+        stop_ceph
+        delete_osd_dev
+    fi
     # purge ceph config file and keys
     $SUDO rm -f ${CEPH_CONF_DIR}/*
+    $SUDO rm -f CEPH_CLUSTER_IS_READY.txt
     if is_ceph_enabled_for_service nova; then
         _undefine_virsh_secret
     fi
 }
 
 function disable_cephadm {
-    $SUDO "$CEPHADM" shell --fsid $FSID --config $CEPH_CONFIG \
+    $CEPHADM shell --fsid $FSID --config $CEPH_CONFIG \
         --keyring $CEPH_KEYRING -- ceph orch set backend
 
-    $SUDO "$CEPHADM" shell --fsid $FSID --config $CEPH_CONFIG \
+    $CEPHADM shell --fsid $FSID --config $CEPH_CONFIG \
         --keyring $CEPH_KEYRING -- ceph mgr module disable cephadm
 }

@@ -20,3 +20,4 @@ if [[ $ENABLE_CEPH_CINDER == "True" ]]; then
 fi
 
 CEPHADM_DEPLOY=$(trueorfalse False CEPHADM_DEPLOY)
+REMOTE_CEPH=$(trueorfalse False REMOTE_CEPH)
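
For readers unfamiliar with devstack, trueorfalse normalizes a variable to the literal strings True or False, which is why the hooks below can test plain string equality. Roughly:

    REMOTE_CEPH=$(trueorfalse False REMOTE_CEPH)
    # truthy spellings such as 1/yes/true/True collapse to "True";
    # otherwise the supplied default ("False" here) applies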

@@ -40,10 +40,12 @@ elif [[ "$1" == "stack" && "$2" == "pre-install" ]]; then
         fi
     fi
 elif [[ "$1" == "stack" && "$2" == "install" ]]; then
-    if [[ "$CEPHADM_DEPLOY" = "True" ]]; then
+    if [[ "$CEPHADM_DEPLOY" = "True" && "$REMOTE_CEPH" = "False" ]]; then
         # Perform installation of service source
         echo_summary "[cephadm] Installing ceph"
         install_ceph
+    elif [[ "$CEPHADM_DEPLOY" = "True" && "$REMOTE_CEPH" = "True" ]]; then
+        echo "[CEPHADM] Remote Ceph: Skipping install"
     else
         # FIXME(melwitt): This is a hack to get around a namespacing issue with
         # Paste and PasteDeploy. For stable/queens, we use the Pike UCA packages
@@ -59,6 +61,12 @@ elif [[ "$1" == "stack" && "$2" == "install" ]]; then
     fi
 elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then
     if [[ "$CEPHADM_DEPLOY" = "True" ]]; then
+        if [[ "$REMOTE_CEPH" = "True" ]]; then
+            export CEPHADM="ssh $SSH_USER@$CEPH_IP sudo ${TARGET_BIN}/cephadm"
+            ceph_is_ready
+        else
+            export CEPHADM="${SUDO} ${TARGET_BIN}/cephadm"
+        fi
         # Configure after the other layer 1 and 2 services have been configured
         echo_summary "[cephadm] Configuring additional Ceph services"
         configure_ceph
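
The exported CEPHADM is what lets the shared cephadm functions above run unchanged against a remote node: the variable expands and word-splits at each call site. Illustratively (placeholder IP):

    # with REMOTE_CEPH=True, a call such as
    #     $CEPHADM shell --fsid $FSID -- ceph orch ls
    # effectively runs:
    ssh stack@192.0.2.10 sudo /usr/bin/cephadm shell --fsid $FSID -- ceph orch ls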