ca2486efb4

Manila supports using a standalone NFS-Ganesha server as well as a ceph
orchestrator deployed NFS-Ganesha cluster ("ceph nfs service"). We've only
ever allowed using ceph orch deployed NFS with ceph orch deployed clusters
through this devstack plugin. With this change, the plugin can optionally
deploy a standalone NFS-Ganesha service with a ceph orch deployed ceph
cluster. This will greatly simplify testing when we sunset the package
based installation/deployment of ceph.

Depends-On: I2198eee3892b2bb0eb835ec66e21b708152b33a9
Change-Id: If983bb5d5a5fc0c16c1cead84b5fa30ea961d21b
Implements: bp/cephadm-deploy
Signed-off-by: Goutham Pacha Ravi <gouthampravi@gmail.com>
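As a rough usage sketch (not part of this change; the repository URLs and the exact set of options needed are assumptions, while CEPHADM_DEPLOY, CEPHADM_DEPLOY_NFS and MANILA_CEPH_DRIVER are the toggles used in the settings below), a local.conf exercising the new standalone-Ganesha path might look like:

    [[local|localrc]]
    # assumed plugin locations; adjust for your environment
    enable_plugin manila https://opendev.org/openstack/manila
    enable_plugin devstack-plugin-ceph https://opendev.org/openstack/devstack-plugin-ceph
    # deploy the ceph cluster with cephadm / the ceph orchestrator
    CEPHADM_DEPLOY=True
    # False = pair the cephadm deployed cluster with a standalone NFS-Ganesha
    # server; True = use the ceph orch deployed "ceph nfs service"
    CEPHADM_DEPLOY_NFS=False
    # use manila's CephFS driver with the NFS protocol helper
    MANILA_CEPH_DRIVER=cephfsnfs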
# Devstack settings

# CEPH_PLUGIN_DIR contains the path to devstack-plugin-ceph/devstack directory
CEPH_PLUGIN_DIR=$(readlink -f $(dirname ${BASH_SOURCE[0]}))

# Add ceph plugin specific settings
# NOTE: Currently these are redundant since the ceph
# plugin job definition defines them already, but
# once DEVSTACK_GATE_CEPH is removed, these
# won't be redundant, so it's ok to have these
# anyway.

if [ "$ENABLE_CEPH_ISCSI" = "True" ]; then
|
|
TEMPEST_STORAGE_PROTOCOL=iSCSI
|
|
else
|
|
TEMPEST_STORAGE_PROTOCOL=ceph
|
|
fi
|
|
# VOLUME_BACKING_FILE_SIZE should be sourced from devstack/stackrc but define
# a default here if not set already.
VOLUME_BACKING_FILE_SIZE=${VOLUME_BACKING_FILE_SIZE:-8GB}
CEPH_LOOPBACK_DISK_SIZE=${CEPH_LOOPBACK_DISK_SIZE:-$VOLUME_BACKING_FILE_SIZE}

# Disable manage/unmanage snapshot tests on Tempest
TEMPEST_VOLUME_MANAGE_SNAPSHOT=False

# Source plugin's lib/cephadm or lib/ceph
# depending on chosen deployment method
if [[ "$CEPHADM_DEPLOY" = "True" ]]; then
    source $CEPH_PLUGIN_DIR/lib/cephadm
else
    source $CEPH_PLUGIN_DIR/lib/ceph
fi

# Set Manila related global variables used by Manila's DevStack plugin.
if (is_ceph_enabled_for_service manila); then
    if [[ $MANILA_CEPH_DRIVER == 'cephfsnative' ]]; then
        MANILA_DHSS=$(trueorfalse False MANILA_DHSS)
        MANILA_ENABLED_SHARE_PROTOCOLS=CEPHFS
        MANILA_DEFAULT_SHARE_TYPE=cephfstype
        MANILA_DEFAULT_SHARE_TYPE_EXTRA_SPECS='snapshot_support=True create_share_from_snapshot_support=True'

        MANILA_ENABLED_BACKENDS=cephfsnative1
        MANILA_CONFIGURE_GROUPS=cephfsnative1

        MANILA_OPTGROUP_cephfsnative1_share_driver=manila.share.drivers.cephfs.cephfs_native.CephFSNativeDriver
        MANILA_OPTGROUP_cephfsnative1_driver_handles_share_servers=$MANILA_DHSS
        MANILA_OPTGROUP_cephfsnative1_share_backend_name=CEPHFSNATIVE1
        MANILA_OPTGROUP_cephfsnative1_cephfs_conf_path=${CEPH_CONF_FILE}
        MANILA_OPTGROUP_cephfsnative1_cephfs_auth_id=${MANILA_CEPH_USER}
    elif [[ $MANILA_CEPH_DRIVER == 'cephfsnfs' ]]; then
        MANILA_DHSS=$(trueorfalse False MANILA_DHSS)
        MANILA_ENABLED_SHARE_PROTOCOLS=NFS
        MANILA_DEFAULT_SHARE_TYPE=cephfsnfstype
        MANILA_DEFAULT_SHARE_TYPE_EXTRA_SPECS='snapshot_support=True create_share_from_snapshot_support=True'

        MANILA_ENABLED_BACKENDS=cephfsnfs1
        MANILA_CONFIGURE_GROUPS=cephfsnfs1

        MANILA_OPTGROUP_cephfsnfs1_share_driver=manila.share.drivers.cephfs.driver.CephFSDriver
        MANILA_OPTGROUP_cephfsnfs1_driver_handles_share_servers=$MANILA_DHSS
        MANILA_OPTGROUP_cephfsnfs1_share_backend_name=CEPHFSNFS1
        MANILA_OPTGROUP_cephfsnfs1_cephfs_conf_path=${CEPH_CONF_FILE}
        MANILA_OPTGROUP_cephfsnfs1_cephfs_auth_id=${MANILA_CEPH_USER}
        MANILA_OPTGROUP_cephfsnfs1_cephfs_protocol_helper_type=NFS

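        # When the ceph orchestrator deploys the NFS service ("ceph nfs
        # service"), point the driver at that NFS cluster; otherwise use a
        # standalone NFS-Ganesha server running on this host.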
        if [[ $CEPHADM_DEPLOY_NFS == "True" ]]; then
            MANILA_OPTGROUP_cephfsnfs1_cephfs_nfs_cluster_id=${FSNAME}
        else
            MANILA_OPTGROUP_cephfsnfs1_cephfs_ganesha_server_ip=$HOST_IP
            MANILA_CEPH_GANESHA_RADOS_STORE=$(trueorfalse False MANILA_CEPH_GANESHA_RADOS_STORE)
            if [ "$MANILA_CEPH_GANESHA_RADOS_STORE" = "True" ]; then
                MANILA_OPTGROUP_cephfsnfs1_ganesha_rados_store_enable=${MANILA_CEPH_GANESHA_RADOS_STORE}
                MANILA_OPTGROUP_cephfsnfs1_ganesha_rados_store_pool_name=${CEPHFS_DATA_POOL}
            fi
        fi
    fi
fi