Add Manila support for Ubuntu
Change-Id: I74314bfcc6b52d524bb84f2232a988f275b9afbf
Co-Authored-By: John Spray <john.spray@redhat.com>
This commit is contained in:
@@ -9,7 +9,8 @@ As part of ```stack.sh```:
|
||||
|
||||
* Installs Ceph (client and server) packages
|
||||
* Creates a Ceph cluster for use with openstack services
|
||||
* Configures Ceph as the storage backend for Cinder, Cinder Backup, Nova & Glance services
|
||||
* Configures Ceph as the storage backend for Cinder, Cinder Backup, Nova,
|
||||
Manila, and Glance services
|
||||
* Supports Ceph cluster running local or remote to openstack services
|
||||
|
||||
As part of ```unstack.sh``` | ```clean.sh```:
|
||||
@@ -32,8 +33,8 @@ This plugin also gets used to configure Ceph as the storage backend for the upst
|
||||
ENABLE_CEPH_$SERVICE=False
|
||||
```
|
||||
|
||||
_where $SERVICE can be CINDER, C_BAK, GLANCE, or NOVA corresponding to
|
||||
Cinder, Cinder Backup, Glance, and Nova services respectively._
|
||||
_where $SERVICE can be CINDER, C_BAK, GLANCE, MANILA or NOVA corresponding to
|
||||
Cinder, Cinder Backup, Glance, Manila and Nova services respectively._
|
||||
|
||||
* Then run ```stack.sh``` and wait for the _magic_ to happen :)
|
||||
|
||||
|
||||
@@ -67,6 +67,13 @@ CINDER_CEPH_POOL_PGP=${CINDER_CEPH_POOL_PGP:-8}
|
||||
CINDER_CEPH_USER=${CINDER_CEPH_USER:-cinder}
|
||||
CINDER_CEPH_UUID=${CINDER_CEPH_UUID:-$(uuidgen)}
|
||||
|
||||
# Manila
|
||||
CEPHFS_POOL_PG=${CEPHFS_POOL_PG:-8}
|
||||
CEPHFS_METADATA_POOL=${CEPHFS_CEPH_POOL:-cephfs_metadata}
|
||||
CEPHFS_DATA_POOL=${CEPHFS_CEPH_POOL:-cephfs_data}
|
||||
MANILA_CEPH_USER=${MANILA_CEPH_USER:-manila}
|
||||
MDS_ID=${MDS_ID:-a}
|
||||
|
||||
# Set ``CEPH_REPLICAS`` to configure how many replicas are to be
|
||||
# configured for your Ceph cluster. By default we are configuring
|
||||
# only one replica since this is way less CPU and memory intensive. If
|
||||
@@ -187,6 +194,9 @@ if is_ceph_enabled_for_service nova; then
|
||||
sudo ceph osd pool delete $NOVA_CEPH_POOL $NOVA_CEPH_POOL \
|
||||
--yes-i-really-really-mean-it > /dev/null 2>&1
|
||||
fi
|
||||
if is_ceph_enabled_for_service manila; then
|
||||
sudo ceph auth del client.$MANILA_CEPH_USER
|
||||
fi
|
||||
}
|
||||
|
||||
function cleanup_ceph_embedded {
|
||||
@@ -244,6 +254,10 @@ sudo ceph-mon -c ${CEPH_CONF_FILE} --mkfs -i $(hostname) \
|
||||
--keyring /var/lib/ceph/tmp/keyring.mon.$(hostname)
|
||||
|
||||
if is_ubuntu; then
|
||||
# TODO (rraja): Do a Ceph version check. If version >= Infernalis, then
|
||||
# make sure that user "ceph" is the owner of files within
|
||||
# ${CEPH_DATA_DIR}.
|
||||
sudo chown -R ceph ${CEPH_DATA_DIR}
|
||||
sudo touch /var/lib/ceph/mon/ceph-$(hostname)/upstart
|
||||
sudo initctl emit ceph-mon id=$(hostname)
|
||||
else
|
||||
@@ -314,6 +328,12 @@ for rep in ${CEPH_REPLICAS_SEQ}; do
|
||||
fi
|
||||
done
|
||||
|
||||
# create a MDS
|
||||
sudo mkdir -p ${CEPH_DATA_DIR}/mds/ceph-${MDS_ID}
|
||||
sudo ceph -c ${CEPH_CONF_FILE} auth get-or-create mds.${MDS_ID} \
|
||||
mon 'allow profile mds ' osd 'allow rw' mds 'allow' | \
|
||||
sudo tee ${CEPH_DATA_DIR}/mds/ceph-${MDS_ID}/keyring
|
||||
|
||||
# bootstrap rados gateway
|
||||
sudo mkdir ${CEPH_DATA_DIR}/radosgw/ceph-radosgw.$(hostname)
|
||||
sudo ceph auth get-or-create client.radosgw.$(hostname) \
|
||||
@@ -329,6 +349,11 @@ else
|
||||
sudo touch \
|
||||
${CEPH_DATA_DIR}/radosgw/ceph-radosgw.$(hostname)/{sysvinit,done}
|
||||
fi
|
||||
|
||||
# TODO (rraja): Do a Ceph version check. If version >= Infernalis, then
|
||||
# make sure that user "ceph" is the owner of files within
|
||||
# ${CEPH_DATA_DIR}.
|
||||
sudo chown -R ceph ${CEPH_DATA_DIR}
|
||||
}
|
||||
|
||||
function configure_ceph_embedded_rgw {
|
||||
@@ -425,6 +450,29 @@ iniset $GLANCE_API_CONF glance_store rbd_store_user $GLANCE_CEPH_USER
|
||||
iniset $GLANCE_API_CONF glance_store rbd_store_pool $GLANCE_CEPH_POOL
|
||||
}
|
||||
|
||||
# configure_ceph_manila() - Prepare Ceph for Manila's CephFS native driver:
# create the CephFS data/metadata pools, the "cephfs" filesystem, and the
# Ceph client identity Manila authenticates with.
#
# Globals (read): CEPH_CONF_FILE, CEPH_CONF_DIR, CEPHFS_METADATA_POOL,
#                 CEPHFS_DATA_POOL, CEPHFS_POOL_PG, MANILA_CEPH_USER, STACK_USER
# Side effects:   creates Ceph pools and a CephFS filesystem; writes the
#                 client keyring under ${CEPH_CONF_DIR} and chowns it so the
#                 stack user can read it.
function configure_ceph_manila {
    # Dedicated metadata and data pools backing the "cephfs" filesystem.
    sudo ceph -c ${CEPH_CONF_FILE} osd pool create ${CEPHFS_METADATA_POOL} ${CEPHFS_POOL_PG}
    sudo ceph -c ${CEPH_CONF_FILE} osd pool create ${CEPHFS_DATA_POOL} ${CEPHFS_POOL_PG}
    sudo ceph -c ${CEPH_CONF_FILE} fs new cephfs ${CEPHFS_METADATA_POOL} ${CEPHFS_DATA_POOL}

    # Create (or fetch) the Manila client identity and persist its keyring so
    # the Manila service can authenticate against the cluster.
    sudo ceph -c ${CEPH_CONF_FILE} auth get-or-create client.${MANILA_CEPH_USER} \
        mon "allow *" \
        osd "allow rw" \
        mds "allow *" | \
        sudo tee ${CEPH_CONF_DIR}/ceph.client.${MANILA_CEPH_USER}.keyring
    # BUG FIX: the original wrote "$whoami" (an unset shell variable) where
    # the whoami *command* was intended; it only worked because id(1) with no
    # operand falls back to the invoking user. Use $(whoami) explicitly.
    sudo chown ${STACK_USER}:$(id -g -n $(whoami)) \
        ${CEPH_CONF_DIR}/ceph.client.${MANILA_CEPH_USER}.keyring

    # Enable snapshot support in CephFS (experimental at this Ceph release,
    # hence the --yes-i-really-mean-it confirmation flag).
    sudo ceph -c ${CEPH_CONF_FILE} mds set allow_new_snaps true --yes-i-really-mean-it
}
|
||||
|
||||
# configure_ceph_embedded_manila() - Tune replication for the CephFS pools
# when the Ceph cluster runs locally (embedded in this DevStack host).
#
# Globals (read): CEPH_CONF_FILE, CEPHFS_DATA_POOL, CEPHFS_METADATA_POOL,
#                 CEPH_REPLICAS, RULE_ID
function configure_ceph_embedded_manila {
    local pool
    # Apply the configured replica count to both CephFS pools.
    for pool in ${CEPHFS_DATA_POOL} ${CEPHFS_METADATA_POOL}; do
        sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${pool} size ${CEPH_REPLICAS}
    done
    # With more than one replica a dedicated CRUSH rule is in use; point
    # both pools at it.
    if [[ $CEPH_REPLICAS -ne 1 ]]; then
        for pool in ${CEPHFS_DATA_POOL} ${CEPHFS_METADATA_POOL}; do
            sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${pool} crush_ruleset ${RULE_ID}
        done
    fi
}
|
||||
|
||||
function configure_ceph_embedded_nova {
|
||||
# configure Nova service options, ceph pool, ceph user and ceph key
|
||||
sudo ceph -c ${CEPH_CONF_FILE} osd pool \
|
||||
@@ -509,13 +557,29 @@ install_package ceph-common
|
||||
|
||||
function install_ceph {
|
||||
if is_ubuntu; then
|
||||
wget -q -O- 'https://download.ceph.com/keys/release.asc' \
|
||||
| sudo apt-key add -
|
||||
# use wip-manila development repo until Ceph patches needed by Manila's Ceph
|
||||
# driver are available in a release package.
|
||||
if is_ceph_enabled_for_service manila; then
|
||||
wget -q -O- 'https://download.ceph.com/keys/autobuild.asc' \
|
||||
| sudo apt-key add -
|
||||
|
||||
echo deb http://ceph.com/debian-${CEPH_RELEASE} $(lsb_release -sc) main \
|
||||
| sudo tee /etc/apt/sources.list.d/ceph.list
|
||||
echo deb \
|
||||
http://gitbuilder.ceph.com/ceph-deb-$(lsb_release -sc)-x86_64-basic/ref/wip-manila \
|
||||
$(lsb_release -sc) main | sudo tee /etc/apt/sources.list.d/ceph.list
|
||||
else
|
||||
wget -q -O- 'https://download.ceph.com/keys/release.asc' \
|
||||
| sudo apt-key add -
|
||||
|
||||
echo deb http://ceph.com/debian-${CEPH_RELEASE} $(lsb_release -sc) main \
|
||||
| sudo tee /etc/apt/sources.list.d/ceph.list
|
||||
|
||||
fi
|
||||
|
||||
# Update package repo and restore global variable setting after use.
|
||||
local tmp_retry_update=$RETRY_UPDATE
|
||||
RETRY_UPDATE=True
|
||||
install_package ceph radosgw libnss3-tools
|
||||
RETRY_UPDATE=$tmp_retry_update
|
||||
else
|
||||
# Install directly from distro repos. See LP bug 1521073 for more details.
|
||||
# If distro doesn't carry latest ceph, users can install latest ceph repo
|
||||
@@ -528,10 +592,15 @@ fi
|
||||
# start_ceph() - Start running processes, including screen
|
||||
function start_ceph {
|
||||
if is_ubuntu; then
|
||||
# TODO (rraja): Do a Ceph version check. If version >= Infernalis, then
|
||||
# make sure that user "ceph" is the owner of files within
|
||||
# ${CEPH_DATA_DIR}.
|
||||
sudo chown -R ceph ${CEPH_DATA_DIR}
|
||||
sudo initctl emit ceph-mon id=$(hostname)
|
||||
for id in $(sudo ceph -c ${CEPH_CONF_FILE} osd ls); do
|
||||
sudo start ceph-osd id=${id}
|
||||
done
|
||||
sudo start ceph-mds id=${MDS_ID}
|
||||
else
|
||||
sudo service ceph start
|
||||
fi
|
||||
@@ -545,6 +614,7 @@ function stop_ceph {
|
||||
if is_ubuntu; then
|
||||
sudo service ceph-mon-all stop > /dev/null 2>&1
|
||||
sudo service ceph-osd-all stop > /dev/null 2>&1
|
||||
sudo service ceph-mds-all stop > /dev/null 2>&1
|
||||
else
|
||||
sudo service ceph stop > /dev/null 2>&1
|
||||
fi
|
||||
|
||||
@@ -3,6 +3,7 @@
|
||||
ENABLE_CEPH_CINDER=$(trueorfalse True ENABLE_CEPH_CINDER)
|
||||
ENABLE_CEPH_C_BAK=$(trueorfalse True ENABLE_CEPH_C_BAK)
|
||||
ENABLE_CEPH_GLANCE=$(trueorfalse True ENABLE_CEPH_GLANCE)
|
||||
ENABLE_CEPH_MANILA=$(trueorfalse True ENABLE_CEPH_MANILA)
|
||||
ENABLE_CEPH_NOVA=$(trueorfalse True ENABLE_CEPH_NOVA)
|
||||
|
||||
if [[ $ENABLE_CEPH_CINDER == "True" ]]; then
|
||||
|
||||
@@ -40,6 +40,10 @@ elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then
|
||||
echo_summary "Configuring libvirt secret"
|
||||
import_libvirt_secret_ceph
|
||||
fi
|
||||
if is_ceph_enabled_for_service manila; then
|
||||
echo_summary "Configuring Manila for Ceph"
|
||||
configure_ceph_manila
|
||||
fi
|
||||
|
||||
if [ "$REMOTE_CEPH" = "False" ]; then
|
||||
if is_ceph_enabled_for_service glance; then
|
||||
@@ -54,6 +58,10 @@ elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then
|
||||
echo_summary "Configuring Cinder for Ceph"
|
||||
configure_ceph_embedded_cinder
|
||||
fi
|
||||
if is_ceph_enabled_for_service manila; then
|
||||
echo_summary "Configuring Manila for Ceph"
|
||||
configure_ceph_embedded_manila
|
||||
fi
|
||||
# FIXME: Fix this once radosgw service is running
|
||||
|
||||
#echo_summary "Configuring Rados Gateway with Keystone for Swift"
|
||||
|
||||
@@ -17,3 +17,18 @@ CEPH_LOOPBACK_DISK_SIZE=8G
|
||||
# Source plugin's lib/ceph
|
||||
|
||||
source $CEPH_PLUGIN_DIR/lib/ceph
|
||||
|
||||
# Set the Manila-related global variables consumed by Manila's own DevStack
# plugin so it configures a single CephFS-native backend.
if is_ceph_enabled_for_service manila; then
    # Only the CephFS protocol is served; shares default to the CephFS type.
    ENABLED_SHARE_PROTOCOLS="CEPHFS"
    MANILA_DEFAULT_SHARE_TYPE=cephfstype

    # One backend and one matching config group, both named "cephfsnative1".
    MANILA_ENABLED_BACKENDS=cephfsnative1
    MANILA_CONFIGURE_GROUPS=cephfsnative1

    # Per-backend driver options for the cephfsnative1 group.
    MANILA_OPTGROUP_cephfsnative1_share_driver=manila.share.drivers.cephfs.CephFSNativeDriver
    MANILA_OPTGROUP_cephfsnative1_driver_handles_share_servers=False
    MANILA_OPTGROUP_cephfsnative1_share_backend_name=CEPHFSNATIVE1
    MANILA_OPTGROUP_cephfsnative1_cephfs_conf_path=${CEPH_CONF_FILE}
    MANILA_OPTGROUP_cephfsnative1_cephfs_auth_id=${MANILA_CEPH_USER}
fi
|
||||
|
||||
Reference in New Issue
Block a user