DevStack plugin to configure a Ceph backend.

#!/bin/bash
#
# lib/ceph
# Functions to control the configuration
# and operation of the **Ceph** storage service
#
# Dependencies:
#
# - ``functions`` file
# - ``CEPH_DATA_DIR`` or ``DATA_DIR`` must be defined
#
# ``stack.sh`` calls the entry points in this order:
#
# - install_ceph
# - configure_ceph
# - init_ceph
# - start_ceph
# - stop_ceph
# - cleanup_ceph
# - cleanup_containerized_ceph
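#
# A minimal ``local.conf`` sketch for exercising this plugin (illustrative
# only; the plugin URL below is an assumption, adjust to your environment):
#
#   [[local|localrc]]
#   enable_plugin devstack-plugin-ceph https://opendev.org/openstack/devstack-plugin-ceph
#   ENABLE_CEPH_CINDER=True
#   ENABLE_CEPH_GLANCE=True
#   ENABLE_CEPH_NOVA=True
#   ENABLE_CEPH_MANILA=False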

# Save trace setting
XTRACE=$(set +o | grep xtrace)
set +o xtrace

# Defaults
# --------

CEPH_RELEASE=${CEPH_RELEASE:-hammer}

# Deploy a Ceph demo container instead of a non-containerized version
CEPH_CONTAINERIZED=$(trueorfalse False CEPH_CONTAINERIZED)

# Set ``CEPH_DATA_DIR`` to the location of Ceph drives and objects.
# Default is the common DevStack data directory.
CEPH_DATA_DIR=${CEPH_DATA_DIR:-/var/lib/ceph}
CEPH_DISK_IMAGE=${CEPH_DISK_IMAGE:-${CEPH_DATA_DIR}/drives/images/ceph.img}

# Set ``CEPH_CONF_DIR`` to the location of the configuration files.
# Default is ``/etc/ceph``.
CEPH_CONF_DIR=${CEPH_CONF_DIR:-/etc/ceph}

# DevStack will create a loop-back disk formatted as XFS to store the
# Ceph data. Set ``CEPH_LOOPBACK_DISK_SIZE`` to the desired disk size
# (it defaults to ``VOLUME_BACKING_FILE_SIZE``, 8GB).
VOLUME_BACKING_FILE_SIZE=${VOLUME_BACKING_FILE_SIZE:-8GB}
CEPH_LOOPBACK_DISK_SIZE_DEFAULT=${CEPH_LOOPBACK_DISK_SIZE_DEFAULT:-$VOLUME_BACKING_FILE_SIZE}
CEPH_LOOPBACK_DISK_SIZE=${CEPH_LOOPBACK_DISK_SIZE:-$CEPH_LOOPBACK_DISK_SIZE_DEFAULT}

# Common
CEPH_FSID=$(uuidgen)
CEPH_CONF_FILE=${CEPH_CONF_DIR}/ceph.conf

# RBD configuration defaults
CEPH_RBD_DEFAULT_FEATURES=${CEPH_RBD_DEFAULT_FEATURES:-1}
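# (Feature bit 1 corresponds to RBD layering only; this conservative default
# keeps images usable by older RBD clients.)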

# Glance
GLANCE_CEPH_USER=${GLANCE_CEPH_USER:-glance}
GLANCE_CEPH_POOL=${GLANCE_CEPH_POOL:-images}
GLANCE_CEPH_POOL_PG=${GLANCE_CEPH_POOL_PG:-8}
GLANCE_CEPH_POOL_PGP=${GLANCE_CEPH_POOL_PGP:-8}
GLANCE_RGW_BACKEND=${GLANCE_RGW_BACKEND:-False}

# Nova
NOVA_CEPH_POOL=${NOVA_CEPH_POOL:-vms}
NOVA_CEPH_POOL_PG=${NOVA_CEPH_POOL_PG:-8}
NOVA_CEPH_POOL_PGP=${NOVA_CEPH_POOL_PGP:-8}

# Cinder
CINDER_CEPH_POOL=${CINDER_CEPH_POOL:-volumes}
CINDER_CEPH_POOL_PG=${CINDER_CEPH_POOL_PG:-8}
CINDER_CEPH_POOL_PGP=${CINDER_CEPH_POOL_PGP:-8}
CINDER_CEPH_USER=${CINDER_CEPH_USER:-cinder}
CINDER_CEPH_UUID=${CINDER_CEPH_UUID:-$(uuidgen)}

# Manila
CEPHFS_POOL_PG=${CEPHFS_POOL_PG:-8}

# Multiple filesystems enable more than one devstack to share
# the same REMOTE_CEPH cluster. Note that in addition to setting
# CEPHFS_MULTIPLE_FILESYSTEMS and REMOTE_CEPH, each devstack
# needs to set distinct values for CEPHFS_FILESYSTEM,
# CEPHFS_METADATA_POOL, and CEPHFS_DATA_POOL.
CEPHFS_MULTIPLE_FILESYSTEMS=${CEPHFS_MULTIPLE_FILESYSTEMS:-False}
CEPHFS_FILESYSTEM=${CEPHFS_FILESYSTEM:-cephfs}
CEPHFS_METADATA_POOL=${CEPHFS_METADATA_POOL:-cephfs_metadata}
CEPHFS_DATA_POOL=${CEPHFS_DATA_POOL:-cephfs_data}
MANILA_CEPH_DRIVER=${MANILA_CEPH_DRIVER:-cephfsnative}
MANILA_CEPH_USER=${MANILA_CEPH_USER:-manila}
MDS_ID=${MDS_ID:-a}

# Set ``CEPH_REPLICAS`` to configure how many replicas are to be
# configured for your Ceph cluster. By default we configure only one
# replica since it is much less CPU and memory intensive. If you plan
# to test Ceph replication, feel free to increase this value.
CEPH_REPLICAS=${CEPH_REPLICAS:-1}
CEPH_REPLICAS_SEQ=$(seq ${CEPH_REPLICAS})

# Rados gateway
CEPH_RGW_PORT=${CEPH_RGW_PORT:-8080}
CEPH_RGW_KEYSTONE_API_VERSION=${CEPH_RGW_KEYSTONE_API_VERSION:-2.0}

# Ceph REST API (for containerized version only)
# Default is 5000, but Keystone already listens on 5000
CEPH_REST_API_PORT=${CEPH_REST_API_PORT:-5001}

# Connect to an existing Ceph cluster
REMOTE_CEPH=$(trueorfalse False REMOTE_CEPH)
REMOTE_CEPH_ADMIN_KEY_PATH=${REMOTE_CEPH_ADMIN_KEY_PATH:-$CEPH_CONF_DIR/ceph.client.admin.keyring}
REMOTE_CEPH_RGW=$(trueorfalse False REMOTE_CEPH_RGW)

# Cinder encrypted volume tests are not supported with a Ceph backend due to
# bug 1463525.
ATTACH_ENCRYPTED_VOLUME_AVAILABLE=False

# OpenStack CI test instances will have a set of opt-in package mirrors in
# /etc/apt/sources.list.available.d/ which will include the ceph package
# mirror. If this file exists we can link to it in /etc/apt/sources.list.d/
# to enable it.
APT_REPOSITORY_FILE="/etc/apt/sources.list.available.d/ceph-deb-hammer.list"

# If the package mirror file doesn't exist, fetch from here
APT_REPOSITORY_ENTRY="\
deb http://download.ceph.com/debian-${CEPH_RELEASE} $(lsb_release -sc) main"

# Set INIT_SYSTEM to upstart, systemd, or init. In our domain it should be
# safe to assume that if the init system is not upstart or systemd then it
# is sysvinit rather than other theoretical possibilities like busybox.
INIT_SYSTEM=$(init --version 2>/dev/null | grep -qs upstart && echo upstart \
    || cat /proc/1/comm)

# Set RUN_AS to 'root' or 'ceph'. Starting with Infernalis, ceph daemons
# run as the ceph user rather than as the root user. We set this variable
# properly later, after the ceph-common package is installed.
#
RUN_AS='unknown'


# Functions
# ---------

# Containerized Ceph
function deploy_containerized_ceph {
    install_package docker docker.io ceph-common
    DOCKER_EXEC="docker exec ceph-demo"
    initial_configure_ceph
    sudo docker run -d \
        --name ceph-demo \
        --net=host \
        -v ${CEPH_CONF_DIR}:${CEPH_CONF_DIR} \
        -v ${CEPH_DATA_DIR}:${CEPH_DATA_DIR} \
        -e MON_IP=${SERVICE_HOST} \
        -e CEPH_PUBLIC_NETWORK=$(grep -o ${SERVICE_HOST%??}0/.. /proc/net/fib_trie | head -1) \
        -e RGW_CIVETWEB_PORT=${CEPH_RGW_PORT} \
        -e RESTAPI_PORT=${CEPH_REST_API_PORT} \
        ceph/demo

    # wait for ceph to be healthy then continue
    ceph_status
}
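
# Debugging hint (not part of the deployment flow): if the health check in
# ceph_status times out, ``sudo docker logs ceph-demo`` is usually the first
# place to look for why the demo container did not come up.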

function wait_for_daemon {
    timeout=20
    daemon_to_test=$1
    while [ $timeout -ne 0 ]; do
        if eval $daemon_to_test; then
            return 0
        fi
        sleep 1
        let timeout=timeout-1
    done
    return 1
}

function ceph_status {
    echo "Waiting for Ceph to be ready"
    return $(wait_for_daemon "sudo docker exec ceph-demo ceph health | grep -sq HEALTH_OK")
}

# is_ceph_enabled_for_service() - checks whether the OpenStack service
# specified as an argument is enabled with Ceph as its storage backend.
function is_ceph_enabled_for_service {
    local config config_name enabled service

    enabled=1
    service=$1
    # Construct the global variable ENABLE_CEPH_.* corresponding to a
    # $service.
    config_name=ENABLE_CEPH_$(echo $service | \
        tr '[:lower:]' '[:upper:]' | tr '-' '_')
    config=$(eval echo "\$$config_name")

    if (is_service_enabled $service) && [[ $config == 'True' ]]; then
        enabled=0
    fi
    return $enabled
}
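
# Typical usage, mirroring the callers later in this file:
#   if is_ceph_enabled_for_service cinder; then
#       configure_ceph_cinder
#   fi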

# _get_ceph_version() - checks the version of the Ceph mon daemon or CLI,
# based on an argument. Checking the mon daemon version requires the mon
# daemon to be up and healthy.
function _get_ceph_version {
    local ceph_version_str

    if [[ $1 == 'cli' ]]; then
        ceph_version_str=$(sudo ceph --version | cut -d ' ' -f 3 | \
            cut -d '.' -f 1,2)
    elif [[ $1 == 'mon' ]]; then
        ceph_version_str=$(sudo ceph daemon mon.$(hostname) version | \
            cut -d '"' -f 4 | cut -f 1,2 -d '.')
    else
        die $LINENO "Invalid argument. The _get_ceph_version function needs \
an argument that can be 'cli' or 'mon'."
    fi
    echo $ceph_version_str
}

# _run_as_ceph_or_root() - Starting with Infernalis, ceph daemons run as the
# ceph user rather than as root. Check the version and return 'root' or
# 'ceph'.
#
# This function presupposes that the ceph-common package has been installed
# first.
function _run_as_ceph_or_root {
    local ceph_version

    ceph_version=$(_get_ceph_version cli)
    if [[ $(echo $ceph_version '>=' 9.2 | bc -l) == 1 ]] ; then
        echo ceph
    else
        echo root
    fi
}

# import_libvirt_secret_ceph() - Imports the Cinder user key into libvirt
# so it can connect to the Ceph cluster while attaching a Cinder block device
function import_libvirt_secret_ceph {
    cat <<EOF | sudo tee secret.xml>/dev/null
<secret ephemeral='no' private='no'>
    <uuid>${CINDER_CEPH_UUID}</uuid>
    <usage type='ceph'>
        <name>client.${CINDER_CEPH_USER} secret</name>
    </usage>
</secret>
EOF
    sudo virsh secret-define --file secret.xml
    sudo virsh secret-set-value --secret ${CINDER_CEPH_UUID} \
        --base64 $(sudo ceph -c ${CEPH_CONF_FILE} \
            auth get-key client.${CINDER_CEPH_USER})
    sudo rm -f secret.xml
}
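
# (After a successful import, ``sudo virsh secret-list`` should show the
# ${CINDER_CEPH_UUID} secret with a ceph usage entry.)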

# _undefine_virsh_secret() - Undefine the Cinder key secret from libvirt
function _undefine_virsh_secret {
    if is_ceph_enabled_for_service cinder || \
            is_ceph_enabled_for_service nova; then
        local virsh_uuid
        virsh_uuid=$(sudo virsh secret-list | awk '/^ ?[0-9a-z]/ { print $1 }')
        sudo virsh secret-undefine ${virsh_uuid} &>/dev/null
    fi
}

# check_os_support_ceph() - Check if the OS provides a decent version of Ceph
function check_os_support_ceph {
    if [[ ! ${DISTRO} =~ (trusty|xenial|jessie|sid|f24|f25|rhel7) ]]; then
        echo "WARNING: your distro $DISTRO does not provide \
(at least) the Firefly release. \
Please use Ubuntu Trusty or Fedora 24 (or higher)"
        if [[ "$FORCE_CEPH_INSTALL" != "yes" ]]; then
            die $LINENO "If you wish to install Ceph on this distribution \
anyway, run with FORCE_CEPH_INSTALL=yes; \
this assumes that YOU will set up the proper repositories"
        fi
        NO_UPDATE_REPOS=False
    fi
}

# cleanup_ceph() - Remove residual data files, anything left over from previous
# runs that a clean run would need to clean up
function cleanup_ceph_remote {
    # do a proper cleanup from here to avoid leftover on the remote Ceph cluster
    if is_ceph_enabled_for_service glance; then
        sudo ceph -c ${CEPH_CONF_FILE} osd pool delete $GLANCE_CEPH_POOL $GLANCE_CEPH_POOL \
            --yes-i-really-really-mean-it > /dev/null 2>&1
        sudo ceph -c ${CEPH_CONF_FILE} auth del client.$GLANCE_CEPH_USER > /dev/null 2>&1
    fi
    if is_ceph_enabled_for_service cinder; then
        sudo ceph -c ${CEPH_CONF_FILE} osd pool delete $CINDER_CEPH_POOL $CINDER_CEPH_POOL \
            --yes-i-really-really-mean-it > /dev/null 2>&1
        sudo ceph -c ${CEPH_CONF_FILE} auth del client.$CINDER_CEPH_USER > /dev/null 2>&1
    fi
    if is_ceph_enabled_for_service c-bak; then
        sudo ceph -c ${CEPH_CONF_FILE} osd pool delete $CINDER_BAK_CEPH_POOL $CINDER_BAK_CEPH_POOL \
            --yes-i-really-really-mean-it > /dev/null 2>&1
        sudo ceph -c ${CEPH_CONF_FILE} auth del client.$CINDER_BAK_CEPH_USER > /dev/null 2>&1
    fi
    if is_ceph_enabled_for_service nova; then
        iniset $NOVA_CONF libvirt rbd_secret_uuid ""
        sudo ceph -c ${CEPH_CONF_FILE} osd pool delete $NOVA_CEPH_POOL $NOVA_CEPH_POOL \
            --yes-i-really-really-mean-it > /dev/null 2>&1
    fi
    if is_ceph_enabled_for_service manila; then
        sudo ceph -c ${CEPH_CONF_FILE} fs rm $CEPHFS_FILESYSTEM \
            --yes-i-really-mean-it
        sudo ceph -c ${CEPH_CONF_FILE} osd pool delete $CEPHFS_METADATA_POOL $CEPHFS_METADATA_POOL \
            --yes-i-really-really-mean-it > /dev/null 2>&1
        sudo ceph -c ${CEPH_CONF_FILE} osd pool delete $CEPHFS_DATA_POOL $CEPHFS_DATA_POOL \
            --yes-i-really-really-mean-it > /dev/null 2>&1
        sudo ceph -c ${CEPH_CONF_FILE} auth del client.$MANILA_CEPH_USER > /dev/null 2>&1
    fi
}

function cleanup_ceph_embedded {
    sudo killall -w -9 ceph-mon ceph-osd ceph-mds
    if [ "$ENABLE_CEPH_RGW" = "True" ]; then
        sudo killall -w -9 radosgw
    fi
    sudo rm -rf ${CEPH_DATA_DIR}/*/*
    if egrep -q ${CEPH_DATA_DIR} /proc/mounts; then
        sudo umount ${CEPH_DATA_DIR}
    fi
    if [[ -e ${CEPH_DISK_IMAGE} ]]; then
        sudo rm -f ${CEPH_DISK_IMAGE}
    fi

    # purge ceph config file and keys
    sudo rm -rf ${CEPH_CONF_DIR}/*

    # purge repo
    sudo apt-add-repository --remove "$APT_REPOSITORY_ENTRY"
}

function cleanup_ceph_general {
    _undefine_virsh_secret
    if is_ceph_enabled_for_service manila && [ $MANILA_CEPH_DRIVER == 'cephfsnfs' ]; then
        cleanup_nfs_ganesha
    fi
}

function cleanup_containerized_ceph {
    sudo docker rm -f ceph-demo
    sudo rm -rf ${CEPH_CONF_DIR}/*
    sudo rm -rf ${CEPH_DATA_DIR}
}

function initial_configure_ceph {
    # create a backing file disk
    create_disk ${CEPH_DISK_IMAGE} ${CEPH_DATA_DIR} ${CEPH_LOOPBACK_DISK_SIZE}
    # populate ceph directory
    sudo mkdir -p \
        ${CEPH_DATA_DIR}/{bootstrap-mds,bootstrap-osd,mds,mon,osd,tmp,radosgw}
}

# configure_ceph() - Set config files, create data dirs, etc
function configure_ceph {
    local count=0

    RUN_AS=$(_run_as_ceph_or_root)
    echo "ceph daemons will run as $RUN_AS"

    initial_configure_ceph

    # create ceph monitor initial key and directory
    sudo ceph-authtool ${CEPH_DATA_DIR}/tmp/keyring.mon.$(hostname) \
        --create-keyring --name=mon. --add-key=$(ceph-authtool \
        --gen-print-key) --cap mon 'allow *'

    sudo mkdir -p ${CEPH_DATA_DIR}/mon/ceph-$(hostname)

    # create a default ceph configuration file
    cat <<EOF | sudo tee ${CEPH_CONF_FILE}>/dev/null
[global]
fsid = ${CEPH_FSID}
mon_initial_members = $(hostname)
mon_host = ${SERVICE_HOST}
auth_cluster_required = cephx
auth_service_required = cephx
auth_client_required = cephx
filestore_xattr_use_omap = true
osd crush chooseleaf type = 0
osd journal size = 100
osd pool default size = ${CEPH_REPLICAS}
rbd default features = ${CEPH_RBD_DEFAULT_FEATURES}
EOF

    # bootstrap the ceph monitor
    sudo ceph-mon -c ${CEPH_CONF_FILE} --mkfs -i $(hostname) \
        --keyring ${CEPH_DATA_DIR}/tmp/keyring.mon.$(hostname)

    if [[ $RUN_AS == 'ceph' ]] ; then
        sudo chown -R ceph. ${CEPH_DATA_DIR}
    fi

    if [[ $INIT_SYSTEM == 'upstart' ]] ; then
        sudo touch ${CEPH_DATA_DIR}/mon/ceph-$(hostname)/upstart
        sudo initctl emit ceph-mon id=$(hostname)
    elif [[ $INIT_SYSTEM == 'systemd' ]]; then
        sudo systemctl enable ceph-mon@$(hostname)
        sudo systemctl start ceph-mon@$(hostname)
    else
        sudo touch ${CEPH_DATA_DIR}/mon/ceph-$(hostname)/sysvinit
        sudo service ceph start mon.$(hostname)
    fi

    local ceph_version
    ceph_version=$(_get_ceph_version mon)
    if [[ $(echo $ceph_version '>=' 11.1 | bc -l) == 1 ]] ; then
        sudo ceph-create-keys --cluster ceph --id $(hostname)
    fi

    # wait for the admin key to come up
    # otherwise we will not be able to do the actions below
    until [ -f ${CEPH_CONF_DIR}/ceph.client.admin.keyring ]; do
        echo_summary "Waiting for the Ceph admin key to be ready..."

        count=$(($count + 1))
        if [ $count -eq 3 ]; then
            die $LINENO "Maximum of 3 retries reached"
        fi
        sleep 5
    done

    # create a simple rule to take OSDs instead of hosts with CRUSH
    # then apply this rule to the default pool
    if [[ $CEPH_REPLICAS -ne 1 ]]; then
        sudo ceph -c ${CEPH_CONF_FILE} \
            osd crush rule create-simple devstack default osd
        RULE_ID=$(sudo ceph -c ${CEPH_CONF_FILE} \
            osd crush rule dump devstack | \
            awk '/rule_id/ {print $2}' | \
            cut -d ',' -f1)
        sudo ceph -c ${CEPH_CONF_FILE} \
            osd pool set rbd crush_ruleset ${RULE_ID}
        sudo ceph -c ${CEPH_CONF_FILE} \
            osd pool set data crush_ruleset ${RULE_ID}
        sudo ceph -c ${CEPH_CONF_FILE} \
            osd pool set metadata crush_ruleset ${RULE_ID}
    fi

    # create the OSD(s)
    for rep in ${CEPH_REPLICAS_SEQ}; do
        OSD_ID=$(sudo ceph -c ${CEPH_CONF_FILE} osd create)
        if [[ $RUN_AS == 'ceph' ]] ; then
            sudo mkdir -p ${CEPH_DATA_DIR}/osd/ceph-${OSD_ID}
            sudo chown ceph. ${CEPH_DATA_DIR}/osd/ceph-${OSD_ID}
            sudo ceph-osd -c ${CEPH_CONF_FILE} --setuser ceph --setgroup ceph -i ${OSD_ID} --mkfs
        else
            sudo mkdir -p ${CEPH_DATA_DIR}/osd/ceph-${OSD_ID}
            sudo ceph-osd -c ${CEPH_CONF_FILE} -i ${OSD_ID} --mkfs
        fi
        sudo ceph -c ${CEPH_CONF_FILE} auth get-or-create osd.${OSD_ID} \
            mon 'allow profile osd ' osd 'allow *' | \
            sudo tee ${CEPH_DATA_DIR}/osd/ceph-${OSD_ID}/keyring
        if [[ $RUN_AS == 'ceph' ]] ; then
            sudo chown ceph. ${CEPH_DATA_DIR}/osd/ceph-${OSD_ID}/keyring
        fi

        # ceph's init script parses ${CEPH_DATA_DIR}/osd/ceph-${OSD_ID}/
        # looking for a file named 'upstart' or 'sysvinit';
        # thanks to these 'touches' we are able to control the OSD daemons
        # from the init script.
        if [[ $INIT_SYSTEM == 'upstart' ]] ; then
            sudo touch ${CEPH_DATA_DIR}/osd/ceph-${OSD_ID}/upstart
        elif [[ $INIT_SYSTEM == 'systemd' ]] ; then
            sudo systemctl enable ceph-osd@${OSD_ID}
        else
            sudo touch ${CEPH_DATA_DIR}/osd/ceph-${OSD_ID}/sysvinit
        fi
    done

    if is_ceph_enabled_for_service manila; then
        # create an MDS
        sudo mkdir -p ${CEPH_DATA_DIR}/mds/ceph-${MDS_ID}
        if [[ $RUN_AS == 'ceph' ]] ; then
            sudo chown ceph. ${CEPH_DATA_DIR}/mds/ceph-${MDS_ID}
        fi
        sudo ceph -c ${CEPH_CONF_FILE} auth get-or-create mds.${MDS_ID} \
            mon 'allow profile mds ' osd 'allow rw' mds 'allow' \
            -o ${CEPH_DATA_DIR}/mds/ceph-${MDS_ID}/keyring
        if [[ $RUN_AS == 'ceph' ]] ; then
            sudo chown ceph. ${CEPH_DATA_DIR}/mds/ceph-${MDS_ID}/keyring
        fi
        if [[ $INIT_SYSTEM == 'upstart' ]] ; then
            sudo touch ${CEPH_DATA_DIR}/mds/ceph-${MDS_ID}/upstart
        elif [[ $INIT_SYSTEM == 'systemd' ]] ; then
            sudo systemctl enable ceph-mds@${MDS_ID}
        else
            sudo touch ${CEPH_DATA_DIR}/mds/ceph-${MDS_ID}/sysvinit
        fi
    fi

    if [ "$ENABLE_CEPH_RGW" = "True" ]; then
        _configure_ceph_rgw
    fi
}

function _configure_rgw_ceph_section {
    configure_ceph_embedded_rgw_paths
    if [[ ! "$(egrep "\[${key}\]" ${CEPH_CONF_FILE})" ]]; then
        cat <<EOF | sudo tee -a ${CEPH_CONF_FILE}>/dev/null
[${key}]
host = $(hostname)
keyring = ${dest}/keyring
rgw socket path = /tmp/radosgw-$(hostname).sock
log file = /var/log/ceph/radosgw-$(hostname).log
rgw data = ${dest}
rgw print continue = false
rgw frontends = civetweb port=${CEPH_RGW_PORT}
rgw keystone url = http://${SERVICE_HOST}:35357
rgw s3 auth use keystone = true
nss db path = ${dest}/nss
rgw keystone admin user = radosgw
rgw keystone admin password = $SERVICE_PASSWORD
rgw keystone accepted roles = Member, _member_, admin, ResellerAdmin
EOF

        if [[ $CEPH_RGW_KEYSTONE_API_VERSION == '2.0' && \
                ! "$(grep -sq "rgw keystone admin tenant = $SERVICE_PROJECT_NAME" ${CEPH_CONF_FILE} )" ]]; then
            cat <<EOF | sudo tee -a ${CEPH_CONF_FILE}>/dev/null
rgw keystone admin tenant = $SERVICE_PROJECT_NAME
EOF
        else
            cat <<EOF | sudo tee -a ${CEPH_CONF_FILE}>/dev/null
rgw keystone admin project = $SERVICE_PROJECT_NAME
rgw keystone admin domain = $SERVICE_DOMAIN_NAME
rgw keystone api version = 3
EOF
        fi
    fi
}

function _configure_ceph_rgw_container {
    _configure_rgw_ceph_section
    sudo docker restart ceph-demo
}

function _configure_ceph_rgw {
    # bootstrap rados gateway
    _configure_rgw_ceph_section
    sudo mkdir -p $dest
    sudo ceph auth get-or-create $key \
        osd 'allow rwx' mon 'allow rw' \
        -o ${dest}/keyring

    if [[ $INIT_SYSTEM == 'upstart' ]] ; then
        sudo touch ${dest}/{upstart,done}
    elif [[ $INIT_SYSTEM == 'systemd' ]] ; then
        sudo systemctl enable ceph-radosgw@rgw.$(hostname)
    else
        sudo touch ${dest}/{sysvinit,done}
    fi

    if [[ $RUN_AS == 'ceph' ]] ; then
        sudo chown -R ceph. ${CEPH_DATA_DIR}
    fi
}

function _create_swift_endpoint {
    local swift_service
    swift_service=$(get_or_create_service "swift" "object-store" "Swift Service")

    local swift_endpoint
    swift_endpoint="$SWIFT_SERVICE_PROTOCOL://$SERVICE_HOST:${CEPH_RGW_PORT}/swift/v1"

    get_or_create_endpoint $swift_service \
        "$REGION_NAME" $swift_endpoint $swift_endpoint $swift_endpoint
}

function configure_ceph_embedded_rgw_paths {
    if [[ "$CEPH_CONTAINERIZED" == "True" ]]; then
        dest=${CEPH_DATA_DIR}/radosgw/$(hostname)
        key=client.radosgw.gateway
    elif [[ $INIT_SYSTEM == 'systemd' ]] ; then
        dest=${CEPH_DATA_DIR}/radosgw/ceph-rgw.$(hostname)
        key=client.rgw.$(hostname)
    else
        dest=${CEPH_DATA_DIR}/radosgw/ceph-radosgw.$(hostname)
        key=client.radosgw.$(hostname)
    fi
}

function configure_ceph_embedded_rgw {
    configure_ceph_embedded_rgw_paths

    # keystone endpoint for radosgw
    if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then
        _create_swift_endpoint
    fi

    # Let keystone generate the certs, rgw needs these.
    keystone-manage pki_setup --rebuild

    # Create radosgw service user with admin privileges
    create_service_user "radosgw" "admin"

    # radosgw needs to access keystone's revocation list
    sudo mkdir -p ${dest}/nss
    sudo openssl x509 -in /etc/keystone/ssl/certs/ca.pem -pubkey | \
        sudo certutil -d ${dest}/nss -A -n ca -t "TCu,Cu,Tuw"

    sudo openssl x509 -in /etc/keystone/ssl/certs/signing_cert.pem -pubkey | \
        sudo certutil -A -d ${dest}/nss -n signing_cert -t "P,P,P"
}

function start_ceph_embedded_rgw {
    # radosgw service is started here as it needs the keystone pki_setup as a
    # pre-requisite
    if [[ $INIT_SYSTEM == 'upstart' ]] ; then
        sudo start radosgw id=radosgw.$(hostname)
    elif [[ $INIT_SYSTEM == 'systemd' ]] ; then
        sudo systemctl enable ceph-radosgw@rgw.$(hostname)
        sudo systemctl start ceph-radosgw@rgw.$(hostname)
    else
        sudo service ceph start rgw.$(hostname)
    fi
}

function configure_ceph_embedded_glance {
    # configure Glance service options, ceph pool, ceph user and ceph key
    if [[ $CEPH_REPLICAS -ne 1 ]]; then
        sudo $DOCKER_EXEC ceph -c ${CEPH_CONF_FILE} osd pool \
            set ${GLANCE_CEPH_POOL} crush_ruleset ${RULE_ID}
    fi
}

# configure_ceph_glance() - Glance config needs to come after Glance is set up
function configure_ceph_glance {
    if [[ "$GLANCE_RGW_BACKEND" = "True" && "$ENABLE_CEPH_RGW" = "True" ]]; then
        # common glance accounts for swift
        create_service_user "glance-swift" "ResellerAdmin"
        iniset $GLANCE_SWIFT_STORE_CONF ref1 user $SERVICE_PROJECT_NAME:glance-swift

        AUTH_URL=$KEYSTONE_SERVICE_URI/v$CEPH_RGW_KEYSTONE_API_VERSION
        iniset $GLANCE_SWIFT_STORE_CONF ref1 key $SERVICE_PASSWORD
        iniset $GLANCE_SWIFT_STORE_CONF ref1 auth_address $AUTH_URL
        iniset $GLANCE_SWIFT_STORE_CONF ref1 user_domain_name $SERVICE_DOMAIN_NAME
        iniset $GLANCE_SWIFT_STORE_CONF ref1 project_domain_name $SERVICE_DOMAIN_NAME
        iniset $GLANCE_SWIFT_STORE_CONF ref1 auth_version $CEPH_RGW_KEYSTONE_API_VERSION

        iniset $GLANCE_API_CONF glance_store default_store swift
        iniset $GLANCE_API_CONF glance_store swift_store_create_container_on_put True
        iniset $GLANCE_API_CONF glance_store swift_store_config_file $GLANCE_SWIFT_STORE_CONF
        iniset $GLANCE_API_CONF glance_store default_swift_reference ref1
        iniset $GLANCE_API_CONF glance_store stores "file, http, swift"
    else
        sudo $DOCKER_EXEC ceph -c ${CEPH_CONF_FILE} osd pool create \
            ${GLANCE_CEPH_POOL} ${GLANCE_CEPH_POOL_PG} ${GLANCE_CEPH_POOL_PGP}

        sudo $DOCKER_EXEC ceph -c ${CEPH_CONF_FILE} auth \
            get-or-create client.${GLANCE_CEPH_USER} \
            mon "allow r" \
            osd "allow class-read object_prefix rbd_children, \
allow rwx pool=${GLANCE_CEPH_POOL}" | \
            sudo tee ${CEPH_CONF_DIR}/ceph.client.${GLANCE_CEPH_USER}.keyring
        sudo chown ${STACK_USER}:$(id -g -n $whoami) \
            ${CEPH_CONF_DIR}/ceph.client.${GLANCE_CEPH_USER}.keyring

        iniset $GLANCE_API_CONF DEFAULT show_multiple_locations True
        iniset $GLANCE_API_CONF glance_store default_store rbd
        iniset $GLANCE_API_CONF glance_store stores "file, http, rbd"
        iniset $GLANCE_API_CONF glance_store rbd_store_ceph_conf $CEPH_CONF_FILE
        iniset $GLANCE_API_CONF glance_store rbd_store_user $GLANCE_CEPH_USER
        iniset $GLANCE_API_CONF glance_store rbd_store_pool $GLANCE_CEPH_POOL
    fi
}

function configure_ceph_manila {
    sudo $DOCKER_EXEC ceph -c ${CEPH_CONF_FILE} osd pool create ${CEPHFS_METADATA_POOL} \
        ${CEPHFS_POOL_PG}
    sudo $DOCKER_EXEC ceph -c ${CEPH_CONF_FILE} osd pool create ${CEPHFS_DATA_POOL} \
        ${CEPHFS_POOL_PG}
    if [[ ${CEPHFS_MULTIPLE_FILESYSTEMS} == 'True' ]]; then
        sudo ceph -c ${CEPH_CONF_FILE} fs flag set enable_multiple true \
            --yes-i-really-mean-it
    fi
    sudo $DOCKER_EXEC ceph -c ${CEPH_CONF_FILE} fs new ${CEPHFS_FILESYSTEM} ${CEPHFS_METADATA_POOL} \
        ${CEPHFS_DATA_POOL}

    sudo $DOCKER_EXEC ceph -c ${CEPH_CONF_FILE} auth get-or-create \
        client.${MANILA_CEPH_USER} \
        mon "allow *" osd "allow rw" mds "allow *" \
        -o ${CEPH_CONF_DIR}/ceph.client.${MANILA_CEPH_USER}.keyring
    sudo chown ${STACK_USER}:$(id -g -n $whoami) \
        ${CEPH_CONF_DIR}/ceph.client.${MANILA_CEPH_USER}.keyring

    # Enable snapshots in CephFS.
    sudo $DOCKER_EXEC ceph -c ${CEPH_CONF_FILE} mds set allow_new_snaps true \
        --yes-i-really-mean-it

    # Make manila's libcephfs client a root user.
    cat <<EOF | sudo tee -a ${CEPH_CONF_FILE}>/dev/null
[client.${MANILA_CEPH_USER}]
client mount uid = 0
client mount gid = 0
EOF

    if [ $MANILA_CEPH_DRIVER == 'cephfsnfs' ]; then
        configure_nfs_ganesha
        # The NFS-Ganesha server cannot run alongside the kernel NFS server.
        sudo systemctl stop nfs-server
        sudo systemctl disable nfs-server
        sudo systemctl enable nfs-ganesha
        sudo systemctl start nfs-ganesha
    fi

    # RESTART DOCKER CONTAINER
}

function configure_nfs_ganesha {
    # Configure NFS-Ganesha to work with Manila's CephFS driver
    sudo mkdir -p /etc/ganesha/export.d
    sudo touch /etc/ganesha/export.d/INDEX.conf
    echo "%include /etc/ganesha/export.d/INDEX.conf" | sudo tee /etc/ganesha/ganesha.conf
}

function cleanup_nfs_ganesha {
    sudo systemctl stop nfs-ganesha
    sudo systemctl disable nfs-ganesha
    sudo uninstall_package nfs-ganesha nfs-ganesha-ceph
}

function configure_ceph_embedded_manila {
    if [[ $CEPH_REPLICAS -ne 1 ]]; then
        sudo $DOCKER_EXEC ceph -c ${CEPH_CONF_FILE} osd pool set ${CEPHFS_DATA_POOL} \
            crush_ruleset ${RULE_ID}
        sudo $DOCKER_EXEC ceph -c ${CEPH_CONF_FILE} osd pool set ${CEPHFS_METADATA_POOL} \
            crush_ruleset ${RULE_ID}
    fi
}

function configure_ceph_embedded_nova {
    # configure Nova service options, ceph pool, ceph user and ceph key
    if [[ $CEPH_REPLICAS -ne 1 ]]; then
        sudo $DOCKER_EXEC ceph -c ${CEPH_CONF_FILE} osd pool \
            set ${NOVA_CEPH_POOL} crush_ruleset ${RULE_ID}
    fi
}

# configure_ceph_nova() - Nova config needs to come after Nova is set up
function configure_ceph_nova {
    sudo $DOCKER_EXEC ceph -c ${CEPH_CONF_FILE} osd pool create \
        ${NOVA_CEPH_POOL} ${NOVA_CEPH_POOL_PG} ${NOVA_CEPH_POOL_PGP}

    iniset $NOVA_CONF libvirt rbd_user ${CINDER_CEPH_USER}
    iniset $NOVA_CONF libvirt rbd_secret_uuid ${CINDER_CEPH_UUID}
    iniset $NOVA_CONF libvirt inject_key false
    iniset $NOVA_CONF libvirt inject_partition -2
    iniset $NOVA_CONF libvirt disk_cachemodes "network=writeback"
    iniset $NOVA_CONF libvirt images_type rbd
    iniset $NOVA_CONF libvirt images_rbd_pool ${NOVA_CEPH_POOL}
    iniset $NOVA_CONF libvirt images_rbd_ceph_conf ${CEPH_CONF_FILE}

    if ! is_ceph_enabled_for_service cinder; then
        sudo $DOCKER_EXEC ceph -c ${CEPH_CONF_FILE} \
            auth get-or-create client.${CINDER_CEPH_USER} \
            mon "allow r" \
            osd "allow class-read object_prefix rbd_children, \
allow rwx pool=${CINDER_CEPH_POOL}, \
allow rwx pool=${NOVA_CEPH_POOL}, \
allow rwx pool=${GLANCE_CEPH_POOL}" | \
            sudo tee \
                ${CEPH_CONF_DIR}/ceph.client.${CINDER_CEPH_USER}.keyring \
                > /dev/null
        sudo chown ${STACK_USER}:$(id -g -n $whoami) \
            ${CEPH_CONF_DIR}/ceph.client.${CINDER_CEPH_USER}.keyring
    fi
}

function configure_ceph_embedded_cinder {
    # Configure Cinder service options, ceph pool, ceph user and ceph key
    if [[ $CEPH_REPLICAS -ne 1 ]]; then
        sudo $DOCKER_EXEC ceph -c ${CEPH_CONF_FILE} osd pool \
            set ${CINDER_CEPH_POOL} crush_ruleset ${RULE_ID}
    fi
}

# configure_ceph_cinder() - Cinder config needs to come after Cinder is set up
function configure_ceph_cinder {
    sudo $DOCKER_EXEC ceph -c ${CEPH_CONF_FILE} osd pool create \
        ${CINDER_CEPH_POOL} ${CINDER_CEPH_POOL_PG} ${CINDER_CEPH_POOL_PGP}
    sudo $DOCKER_EXEC ceph -c ${CEPH_CONF_FILE} auth get-or-create \
        client.${CINDER_CEPH_USER} \
        mon "allow r" \
        osd "allow class-read object_prefix rbd_children, \
allow rwx pool=${CINDER_CEPH_POOL}, allow rwx pool=${NOVA_CEPH_POOL}, \
allow rwx pool=${GLANCE_CEPH_POOL}" | \
        sudo tee ${CEPH_CONF_DIR}/ceph.client.${CINDER_CEPH_USER}.keyring
    sudo chown ${STACK_USER}:$(id -g -n $whoami) \
        ${CEPH_CONF_DIR}/ceph.client.${CINDER_CEPH_USER}.keyring
}
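
# Note: the matching cinder.conf RBD options (volume_driver, rbd_pool,
# rbd_user, rbd_secret_uuid) are not set here; they are expected to be
# handled by DevStack's Cinder Ceph backend configuration outside this file.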

# init_ceph() - Initialize databases, etc.
function init_ceph {
    # clean up from previous (possibly aborted) runs
    # make sure to kill all ceph processes first
    sudo pkill -f ceph-mon || true
    sudo pkill -f ceph-osd || true
    if [ "$ENABLE_CEPH_RGW" = "True" ]; then
        sudo pkill -f radosgw || true
    fi
    if is_ceph_enabled_for_service manila; then
        sudo pkill -f ceph-mds || true
    fi
}

# install_ceph() - Collect source and prepare
function install_ceph_remote {
    install_package ceph-common
}

function install_ceph {
    if is_ubuntu; then
        CEPH_PACKAGES="ceph libnss3-tools"
        install_package software-properties-common

        if is_ceph_enabled_for_service manila; then
            # The 'apt' package manager needs the following package to access
            # HTTPS enabled repositories such as the Ceph repos hosted by the
            # shaman/chacra system.
            install_package apt-transport-https
            if [ $MANILA_CEPH_DRIVER == 'cephfsnfs' ]; then
                if [ $os_CODENAME != 'xenial' ]; then
                    die $LINENO "Need Ubuntu xenial to set up Manila with the CephFS NFS-Ganesha driver"
                fi
                curl -L https://shaman.ceph.com/api/repos/ceph/luminous/latest/ubuntu/$os_CODENAME/repo | \
                    sudo tee /etc/apt/sources.list.d/ceph.list
                curl -L https://shaman.ceph.com/api/repos/nfs-ganesha/next/latest/ubuntu/$os_CODENAME/flavors/ceph_luminous/repo | \
                    sudo tee /etc/apt/sources.list.d/ext-nfs-ganesha.list
                CEPH_PACKAGES="${CEPH_PACKAGES} ceph-mds libcephfs2 nfs-ganesha nfs-ganesha-ceph"
            else
                if ! [[ $os_CODENAME =~ (xenial|trusty) ]]; then
                    die $LINENO "Need Ubuntu trusty or xenial to set up Manila with the CephFS native driver"
                fi
                curl -L https://shaman.ceph.com/api/repos/ceph/jewel/latest/ubuntu/$os_CODENAME/repo | \
                    sudo tee /etc/apt/sources.list.d/ceph.list
                CEPH_PACKAGES="${CEPH_PACKAGES} ceph-mds libcephfs1"
            fi
        elif [ -f "$APT_REPOSITORY_FILE" ]; then
            # Opt into the OpenStack CI provided package repo mirror
            if [ -f "/etc/apt/sources.list.d/$(basename $APT_REPOSITORY_FILE)" ] ; then
                # This case can be removed once the CI images are updated to
                # remove this file.
                sudo rm "/etc/apt/sources.list.d/$(basename $APT_REPOSITORY_FILE)"
            fi
            sudo ln -s $APT_REPOSITORY_FILE "/etc/apt/sources.list.d/$(basename $APT_REPOSITORY_FILE)"
        else
            # the gate requires that we use mirrored package repositories for
            # reliability, so the most recent ceph packages are mirrored and
            # configured in $APT_REPOSITORY_FILE. The gate environment will
            # ensure that this file is present, so if it doesn't exist we're
            # likely not running in a gate environment and are free to fetch
            # packages from ceph.com.
            sudo apt-add-repository "$APT_REPOSITORY_ENTRY"

            # install the release key for ceph.com package authentication
            wget -q -O- 'https://download.ceph.com/keys/release.asc' \
                | sudo apt-key add -
        fi

        if [ "$ENABLE_CEPH_RGW" = "True" ]; then
            CEPH_PACKAGES="${CEPH_PACKAGES} radosgw"
        fi

        # Update package repo.
        REPOS_UPDATED=False
        install_package ${CEPH_PACKAGES}
    else
        DISTRO_TYPE=${os_VENDOR,,}
        RELEASE=$(echo $os_RELEASE | awk -F . '{print $1}')
        CEPH_PACKAGES="ceph"

        if is_ceph_enabled_for_service manila; then
            if [ $MANILA_CEPH_DRIVER == 'cephfsnfs' ]; then
                if [ $DISTRO_TYPE == 'centos' ] && [ $RELEASE == 7 ]; then
                    curl -L https://shaman.ceph.com/api/repos/ceph/luminous/latest/$DISTRO_TYPE/$RELEASE/repo | \
                        sudo tee /etc/yum.repos.d/ext-ceph.repo
                    curl -L https://shaman.ceph.com/api/repos/nfs-ganesha/next/latest/$DISTRO_TYPE/$RELEASE/flavors/ceph_luminous/repo | \
                        sudo tee /etc/yum.repos.d/ext-ganesha.repo
                fi
                CEPH_PACKAGES="${CEPH_PACKAGES} nfs-ganesha nfs-ganesha-ceph"
            else
                if [ $DISTRO_TYPE == 'centos' ] && [ $RELEASE == 7 ]; then
                    curl -L https://shaman.ceph.com/api/repos/ceph/jewel/latest/$DISTRO_TYPE/$RELEASE/repo | \
                        sudo tee /etc/yum.repos.d/ext-ceph.repo
                fi
            fi
        fi

        if [ "$ENABLE_CEPH_RGW" = "True" ]; then
            install_package ceph-radosgw
            CEPH_PACKAGES="${CEPH_PACKAGES} ceph-radosgw"
        fi
        install_package ${CEPH_PACKAGES}
    fi
}

# start_ceph() - Start running processes, including screen
function start_ceph {
    if [[ $RUN_AS == 'ceph' ]] ; then
        sudo chown -R ceph. ${CEPH_DATA_DIR}
    fi
    if [[ $INIT_SYSTEM == 'upstart' ]] ; then
        sudo initctl emit ceph-mon id=$(hostname)
        for id in $(sudo ceph -c ${CEPH_CONF_FILE} osd ls); do
            sudo start ceph-osd id=${id}
        done
        if is_ceph_enabled_for_service manila; then
            sudo start ceph-mds id=${MDS_ID}
        fi
    elif [[ $INIT_SYSTEM == 'systemd' ]] ; then
        sudo systemctl start ceph-mon@$(hostname)
        for id in $(sudo ceph -c ${CEPH_CONF_FILE} osd ls); do
            sudo systemctl start ceph-osd@$id
        done
        if is_ceph_enabled_for_service manila; then
            sudo systemctl start ceph-mds@${MDS_ID}
        fi
    else
        sudo service ceph start
    fi
}

# stop_ceph() - Stop running processes (non-screen)
function stop_ceph {
    if [[ $INIT_SYSTEM == 'upstart' ]] ; then
        sudo stop ceph-mon-all > /dev/null 2>&1
        sudo stop ceph-osd-all > /dev/null 2>&1
        if [ "$ENABLE_CEPH_RGW" = "True" ]; then
            sudo stop radosgw-all > /dev/null 2>&1
        fi
        if is_ceph_enabled_for_service manila; then
            sudo service ceph-mds-all stop > /dev/null 2>&1
        fi
    elif [[ $INIT_SYSTEM == 'systemd' ]] ; then
        if [ "$ENABLE_CEPH_RGW" = "True" ]; then
            sudo systemctl stop ceph-radosgw@rgw.$(hostname)
        fi
        if is_ceph_enabled_for_service manila; then
            sudo systemctl stop ceph-mds@${MDS_ID}
            if [ $MANILA_CEPH_DRIVER == 'cephfsnfs' ]; then
                sudo systemctl stop nfs-ganesha
            fi
        fi
        # if mon is dead or unhealthy we won't get the list
        # of osds but should continue anyways.
        ids=$(sudo ceph -c ${CEPH_CONF_FILE} osd ls --connect-timeout 5 2>/dev/null)
        for id in $ids; do
            sudo systemctl stop ceph-osd@$id
        done
        sudo systemctl stop ceph-mon@$(hostname)
    else
        sudo service ceph stop > /dev/null 2>&1
    fi
}

# Restore xtrace
$XTRACE

## Local variables:
## mode: shell-script
## End: