[hopem] Added ceph-osd-replication-count config and pg settings.

Adam Gandelman
2013-09-24 11:18:44 -07:00
3 changed files with 23 additions and 2 deletions

@@ -28,6 +28,16 @@ options:
       The *available* block device on which to create LVM volume group.
       May also be set to None for deployments that will not need local
       storage (eg, Ceph/RBD-backed volumes).
+  ceph-osd-replication-count:
+    default: 2
+    type: int
+    description: |
+      This value dictates the number of replicas ceph must make of any
+      object it stores within the cinder rbd pool. This only applies when
+      Ceph is used as a backend store. Note that once the cinder rbd pool
+      has been created, changing this value will have no effect (although
+      it can still be changed in ceph by manually configuring your ceph
+      cluster).
   volume-group:
     default: cinder-volumes
     type: string

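Because the replica count is only honoured when the cinder rbd pool is first created, the option has to be set before that happens. A minimal sketch of how it might be supplied, assuming a service named "cinder", an illustrative replica count of 3, and the juju 1.x CLI syntax of the time:

    # set it before the ceph relation creates the pool
    juju set cinder ceph-osd-replication-count=3

    # or pass it at deploy time via a config file
    cat > cinder.yaml <<EOF
    cinder:
      ceph-osd-replication-count: 3
    EOF
    juju deploy --config cinder.yaml cinder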

@@ -202,7 +202,18 @@ EOF
   if eligible_leader 'res_cinder_vip'; then
     # Create the cinder pool if it does not already exist
     if ! rados --id $SERVICE_NAME lspools | grep -q cinder; then
-      rados --id $SERVICE_NAME mkpool cinder
+      local num_osds=$(ceph --id $SERVICE_NAME osd ls| egrep "[^\s]"| wc -l)
+      local cfg_key='ceph-osd-replication-count'
+      local rep_count="$(config-get $cfg_key)"
+      if [ -z "$rep_count" ]
+      then
+        rep_count=2
+        juju-log "config returned empty string for $cfg_key - using value of 2"
+      fi
+      local num_pgs=$(((num_osds*100)/rep_count))
+      ceph --id $SERVICE_NAME osd pool create cinder $num_pgs $num_pgs
+      ceph --id $SERVICE_NAME osd pool set cinder size $rep_count
+      # TODO: set appropriate crush ruleset
     fi
   fi

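The placement-group count follows the commonly cited Ceph rule of thumb of roughly 100 PGs per OSD divided by the replica count, and the value is passed to "ceph osd pool create" twice because the command takes both pg_num and pgp_num. A standalone sketch of the same arithmetic, with a hypothetical 6-OSD cluster and the default replication count of 2:

    # same formula as the hook, outside any charm context
    num_osds=6     # example value; the hook derives this from 'ceph osd ls'
    rep_count=2    # charm default used when config-get returns nothing
    num_pgs=$(((num_osds*100)/rep_count))   # (6*100)/2 = 300
    echo "cinder pool: $num_pgs PGs, $rep_count replicas"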

@@ -1 +1 @@
-27
+28