Merge "Remove osd_pool_default_min_size to allow Ceph cluster to do the right thing by default"

Jenkins 2017-05-23 15:58:48 +00:00 committed by Gerrit Code Review
commit aeb11f384f
6 changed files with 21 additions and 2 deletions

View File

@ -101,6 +101,7 @@ parameter_defaults:
CephMonKey: 'AQC+Ox1VmEr3BxAALZejqeHj50Nj6wJDvs96OQ=='
CephAdminKey: 'AQDLOh1VgEp6FRAAFzT7Zw+Y9V6JJExQAsRnRQ=='
CephClientKey: 'AQC+vYNXgDAgAhAAc8UoYt+OTz5uhV7ItLdwUw=='
CephPoolDefaultSize: 1
NovaEnableRbdBackend: true
CinderEnableRbdBackend: true
CinderBackupBackend: ceph
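For a single-node CI or PoC deployment such as the one above, the new parameter can also be overridden from an operator-supplied environment file. A minimal sketch, assuming a hypothetical file name poc-ceph.yaml; the parameter name CephPoolDefaultSize is the one introduced in ceph-base.yaml below:

# poc-ceph.yaml (hypothetical): single-replica Ceph pools, suitable for CI/PoC only
parameter_defaults:
  CephPoolDefaultSize: 1

The file would then be passed to the deployment, for example with openstack overcloud deploy --templates -e poc-ceph.yaml.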

View File

@ -90,6 +90,7 @@ parameter_defaults:
CephMonKey: 'AQC+Ox1VmEr3BxAALZejqeHj50Nj6wJDvs96OQ=='
CephAdminKey: 'AQDLOh1VgEp6FRAAFzT7Zw+Y9V6JJExQAsRnRQ=='
CephClientKey: 'AQC+vYNXgDAgAhAAc8UoYt+OTz5uhV7ItLdwUw=='
CephPoolDefaultSize: 1
SwiftCeilometerPipelineEnabled: false
NeutronServicePlugins: 'router, networking_bgpvpn.neutron.services.plugin.BGPVPNPlugin, networking_l2gw.services.l2gateway.plugin.L2GatewayPlugin'
BgpvpnServiceProvider: 'BGPVPN:Dummy:networking_bgpvpn.neutron.services.service_drivers.driver_api.BGPVPNDriver:default'

View File

@ -20,3 +20,5 @@ parameter_defaults:
GlanceBackend: rbd
GnocchiBackend: rbd
CinderEnableIscsiBackend: false
CephPoolDefaultSize: 1

View File

@ -99,7 +99,6 @@ outputs:
service_name: ceph_base
config_settings:
tripleo::profile::base::ceph::enable_ceph_storage: {get_param: ControllerEnableCephStorage}
ceph::profile::params::osd_pool_default_min_size: 1
ceph::profile::params::osds: {/srv/data: {}}
ceph::profile::params::manage_repo: false
ceph::profile::params::authentication_type: cephx
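The hard-coded min_size hiera entry is removed rather than parameterized. If an operator still needs to pin it explicitly, a hieradata override remains possible; a sketch, assuming the generic ExtraConfig mechanism is available in the deployed templates:

# Hypothetical override environment file: pin min_size explicitly via hieradata
parameter_defaults:
  ExtraConfig:
    ceph::profile::params::osd_pool_default_min_size: 2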

View File

@ -78,6 +78,10 @@ parameters:
MonitoringSubscriptionCephMon:
default: 'overcloud-ceph-mon'
type: string
CephPoolDefaultSize:
description: default replication size (osd_pool_default_size) for Ceph pools
type: number
default: 3
resources:
CephBase:
@ -102,7 +106,7 @@ outputs:
ceph::profile::params::mon_key: {get_param: CephMonKey}
ceph::profile::params::osd_pool_default_pg_num: 32
ceph::profile::params::osd_pool_default_pgp_num: 32
ceph::profile::params::osd_pool_default_size: 3
ceph::profile::params::osd_pool_default_size: {get_param: CephPoolDefaultSize}
# repeat returns items in a list, so we need to map_merge twice
tripleo::profile::base::ceph::mon::ceph_pools:
map_merge:
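With this change the replication count flows from the Heat parameter into the puppet-ceph hiera data instead of being hard coded. A sketch of the resulting settings when CephPoolDefaultSize is left at its default of 3 (illustrative only; key names are taken from the template above):

# rendered hiera (sketch)
ceph::profile::params::osd_pool_default_pg_num: 32
ceph::profile::params::osd_pool_default_pgp_num: 32
ceph::profile::params::osd_pool_default_size: 3
# osd_pool_default_min_size is deliberately left unset so Ceph can derive it

On a deployed cluster the effective values can be checked per pool with ceph osd pool get <pool> size and ceph osd pool get <pool> min_size.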

View File

@ -0,0 +1,12 @@
---
fixes:
- |
Removed the hard coding of osd_pool_default_min_size. Setting this value
to 1 can result in data loss when operating production deployments. Leaving
the value unset (or setting it to 0) allows Ceph to calculate it from the
current setting of osd_pool_default_size: if the replication count is 3,
the calculated min_size is 2; if the replication count is 1, the calculated
min_size is 1. For a PoC deployment using a single OSD, set
CephPoolDefaultSize to 1 so that osd_pool_default_size is 1. See the
description at http://docs.ceph.com/docs/master/rados/configuration/pool-pg-config-ref/
Added the CephPoolDefaultSize parameter to set the default replication
size. The default value is 3.
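A worked example of the derivation described above, assuming the documented behaviour that an unset (0) osd_pool_default_min_size is computed as size - (size / 2) with integer division:

# CephPoolDefaultSize: 3  ->  osd_pool_default_size = 3, derived min_size = 3 - (3 / 2) = 2
# CephPoolDefaultSize: 1  ->  osd_pool_default_size = 1, derived min_size = 1 - (1 / 2) = 1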