Jackie Huang 46d78550bd ceph: add version 13.2.2 and align with stx 3.0
ceph-disk is used by the stx puppet manifests, but it was replaced by
ceph-volume in ceph 14.1+, and there may be other incompatibilities,
so add version 13.2.2 to align with stx 3.0. Version 14.1 is kept so
it can be used by a future stx release.

This includes the following changes:
- Add recipe and patches for version 13.2.2
- Set preferred version to 13.2.2 (see the example below)
- Rename ceph_%.bbappend to ceph_14.1.0.bbappend
- Update the conf file and scripts from stx 2.0 to stx 3.0
- Remove files that are no longer needed from stx-integ
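
For illustration only, the version pin could look roughly like the
following in a distro or layer .conf file (the exact location of the
pin is an assumption, not part of this change):

    # sketch: prefer the 13.2.2 recipe over 14.1.0
    PREFERRED_VERSION_ceph = "13.2.2"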

fix #483
fix #497

Signed-off-by: Jackie Huang <jackie.huang@windriver.com>
Signed-off-by: Babak Sarashki <Babak.SarAshki@windriver.com>
Date: 2020-06-17 08:55:01 -07:00

[global]
# Unique ID for the cluster.
fsid = %CLUSTER_UUID%
# Public network that the monitor is connected to, e.g. 128.224.0.0/16
#public network = 127.0.0.1/24
# For version 0.55 and beyond, you must explicitly enable
# or disable authentication with "auth" entries in [global].
auth_cluster_required = cephx
auth_service_required = cephx
auth_client_required = cephx
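# Size of the FileStore journal, in MB.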
osd_journal_size = 1024
# Uncomment the following line if you are mounting with ext4
# filestore xattr use omap = true
# Number of object replicas. Write each object 2 times.
# The cluster cannot reach an active + clean state until there are enough
# OSDs to handle the number of copies of an object. In this case, it
# requires at least 2 OSDs.
osd_pool_default_size = 2
# Allow writing one copy in a degraded state.
osd_pool_default_min_size = 1
# Ensure you have a realistic number of placement groups. We recommend
# approximately 100 per OSD. E.g., total number of OSDs multiplied by 100
# divided by the number of replicas (i.e., osd pool default size). So for
# 2 OSDs and osd pool default size = 2, we'd recommend approximately
# (100 * 2) / 2 = 100.
osd_pool_default_pg_num = 64
osd_pool_default_pgp_num = 64
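# CRUSH bucket type used to separate replicas (1 = host in the default
# CRUSH map).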
osd_crush_chooseleaf_type = 1
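# Run each daemon as the user/group that owns its data directory.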
setuser match path = /var/lib/ceph/$type/$cluster-$id
# Override Jewel default of 2 reporters. StarlingX has replication factor 2
mon_osd_min_down_reporters = 1
# Use Hammer's report interval default value
osd_mon_report_interval_max = 120
# Configure max PGs per OSD to cover worst-case scenario of all possible
# StarlingX deployments i.e. AIO-SX with one OSD. Otherwise using
# the default value provided by Ceph Mimic leads to "too many PGs per OSD"
# health warning as the pools needed by stx-openstack are being created.
mon_max_pg_per_osd = 2048
osd_max_pg_per_osd_hard_ratio = 1.2
[osd]
osd_mkfs_type = xfs
osd_mkfs_options_xfs = "-f"
osd_mount_options_xfs = "rw,noatime,inode64,logbufs=8,logbsize=256k"
[mon]
mon warn on legacy crush tunables = false
# Quiet new warnings on move to Hammer
mon pg warn max per osd = 2048
mon pg warn max object skew = 0
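# Manager modules enabled when the cluster is first brought up.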
mgr initial modules = restful