tripleo-heat-templates/puppet/services/ceph-mon.yaml
Giulio Fidente c243560d20 Revert "Use optimal (instead of default) tunables for Ceph on upgrade"
This reverts commit 5e9f855f7c.

The above would have fixed the issue, but it is only possible if the
OSDs are upgraded first. We probably need to disable the tunables
warning completely instead. [1]

1. http://docs.ceph.com/docs/master/rados/operations/crush-map/#warning-when-tunables-are-non-optimal

Change-Id: I429e9f7f220a844b5ca61734287e514c96ea5e6c
2017-07-20 18:43:50 +02:00
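As a reference for the follow-up, the docs linked in [1] describe a ceph.conf
option that disables the warning entirely; a minimal sketch, assuming that
option and not part of this change:

    [mon]
    mon warn on legacy crush tunables = false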

heat_template_version: pike

description: >
  Ceph Monitor service.
parameters:
  ServiceData:
    default: {}
    description: Dictionary packing service data
    type: json
  ServiceNetMap:
    default: {}
    description: Mapping of service_name -> network name. Typically set
                 via parameter_defaults in the resource registry. This
                 mapping overrides those in ServiceNetMapDefaults.
    type: json
  DefaultPasswords:
    default: {}
    type: json
  RoleName:
    default: ''
    description: Role name on which the service is applied
    type: string
  RoleParameters:
    default: {}
    description: Parameters specific to the role
    type: json
  EndpointMap:
    default: {}
    description: Mapping of service endpoint -> protocol. Typically set
                 via parameter_defaults in the resource registry.
    type: json
  CephIPv6:
    default: False
    type: boolean
  CephMonKey:
    description: The Ceph monitors key. Can be created with ceph-authtool --gen-print-key.
    type: string
    hidden: true
  CinderRbdPoolName:
    default: volumes
    type: string
  ManilaCephFSDataPoolName:
    default: manila_data
    type: string
  ManilaCephFSMetadataPoolName:
    default: manila_metadata
    type: string
  CinderBackupRbdPoolName:
    default: backups
    type: string
  GlanceRbdPoolName:
    default: images
    type: string
  GnocchiRbdPoolName:
    default: metrics
    type: string
  NovaRbdPoolName:
    default: vms
    type: string
  CephPools:
    description: >
      It can be used to override settings for one of the predefined pools, or to create
      additional ones. Example:
      {
        "volumes": {
          "size": 5,
          "pg_num": 128,
          "pgp_num": 128
        }
      }
    default: {}
    type: json
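  # A sketch of how CephPools might be supplied from an environment file,
  # overriding the predefined "volumes" pool and adding a hypothetical
  # "archive" pool (example values only, not shipped defaults):
  #
  #   parameter_defaults:
  #     CephPools:
  #       volumes:
  #         size: 5
  #         pg_num: 128
  #         pgp_num: 128
  #       archive:
  #         pg_num: 32
  #         pgp_num: 32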
  CephValidationRetries:
    type: number
    default: 40
    description: Number of retry attempts for Ceph validation
  CephValidationDelay:
    type: number
    default: 30
    description: Interval (in seconds) in between validation checks
  MonitoringSubscriptionCephMon:
    default: 'overcloud-ceph-mon'
    type: string
  CephPoolDefaultSize:
    description: default minimum replication for RBD copies
    type: number
    default: 3

resources:
  CephBase:
    type: ./ceph-base.yaml
    properties:
      ServiceData: {get_param: ServiceData}
      ServiceNetMap: {get_param: ServiceNetMap}
      DefaultPasswords: {get_param: DefaultPasswords}
      EndpointMap: {get_param: EndpointMap}
      RoleName: {get_param: RoleName}
      RoleParameters: {get_param: RoleParameters}

outputs:
  role_data:
    description: Role data for the Ceph Monitor service.
    value:
      service_name: ceph_mon
      monitoring_subscription: {get_param: MonitoringSubscriptionCephMon}
      config_settings:
        map_merge:
          - get_attr: [CephBase, role_data, config_settings]
          - ceph::profile::params::ms_bind_ipv6: {get_param: CephIPv6}
            ceph::profile::params::mon_key: {get_param: CephMonKey}
            ceph::profile::params::osd_pool_default_pg_num: 32
            ceph::profile::params::osd_pool_default_pgp_num: 32
            ceph::profile::params::osd_pool_default_size: {get_param: CephPoolDefaultSize}
            # repeat returns items in a list, so we need to map_merge twice
            tripleo::profile::base::ceph::mon::ceph_pools:
              map_merge:
                - map_merge:
                    repeat:
                      for_each:
                        <%pool%>:
                          - {get_param: CinderRbdPoolName}
                          - {get_param: ManilaCephFSDataPoolName}
                          - {get_param: ManilaCephFSMetadataPoolName}
                          - {get_param: CinderBackupRbdPoolName}
                          - {get_param: NovaRbdPoolName}
                          - {get_param: GlanceRbdPoolName}
                          - {get_param: GnocchiRbdPoolName}
                      template:
                        <%pool%>:
                          pg_num: "%{hiera('ceph::profile::params::osd_pool_default_pg_num')}"
                          pgp_num: "%{hiera('ceph::profile::params::osd_pool_default_pgp_num')}"
                          size: "%{hiera('ceph::profile::params::osd_pool_default_size')}"
                - {get_param: CephPools}
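            # The repeat above expands to a list with one single-key map per
            # pool (e.g. [{volumes: {...}}, {backups: {...}}, ...]); the inner
            # map_merge folds that list into a single map and the outer
            # map_merge applies any CephPools overrides on top of it.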
            tripleo.ceph_mon.firewall_rules:
              '110 ceph_mon':
                dport:
                  - 6789
      service_config_settings:
        get_attr: [CephBase, role_data, service_config_settings]
      step_config: |
        include ::tripleo::profile::base::ceph::mon
      upgrade_batch_tasks:
        # Note we perform these tasks in list order, but they are all step0 so
        # we can perform a rolling upgrade of all mon nodes in step0, then a
        # rolling upgrade of all osd nodes in step1
        - name: Check status
          tags: step0,validation
          shell: ceph health | egrep -sq "HEALTH_OK|HEALTH_WARN"
        - name: Stop CephMon
          tags: step0
          service:
            name: ceph-mon@{{ ansible_hostname }}
            state: stopped
        - name: Update Ceph packages
          tags: step0
          yum:
            name: ceph-mon
            state: latest
        - name: Start CephMon
          tags: step0
          service:
            name: ceph-mon@{{ ansible_hostname }}
            state: started
        # ceph-ansible
        # https://github.com/ceph/ceph-ansible/blob/master/infrastructure-playbooks/rolling_update.yml#L149-L157
        - name: Wait for the monitor to join the quorum...
          tags: step0,ceph_quorum_validation
          shell: |
            ceph -s | grep monmap | sed 's/.*quorum//' | egrep -sq {{ ansible_hostname }}
          register: ceph_quorum_nodecheck
          until: ceph_quorum_nodecheck.rc == 0
          retries: {get_param: CephValidationRetries}
          delay: {get_param: CephValidationDelay}
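        # The task below keeps the cluster on the default (legacy) tunables
        # profile; as noted in the commit message above, switching to optimal
        # tunables would only be safe once the OSDs have been upgraded too.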
        - name: ceph osd crush tunables default
          tags: step0
          shell: ceph osd crush tunables default