Francesco Pantano c9cd67124f
Add filestore to bluestore migration tags
Currently ceph-ansible assumes the cluster
(which is updated to ceph4) is using bluestore
just because it's the default in ceph-ansible.
This patch adds some missing tags to make sure
the variables contained in the heat stack are
properly regenerated and the fs2bs playbook can
be executed with the right variables.

Change-Id: I373636bbc9d296f41b63de76719320727d3f4fb9
2020-07-03 17:21:04 +02:00
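
The template below defaults CephAnsibleDisksConfig to osd_objectstore: bluestore, which is why a cluster still on filestore has to declare its objectstore explicitly so the regenerated group vars match the real deployment. A minimal sketch of such an override, assuming a hypothetical environment file (e.g. filestore-osds.yaml) passed to the deploy command:

parameter_defaults:
  CephAnsibleDisksConfig:
    devices:
      - /dev/vdb                    # example device list; match the actual OSD disks
    osd_scenario: lvm
    osd_objectstore: filestore      # declare filestore explicitly instead of the bluestore default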

heat_template_version: rocky

description: >
  Ceph OSD service.

parameters:
  ServiceData:
    default: {}
    description: Dictionary packing service data
    type: json
  ServiceNetMap:
    default: {}
    description: Mapping of service_name -> network name. Typically set
                 via parameter_defaults in the resource registry. This
                 mapping overrides those in ServiceNetMapDefaults.
    type: json
  DefaultPasswords:
    default: {}
    type: json
  RoleName:
    default: ''
    description: Role name on which the service is applied
    type: string
  RoleParameters:
    default: {}
    description: Parameters specific to the role
    type: json
  EndpointMap:
    default: {}
    description: Mapping of service endpoint -> protocol. Typically set
                 via parameter_defaults in the resource registry.
    type: json
  CephAnsibleDisksConfig:
    type: json
    description: Disks config settings for ceph-ansible
    default:
      devices:
        - /dev/vdb
      osd_scenario: lvm
      osd_objectstore: bluestore
  CephEnableDashboard:
    type: boolean
    default: false
    description: Parameter used to trigger the dashboard deployment.

conditions:
  dashboard_enabled: {equals: [{get_param: CephEnableDashboard}, true]}

resources:
  CephBase:
    type: ./ceph-base.yaml
    properties:
      ServiceData: {get_param: ServiceData}
      ServiceNetMap: {get_param: ServiceNetMap}
      DefaultPasswords: {get_param: DefaultPasswords}
      EndpointMap: {get_param: EndpointMap}
      RoleName: {get_param: RoleName}
      RoleParameters: {get_param: RoleParameters}

  CephOsdAnsibleVars:
    type: OS::Heat::Value
    properties:
      type: json
      value:
        vars: {get_param: CephAnsibleDisksConfig}

outputs:
  role_data:
    description: Role data for the Ceph OSD service.
    value:
      service_name: ceph_osd
      firewall_rules:
        '111 ceph_osd':
          dport:
            list_concat:
              - - '6800-7300'
              - if:
                - dashboard_enabled
                - - '9100'
                - []
      upgrade_tasks:
        - name: Check legacy Ceph hieradata
          tags: validation
          when: step|int == 0
          shell: test "nil" == "$(hiera -c /etc/puppet/hiera.yaml ceph::profile::params::osds)"
      service_config_settings:
        collectd:
          tripleo.collectd.plugins.ceph_osd:
            - ceph
          collectd::plugin::ceph::daemons: []
      puppet_config:
        config_image: ''
        config_volume: ''
        step_config: ''
      docker_config: {}
      external_deploy_tasks:
        list_concat:
          - {get_attr: [CephBase, role_data, external_deploy_tasks]}
          - - name: ceph_osd_external_deploy_init
              when: step|int == 1
              tags:
                - ceph
                - ceph_fstobs
                - ceph_systemd
              block:
                - name: set ceph-ansible group vars osds
                  set_fact:
                    ceph_ansible_group_vars_osds: {get_attr: [CephOsdAnsibleVars, value, vars]}
                - name: generate ceph-ansible group vars osds
                  copy:
                    dest: "{{playbook_dir}}/ceph-ansible/group_vars/osds.yml"
                    content: "{{ceph_ansible_group_vars_osds|to_nice_yaml}}"
      external_update_tasks: {get_attr: [CephBase, role_data, external_update_tasks]}
      external_upgrade_tasks: {get_attr: [CephBase, role_data, external_upgrade_tasks]}
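
For reference, with the default CephAnsibleDisksConfig above, the ceph_osd_external_deploy_init block would render a group_vars/osds.yml roughly like the sketch below (to_nice_yaml output; key order and list indentation may differ):

devices:
  - /dev/vdb
osd_objectstore: bluestore
osd_scenario: lvm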