Remove the ceph-ansible references from master

ceph-ansible is no longer used in master; both Ceph deployment
and day-2 operations are now handled by the new cephadm tool.
This change aligns the tripleo-heat-templates tree so that only
cephadm is used to deploy Ceph (see [1]).

[1] https://blueprints.launchpad.net/tripleo/+spec/tripleo-ceph

Change-Id: Ib87615112264bd65e38ed7fb4440cca62f067de5
Francesco Pantano 2021-06-24 16:58:15 +02:00
parent c904c7555c
commit dd5a5fc240
25 changed files with 42 additions and 2244 deletions
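
Taken together, the environment hunks below amount to two deployer-facing changes: the Ceph service templates now resolve to deployment/cephadm instead of deployment/ceph-ansible, and the CephAnsible* tuning parameters are dropped in favour of a small set of cephadm-oriented ones. A minimal sketch of the resulting scenario environment, with values copied from the hunks below (the grouping and comments are editorial, not part of the change):

    resource_registry:
      OS::TripleO::Services::CephMon: ../../deployment/cephadm/ceph-mon.yaml
      OS::TripleO::Services::CephMgr: ../../deployment/cephadm/ceph-mgr.yaml
      OS::TripleO::Services::CephOSD: ../../deployment/cephadm/ceph-osd.yaml
      OS::TripleO::Services::CephClient: ../../deployment/cephadm/ceph-client.yaml

    parameter_defaults:
      # Host path where the Ceph config files are expected
      CephConfigPath: "/etc/ceph"
      # Vars file used when configuring the overcloud Ceph clients
      CephClientConfigVars: "{{ playbook_dir }}/cephadm/ceph_client.yml"
      # Use FQDNs in the generated Ceph spec (assumption based on the name)
      CephSpecFqdn: true
      # Replaces CephAnsibleDisksConfig/lvm_volumes for OSD device selection
      CephOsdSpec:
        data_devices:
          paths:
            - /dev/ceph_vg/ceph_lv_data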

View File

@ -1,9 +1,9 @@
resource_registry:
OS::TripleO::Services::CephMgr: ../../deployment/ceph-ansible/ceph-mgr.yaml
OS::TripleO::Services::CephMon: ../../deployment/ceph-ansible/ceph-mon.yaml
OS::TripleO::Services::CephOSD: ../../deployment/ceph-ansible/ceph-osd.yaml
OS::TripleO::Services::CephGrafana: ../../deployment/ceph-ansible/ceph-grafana.yaml
OS::TripleO::Services::CephClient: ../../deployment/ceph-ansible/ceph-client.yaml
OS::TripleO::Services::CephMgr: ../../deployment/cephadm/ceph-mgr.yaml
OS::TripleO::Services::CephMon: ../../deployment/cephadm/ceph-mon.yaml
OS::TripleO::Services::CephOSD: ../../deployment/cephadm/ceph-osd.yaml
OS::TripleO::Services::CephGrafana: ../../deployment/cephadm/ceph-grafana.yaml
OS::TripleO::Services::CephClient: ../../deployment/cephadm/ceph-client.yaml
OS::TripleO::Services::CeilometerAgentCentral: ../../deployment/ceilometer/ceilometer-agent-central-container-puppet.yaml
OS::TripleO::Services::CeilometerAgentNotification: ../../deployment/ceilometer/ceilometer-agent-notification-container-puppet.yaml
OS::TripleO::Services::CeilometerAgentIpmi: ../../deployment/ceilometer/ceilometer-agent-ipmi-container-puppet.yaml
@ -119,38 +119,17 @@ parameter_defaults:
Debug: true
DockerPuppetDebug: True
CephAnsibleDisksConfig:
osd_objectstore: bluestore
osd_scenario: lvm
lvm_volumes:
- data: ceph_lv_data
data_vg: ceph_vg
db: ceph_lv_db
db_vg: ceph_vg
wal: ceph_lv_wal
wal_vg: ceph_vg
CephPoolDefaultPgNum: 32
CephAnsibleRepo: "tripleo-centos-ceph-nautilus"
CephPoolDefaultPgNum: 8
CephPoolDefaultSize: 1
CephPools:
- name: altrbd
pg_num: 8
rule_name: replicated_rule
CephAnsibleExtraConfig:
centos_package_dependencies: []
ceph_osd_docker_memory_limit: '1g'
ceph_mds_docker_memory_limit: '1g'
handler_health_mon_check_retries: 10
handler_health_mon_check_delay: 20
#NOTE: These ID's and keys should be regenerated for
# a production deployment. What is here is suitable for
# developer and CI testing only.
CephClusterFSID: '4b5c8c0a-ff60-454b-a1b4-9747aa737d19'
CephClientKey: 'AQC+vYNXgDAgAhAAc8UoYt+OTz5uhV7ItLdwUw=='
CephAnsiblePlaybookVerbosity: 1
CephAnsibleEnvironmentVariables:
ANSIBLE_SSH_RETRIES: 4
DEFAULT_FORKS: 3
CephEnableDashboard: true
NovaEnableRbdBackend: true
CinderEnableRbdBackend: true
@ -189,3 +168,10 @@ parameter_defaults:
- tty6
# Remove ContainerCli once this scenario is tested on CentOS8
ContainerCli: podman
CephConfigPath: "/etc/ceph"
CephClientConfigVars: "{{ playbook_dir }}/cephadm/ceph_client.yml"
CephSpecFqdn: true
CephOsdSpec:
data_devices:
paths:
- /dev/ceph_vg/ceph_lv_data
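
The CephOsdSpec map above replaces the old CephAnsibleDisksConfig/lvm_volumes layout and, presumably, ends up rendered into a ceph orchestrator OSD service spec. A rough sketch of what such a spec could look like for this scenario (service_id, placement and the spec layout are assumptions for illustration, not taken from this change):

    service_type: osd
    service_id: default_drive_group          # illustrative name
    placement:
      hosts:
        - standalone.localdomain             # illustrative host
    spec:
      data_devices:
        paths:
          - /dev/ceph_vg/ceph_lv_data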

View File

@ -1,8 +1,8 @@
resource_registry:
OS::TripleO::Services::CephClient: ../../deployment/ceph-ansible/ceph-client.yaml
OS::TripleO::Services::CephMgr: ../../deployment/ceph-ansible/ceph-mgr.yaml
OS::TripleO::Services::CephMon: ../../deployment/ceph-ansible/ceph-mon.yaml
OS::TripleO::Services::CephOSD: ../../deployment/ceph-ansible/ceph-osd.yaml
OS::TripleO::Services::CephClient: ../../deployment/cephadm/ceph-client.yaml
OS::TripleO::Services::CephMgr: ../../deployment/cephadm/ceph-mgr.yaml
OS::TripleO::Services::CephMon: ../../deployment/cephadm/ceph-mon.yaml
OS::TripleO::Services::CephOSD: ../../deployment/cephadm/ceph-osd.yaml
OS::TripleO::Services::CinderApi: OS::Heat::None
OS::TripleO::Services::CinderBackup: OS::Heat::None
OS::TripleO::Services::CinderScheduler: OS::Heat::None
@ -75,34 +75,13 @@ parameter_defaults:
nova::compute::libvirt::virt_type: qemu
octavia::controller::connection_retry_interval: 10
Debug: true
CephAnsibleDisksConfig:
osd_objectstore: bluestore
osd_scenario: lvm
lvm_volumes:
- data: ceph_lv_data
data_vg: ceph_vg
db: ceph_lv_db
db_vg: ceph_vg
wal: ceph_lv_wal
wal_vg: ceph_vg
CephPoolDefaultPgNum: 32
CephPoolDefaultPgNum: 8
CephPoolDefaultSize: 1
CephPools:
- name: altrbd
pg_num: 8
rule_name: replicated_rule
CephAnsibleExtraConfig:
centos_package_dependencies: []
ceph_osd_docker_memory_limit: '1g'
ceph_mds_docker_memory_limit: '1g'
handler_health_mon_check_retries: 10
handler_health_mon_check_delay: 20
CephAnsibleSkipTags: ''
#NOTE: These ID's and keys should be regenerated for
# a production deployment. What is here is suitable for
# developer and CI testing only.
CephClusterFSID: '4b5c8c0a-ff60-454b-a1b4-9747aa737d19'
CephAnsibleRepo: "tripleo-centos-ceph-nautilus"
CephClusterName: mycephcluster
CephClientKey: 'AQC+vYNXgDAgAhAAc8UoYt+OTz5uhV7ItLdwUw=='
NovaEnableRbdBackend: true
@ -117,3 +96,10 @@ parameter_defaults:
OctaviaCaKeyPassphrase: 'upstreamci'
OctaviaGenerateCerts: true
ContainerCli: podman
CephConfigPath: "/etc/ceph"
CephClientConfigVars: "{{ playbook_dir }}/cephadm/ceph_client.yml"
CephSpecFqdn: true
CephOsdSpec:
data_devices:
paths:
- /dev/ceph_vg/ceph_lv_data

View File

@ -1,8 +1,8 @@
resource_registry:
OS::TripleO::Services::CephClient: ../../deployment/ceph-ansible/ceph-client.yaml
OS::TripleO::Services::CephMgr: ../../deployment/ceph-ansible/ceph-mgr.yaml
OS::TripleO::Services::CephMon: ../../deployment/ceph-ansible/ceph-mon.yaml
OS::TripleO::Services::CephOSD: ../../deployment/ceph-ansible/ceph-osd.yaml
OS::TripleO::Services::CephClient: ../../deployment/cephadm/ceph-client.yaml
OS::TripleO::Services::CephMgr: ../../deployment/cephadm/ceph-mgr.yaml
OS::TripleO::Services::CephMon: ../../deployment/cephadm/ceph-mon.yaml
OS::TripleO::Services::CephOSD: ../../deployment/cephadm/ceph-osd.yaml
OS::TripleO::Services::CinderApi: OS::Heat::None
OS::TripleO::Services::CinderBackup: OS::Heat::None
OS::TripleO::Services::CinderScheduler: OS::Heat::None
@ -33,37 +33,15 @@ parameter_defaults:
8CF1A7EA-7B4B-4433-AC83-17675514B1B8: {"foo2": "bar2"}
Debug: true
HideSensitiveLogs: false
CephAnsibleDisksConfig:
osd_objectstore: bluestore
osd_scenario: lvm
lvm_volumes:
- data: ceph_lv_data
data_vg: ceph_vg
db: ceph_lv_db
db_vg: ceph_vg
wal: ceph_lv_wal
wal_vg: ceph_vg
CephPoolDefaultPgNum: 32
CephPoolDefaultPgNum: 8
CephPoolDefaultSize: 1
CephAnsibleExtraConfig:
centos_package_dependencies: []
ceph_osd_docker_memory_limit: '1g'
ceph_mds_docker_memory_limit: '1g'
mon_host_v1: { 'enabled': False }
handler_health_mon_check_retries: 10
handler_health_mon_check_delay: 20
#NOTE: These ID's and keys should be regenerated for
# a production deployment. What is here is suitable for
# developer and CI testing only.
CephClusterFSID: '4b5c8c0a-ff60-454b-a1b4-9747aa737d19'
CephClientKey: 'AQC+vYNXgDAgAhAAc8UoYt+OTz5uhV7ItLdwUw=='
CephAnsiblePlaybookVerbosity: 1
CephAnsibleEnvironmentVariables:
ANSIBLE_SSH_RETRIES: 4
DEFAULT_FORKS: 3
NovaEnableRbdBackend: true
CinderEnableRbdBackend: true
CephAnsibleRepo: "tripleo-centos-ceph-nautilus"
CinderBackupBackend: ceph
GlanceBackend: rbd
CinderEnableIscsiBackend: false
@ -90,3 +68,10 @@ parameter_defaults:
- tty5
- tty6
ContainerCli: podman
CephConfigPath: "/etc/ceph"
CephClientConfigVars: "{{ playbook_dir }}/cephadm/ceph_client.yml"
CephSpecFqdn: true
CephOsdSpec:
data_devices:
paths:
- /dev/ceph_vg/ceph_lv_data

View File

@ -1,760 +0,0 @@
heat_template_version: wallaby
description: >
Ceph base service. Shared by all Ceph services.
parameters:
ServiceData:
default: {}
description: Dictionary packing service data
type: json
ServiceNetMap:
default: {}
description: Mapping of service_name -> network name. Typically set
via parameter_defaults in the resource registry. Use
parameter_merge_strategies to merge it with the defaults.
type: json
CephDashboardAdminPassword:
description: Admin password for the dashboard component
type: string
hidden: true
CephGrafanaAdminPassword:
description: Admin password for grafana component
type: string
hidden: true
RoleName:
default: ''
description: Role name on which the service is applied
type: string
RoleParameters:
default: {}
description: Parameters specific to the role
type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
via parameter_defaults in the resource registry.
type: json
StackUpdateType:
type: string
description: >
Type of update, to differentiate between UPGRADE and UPDATE cases
when StackAction is UPDATE (both are the same stack action).
constraints:
- allowed_values: ['', 'UPGRADE']
default: ''
NodeDataLookup:
type: json
default: {}
description: json containing per-node configuration map
DeploymentServerBlacklist:
default: []
type: comma_delimited_list
description: >
List of server hostnames to blacklist from any triggered deployments.
ContainerCli:
type: string
default: 'podman'
description: CLI tool used to manage containers.
constraints:
- allowed_values: ['docker', 'podman']
CephEnableDashboard:
type: boolean
default: false
description: Parameter used to trigger the dashboard deployment.
NodeExporterContainerImage:
description: Ceph NodeExporter container image
default: ''
type: string
CephAnsiblePlaybook:
type: comma_delimited_list
description: >
List of paths to the ceph-ansible playbooks to execute. If not
specified, the playbook will be determined automatically
depending on type of operation being performed
(deploy/update/upgrade).
default: ['default']
CephAnsibleExtraConfig:
type: json
description: Extra vars for the ceph-ansible playbook
default: {}
CephAnsibleSkipTags:
type: string
description: List of ceph-ansible tags to skip
default: 'package-install,with_pkg'
CephConfigOverrides:
type: json
description: Extra config settings to dump into ceph.conf
default: {}
CephClusterFSID:
type: string
description: The Ceph cluster FSID. Must be a UUID.
CephClusterName:
type: string
default: ceph
description: The Ceph cluster name.
constraints:
- allowed_pattern: "[a-zA-Z0-9]+"
description: >
The Ceph cluster name must be at least 1 character and contain only
letters and numbers.
CephMsgrSecureMode:
type: boolean
default: false
description: >
Enable Ceph msgr2 secure mode to enable on-wire encryption between Ceph
daemons and also between Ceph clients and daemons.
CephPoolDefaultPgNum:
description: default pg_num to use for the RBD pools
type: number
default: 16
CephPools:
description: >
It can be used to override settings for one of the predefined pools, or to create
additional ones. Example:
[{"name": "volumes", "pg_num": 64, "rule_name": "replicated_rule"},
{"name": "vms", "target_size_ratio": "0.4", "rule_name": "replicated_rule"}]
default: []
type: json
CinderRbdPoolName:
default: volumes
type: string
CinderRbdExtraPools:
default: []
description: >
List of extra Ceph pools for use with RBD backends for Cinder. An
extra Cinder RBD backend driver is created for each pool in the
list. This is in addition to the standard RBD backend driver
associated with the CinderRbdPoolName.
type: comma_delimited_list
CinderBackupRbdPoolName:
default: backups
type: string
GlanceRbdPoolName:
default: images
type: string
GlanceBackend:
default: swift
description: The short name of the Glance backend to use. Should be one
of swift, rbd, cinder, or file
type: string
constraints:
- allowed_values: ['swift', 'file', 'rbd', 'cinder']
GnocchiRbdPoolName:
default: metrics
type: string
NovaRbdPoolName:
default: vms
type: string
description: The pool name for RBD backend ephemeral storage.
tags:
- role_specific
CephClientKey:
description: The Ceph client key. Can be created with ceph-authtool --gen-print-key.
type: string
hidden: true
constraints:
- allowed_pattern: "^[a-zA-Z0-9+/]{38}==$"
CephClientUserName:
default: openstack
type: string
CephRgwClientName:
default: radosgw
type: string
CephRgwKey:
description: The cephx key for the radosgw client. Can be created
with ceph-authtool --gen-print-key.
type: string
hidden: true
constraints:
- allowed_pattern: "^[a-zA-Z0-9+/]{38}==$"
CephPoolDefaultSize:
description: default minimum replication for RBD copies
type: number
default: 3
ManilaCephFSDataPoolName:
default: manila_data
type: string
ManilaCephFSMetadataPoolName:
default: manila_metadata
type: string
ManilaCephFSShareBackendName:
default: cephfs
type: string
ManilaCephFSCephFSAuthId:
default: manila
type: string
CephManilaClientKey:
default: ''
description: The Ceph client key. Can be created with ceph-authtool --gen-print-key.
type: string
hidden: true
constraints:
- allowed_pattern: "^[a-zA-Z0-9+/]{38}==$"
CephIPv6:
default: False
type: boolean
SwiftPassword:
description: The password for the swift service account
type: string
hidden: true
ContainerCephDaemonImage:
description: image
type: string
CephAnsiblePlaybookVerbosity:
default: 1
description: The number of '-v', '-vv', etc. passed to ansible-playbook command
type: number
constraints:
- range: { min: 1, max: 5 }
CephAnsibleEnvironmentVariables:
default: {}
description: Mapping of Ansible environment variables to override defaults.
type: json
# start DEPRECATED options for compatibility with older versions
SwiftFetchDirGetTempurl:
default: ''
description: A temporary Swift URL to download the fetch_directory from.
type: string
SwiftFetchDirPutTempurl:
default: ''
description: A temporary Swift URL to upload the fetch_directory to.
type: string
LocalCephAnsibleFetchDirectoryBackup:
default: ''
description: Filesystem path on undercloud to persist a copy of the data
from the ceph-ansible fetch directory. Used as an alternative
to backing up the fetch_directory in Swift. Path must be
writable and readable by the user running ansible from
config-download, e.g. the mistral user in the mistral-executor
container is able to read/write to /var/lib/mistral/ceph_fetch
type: string
CephOsdPercentageMin:
default: 0
description: The minimum percentage of Ceph OSDs which must be running and
in the Ceph cluster, according to ceph osd stat, for the
deployment not to fail. Used to catch deployment errors early.
Set this value to 0 to disable this check. Deprecated in Wallaby
because of the move from ceph-ansible to cephadm; the latter only
brings up OSDs out of band and deployment does not block while
waiting for them to come up, thus we cannot do this anymore.
type: number
# end DEPRECATED options for compatibility with older versions
ContainerImageRegistryCredentials:
type: json
hidden: true
description: |
Mapping of image registry hosts to login credentials. Must be in the following example format
docker.io:
username: pa55word
'192.0.2.1:8787':
registry_username: password
default: {}
CephExtraKeys:
type: json
hidden: true
description: |
List of maps describing extra keys which will be created on the deployed
Ceph cluster. Uses ceph-ansible/library/ceph_key.py ansible module. Each
item in the list must be in the following example format
- name: "client.glance"
caps:
mgr: "allow *"
mon: "profile rbd"
osd: "profile rbd pool=images"
key: "AQBRgQ9eAAAAABAAv84zEilJYZPNuJ0Iwn9Ndg=="
mode: "0600"
default: []
CephAnsibleRepo:
type: string
description: |
The repository that should be used to install the right ceph-ansible
package. This value can be used by tripleo-validations to double check
the right ceph-ansible version is installed.
default: 'centos-ceph-nautilus'
CephAnsibleWarning:
type: boolean
description: |
In particular scenarios we want this validation to show the warning but
don't fail because the package is installed on the system but repos are
disabled.
default: true
CephExternalMultiConfig:
type: json
hidden: true
description: |
List of maps describing extra overrides which will be applied when configuring
extra external Ceph clusters. If this list is non-empty, ceph-ansible will run
an extra count(list) times using the same parameters as the first run except
each parameter within each map will override the defaults. If the following
were used, the second run would configure the overcloud to also use the ceph2
cluster with all the previous parameters except /etc/ceph/ceph2.conf would have
a mon_host entry containing the value of external_cluster_mon_ips below, and
not the default CephExternalMonHost. Subsequent ceph-ansible runs are restricted
to just ceph clients. CephExternalMultiConfig may not be used to deploy additional
internal Ceph clusters within one Heat stack. The map for each list should contain
not tripleo-heat-template parameters but ceph-ansible parameters.
- cluster: 'ceph2'
fsid: 'e2cba068-5f14-4b0f-b047-acf375c0004a'
external_cluster_mon_ips: '172.18.0.5,172.18.0.6,172.18.0.7'
keys:
- name: "client.openstack"
caps:
mgr: "allow *"
mon: "profile rbd"
osd: "osd: profile rbd pool=volumes, profile rbd pool=backups, profile rbd pool=vms, profile rbd pool=images"
key: "AQCwmeRcAAAAABAA6SQU/bGqFjlfLro5KxrB1Q=="
mode: "0600"
dashboard_enabled: false
default: []
CinderEnableRbdBackend:
default: false
description: Whether to enable or not the Rbd backend for Cinder
type: boolean
NovaEnableRbdBackend:
default: false
description: Whether to enable the Rbd backend for Nova ephemeral storage.
type: boolean
tags:
- role_specific
CinderBackupBackend:
default: swift
description: The short name of the Cinder Backup backend to use.
type: string
constraints:
- allowed_values: ['swift', 'ceph', 'nfs', 'gcs', 's3']
GnocchiBackend:
default: swift
description: The short name of the Gnocchi backend to use. Should be one
of swift, rbd, file or s3.
type: string
constraints:
- allowed_values: ['swift', 'file', 'rbd', 's3']
EnableInternalTLS:
type: boolean
default: false
CephConfigPath:
type: string
default: "/var/lib/tripleo-config/ceph"
description: |
The path where the Ceph Cluster config files are stored on the host.
CephAnsibleSkipClient:
description: |
This boolean (when true) prevents the ceph-ansible client role execution
by adding the ceph-ansible tag 'ceph_client' to the --skip-tags list.
type: boolean
default: true
parameter_groups:
- label: deprecated
description: Do not use deprecated params, they will be removed.
parameters:
- LocalCephAnsibleFetchDirectoryBackup
- SwiftFetchDirGetTempurl
- SwiftFetchDirPutTempurl
- CephIPv6
- CephOsdPercentageMin
conditions:
custom_registry_host:
yaql:
data: {get_param: ContainerCephDaemonImage}
expression: $.data.split('/')[0].matches('(\.|:)')
perform_upgrade:
equals: [{get_param: StackUpdateType}, 'UPGRADE']
ceph_ansible_skip_tags_set:
not:
equals:
- {get_param: CephAnsibleSkipTags}
- ''
ceph_authenticated_registry:
and:
- not:
yaql:
data:
cred: {get_param: ContainerImageRegistryCredentials}
ns:
yaql:
expression: let(location => $.data.rightSplit(':', 1)[0]) -> regex('(?:https?://)?(.*?)/(.*)').split($location)[1]
data: {get_param: ContainerCephDaemonImage}
expression: let(c => $.data.cred) -> $c.get($.data.ns, {}).keys().last(default => "").isEmpty()
- not:
yaql:
data:
cred: {get_param: ContainerImageRegistryCredentials}
ns:
yaql:
expression: let(location => $.data.rightSplit(':', 1)[0]) -> regex('(?:https?://)?(.*?)/(.*)').split($location)[1]
data: {get_param: ContainerCephDaemonImage}
expression: let(c => $.data.cred) -> $c.get($.data.ns, {}).values().last(default => "").isEmpty()
is_ipv6:
equals:
- {get_param: [ServiceData, net_ip_version_map, {get_param: [ServiceNetMap, CephMonNetwork]}]}
- 6
resources:
ContainerImageUrlParts:
type: OS::Heat::Value
properties:
type: json
value:
host:
if:
- custom_registry_host
- yaql:
expression: let(location => $.data.rightSplit(':', 1)[0]) -> regex('(?:https?://)?(.*?)/(.*)').split($location)[1]
data: {get_param: ContainerCephDaemonImage}
- docker.io
image:
if:
- custom_registry_host
- yaql:
expression: let(location => $.data.rightSplit(':', 1)[0]) -> regex('(?:https?://)?(.*?)/(.*)').split($location)[2]
data: {get_param: ContainerCephDaemonImage}
- yaql:
expression: $.data.rightSplit(':', 1)[0]
data: {get_param: ContainerCephDaemonImage}
image_tag:
yaql:
expression: $.data.rightSplit(':', 1)[1]
data: {get_param: ContainerCephDaemonImage}
DefaultCephConfigOverrides:
type: OS::Heat::Value
properties:
type: json
value:
vars:
global:
osd_pool_default_size: {get_param: CephPoolDefaultSize}
osd_pool_default_pg_num: {get_param: CephPoolDefaultPgNum}
osd_pool_default_pgp_num: {get_param: CephPoolDefaultPgNum}
CephBasePoolVars:
type: OS::Heat::Value
properties:
type: json
value:
vars:
gnocchi_pool:
name: {get_param: GnocchiRbdPoolName}
enabled:
if:
- equals:
- {get_param: GnocchiBackend}
- 'rbd'
- true
- false
nova_pool:
name: {get_param: NovaRbdPoolName}
enabled: {get_param: NovaEnableRbdBackend}
glance_pool:
name: {get_param: GlanceRbdPoolName}
enabled:
if:
- equals:
- {get_param: GlanceBackend}
- 'rbd'
- true
- false
cinder_pool:
name: {get_param: CinderRbdPoolName}
enabled: {get_param: CinderEnableRbdBackend}
cinder_extra_pools: {get_param: CinderRbdExtraPools}
cinder_backup_pool:
name: {get_param: CinderBackupRbdPoolName}
enabled:
if:
- equals:
- {get_param: CinderBackupBackend}
- 'ceph'
- true
- false
extra_pools: {get_param: CephPools}
pg_num: {get_param: CephPoolDefaultPgNum}
CephBaseAnsibleVars:
type: OS::Heat::Value
properties:
type: json
value:
vars:
containerized_deployment: true
user_config: true
ceph_stable: true
ceph_origin: distro
openstack_config: true
pools: []
ntp_service_enabled: true
generate_fsid: false
fsid: { get_param: CephClusterFSID }
cluster: { get_param: CephClusterName }
configure_firewall: false
ceph_docker_registry: {get_attr: [ContainerImageUrlParts, value, host]}
ceph_docker_image: {get_attr: [ContainerImageUrlParts, value, image]}
ceph_docker_image_tag: {get_attr: [ContainerImageUrlParts, value, image_tag]}
ceph_docker_registry_auth:
if:
- ceph_authenticated_registry
- true
- false
ceph_docker_registry_username:
yaql:
data:
cred: {get_param: ContainerImageRegistryCredentials}
ns: {get_attr: [ContainerImageUrlParts, value, host]}
expression: let(c => $.data.cred) -> $c.get($.data.ns, {}).keys().last(default => "")
ceph_docker_registry_password:
yaql:
data:
cred: {get_param: ContainerImageRegistryCredentials}
ns: {get_attr: [ContainerImageUrlParts, value, host]}
expression: let(c => $.data.cred) -> $c.get($.data.ns, {}).values().last(default => "")
public_network:
list_join:
- ','
- get_param: [ServiceData, net_cidr_map, {get_param: [ServiceNetMap, CephMonNetwork]}]
monitor_address_block:
list_join:
- ','
- get_param: [ServiceData, net_cidr_map, {get_param: [ServiceNetMap, CephMonNetwork]}]
cluster_network:
list_join:
- ','
- get_param: [ServiceData, net_cidr_map, {get_param: [ServiceNetMap, CephClusterNetwork]}]
ip_version:
if:
- is_ipv6
- ipv6
- ipv4
dashboard_enabled: {get_param: CephEnableDashboard}
cephfs: {get_param: ManilaCephFSShareBackendName}
node_exporter_container_image: {get_param: NodeExporterContainerImage}
outputs:
role_data:
description: Role data for the Ceph base service.
value:
service_name: ceph_base
upgrade_tasks: []
puppet_config: {}
docker_config: {}
config_settings: {}
external_deploy_tasks:
- name: ceph_base_external_deploy_init
when: step|int == 1
tags:
- ceph
- ceph_systemd
- ceph_fstobs
block:
- name: ensure ceph-ansible is installed
include_role:
name: ceph
tasks_from: ceph-ansible-installed
vars:
fail_without_ceph_ansible: {get_param: CephAnsibleWarning}
ceph_ansible_repo: {get_param: CephAnsibleRepo}
tags:
- opendev-validation
- opendev-validation-ceph
- name: Check if Ceph dependencies are installed
import_role:
role: ceph
tasks_from: ceph-dependencies-installed
tags:
- opendev-validation
- opendev-validation-ceph
vars:
fail_without_deps: true
packages: lvm2
tripleo_delegate_to: "{{ groups['overcloud'] | default([]) }}"
- name: set tripleo-ceph-prepare facts
set_fact:
ceph_pools: {get_attr: [CephBasePoolVars, value, vars]}
manila_pools:
data: {get_param: ManilaCephFSDataPoolName}
metadata: {get_param: ManilaCephFSMetadataPoolName}
data_pg_num: {get_param: CephPoolDefaultPgNum}
metadata_pg_num: {get_param: CephPoolDefaultPgNum}
ceph_keys:
openstack_client:
name: {get_param: CephClientUserName}
key: {get_param: CephClientKey}
manila:
name: {get_param: ManilaCephFSCephFSAuthId}
key: {get_param: CephManilaClientKey}
radosgw:
name: {get_param: CephRgwClientName}
key: {get_param: CephRgwKey}
extra_keys: {get_param: CephExtraKeys}
ceph_default_overrides:
if:
- {get_param: CephMsgrSecureMode}
- map_merge:
- {get_attr: [DefaultCephConfigOverrides, value, vars]}
- global:
ms_cluster_mode: secure
ms_service_mode: secure
ms_client_mode: secure
- {get_attr: [DefaultCephConfigOverrides, value, vars]}
ceph_config_overrides: {get_param: CephConfigOverrides}
- name: set ceph-ansible facts
set_fact:
blacklisted_hostnames: {get_param: DeploymentServerBlacklist}
ceph_ansible_group_vars_all:
if:
- {get_param: CephEnableDashboard}
- map_merge:
- if:
- {get_param: EnableInternalTLS}
- dashboard_tls_external: true
dashboard_grafana_api_no_ssl_verify: true
- {get_attr: [CephBaseAnsibleVars, value, vars]}
- dashboard_admin_password: {get_param: CephDashboardAdminPassword}
grafana_admin_password: {get_param: CephGrafanaAdminPassword}
- {get_attr: [CephBaseAnsibleVars, value, vars]}
ceph_ansible_extra_vars:
map_merge:
- {get_param: CephAnsibleExtraConfig}
- ireallymeanit: "yes"
fetch_directory: "{{playbook_dir}}/ceph-ansible/fetch_dir"
container_binary: {get_param: ContainerCli}
uuid_content:
yaql:
expression: dict($.data.keys().select($.toLower()).zip($.data.values()))
data: {get_param: NodeDataLookup}
ceph_external_multi_config: {get_param: CephExternalMultiConfig}
- name: include common ceph defaults
include_role:
name: tripleo_ceph_common
- name: create ceph-ansible working directory
include_role:
name: tripleo_ceph_work_dir
tasks_from: prepare
- name: prepare for ceph-ansible uuid gathering
include_role:
name: tripleo_ceph_uuid
tasks_from: prepare
- name: ceph_base_external_deploy_task
when: step|int == 2
tags:
- ceph
- ceph_systemd
- ceph_fstobs
block:
- name: set ceph-ansible facts
set_fact:
ceph_ansible_skip_tags:
if:
- {get_param: CephAnsibleSkipClient}
- list_join:
- ','
- - get_param: CephAnsibleSkipTags
- 'ceph_client'
- {get_param: CephAnsibleSkipTags}
ceph_ansible_playbook_verbosity: {get_param: CephAnsiblePlaybookVerbosity}
ceph_ansible_playbooks_param: {get_param: CephAnsiblePlaybook}
ceph_ansible_environment_variables:
yaql:
data: {get_param: CephAnsibleEnvironmentVariables}
expression: $.data.items().select($.join('='))
- name: Refresh ceph-ansible group_vars variables
include_role:
name: tripleo_ceph_work_dir
tasks_from: prepare
- name: get ssh private key
include_role:
name: tripleo_ceph_work_dir
tasks_from: get_ssh_private_key
- name: run nodes-uuid
include_role:
name: tripleo_ceph_uuid
tasks_from: gather
- name: run ceph-ansible
include_role:
name: tripleo_ceph_run_ansible
- name: configure ceph clients
include_role:
name: tripleo_ceph_client
vars:
tripleo_ceph_client_config_home: {get_param: CephConfigPath}
when: {get_param: CephAnsibleSkipClient}
- include_role:
name: tripleo_ceph_client
name: tripleo client role
vars:
tripleo_ceph_client_config_home: {get_param: CephConfigPath}
multiple: "{{ item }}"
loop: "{{ ceph_external_multi_config }}"
when:
- ceph_external_multi_config is defined
- {get_param: CephAnsibleSkipClient}
- name: ensure ceph health is OK before proceeding
import_role:
role: ceph
tasks_from: ceph-health
vars:
fail_on_ceph_health_err: true
osd_percentage_min: {get_param: CephOsdPercentageMin}
tripleo_delegate_to: "{{ groups['ceph_mon'] | default([]) }}"
tags:
- opendev-validation
- opendev-validation-ceph
external_update_tasks:
- when: step|int == 0
tags: ceph
block:
- name: set ceph_ansible_playbooks_default
set_fact:
ceph_ansible_playbooks_default: ["/usr/share/ceph-ansible/infrastructure-playbooks/rolling_update.yml"]
external_upgrade_tasks:
- when: step|int == 0
tags:
- never
- ceph_systemd
block:
- name: stop if _limit is unset
fail:
msg: The ceph-ansible systemd units migration playbook limit is not set; please use -e ceph_ansible_limit=##nodename##
when: (ceph_ansible_limit is not defined) or (ceph_ansible_limit|length == 0)
- name: set ceph_ansible_playbooks_default
set_fact:
ceph_ansible_playbooks_default: ["/usr/share/ceph-ansible/infrastructure-playbooks/docker-to-podman.yml"]
- when: step|int == 0
tags:
- never
- ceph_fstobs
block:
- name: stop if _limit is unset
fail:
msg: The ceph-ansible filestore to bluestore playbook limit is not set; please use -e ceph_ansible_limit=##nodename##
when: (ceph_ansible_limit is not defined) or (ceph_ansible_limit|length == 0)
- name: ensure ceph health is OK before proceeding
import_role:
role: ceph
tasks_from: ceph-health
vars:
fail_on_ceph_health_err: true
fail_on_ceph_health_warn: true
osd_percentage_min: 0
tripleo_delegate_to: "{{ groups['ceph_mon'] | default([]) }}"
tags:
- ceph_health
- name: set ceph_ansible_playbooks_default
set_fact:
ceph_ansible_playbooks_default: ["/usr/share/ceph-ansible/infrastructure-playbooks/filestore-to-bluestore.yml"]
- when: step|int == 0
tags: ceph
block:
- name: set ceph_ansible_playbooks_default
set_fact:
ceph_ansible_playbooks_default: ["/usr/share/ceph-ansible/infrastructure-playbooks/rolling_update.yml"]
post_upgrade_tasks:
- name: Clean puppet-ceph package
when:
- (step | int) == 3
package:
name: puppet-ceph
state: absent
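
The deleted base template above wired Ceph health validation into the deploy flow (the ceph-health role plus the CephOsdPercentageMin threshold); with cephadm those checks no longer run inline during deployment. A manual equivalent could still be run as an ad-hoc task along these lines (a sketch only, not part of this change; it reuses the ceph_mon inventory group and delegation pattern seen above):

    - name: check ceph health on the first monitor (manual replacement for the removed validation)
      become: true
      delegate_to: "{{ groups['ceph_mon'][0] }}"
      # cephadm shell runs the ceph CLI from the cluster's own container image
      command: cephadm shell -- ceph -s
      register: ceph_status
      changed_when: false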

View File

@ -1,71 +0,0 @@
heat_template_version: wallaby
description: >
Ceph Client service.
parameters:
ServiceData:
default: {}
description: Dictionary packing service data
type: json
ServiceNetMap:
default: {}
description: Mapping of service_name -> network name. Typically set
via parameter_defaults in the resource registry. Use
parameter_merge_strategies to merge it with the defaults.
type: json
RoleName:
default: ''
description: Role name on which the service is applied
type: string
RoleParameters:
default: {}
description: Parameters specific to the role
type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
via parameter_defaults in the resource registry.
type: json
resources:
CephBase:
type: ./ceph-base.yaml
properties:
ServiceData: {get_param: ServiceData}
ServiceNetMap: {get_param: ServiceNetMap}
EndpointMap: {get_param: EndpointMap}
RoleName: {get_param: RoleName}
RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
description: Role data for the Ceph Client service.
value:
service_name: ceph_client
upgrade_tasks: []
puppet_config: {}
docker_config: {}
external_deploy_tasks:
list_concat:
- {get_attr: [CephBase, role_data, external_deploy_tasks]}
- - name: ceph_client_external_deploy_init
when: step|int == 1
tags:
- ceph
- ceph_fstobs
- ceph_systemd
block:
- name: set ceph-ansible group vars clients
set_fact:
ceph_ansible_group_vars_clients: {}
ceph_client_config_overrides:
client:
rbd_concurrent_management_ops: 20
- name: generate ceph-ansible group vars clients
copy:
dest: "{{playbook_dir}}/ceph-ansible/group_vars/clients.yml"
content: "{{ceph_ansible_group_vars_clients|to_nice_yaml}}"
external_update_tasks: {get_attr: [CephBase, role_data, external_update_tasks]}
external_upgrade_tasks: {get_attr: [CephBase, role_data, external_upgrade_tasks]}
config_settings: {}

View File

@ -1,73 +0,0 @@
heat_template_version: wallaby
description: >
Ceph External service.
parameters:
ServiceData:
default: {}
description: Dictionary packing service data
type: json
ServiceNetMap:
default: {}
description: Mapping of service_name -> network name. Typically set
via parameter_defaults in the resource registry. Use
parameter_merge_strategies to merge it with the defaults.
type: json
RoleName:
default: ''
description: Role name on which the service is applied
type: string
RoleParameters:
default: {}
description: Parameters specific to the role
type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
via parameter_defaults in the resource registry.
type: json
CephExternalMonHost:
default: ''
type: string
description: List of externally managed Ceph Mon Host IPs. Only used for external Ceph deployments.
resources:
CephBase:
type: ./ceph-base.yaml
properties:
ServiceData: {get_param: ServiceData}
ServiceNetMap: {get_param: ServiceNetMap}
EndpointMap: {get_param: EndpointMap}
RoleName: {get_param: RoleName}
RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
description: Role data for the Ceph External service.
value:
service_name: ceph_client
upgrade_tasks: []
puppet_config: {}
docker_config: {}
config_settings: {}
external_deploy_tasks:
list_concat:
- {get_attr: [CephBase, role_data, external_deploy_tasks]}
- - name: ceph_external_external_deploy_init
when: step|int == 1
tags:
- ceph
- ceph_fstobs
- ceph_systemd
block:
- name: set ceph-ansible group vars clients
set_fact:
ceph_ansible_group_vars_clients:
external_cluster_mon_ips: {get_param: CephExternalMonHost}
- name: generate ceph-ansible group vars clients
copy:
dest: "{{playbook_dir}}/ceph-ansible/group_vars/clients.yml"
content: "{{ceph_ansible_group_vars_clients|to_nice_yaml}}"
external_update_tasks: {get_attr: [CephBase, role_data, external_update_tasks]}
external_upgrade_tasks: {get_attr: [CephBase, role_data, external_upgrade_tasks]}

View File

@ -1,186 +0,0 @@
heat_template_version: wallaby
description: >
Ceph Grafana service.
parameters:
ServiceData:
default: {}
description: Dictionary packing service data
type: json
ServiceNetMap:
default: {}
description: Mapping of service_name -> network name. Typically set
via parameter_defaults in the resource registry. Use
parameter_merge_strategies to merge it with the defaults.
type: json
RoleName:
default: ''
description: Role name on which the service is applied
type: string
RoleParameters:
default: {}
description: Parameters specific to the role
type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
via parameter_defaults in the resource registry.
type: json
CephGrafanaAdminUser:
default: 'admin'
description: Admin user for grafana component
type: string
GrafanaPlugins:
default: ['vonage-status-panel', 'grafana-piechart-panel']
type: comma_delimited_list
description: >
List of plugins to enable on the grafana container
GrafanaContainerImage:
description: Grafana container image
type: string
GrafanaDashboardsPath:
default: ''
description: ceph dashboards templates built for grafana
type: string
GrafanaDashboardPort:
type: number
default: 3100
description: Parameter that defines the ceph grafana port.
GrafanaDataSource:
default: 'Dashboard'
description: Grafana datasource
type: string
PrometheusContainerImage:
description: Ceph Prometheus container image
type: string
AlertManagerContainerImage:
description: Ceph AlertManager container image
type: string
EnableInternalTLS:
type: boolean
default: false
CertificateKeySize:
type: string
default: '2048'
description: Specifies the private key size used when creating the
certificate.
GrafanaCertificateKeySize:
type: string
default: ''
description: Override the private key size used when creating the
certificate for this service
conditions:
key_size_override_set:
not: {equals: [{get_param: GrafanaCertificateKeySize}, '']}
resources:
CephBase:
type: ./ceph-base.yaml
properties:
ServiceData: {get_param: ServiceData}
ServiceNetMap: {get_param: ServiceNetMap}
EndpointMap: {get_param: EndpointMap}
RoleName: {get_param: RoleName}
RoleParameters: {get_param: RoleParameters}
CephGrafanaAnsibleVars:
type: OS::Heat::Value
properties:
type: json
value:
vars:
grafana_admin_user: {get_param: CephGrafanaAdminUser}
grafana_container_image: {get_param: GrafanaContainerImage}
grafana_dashboards_path: {get_param: GrafanaDashboardsPath}
grafana_datasource: {get_param: GrafanaDataSource}
grafana_plugins: {get_param: GrafanaPlugins}
grafana_port: {get_param: GrafanaDashboardPort}
prometheus_container_image: {get_param: PrometheusContainerImage}
prometheus_port: 9092
alertmanager_container_image: {get_param: AlertManagerContainerImage}
outputs:
role_data:
description: Role data for the Ceph Dashboard service.
value:
service_name: ceph_grafana
firewall_rules:
'123 ceph_dashboard':
dport:
- {get_param: GrafanaDashboardPort}
- 9090
- 9092
- 9093
- 9094
- 9100
- 9283
upgrade_tasks: []
puppet_config: {}
docker_config: {}
external_deploy_tasks:
list_concat:
- {get_attr: [CephBase, role_data, external_deploy_tasks]}
- - name: ceph_dashboard_external_deploy_init
when: step == '1'
tags:
- ceph
- ceph_fstobs
- ceph_systemd
block:
- name: set ceph-ansible group vars
set_fact:
ceph_ansible_group_vars_grafana:
if:
- {get_param: EnableInternalTLS}
- map_merge:
- {get_attr: [CephGrafanaAnsibleVars, value, vars]}
- grafana_crt: '/etc/pki/tls/certs/ceph_grafana.crt'
grafana_key: '/etc/pki/tls/private/ceph_grafana.key'
- {get_attr: [CephGrafanaAnsibleVars, value, vars]}
- name: generate ceph-ansible grafana-server group vars
copy:
dest: "{{playbook_dir}}/ceph-ansible/group_vars/grafana-server.yml"
content: "{{ceph_ansible_group_vars_grafana|to_nice_yaml}}"
external_update_tasks: {get_attr: [CephBase, role_data, external_update_tasks]}
external_upgrade_tasks: {get_attr: [CephBase, role_data, external_upgrade_tasks]}
metadata_settings:
if:
- {get_param: EnableInternalTLS}
- - service: ceph_grafana
network: {get_param: [ServiceNetMap, CephGrafanaNetwork]}
type: node
deploy_steps_tasks:
- name: Certificate generation
when:
- step|int == 1
- enable_internal_tls
block:
- include_role:
name: linux-system-roles.certificate
vars:
certificate_requests:
- name: ceph_grafana
dns:
str_replace:
template: "{{fqdn_$NETWORK}}"
params:
$NETWORK: {get_param: [ServiceNetMap, CephGrafanaNetwork]}
principal:
str_replace:
template: "ceph_grafana/{{fqdn_$NETWORK}}@{{idm_realm}}"
params:
$NETWORK: {get_param: [ServiceNetMap, CephGrafanaNetwork]}
run_after: |
# Get grafana systemd unit
grafana_unit=$(systemctl list-unit-files | awk '/grafana/ {print $1}')
# Restart the grafana systemd unit
if [ -z "$grafana_unit" ]; then
systemctl restart "$grafana_unit"
fi
key_size:
if:
- key_size_override_set
- {get_param: GrafanaCertificateKeySize}
- {get_param: CertificateKeySize}
ca: ipa

View File

@ -1,81 +0,0 @@
heat_template_version: wallaby
description: >
Ceph Metadata service.
parameters:
ServiceData:
default: {}
description: Dictionary packing service data
type: json
ServiceNetMap:
default: {}
description: Mapping of service_name -> network name. Typically set
via parameter_defaults in the resource registry. Use
parameter_merge_strategies to merge it with the defaults.
type: json
RoleName:
default: ''
description: Role name on which the service is applied
type: string
RoleParameters:
default: {}
description: Parameters specific to the role
type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
via parameter_defaults in the resource registry.
type: json
CephEnableDashboard:
type: boolean
default: false
description: Parameter used to trigger the dashboard deployment.
resources:
CephBase:
type: ./ceph-base.yaml
properties:
ServiceData: {get_param: ServiceData}
ServiceNetMap: {get_param: ServiceNetMap}
EndpointMap: {get_param: EndpointMap}
RoleName: {get_param: RoleName}
RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
description: Role data for the Ceph Metadata service.
value:
service_name: ceph_mds
firewall_rules:
'112 ceph_mds':
dport:
list_concat:
- - '6800-7300'
- if:
- {get_param: CephEnableDashboard}
- - '9100'
- []
upgrade_tasks: {get_attr: [CephBase, role_data, upgrade_tasks]}
post_upgrade_tasks: {get_attr: [CephBase, role_data, post_upgrade_tasks]}
puppet_config: {}
docker_config: {}
external_deploy_tasks:
list_concat:
- {get_attr: [CephBase, role_data, external_deploy_tasks]}
- - name: ceph_mds_external_deploy_init
when: step|int == 1
tags:
- ceph
- ceph_fstobs
- ceph_systemd
block:
- name: set ceph-ansible group vars mdss
set_fact:
ceph_ansible_group_vars_mdss: {}
- name: generate ceph-ansible group vars mdss
copy:
dest: "{{playbook_dir}}/ceph-ansible/group_vars/mdss.yml"
content: "{{ceph_ansible_group_vars_mdss|to_nice_yaml}}"
external_update_tasks: {get_attr: [CephBase, role_data, external_update_tasks]}
external_upgrade_tasks: {get_attr: [CephBase, role_data, external_upgrade_tasks]}

View File

@ -1,179 +0,0 @@
heat_template_version: wallaby
description: >
Ceph Manager service.
parameters:
ServiceData:
default: {}
description: Dictionary packing service data
type: json
ServiceNetMap:
default: {}
description: Mapping of service_name -> network name. Typically set
via parameter_defaults in the resource registry. Use
parameter_merge_strategies to merge it with the defaults.
type: json
RoleName:
default: ''
description: Role name on which the service is applied
type: string
RoleParameters:
default: {}
description: Parameters specific to the role
type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
via parameter_defaults in the resource registry.
type: json
CephDashboardAdminUser:
default: 'admin'
description: Admin user for the dashboard component
type: string
CephEnableDashboard:
type: boolean
default: false
description: Parameter used to trigger the dashboard deployment.
CephDashboardPort:
type: number
default: 8444
description: Parameter that defines the ceph dashboard port.
CephDashboardAdminRO:
type: boolean
default: true
description: Parameter used to set a read-only admin user.
EnableInternalTLS:
type: boolean
default: false
CertificateKeySize:
type: string
default: '2048'
description: Specifies the private key size used when creating the
certificate.
CephCertificateKeySize:
type: string
default: ''
description: Override the private key size used when creating the
certificate for this service
conditions:
internal_tls_enabled:
and:
- {get_param: CephEnableDashboard}
- {get_param: EnableInternalTLS}
key_size_override_set:
not: {equals: [{get_param: CephCertificateKeySize}, '']}
resources:
CephBase:
type: ./ceph-base.yaml
properties:
ServiceData: {get_param: ServiceData}
ServiceNetMap: {get_param: ServiceNetMap}
EndpointMap: {get_param: EndpointMap}
RoleName: {get_param: RoleName}
RoleParameters: {get_param: RoleParameters}
CephMgrAnsibleVars:
type: OS::Heat::Value
properties:
type: json
value:
vars:
ceph_mgr_docker_extra_env: '-e MGR_DASHBOARD=0'
dashboard_admin_user: {get_param: CephDashboardAdminUser}
dashboard_rgw_api_host: {get_param: [EndpointMap, CephRgwInternal, host]}
dashboard_rgw_api_port: {get_param: [EndpointMap, CephRgwInternal, port]}
dashboard_rgw_api_scheme: {get_param: [EndpointMap, CephRgwInternal, protocol]}
dashboard_rgw_api_no_ssl_verify: false
dashboard_port: {get_param: CephDashboardPort}
dashboard_admin_user_ro: {get_param: CephDashboardAdminRO}
dashboard_protocol:
if:
- internal_tls_enabled
- 'https'
- 'http'
outputs:
role_data:
description: Role data for the Ceph Manager service.
value:
service_name: ceph_mgr
firewall_rules:
'113 ceph_mgr':
dport:
list_concat:
- - '6800-7300'
- if:
- {get_param: CephEnableDashboard}
- - {get_param: CephDashboardPort}
upgrade_tasks: []
puppet_config: {}
docker_config: {}
external_deploy_tasks:
list_concat:
- {get_attr: [CephBase, role_data, external_deploy_tasks]}
- - name: ceph_mgr_external_deploy_init
when: step|int == 1
tags:
- ceph
- ceph_fstobs
- ceph_systemd
block:
- name: set ceph-ansible group vars mgrs
set_fact:
ceph_ansible_group_vars_mgrs:
if:
- internal_tls_enabled
- map_merge:
- {get_attr: [CephMgrAnsibleVars, value, vars]}
- dashboard_crt: /etc/pki/tls/certs/ceph_dashboard.crt
dashboard_key: /etc/pki/tls/private/ceph_dashboard.key
- {get_attr: [CephMgrAnsibleVars, value, vars]}
- name: generate ceph-ansible group vars mgrs
copy:
dest: "{{playbook_dir}}/ceph-ansible/group_vars/mgrs.yml"
content: "{{ceph_ansible_group_vars_mgrs|to_nice_yaml}}"
external_update_tasks: {get_attr: [CephBase, role_data, external_update_tasks]}
external_upgrade_tasks: {get_attr: [CephBase, role_data, external_upgrade_tasks]}
metadata_settings:
if:
- internal_tls_enabled
- - service: ceph_dashboard
network: {get_param: [ServiceNetMap, CephDashboardNetwork]}
type: node
deploy_steps_tasks:
if:
- internal_tls_enabled
- - name: Certificate generation
when: step|int == 1
block:
- include_role:
name: linux-system-roles.certificate
vars:
certificate_requests:
- name: ceph_dashboard
dns:
str_replace:
template: "{{fqdn_$NETWORK}}"
params:
$NETWORK: {get_param: [ServiceNetMap, CephDashboardNetwork]}
principal:
str_replace:
template: "ceph_dashboard/{{fqdn_$NETWORK}}@{{idm_realm}}"
params:
$NETWORK: {get_param: [ServiceNetMap, CephDashboardNetwork]}
run_after: |
# Get mgr systemd unit
mgr_unit=$(systemctl list-units | awk '/ceph-mgr/ {print $1}')
# Restart the mgr systemd unit
if [ -n "$mgr_unit" ]; then
systemctl restart "$mgr_unit"
fi
key_size:
if:
- key_size_override_set
- {get_param: CephCertificateKeySize}
- {get_param: CertificateKeySize}
ca: ipa

View File

@ -1,122 +0,0 @@
heat_template_version: wallaby
description: >
Ceph Monitor service.
parameters:
ServiceData:
default: {}
description: Dictionary packing service data
type: json
ServiceNetMap:
default: {}
description: Mapping of service_name -> network name. Typically set
via parameter_defaults in the resource registry. Use
parameter_merge_strategies to merge it with the defaults.
type: json
RoleName:
default: ''
description: Role name on which the service is applied
type: string
RoleParameters:
default: {}
description: Parameters specific to the role
type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
via parameter_defaults in the resource registry.
type: json
CephValidationRetries:
type: number
default: 40
description: Number of retry attempts for Ceph validation
CephValidationDelay:
type: number
default: 30
description: Interval (in seconds) in between validation checks
CephEnableDashboard:
type: boolean
default: false
description: Parameter used to trigger the dashboard deployment.
CephClusterName:
type: string
default: ceph
description: The Ceph cluster name.
constraints:
- allowed_pattern: "[a-zA-Z0-9]+"
description: >
The Ceph cluster name must be at least 1 character and contain only
letters and numbers.
resources:
CephBase:
type: ./ceph-base.yaml
properties:
ServiceData: {get_param: ServiceData}
ServiceNetMap: {get_param: ServiceNetMap}
EndpointMap: {get_param: EndpointMap}
RoleName: {get_param: RoleName}
RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
description: Role data for the Ceph Monitor service.
value:
service_name: ceph_mon
firewall_rules:
'110 ceph_mon':
dport:
list_concat:
- - 6789
- - 3300
- if:
- {get_param: CephEnableDashboard}
- - '9100'
service_config_settings:
collectd:
tripleo.collectd.plugins.ceph_osd:
- ceph
collectd::plugin::ceph::daemons: []
upgrade_tasks: {get_attr: [CephBase, role_data, upgrade_tasks]}
post_upgrade_tasks: {get_attr: [CephBase, role_data, post_upgrade_tasks]}
puppet_config: {}
docker_config: {}
external_deploy_tasks:
list_concat:
- {get_attr: [CephBase, role_data, external_deploy_tasks]}
- - name: ceph_mon_external_deploy_init
when: step|int == 1
tags:
- ceph
- ceph_fstobs
- ceph_systemd
block:
- name: set ceph-ansible group vars mons
set_fact:
ceph_ansible_group_vars_mons: {}
- name: generate ceph-ansible group vars mons
copy:
dest: "{{playbook_dir}}/ceph-ansible/group_vars/mons.yml"
content: "{{ceph_ansible_group_vars_mons|to_nice_yaml}}"
- - name: ceph_base_external_deploy_finish
# NOTE(gfidente): enforce minimum client version to mimic to use RBD clone v2 with Cinder
when: step|int == 3
become: true
tags:
- ceph
block:
- name: Get ceph-mon container ID
# Supposed to work with cephadm and ceph-ansible formats
# ceph-4b5c8c0a-ff60-454b-a1b4-9747aa737d19-mon.standalone.localdomain
# ceph-mon-standalone
command: "{{ container_cli }} ps -q -f 'name=ceph-?(.*)-mon.*'"
register: ceph_mon_id
delegate_to: "{{ groups['ceph_mon'][0] }}"
- name: Enforce minimum Ceph clients version to Mimic
command: "{{ container_cli }} exec {{ ceph_mon_id.stdout_lines[0] }} ceph --cluster {{ cluster }} osd set-require-min-compat-client mimic"
delegate_to: "{{ groups['ceph_mon'][0] }}"
vars:
cluster: {get_param: CephClusterName}
external_update_tasks: {get_attr: [CephBase, role_data, external_update_tasks]}
external_upgrade_tasks: {get_attr: [CephBase, role_data, external_upgrade_tasks]}
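
The monitor template above also enforced a minimum client compat level (mimic) after deployment so Cinder can use RBD clone v2; that task goes away with this change. If the same guarantee is still wanted, a one-off task roughly equivalent to the removed one would be (a sketch, not part of this change):

    - name: enforce minimum Ceph client compat level (manual equivalent of the removed task)
      become: true
      delegate_to: "{{ groups['ceph_mon'][0] }}"
      command: cephadm shell -- ceph osd set-require-min-compat-client mimic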

View File

@ -1,137 +0,0 @@
heat_template_version: wallaby
description: >
Ceph NFS Ganesha service.
parameters:
ServiceData:
default: {}
description: Dictionary packing service data
type: json
ServiceNetMap:
default: {}
description: Mapping of service_name -> network name. Typically set
via parameter_defaults in the resource registry. Use
parameter_merge_strategies to merge it with the defaults.
type: json
RoleName:
default: ''
description: Role name on which the service is applied
type: string
RoleParameters:
default: {}
description: Parameters specific to the role
type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
via parameter_defaults in the resource registry.
type: json
ManilaCephFSCephFSAuthId:
type: string
default: 'manila'
ManilaCephFSNFSIdmapConf:
type: string
default: '/etc/ganesha/idmap.conf'
ManilaCephFSNFSIdmapOverrides:
type: json
description: Extra config settings to dump into idmap.conf
default: {}
resources:
CephBase:
type: ./ceph-base.yaml
properties:
ServiceData: {get_param: ServiceData}
ServiceNetMap: {get_param: ServiceNetMap}
EndpointMap: {get_param: EndpointMap}
RoleName: {get_param: RoleName}
RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
description: Role data for the Ceph NFS Ganesha service.
value:
service_name: ceph_nfs
firewall_rules:
'120 ceph_nfs':
dport:
# We support only NFS 4.1 to start
- 2049
upgrade_tasks:
- name: Create hiera data to upgrade ceph_nfs in a stepwise manner.
when:
- step|int == 1
- cluster_recreate|bool
block:
- name: set ceph_nfs upgrade node facts in a single-node environment
set_fact:
ceph_nfs_short_node_names_upgraded: "{{ ceph_nfs_short_node_names }}"
cacheable: no
when: groups['ceph_nfs'] | length <= 1
- name: set ceph_nfs upgrade node facts from the limit option
set_fact:
ceph_nfs_short_node_names_upgraded: "{{ ceph_nfs_short_node_names_upgraded|default([]) + [item.split('.')[0]] }}"
cacheable: no
when:
- groups['ceph_nfs'] | length > 1
- item.split('.')[0] in ansible_limit.split(':')
loop: "{{ ceph_nfs_short_node_names | default([]) }}"
- fail:
msg: >
You can't upgrade ceph_nfs without staged
upgrade. You need to use the limit option in order
to do so.
when: >-
ceph_nfs_short_node_names_upgraded is not defined or
ceph_nfs_short_node_names_upgraded | length == 0
- debug:
msg: "Prepare ceph_nfs upgrade for {{ ceph_nfs_short_node_names_upgraded }}"
- name: add the ceph_nfs short name to hiera data for the upgrade.
include_role:
name: tripleo_upgrade_hiera
tasks_from: set.yml
vars:
tripleo_upgrade_key: ceph_nfs_short_node_names_override
tripleo_upgrade_value: "{{ceph_nfs_short_node_names_upgraded}}"
- name: remove the extra hiera data needed for the upgrade.
include_role:
name: tripleo_upgrade_hiera
tasks_from: remove.yml
vars:
tripleo_upgrade_key: ceph_nfs_short_node_names_override
when: ceph_nfs_short_node_names_upgraded | length == ceph_nfs_short_node_names | length
step_config: 'include tripleo::profile::pacemaker::ceph_nfs'
puppet_config: {}
# step_config seems to be ignored if docker_config is present
#docker_config: {}
external_deploy_tasks:
list_concat:
- {get_attr: [CephBase, role_data, external_deploy_tasks]}
- - name: ceph_nfs_external_deploy_init
when: step|int == 1
tags:
- ceph
- ceph_fstobs
- ceph_systemd
block:
- name: set ceph-ansible group vars nfss
set_fact:
ceph_ansible_group_vars_nfss:
ceph_nfs_bind_addr: {get_param: [EndpointMap, GaneshaInternal, host_nobrackets]}
ceph_nfs_enable_service: false
ceph_nfs_use_pacemaker: true
ceph_nfs_dynamic_exports: true
ceph_nfs_service_suffix: pacemaker
nfs_obj_gw: false
ceph_nfs_rados_backend: true
ceph_nfs_disable_caching: true
ceph_nfs_ceph_user: {get_param: ManilaCephFSCephFSAuthId}
ceph_nfs_idmap_conf: {get_param: ManilaCephFSNFSIdmapConf}
ceph_nfs_idmap_overrides: {get_param: ManilaCephFSNFSIdmapOverrides}
- name: generate ceph-ansible group vars nfss
copy:
dest: "{{playbook_dir}}/ceph-ansible/group_vars/nfss.yml"
content: "{{ceph_ansible_group_vars_nfss|to_nice_yaml}}"
external_update_tasks: {get_attr: [CephBase, role_data, external_update_tasks]}
external_upgrade_tasks: {get_attr: [CephBase, role_data, external_upgrade_tasks]}

View File

@ -1,137 +0,0 @@
heat_template_version: wallaby
description: >
Ceph OSD service.
parameters:
ServiceData:
default: {}
description: Dictionary packing service data
type: json
ServiceNetMap:
default: {}
description: Mapping of service_name -> network name. Typically set
via parameter_defaults in the resource registry. Use
parameter_merge_strategies to merge it with the defaults.
type: json
RoleName:
default: ''
description: Role name on which the service is applied
type: string
RoleParameters:
default: {}
description: Parameters specific to the role
type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
via parameter_defaults in the resource registry.
type: json
CephAnsibleDisksConfig:
type: json
description: Disks config settings.
default:
devices: []
osd_scenario: lvm
osd_objectstore: bluestore
CephEnableDashboard:
type: boolean
default: false
description: Parameter used to trigger the dashboard deployment.
resources:
CephBase:
type: ./ceph-base.yaml
properties:
ServiceData: {get_param: ServiceData}
ServiceNetMap: {get_param: ServiceNetMap}
EndpointMap: {get_param: EndpointMap}
RoleName: {get_param: RoleName}
RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
description: Role data for the Ceph OSD service.
value:
service_name: ceph_osd
firewall_rules:
'111 ceph_osd':
dport:
list_concat:
- - '6800-7300'
- if:
- {get_param: CephEnableDashboard}
- - '9100'
service_config_settings:
collectd:
tripleo.collectd.plugins.ceph_osd:
- ceph
collectd::plugin::ceph::daemons: []
upgrade_tasks:
list_concat:
- {get_attr: [CephBase, role_data, upgrade_tasks]}
- - block:
- name: Check for docker cli
stat:
path: "/run/docker.sock"
register: check_docker_cli
check_mode: false
- name: Set noout flag
shell: "{{ container_client }} exec -u root ceph-mon-${HOSTNAME%%.*} ceph osd set {{ item }}"
become: true
with_items:
- noout
- norecover
- nobackfill
- norebalance
- nodeep-scrub
vars:
container_client: |-
{% set container_client = 'podman' %}
{% if check_docker_cli.stat.exists|bool %}
{% set container_client = 'docker' %}
{% endif %}
{{ container_client }}
delegate_to: "{{ ceph_mon_short_bootstrap_node_name }}"
tags:
- never
- system_upgrade
- system_upgrade_prepare
when:
- step|int == 1
- upgrade_leapp_enabled
post_upgrade_tasks:
list_concat:
- {get_attr: [CephBase, role_data, post_upgrade_tasks]}
- - name: Unset noout flag
shell: "{{ container_cli }} exec -u root ceph-mon-${HOSTNAME%%.*} ceph osd unset {{ item }}"
with_items:
- noout
- norecover
- nobackfill
- norebalance
- nodeep-scrub
when: step|int == 2
become: true
delegate_to: "{{ ceph_mon_short_bootstrap_node_name }}"
puppet_config: {}
docker_config: {}
external_deploy_tasks:
list_concat:
- {get_attr: [CephBase, role_data, external_deploy_tasks]}
- - name: ceph_osd_external_deploy_init
when: step|int == 1
tags:
- ceph
- ceph_fstobs
- ceph_systemd
block:
- name: set ceph-ansible group vars osds
set_fact:
ceph_ansible_group_vars_osds: {get_param: CephAnsibleDisksConfig}
- name: generate ceph-ansible group vars osds
copy:
dest: "{{playbook_dir}}/ceph-ansible/group_vars/osds.yml"
content: "{{ceph_ansible_group_vars_osds|to_nice_yaml}}"
external_update_tasks: {get_attr: [CephBase, role_data, external_update_tasks]}
external_upgrade_tasks: {get_attr: [CephBase, role_data, external_upgrade_tasks]}

View File

@ -1,99 +0,0 @@
heat_template_version: wallaby
description: >
Ceph RBD Mirror service.
parameters:
ServiceData:
default: {}
description: Dictionary packing service data
type: json
ServiceNetMap:
default: {}
description: Mapping of service_name -> network name. Typically set
via parameter_defaults in the resource registry. Use
parameter_merge_strategies to merge it with the defaults.
type: json
RoleName:
default: ''
description: Role name on which the service is applied
type: string
RoleParameters:
default: {}
description: Parameters specific to the role
type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
via parameter_defaults in the resource registry.
type: json
CephRbdMirrorCopyAdminKey:
default: false
description: Some admins like to copy the admin key everywhere
type: boolean
CephRbdMirrorConfigure:
default: true
description: Perform mirror configuration between local and remote pool
type: boolean
CephRbdMirrorPool:
default: ''
description: Name of the local pool to mirror to remote cluster
type: string
CephRbdMirrorRemoteCluster:
default: 'not-ceph'
description: The name given to the remote Ceph cluster from the local cluster.
Keys for the remote cluster will reside in the /etc/ceph directory.
type: string
CephRbdMirrorRemoteUser:
default: ''
description: The rbd-mirror daemon needs a user to authenticate with the
remote cluster. By default, this key should be available under
/etc/ceph/<remote_cluster>.client.<remote_user>.keyring
type: string
resources:
CephBase:
type: ./ceph-base.yaml
properties:
ServiceData: {get_param: ServiceData}
ServiceNetMap: {get_param: ServiceNetMap}
EndpointMap: {get_param: EndpointMap}
RoleName: {get_param: RoleName}
RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
description: Role data for the Ceph RBD Mirror service.
value:
service_name: ceph_rbdmirror
firewall_rules:
'114 ceph_rbdmirror':
dport:
- '6800-7300'
upgrade_tasks: []
puppet_config: {}
docker_config: {}
external_deploy_tasks:
list_concat:
- {get_attr: [CephBase, role_data, external_deploy_tasks]}
- - name: ceph_rbdmirror_external_deploy_init
when: step|int == 1
tags:
- ceph
- ceph_fstobs
- ceph_systemd
block:
- name: set ceph-ansible group vars rbdmirrors
set_fact:
ceph_ansible_group_vars_rbdmirrors:
copy_admin_key: {get_param: CephRbdMirrorCopyAdminKey}
ceph_rbd_mirror_configure: {get_param: CephRbdMirrorConfigure}
ceph_rbd_mirror_pool: {get_param: CephRbdMirrorPool}
ceph_rbd_mirror_remote_cluster: {get_param: CephRbdMirrorRemoteCluster}
ceph_rbd_mirror_remote_user: {get_param: CephRbdMirrorRemoteUser}
- name: generate ceph-ansible group vars rbdmirrors
copy:
dest: "{{playbook_dir}}/ceph-ansible/group_vars/rbdmirrors.yml"
content: "{{ceph_ansible_group_vars_rbdmirrors|to_nice_yaml}}"
external_update_tasks: {get_attr: [CephBase, role_data, external_update_tasks]}
external_upgrade_tasks: {get_attr: [CephBase, role_data, external_upgrade_tasks]}
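# For illustration only (pool, cluster and user names below are placeholders):
# the parameters above would typically be set in an environment file, e.g.
#   parameter_defaults:
#     CephRbdMirrorPool: images
#     CephRbdMirrorRemoteCluster: site-a
#     CephRbdMirrorRemoteUser: rbd-mirror-peer
#     CephRbdMirrorCopyAdminKey: false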

View File

@ -1,200 +0,0 @@
heat_template_version: wallaby
description: >
Ceph RadosGW service.
parameters:
ServiceData:
default: {}
description: Dictionary packing service data
type: json
ServiceNetMap:
default: {}
description: Mapping of service_name -> network name. Typically set
via parameter_defaults in the resource registry. Use
parameter_merge_strategies to merge it with the defaults.
type: json
RoleName:
default: ''
description: Role name on which the service is applied
type: string
RoleParameters:
default: {}
description: Parameters specific to the role
type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
via parameter_defaults in the resource registry.
type: json
SwiftPassword:
description: The password for the swift service account
type: string
hidden: true
KeystoneRegion:
type: string
default: 'regionOne'
description: Keystone region for endpoint
CephEnableDashboard:
type: boolean
default: false
description: Parameter used to trigger the dashboard deployment.
EnableInternalTLS:
type: boolean
default: false
CertificateKeySize:
type: string
default: '2048'
description: Specifies the private key size used when creating the
certificate.
CephRgwCertificateKeySize:
type: string
default: ''
description: Override the private key size used when creating the
certificate for this service
conditions:
key_size_override_set:
not: {equals: [{get_param: CephRgwCertificateKeySize}, '']}
resources:
CephBase:
type: ./ceph-base.yaml
properties:
ServiceData: {get_param: ServiceData}
ServiceNetMap: {get_param: ServiceNetMap}
EndpointMap: {get_param: EndpointMap}
RoleName: {get_param: RoleName}
RoleParameters: {get_param: RoleParameters}
CephRgwAnsibleVars:
type: OS::Heat::Value
properties:
type: json
value:
vars:
radosgw_keystone: true
radosgw_keystone_ssl: false
radosgw_address_block:
list_join:
- ','
- get_param: [ServiceData, net_cidr_map, {get_param: [ServiceNetMap, CephRgwNetwork]}]
radosgw_frontend_port: {get_param: [EndpointMap, CephRgwInternal, port]}
outputs:
role_data:
description: Role data for the Ceph RadosGW service.
value:
service_name: ceph_rgw
firewall_rules:
'122 ceph rgw':
dport:
list_concat:
- - {get_param: [EndpointMap, CephRgwInternal, port]}
- if:
- {get_param: CephEnableDashboard}
- - '9100'
keystone_resources:
swift:
endpoints:
public: {get_param: [EndpointMap, CephRgwPublic, uri]}
internal: {get_param: [EndpointMap, CephRgwInternal, uri]}
admin: {get_param: [EndpointMap, CephRgwAdmin, uri]}
users:
swift:
password: {get_param: SwiftPassword}
region: {get_param: KeystoneRegion}
service: 'object-store'
roles:
- ResellerAdmin
- swiftoperator
upgrade_tasks: {get_attr: [CephBase, role_data, upgrade_tasks]}
post_upgrade_tasks: {get_attr: [CephBase, role_data, post_upgrade_tasks]}
puppet_config: {}
docker_config: {}
external_deploy_tasks:
list_concat:
- {get_attr: [CephBase, role_data, external_deploy_tasks]}
- - name: ceph_rgw_external_deploy_init
when: step|int == 1
tags:
- ceph
- ceph_fstobs
- ceph_systemd
block:
- name: set ceph-ansible group vars rgws
set_fact:
ceph_ansible_group_vars_rgws:
if:
- {get_param: EnableInternalTLS}
- map_merge:
- {get_attr: [CephRgwAnsibleVars, value, vars]}
- radosgw_frontend_ssl_certificate: '/etc/pki/tls/certs/ceph_rgw.pem'
- {get_attr: [CephRgwAnsibleVars, value, vars]}
ceph_rgw_config_overrides:
global:
rgw_keystone_api_version: 3
rgw_keystone_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]}
rgw_keystone_accepted_roles: 'member, Member, admin'
rgw_keystone_accepted_admin_roles: ResellerAdmin, swiftoperator
rgw_keystone_admin_domain: default
rgw_keystone_admin_project: service
rgw_keystone_admin_user: swift
rgw_keystone_admin_password: {get_param: SwiftPassword}
rgw_keystone_implicit_tenants: 'true'
rgw_keystone_revocation_interval: '0'
rgw_s3_auth_use_keystone: 'true'
rgw_swift_versioning_enabled: 'true'
rgw_swift_account_in_url: 'true'
rgw_trust_forwarded_https: 'true'
- name: generate ceph-ansible group vars rgws
copy:
dest: "{{playbook_dir}}/ceph-ansible/group_vars/rgws.yml"
content: "{{ceph_ansible_group_vars_rgws|to_nice_yaml}}"
external_update_tasks: {get_attr: [CephBase, role_data, external_update_tasks]}
external_upgrade_tasks: {get_attr: [CephBase, role_data, external_upgrade_tasks]}
metadata_settings:
if:
- {get_param: EnableInternalTLS}
- - service: ceph_rgw
network: {get_param: [ServiceNetMap, CephRgwNetwork]}
type: node
deploy_steps_tasks:
- name: Certificate generation
when:
- step|int == 1
- enable_internal_tls
block:
- include_role:
name: linux-system-roles.certificate
vars:
certificate_requests:
- name: ceph_rgw
dns:
str_replace:
template: "{{fqdn_$NETWORK}}"
params:
$NETWORK: {get_param: [ServiceNetMap, CephRgwNetwork]}
principal:
str_replace:
template: "ceph_rgw/{{fqdn_$NETWORK}}@{{idm_realm}}"
params:
$NETWORK: {get_param: [ServiceNetMap, CephRgwNetwork]}
run_after: |
# Create PEM file
pemfile=/etc/pki/tls/certs/ceph_rgw.pem
cat /etc/pki/tls/certs/ceph_rgw.crt /etc/ipa/ca.crt /etc/pki/tls/private/ceph_rgw.key > $pemfile
chmod 0640 $pemfile
chown 472:472 $pemfile
# Get ceph rgw systemd unit
rgw_unit=$(systemctl list-unit-files | awk '/radosgw/ {print $1}')
# Restart the rgw systemd unit
if [ -n "$rgw_unit" ]; then
systemctl restart "$rgw_unit"
fi
key_size:
if:
- key_size_override_set
- {get_param: CephRgwCertificateKeySize}
- {get_param: CertificateKeySize}
ca: ipa

View File

@ -1,31 +0,0 @@
# DEPRECATED: use environments/external-ceph.yaml instead
resource_registry:
OS::TripleO::Services::CephExternal: ../../deployment/ceph-ansible/ceph-external.yaml
parameter_defaults:
# NOTE: These example parameters are required when using CephExternal
#CephClusterFSID: '4b5c8c0a-ff60-454b-a1b4-9747aa737d19'
#CephClientKey: 'AQDLOh1VgEp6FRAAFzT7Zw+Y9V6JJExQAsRnRQ=='
#CephExternalMonHost: '172.16.1.7, 172.16.1.8'
# the following parameters enable Ceph backends for Cinder, Glance, Gnocchi and Nova
NovaEnableRbdBackend: true
CinderEnableRbdBackend: true
CinderBackupBackend: ceph
GlanceBackend: rbd
# Uncomment below if enabling legacy telemetry
# GnocchiBackend: rbd
# If the Ceph pools which host VMs, Volumes and Images do not match these
# names OR the client keyring to use is not named 'openstack', edit the
# following as needed.
NovaRbdPoolName: vms
CinderRbdPoolName: volumes
CinderBackupRbdPoolName: backups
GlanceRbdPoolName: images
# Uncomment below if enabling legacy telemetry
# GnocchiRbdPoolName: metrics
CephClientUserName: openstack
# finally we disable the Cinder LVM backend
CinderEnableIscsiBackend: false

View File

@ -1,19 +0,0 @@
resource_registry:
OS::TripleO::Services::CephMgr: ../../deployment/ceph-ansible/ceph-mgr.yaml
OS::TripleO::Services::CephMon: ../../deployment/ceph-ansible/ceph-mon.yaml
OS::TripleO::Services::CephOSD: ../../deployment/ceph-ansible/ceph-osd.yaml
OS::TripleO::Services::CephClient: ../../deployment/ceph-ansible/ceph-client.yaml
parameter_defaults:
# Ensure that if a user overrides CephAnsiblePlaybook via some env
# file, we fall back to the default when they stop passing that env file.
CephAnsiblePlaybook: ['default']
CinderEnableIscsiBackend: false
CinderEnableRbdBackend: true
CinderBackupBackend: ceph
GlanceBackend: rbd
## Uncomment below if enabling legacy telemetry
# GnocchiBackend: rbd
GlanceShowMultipleLocations: true
# NovaEnableRbdBackend can be set on individual roles to choose RBD or local ephemeral storage
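# For example (illustrative; assumes Compute and ComputeHCI role names), a
# role-specific value can be passed via the <RoleName>Parameters map:
#   ComputeHCIParameters:
#     NovaEnableRbdBackend: true
#   ComputeParameters:
#     NovaEnableRbdBackend: false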

View File

@ -1,25 +0,0 @@
resource_registry:
OS::TripleO::Services::CephMgr: ../../deployment/ceph-ansible/ceph-mgr.yaml
OS::TripleO::Services::CephMon: ../../deployment/ceph-ansible/ceph-mon.yaml
OS::TripleO::Services::CephOSD: ../../deployment/ceph-ansible/ceph-osd.yaml
OS::TripleO::Services::CephClient: ../../deployment/ceph-ansible/ceph-client.yaml
parameter_defaults:
# Ensure that if a user overrides CephAnsiblePlaybook via some env
# file, we fall back to the default when they stop passing that env file.
CephAnsiblePlaybook: ['default']
CinderEnableIscsiBackend: false
CinderEnableRbdBackend: true
CinderBackupBackend: ceph
NovaEnableRbdBackend: true
GlanceBackend: rbd
## Uncomment below if enabling legacy telemetry
# GnocchiBackend: rbd
## Set to enable on-wire encryption
## Using secure mode can cause a performance degradation with the storage cluster.
## The severity of the performance degradation can vary depending on several
## environmental factors.
## Test the performance impact in a non-production environment before implementing.
# CephMsgrSecureMode: true

View File

@ -1,5 +0,0 @@
resource_registry:
OS::TripleO::Services::CephGrafana: ../../deployment/ceph-ansible/ceph-grafana.yaml
parameter_defaults:
CephEnableDashboard: true

View File

@ -1,2 +0,0 @@
resource_registry:
OS::TripleO::Services::CephMds: ../../deployment/ceph-ansible/ceph-mds.yaml

View File

@ -1,2 +0,0 @@
resource_registry:
OS::TripleO::Services::CephRbdMirror: ../../deployment/ceph-ansible/ceph-rbdmirror.yaml

View File

@ -1,6 +0,0 @@
resource_registry:
OS::TripleO::Services::CephRgw: ../../deployment/ceph-ansible/ceph-rgw.yaml
OS::TripleO::Services::SwiftProxy: OS::Heat::None
OS::TripleO::Services::SwiftDispersion: OS::Heat::None
OS::TripleO::Services::SwiftStorage: OS::Heat::None
OS::TripleO::Services::SwiftRingBuilder: OS::Heat::None
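# Mapping the Swift services to OS::Heat::None disables them so that Ceph
# RadosGW provides the object-store (swift) endpoint instead.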

View File

@ -9,7 +9,7 @@
# Environment file for deploying a remote site of distributed compute
# nodes (DCN) in a separate stack (multi-stack) deployment with storage;
# either HCI or with separate storage nodes. It should be used in
# combination with environments/ceph-ansible/ceph-ansible.yaml.
# combination with environments/cephadm/cephadm.yaml.
parameter_defaults:
# When running Cinder A/A, whether to connect to Etcd via the local IP for the Etcd network. If set to true, the ip on the local node will be used. If set to false, the VIP on the Etcd network will be used instead. Defaults to false.
# Type: boolean

View File

@ -6,9 +6,9 @@ resource_registry:
# Only manila-share is pacemaker managed:
OS::TripleO::Services::ManilaShare: ../deployment/manila/manila-share-pacemaker-puppet.yaml
OS::TripleO::Services::ManilaBackendCephFs: ../deployment/manila/manila-backend-cephfs.yaml
# ceph-nfs (ganesha) service is installed and configured by ceph-ansible
# ceph-nfs (ganesha) service is installed and configured by cephadm
# but it's still managed by pacemaker
OS::TripleO::Services::CephNfs: ../deployment/ceph-ansible/ceph-nfs.yaml
OS::TripleO::Services::CephNfs: ../deployment/cephadm/cephadm.yaml
parameter_defaults:

View File

@ -32,7 +32,7 @@ environments:
Environment file for deploying a remote site of distributed compute
nodes (DCN) in a separate stack (multi-stack) deployment with storage;
either HCI or with separate storage nodes. It should be used in
combination with environments/ceph-ansible/ceph-ansible.yaml.
combination with environments/cephadm/cephadm.yaml.
files:
<<: *dcn_files
deployment/cinder/cinder-base.yaml:

View File

@ -258,15 +258,6 @@ CONFIG_RESOURCE_TYPES = [
WORKFLOW_TASKS_EXCLUSIONS = [
'./deployment/octavia/octavia-deployment-config.yaml',
'./deployment/ceph-ansible/ceph-external.yaml',
'./deployment/ceph-ansible/ceph-osd.yaml',
'./deployment/ceph-ansible/ceph-rbdmirror.yaml',
'./deployment/ceph-ansible/ceph-client.yaml',
'./deployment/ceph-ansible/ceph-mds.yaml',
'./deployment/ceph-ansible/ceph-rgw.yaml',
'./deployment/ceph-ansible/ceph-base.yaml',
'./deployment/ceph-ansible/ceph-mon.yaml',
'./deployment/ceph-ansible/ceph-mgr.yaml',
]
@ -310,18 +301,6 @@ def compare_parameters(old_impl_path, new_impl_path):
return set(old_impl_params).difference(set(new_impl_params))
def compare_ceph_parameters(path):
old_path = base_path + "/deployment/ceph-ansible/"
new_path = base_path + "/deployment/cephadm/"
missing = compare_parameters(old_path, new_path)
if missing:
print("ERROR: Some parameters are missing in Ceph implementation at"
"'%s' compared to that in '%s' and they are: %s" %
(new_path, old_path, missing))
return 1
return 0
def validate_role_name(filename):
with open(filename, 'r') as f:
tpl = yaml.load(f.read(), Loader=yaml.SafeLoader)
@ -689,9 +668,7 @@ def validate_docker_service(filename, tpl):
for section_name in REQUIRED_DOCKER_SECTIONS_OVERRIDES.get(filename, REQUIRED_DOCKER_SECTIONS):
if section_name not in role_data:
# add an exception if both step_config is used in docker
# service, deployment/ceph-ansible/ceph-nfs.yaml uses
# additional step_config to add pacemaker resources
# add an exception if step_config is also used in a docker service
if (section_name == 'docker_config' and
role_data.get('step_config', '')):
print('ERROR: %s appears to be a baremetal-puppet service'
@ -1359,7 +1336,6 @@ param_map = {}
for base_path in path_args:
if os.path.isdir(base_path):
exit_val |= compare_ceph_parameters(base_path)
for subdir, dirs, files in os.walk(base_path):
if '.tox' in dirs:
dirs.remove('.tox')