treasuremap/global/software/charts/osh/openstack-tenant-ceph/ceph-client.yaml
---
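# Armada chart document for the tenant (dedicated) Ceph client chart. This is
# an abstract global-layer document: Deckhand merges site-level overrides into
# it before Armada deploys the release.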
schema: armada/Chart/v1
metadata:
  schema: metadata/Document/v1
  name: tenant-ceph-client-global
  layeringDefinition:
    abstract: true
    layer: global
  storagePolicy: cleartext
  labels:
    name: tenant-ceph-client-global
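  # Deckhand substitutions: at render time the values below are copied from
  # other documents (software versions, common addresses, endpoint catalogues,
  # passphrases) into the paths named by each dest.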
  substitutions:
    # Chart source
    - src:
        schema: pegleg/SoftwareVersions/v1
        name: software-versions
        path: .charts.ucp.tenant-ceph-client
      dest:
        path: .source
    # Images
    - src:
        schema: pegleg/SoftwareVersions/v1
        name: software-versions
        path: .images.ceph.tenant-ceph-client
      dest:
        path: .values.images.tags
    # IP addresses
    - src:
        schema: pegleg/CommonAddresses/v1
        name: common-addresses
        path: .storage.ceph.public_cidr
      dest:
        path: .values.network.public
    - src:
        schema: pegleg/CommonAddresses/v1
        name: common-addresses
        path: .storage.ceph.cluster_cidr
      dest:
        path: .values.network.cluster
    - src:
        schema: pegleg/EndpointCatalogue/v1
        name: ucp_endpoints
        path: .ceph.tenant_ceph_mon
      dest:
        path: .values.endpoints.ceph_mon
    - src:
        schema: pegleg/EndpointCatalogue/v1
        name: ucp_endpoints
        path: .ceph.tenant_ceph_mgr
      dest:
        path: .values.endpoints.ceph_mgr
    # Secrets
    - dest:
        path: .values.conf.ceph.global.fsid
      src:
        schema: deckhand/Passphrase/v1
        name: tenant_ceph_fsid
        path: .
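# Everything under data is consumed by Armada: release metadata first, then
# chart value overrides under data.values.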
data:
  chart_name: tenant-ceph-client
  release: tenant-ceph-client
  namespace: tenant-ceph
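  # A protected release is never purged on failure; continue_processing tells
  # Armada to carry on with the rest of the manifest instead of aborting.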
  protected:
    continue_processing: true
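  # Wait up to 900s for resources labeled with this release_group to become
  # ready before treating the release as deployed.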
  wait:
    timeout: 900
    labels:
      release_group: airship-tenant-ceph-client
  install:
    no_hooks: false
  upgrade:
    no_hooks: false
    pre:
      delete:
        - type: job
          labels:
            release_group: airship-tenant-ceph-client
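  # Overrides applied on top of the upstream ceph-client chart defaults.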
  values:
    labels:
      job:
        node_selector_key: tenant-ceph-control-plane
        node_selector_value: enabled
      mds:
        node_selector_key: tenant-ceph-mds
        node_selector_value: enabled
      mgr:
        node_selector_key: tenant-ceph-mgr
        node_selector_value: enabled
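    # Pin mon/mgr endpoint lookups to the tenant-ceph namespace so service
    # discovery resolves against the tenant cluster, not the primary one.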
    endpoints:
      ceph_mon:
        namespace: tenant-ceph
      ceph_mgr:
        namespace: tenant-ceph
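    # The scrape port advertised on the mgr endpoint must match the prometheus
    # mgr module's server_port configured just below.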
    monitoring:
      prometheus:
        ceph_mgr:
          port: 9284
    ceph_mgr_modules_config:
      prometheus:
        server_port: 9284
    deployment:
      ceph: true
    bootstrap:
      enabled: true
    manifests:
      deployment_mds: false
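    # MDS (CephFS) is not used for tenant Ceph: its manifest is disabled above
    # and the corresponding feature flag is turned off below.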
    conf:
      features:
        mds: false
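      # Pool definitions. percent_total_data is the share of cluster data
      # expected in each pool; the chart uses it together with
      # target.pg_per_osd to size placement groups.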
      pool:
        spec:
          # RBD pool
          - name: rbd
            application: rbd
            replication: 3
            percent_total_data: 10
          # Cinder volumes pool
          - name: cinder.volumes
            application: cinder-volume
            replication: 3
            percent_total_data: 40
          # RadosGW pools
          - name: .rgw.root
            application: rgw
            replication: 3
            percent_total_data: 0.1
          - name: default.rgw.control
            application: rgw
            replication: 3
            percent_total_data: 0.1
          - name: default.rgw.data.root
            application: rgw
            replication: 3
            percent_total_data: 0.1
          - name: default.rgw.gc
            application: rgw
            replication: 3
            percent_total_data: 0.1
          - name: default.rgw.log
            application: rgw
            replication: 3
            percent_total_data: 0.1
          - name: default.rgw.intent-log
            application: rgw
            replication: 3
            percent_total_data: 0.1
          - name: default.rgw.meta
            application: rgw
            replication: 3
            percent_total_data: 0.1
          - name: default.rgw.usage
            application: rgw
            replication: 3
            percent_total_data: 0.1
          - name: default.rgw.users.keys
            application: rgw
            replication: 3
            percent_total_data: 0.1
          - name: default.rgw.users.email
            application: rgw
            replication: 3
            percent_total_data: 0.1
          - name: default.rgw.users.swift
            application: rgw
            replication: 3
            percent_total_data: 0.1
          - name: default.rgw.users.uid
            application: rgw
            replication: 3
            percent_total_data: 0.1
          - name: default.rgw.buckets.extra
            application: rgw
            replication: 3
            percent_total_data: 0.1
          - name: default.rgw.buckets.index
            application: rgw
            replication: 3
            percent_total_data: 3
          - name: default.rgw.buckets.data
            application: rgw
            replication: 3
            percent_total_data: 30
        # NOTE(alanmeadows): support 4.x 16.04 kernels (non-HWE)
        crush:
          tunables: 'hammer'
        # NOTE(alanmeadows): This is required ATM for bootstrapping a Ceph
        # cluster with only one OSD. Depending on OSD targeting & site
        # configuration this can be changed.
        target:
          osd: 1
          pg_per_osd: 100
          protected: true
        default:
          # NOTE(alanmeadows): This is required ATM for bootstrapping a Ceph
          # cluster with only one OSD. Depending on OSD targeting & site
          # configuration this can be changed.
          crush_rule: replicated_rule
      ceph:
        global:
          # NOTE(mb874d): This is required ATM for bootstrapping a Ceph
          # cluster with only one OSD. Depending on OSD targeting & site
          # configuration this can be changed.
          osd_pool_default_size: 1
...