openstack-helm/ceph-client/values.yaml

# Copyright 2017 The Openstack-Helm Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
deployment:
  ceph: true
  client_secrets: false
  rbd_provisioner: true
  cephfs_provisioner: true
  rgw_keystone_user_and_endpoints: false
release_group: null
images:
  pull_policy: IfNotPresent
  tags:
    ceph_bootstrap: 'docker.io/ceph/daemon:tag-build-master-luminous-ubuntu-16.04'
    ceph_cephfs_provisioner: 'quay.io/external_storage/cephfs-provisioner:v0.1.1'
    ceph_config_helper: 'docker.io/port/ceph-config-helper:v1.10.3'
    ceph_mds: 'docker.io/ceph/daemon:tag-build-master-luminous-ubuntu-16.04'
    ceph_mgr: 'docker.io/ceph/daemon:tag-build-master-luminous-ubuntu-16.04'
    ceph_rbd_pool: 'docker.io/port/ceph-config-helper:v1.10.3'
    ceph_rbd_provisioner: 'quay.io/external_storage/rbd-provisioner:v0.1.1'
    ceph_rgw: 'docker.io/ceph/daemon:tag-build-master-luminous-ubuntu-16.04'
    dep_check: 'quay.io/stackanetes/kubernetes-entrypoint:v0.3.1'
    ks_endpoints: 'docker.io/openstackhelm/heat:newton'
    ks_service: 'docker.io/openstackhelm/heat:newton'
    ks_user: 'docker.io/openstackhelm/heat:newton'
    image_repo_sync: docker.io/docker:17.07.0
  local_registry:
    active: false
    exclude:
      - dep_check
      - image_repo_sync
labels:
  job:
    node_selector_key: openstack-control-plane
    node_selector_value: enabled
  provisioner:
    node_selector_key: openstack-control-plane
    node_selector_value: enabled
  mds:
    node_selector_key: ceph-mds
    node_selector_value: enabled
  rgw:
    node_selector_key: ceph-rgw
    node_selector_value: enabled
  mgr:
    node_selector_key: ceph-mgr
    node_selector_value: enabled
pod:
  dns_policy: "ClusterFirstWithHostNet"
  replicas:
    cephfs_provisioner: 2
    rbd_provisioner: 2
    mds: 2
    mgr: 2
    rgw: 2
  affinity:
    anti:
      type:
        default: preferredDuringSchedulingIgnoredDuringExecution
      topologyKey:
        default: kubernetes.io/hostname
  resources:
    enabled: false
    mds:
      requests:
        memory: "10Mi"
        cpu: "250m"
      limits:
        memory: "50Mi"
        cpu: "500m"
    rgw:
      requests:
        memory: "5Mi"
        cpu: "250m"
      limits:
        memory: "50Mi"
        cpu: "500m"
    rbd_provisioner:
      requests:
        memory: "5Mi"
        cpu: "250m"
      limits:
        memory: "50Mi"
        cpu: "500m"
    cephfs_provisioner:
      requests:
        memory: "5Mi"
        cpu: "250m"
      limits:
        memory: "50Mi"
        cpu: "500m"
    mgr:
      requests:
        memory: "5Mi"
        cpu: "250m"
      limits:
        memory: "50Mi"
        cpu: "500m"
    jobs:
      bootstrap:
        limits:
          memory: "1024Mi"
          cpu: "2000m"
        requests:
          memory: "128Mi"
          cpu: "500m"
      ks_endpoints:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      ks_service:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      ks_user:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      image_repo_sync:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
secrets:
  keyrings:
    mon: ceph-mon-keyring
    mds: ceph-bootstrap-mds-keyring
    osd: ceph-bootstrap-osd-keyring
    rgw: ceph-bootstrap-rgw-keyring
    mgr: ceph-bootstrap-mgr-keyring
    admin: ceph-client-admin-keyring
  identity:
    admin: ceph-keystone-admin
    swift: ceph-keystone-user
    user_rgw: ceph-keystone-user-rgw
network:
  public: 192.168.0.0/16
  cluster: 192.168.0.0/16
  port:
    mon: 6789
    rgw: 8088
    mgr: 7000
conf:
  features:
    mds: true
    rgw: true
    mgr: true
  pool:
    #NOTE(portdirect): this drives a simple approximation of
    # https://ceph.com/pgcalc/; the `target.osd` key should be set to match the
    # expected number of OSDs in the cluster, and `target.pg_per_osd` should be
    # set to the desired number of placement groups on each OSD.
    crush:
      #NOTE(portdirect): to use RBD devices with Ubuntu 16.04's 4.4.x series
      # kernel this should be set to `hammer`
      tunables: null
    target:
      #NOTE(portdirect): the default number of expected OSDs is arbitrarily set
      # to 5 to match the number of nodes in the OSH gate.
      osd: 5
      pg_per_osd: 100
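    #NOTE(example): as a rough illustration of the pgcalc approximation above
    # (pg_num ~ (pg_per_osd * osd * percent_total_data / 100) / replication,
    # rounded to a power of two), the `rbd` pool below at 40% of data would get
    # (100 * 5 * 0.40) / 3 ~ 67, i.e. roughly 64 placement groups. The exact
    # rounding applied by the ceph-rbd-pool job may differ.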
    default:
      #NOTE(portdirect): this should be 'same_host' for a single node
      # cluster to be in a healthy state
      crush_rule: replicated_rule
    #NOTE(portdirect): this section describes the pools that will be managed by
    # the ceph pool management job, as it tunes the pgs and crush rule, based on
    # the above.
    spec:
      # RBD pool
      - name: rbd
        application: rbd
        replication: 3
        percent_total_data: 40
      # CephFS pools
      - name: cephfs_metadata
        application: cephfs
        replication: 3
        percent_total_data: 5
      - name: cephfs_data
        application: cephfs
        replication: 3
        percent_total_data: 10
      # RadosGW pools
      - name: .rgw.root
        application: rgw
        replication: 3
        percent_total_data: 0.1
      - name: default.rgw.control
        application: rgw
        replication: 3
        percent_total_data: 0.1
      - name: default.rgw.data.root
        application: rgw
        replication: 3
        percent_total_data: 0.1
      - name: default.rgw.gc
        application: rgw
        replication: 3
        percent_total_data: 0.1
      - name: default.rgw.log
        application: rgw
        replication: 3
        percent_total_data: 0.1
      - name: default.rgw.intent-log
        application: rgw
        replication: 3
        percent_total_data: 0.1
      - name: default.rgw.meta
        application: rgw
        replication: 3
        percent_total_data: 0.1
      - name: default.rgw.usage
        application: rgw
        replication: 3
        percent_total_data: 0.1
      - name: default.rgw.users.keys
        application: rgw
        replication: 3
        percent_total_data: 0.1
      - name: default.rgw.users.email
        application: rgw
        replication: 3
        percent_total_data: 0.1
      - name: default.rgw.users.swift
        application: rgw
        replication: 3
        percent_total_data: 0.1
      - name: default.rgw.users.uid
        application: rgw
        replication: 3
        percent_total_data: 0.1
      - name: default.rgw.buckets.extra
        application: rgw
        replication: 3
        percent_total_data: 0.1
      - name: default.rgw.buckets.index
        application: rgw
        replication: 3
        percent_total_data: 3
      - name: default.rgw.buckets.data
        application: rgw
        replication: 3
        percent_total_data: 34.8
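      #NOTE(example): additional pools managed by the ceph-rbd-pool job can be
      # appended to this list with the same four keys, e.g. (illustrative
      # values only):
      #- name: volumes
      #  application: rbd
      #  replication: 3
      #  percent_total_data: 10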
  rgw_ks:
    enabled: false
    config:
      rgw_keystone_api_version: 3
      rgw_keystone_accepted_roles: "admin, _member_"
      rgw_keystone_implicit_tenants: false
      rgw_keystone_make_new_tenants: true
      rgw_s3_auth_use_keystone: true
      rgw_swift_account_in_url: true
  ceph:
    global:
      # auth
      cephx: true
      cephx_require_signatures: false
      cephx_cluster_require_signatures: true
      cephx_service_require_signatures: false
    osd:
      osd_mkfs_type: xfs
      osd_mkfs_options_xfs: -f -i size=2048
      osd_max_object_name_len: 256
      ms_bind_port_min: 6800
      ms_bind_port_max: 7100
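  #NOTE(example): keys under `conf.ceph` map onto ceph.conf sections; the `osd`
  # block above would correspond roughly to the following INI fragment
  # (illustrative rendering only):
  #
  #   [osd]
  #   osd_mkfs_type = xfs
  #   osd_mkfs_options_xfs = -f -i size=2048
  #   osd_max_object_name_len = 256
  #   ms_bind_port_min = 6800
  #   ms_bind_port_max = 7100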
dependencies:
  dynamic:
    common:
      local_image_registry:
        jobs:
          - ceph-client-image-repo-sync
        services:
          - endpoint: node
            service: local_image_registry
  static:
    bootstrap:
      jobs: null
      services:
        - endpoint: internal
          service: ceph_mon
    cephfs_client_key_generator:
      jobs: null
    cephfs_provisioner:
      jobs:
        - ceph-rbd-pool
      services:
        - endpoint: internal
          service: ceph_mon
    ks_endpoints:
      jobs:
        - ceph-ks-service
      services:
        - endpoint: internal
          service: identity
    ks_service:
      services:
        - endpoint: internal
          service: identity
    ks_user:
      services:
        - endpoint: internal
          service: identity
    mds:
      jobs:
        - ceph-storage-keys-generator
        - ceph-mds-keyring-generator
        - ceph-rbd-pool
      services:
        - endpoint: internal
          service: ceph_mon
    mgr:
      jobs:
        - ceph-storage-keys-generator
        - ceph-mgr-keyring-generator
      services:
        - endpoint: internal
          service: ceph_mon
    namespace_client_key_cleaner:
      jobs: null
    namespace_client_key_generator:
      jobs: null
    rbd_pool:
      services:
        - endpoint: internal
          service: ceph_mon
    rbd_provisioner:
      jobs:
        - ceph-rbd-pool
      services:
        - endpoint: internal
          service: ceph_mon
    rgw:
      jobs:
        - ceph-storage-keys-generator
        - ceph-rgw-keyring-generator
        - ceph-rbd-pool
      services:
        - endpoint: internal
          service: ceph_mon
    image_repo_sync:
      services:
        - endpoint: internal
          service: local_image_registry
bootstrap:
  enabled: false
  script: |
    ceph -s
    function ensure_pool () {
      ceph osd pool stats $1 || ceph osd pool create $1 $2
      local test_luminous=$(ceph tell osd.* version | egrep -c "12.2|luminous")
      if [[ ${test_luminous} -gt 0 ]]; then
        ceph osd pool application enable $1 $3
      fi
    }
    #ensure_pool volumes 8 cinder
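    # NOTE: ensure_pool takes the pool name ($1), the pg_num to use if the pool
    # has to be created ($2), and the application tag to set on Luminous or
    # newer clusters ($3), as in the commented example above.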
# Uncomment below to enable mgr modules
# For a list of available modules:
# http://docs.ceph.com/docs/master/mgr/
# This overrides mgr_initial_modules (default: restful, status)
# Any module not listed here will be disabled
ceph_mgr_enabled_modules:
  - restful
  - status
  - prometheus
# You can configure your mgr modules below. Each module has its own set of
# key/value pairs. Refer to the docs above for more info. For example:
#ceph_mgr_modules_config:
#  dashboard:
#    port: 7000
#  localpool:
#    failure_domain: host
#    subtree: rack
#    pg_num: "128"
#    num_rep: "3"
#    min_size: "2"
# If you change provision_storage_class to false, it is presumed you manage
# your own storage class definition externally.
storageclass:
  rbd:
    provision_storage_class: true
    provisioner: ceph.com/rbd
    name: general
    monitors: null
    pool: rbd
    admin_id: admin
    admin_secret_name: pvc-ceph-conf-combined-storageclass
    admin_secret_namespace: ceph
    user_id: admin
    user_secret_name: pvc-ceph-client-key
    image_format: "2"
    image_features: layering
  cephfs:
    provision_storage_class: true
    provisioner: ceph.com/cephfs
    name: cephfs
    admin_id: admin
    user_secret_name: pvc-ceph-cephfs-client-key
    admin_secret_name: pvc-ceph-conf-combined-storageclass
    admin_secret_namespace: ceph
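#NOTE(example): a PersistentVolumeClaim consuming the `general` RBD class
# provisioned above could look like the following (name and size are
# illustrative only):
#
#   apiVersion: v1
#   kind: PersistentVolumeClaim
#   metadata:
#     name: test-rbd-pvc
#   spec:
#     accessModes:
#       - ReadWriteOnce
#     resources:
#       requests:
#         storage: 1Gi
#     storageClassName: general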
endpoints:
  cluster_domain_suffix: cluster.local
  local_image_registry:
    name: docker-registry
    namespace: docker-registry
    hosts:
      default: localhost
      internal: docker-registry
      node: localhost
    host_fqdn_override:
      default: null
    port:
      registry:
        node: 5000
  identity:
    name: keystone
    namespace: null
    auth:
      admin:
        region_name: RegionOne
        username: admin
        password: password
        project_name: admin
        user_domain_name: default
        project_domain_name: default
      swift:
        role: admin
        region_name: RegionOne
        username: swift
        password: password
        project_name: service
        user_domain_name: default
        project_domain_name: default
    hosts:
      default: keystone-api
      public: keystone
    host_fqdn_override:
      default: null
    path:
      default: /v3
    scheme:
      default: http
    port:
      admin:
        default: 35357
      api:
        default: 80
  object_store:
    name: swift
    namespace: null
    hosts:
      default: ceph-rgw
    host_fqdn_override:
      default: null
    path:
      default: /swift/v1/KEY_$(tenant_id)s
    scheme:
      default: http
    port:
      api:
        default: 8088
  ceph_mon:
    namespace: null
    hosts:
      default: ceph-mon
      discovery: ceph-mon-discovery
    host_fqdn_override:
      default: null
    port:
      mon:
        default: 6789
  ceph_mgr:
    namespace: null
    hosts:
      default: ceph-mgr
    host_fqdn_override:
      default: null
    port:
      mgr:
        default: 7000
      metrics:
        default: 9283
    scheme:
      default: http
monitoring:
  prometheus:
    enabled: true
    ceph_mgr:
      scrape: true
      port: 9283
manifests:
  configmap_bin: true
  configmap_bin_ks: true
  configmap_bin_common: true
  configmap_etc: true
  deployment_mds: true
  deployment_rbd_provisioner: true
  deployment_cephfs_provisioner: true
  deployment_rgw: true
  deployment_mgr: true
  job_bootstrap: false
  job_cephfs_client_key: true
  job_image_repo_sync: true
  job_ks_endpoints: true
  job_ks_service: true
  job_ks_user: true
  job_namespace_client_key_cleaner: true
  job_namespace_client_key: true
  job_rbd_pool: true
  secret_keystone_rgw: true
  secret_keystone: true
  service_mgr: true
  service_rgw: true
  storageclass_cephfs: true
  storageclass_rbd: true
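#NOTE(example): any of the values above can be overridden at deploy time, for
# instance with a site-specific overrides file (release name and file path are
# illustrative only):
#
#   helm upgrade --install ceph-client ./ceph-client \
#     --namespace=ceph \
#     --values=/tmp/ceph-client-overrides.yaml \
#     --set conf.pool.target.osd=12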