openstack-helm/tools/deployment/baremetal/030-ceph.sh
portdirect bf126f46b1 Ceph: Pod per OSD via Daemonsets.
This PS extends the host-targeting utility to create daemonsets
for storage-focused charts, and supports both block-device and
bind-mount backed journals and OSDs.

Change-Id: Id90b197ba3e4f383eea7ce6137ed77b3ef4e5625
2018-02-13 22:53:20 +00:00

#!/bin/bash
# Copyright 2017 The Openstack-Helm Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
set -xe
#NOTE: Pull images and lint chart
make pull-images ceph
#NOTE: Deploy command
uuidgen > /tmp/ceph-fs-uuid.txt
CEPH_FS_ID="$(cat /tmp/ceph-fs-uuid.txt)"
#NOTE: null leaves the cluster's CRUSH tunables profile at its default
: ${CRUSH_TUNABLES:=null}
tee /tmp/ceph.yaml <<EOF
labels:
  jobs:
    node_selector_key: openstack-helm-node-class
    node_selector_value: primary
  provisioner:
    node_selector_key: openstack-helm-node-class
    node_selector_value: primary
  mon:
    node_selector_key: openstack-helm-node-class
    node_selector_value: primary
  mds:
    node_selector_key: openstack-helm-node-class
    node_selector_value: primary
  osd:
    node_selector_key: openstack-helm-node-class
    node_selector_value: primary
  rgw:
    node_selector_key: openstack-helm-node-class
    node_selector_value: primary
  mgr:
    node_selector_key: openstack-helm-node-class
    node_selector_value: primary
endpoints:
  identity:
    namespace: openstack
  object_store:
    namespace: ceph
  ceph_mon:
    namespace: ceph
network:
  public: 172.17.0.1/16
  cluster: 172.17.0.1/16
deployment:
  storage_secrets: true
  ceph: true
  rbd_provisioner: true
  cephfs_provisioner: true
  client_secrets: false
  rgw_keystone_user_and_endpoints: false
bootstrap:
  enabled: true
conf:
  rgw_ks:
    enabled: true
  ceph:
    global:
      fsid: ${CEPH_FS_ID}
      osd_pool_default_size: 1
    osd:
      osd_crush_chooseleaf_type: 0
  pool:
    crush:
      tunables: ${CRUSH_TUNABLES}
    target:
      osd: 1
      pg_per_osd: 100
    default:
      crush_rule: same_host
    spec:
      # RBD pool
      - name: rbd
        application: rbd
        replication: 1
        percent_total_data: 40
      # CephFS pools
      - name: cephfs_metadata
        application: cephfs
        replication: 1
        percent_total_data: 5
      - name: cephfs_data
        application: cephfs
        replication: 1
        percent_total_data: 10
      # RadosGW pools
      - name: .rgw.root
        application: rgw
        replication: 1
        percent_total_data: 0.1
      - name: default.rgw.control
        application: rgw
        replication: 1
        percent_total_data: 0.1
      - name: default.rgw.data.root
        application: rgw
        replication: 1
        percent_total_data: 0.1
      - name: default.rgw.gc
        application: rgw
        replication: 1
        percent_total_data: 0.1
      - name: default.rgw.log
        application: rgw
        replication: 1
        percent_total_data: 0.1
      - name: default.rgw.intent-log
        application: rgw
        replication: 1
        percent_total_data: 0.1
      - name: default.rgw.meta
        application: rgw
        replication: 1
        percent_total_data: 0.1
      - name: default.rgw.usage
        application: rgw
        replication: 1
        percent_total_data: 0.1
      - name: default.rgw.users.keys
        application: rgw
        replication: 1
        percent_total_data: 0.1
      - name: default.rgw.users.email
        application: rgw
        replication: 1
        percent_total_data: 0.1
      - name: default.rgw.users.swift
        application: rgw
        replication: 1
        percent_total_data: 0.1
      - name: default.rgw.users.uid
        application: rgw
        replication: 1
        percent_total_data: 0.1
      - name: default.rgw.buckets.extra
        application: rgw
        replication: 1
        percent_total_data: 0.1
      - name: default.rgw.buckets.index
        application: rgw
        replication: 1
        percent_total_data: 3
      - name: default.rgw.buckets.data
        application: rgw
        replication: 1
        percent_total_data: 34.8
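    # NOTE: Broadly, target.osd is the expected OSD count, pg_per_osd the
    # per-OSD placement-group budget, and each pool's percent_total_data
    # weights its share of that budget when the chart sizes the pool's pg_num
    # (the percentages above sum to roughly 100).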
  storage:
    osd:
      - data:
          type: directory
          location: /var/lib/openstack-helm/ceph/osd/osd-one
        journal:
          type: directory
          location: /var/lib/openstack-helm/ceph/osd/journal-one
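      # NOTE: The directory type above gives a bind-mount backed OSD and
      # journal. Per the commit message, block-device backed journals and
      # OSDs are also supported; a sketch of that form (the device paths
      # /dev/sdb and /dev/sdc are assumptions for illustration):
      #   - data:
      #       type: block-logical
      #       location: /dev/sdb
      #     journal:
      #       type: block-logical
      #       location: /dev/sdc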
EOF
helm install ./ceph \
  --namespace=ceph \
  --name=ceph \
  --values=/tmp/ceph.yaml
#NOTE: Wait for deploy
./tools/deployment/common/wait-for-pods.sh ceph
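#NOTE: (optional) inspect the Helm release itself; helm status is a stock
# Helm v2 command, shown here as an illustrative extra check
helm status ceph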
#NOTE: Validate deploy
MON_POD=$(kubectl get pods \
  --namespace=ceph \
  --selector="application=ceph" \
  --selector="component=mon" \
  --no-headers | awk '{ print $1; exit }')
kubectl exec -n ceph ${MON_POD} -- ceph -s
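#NOTE: (optional) illustrative follow-up checks using standard Ceph CLI
# commands: confirm the single OSD is up and the pools above were created
kubectl exec -n ceph ${MON_POD} -- ceph osd tree
kubectl exec -n ceph ${MON_POD} -- ceph osd pool ls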