Use RBD external provisioner
Currently the RBD storageclass uses the in-tree provisioner kubernetes.io/rbd. Since the containerized Kubernetes controller image doesn't include the rbd binary, the in-tree provisioner errors out. This fix uses the external provisioner ceph.com/rbd.

Closes-Bug: #1707937
Change-Id: Iad443a54c0229c0356beb6d872365298248c40c9
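A quick way to confirm the change after the chart is deployed is to read the provisioner field off the StorageClass; a minimal sketch, assuming the chart keeps its default class name ``general`` from values.yaml:

::

    kubectl get storageclass general -o jsonpath='{.provisioner}'
    # expected after this change: ceph.com/rbd (previously kubernetes.io/rbd)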
parent e30c1cf414  commit da9539bb87

ceph/templates/bin/_rbd-provisioner.sh.tpl (new file, 19 lines added)
@@ -0,0 +1,19 @@
#!/bin/bash

# Copyright 2017 The Openstack-Helm Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

set -ex

exec /usr/local/bin/rbd-provisioner -id ${POD_NAME}
@@ -74,3 +74,5 @@ data:
 {{ tuple "bin/_variables_entrypoint.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
   check_zombie_mons.py: |
 {{ tuple "bin/_check_zombie_mons.py.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
+  rbd-provisioner.sh: |
+{{ tuple "bin/_rbd-provisioner.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
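The new wrapper script is shipped to the provisioner pod through the existing ceph-bin ConfigMap. To confirm it gets rendered into that ConfigMap, a hedged sketch using Helm 2's ``helm template`` against the chart directory:

::

    helm template ./ceph | grep -A 2 'rbd-provisioner.sh:'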
ceph/templates/deployment-rbd-provisioner.yaml (new file, 56 lines added)
@@ -0,0 +1,56 @@
# Copyright 2017 The Openstack-Helm Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

{{- if .Values.manifests_enabled.rbd_provisioner }}
{{- $envAll := . }}
{{- $dependencies := .Values.dependencies.rbd_provisioner }}
---
kind: Deployment
apiVersion: extensions/v1beta1
metadata:
  name: ceph-rbd-provisioner
spec:
  replicas: {{ .Values.replicas.rbd_provisioner }}
  strategy:
    type: Recreate
  template:
    metadata:
      labels:
{{ tuple $envAll "rbd" "provisioner" | include "helm-toolkit.snippets.kubernetes_metadata_labels" | indent 8 }}
    spec:
      containers:
        - name: ceph-rbd-provisioner
          image: {{ .Values.images.rbd_provisioner }}
          imagePullPolicy: {{ .Values.images.pull_policy }}
{{ tuple $envAll $envAll.Values.pod.resources.rbd_provisioner | include "helm-toolkit.snippets.kubernetes_resources" | indent 10 }}
          env:
            - name: PROVISIONER_NAME
              value: {{ .Values.storageclass.provisioner }}
            - name: POD_NAME
              valueFrom:
                fieldRef:
                  fieldPath: metadata.name
          command:
            - /tmp/rbd-provisioner.sh
          volumeMounts:
            - name: ceph-bin
              mountPath: /tmp/rbd-provisioner.sh
              subPath: rbd-provisioner.sh
              readOnly: true
      volumes:
        - name: ceph-bin
          configMap:
            name: ceph-bin
            defaultMode: 0555
{{- end }}
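The Deployment injects PROVISIONER_NAME from values and POD_NAME via the downward API, then runs the mounted wrapper script. To check that the provisioner replicas are up and watching for claims, a hedged sketch assuming the chart is released into the ``ceph`` namespace:

::

    kubectl get deployment -n ceph ceph-rbd-provisioner
    kubectl get pods -n ceph | grep rbd-provisioner
    # inspect one pod's logs for the provision/watch loop:
    kubectl logs -n ceph <one-of-the-ceph-rbd-provisioner-pods>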
@@ -20,7 +20,7 @@ apiVersion: storage.k8s.io/v1
 kind: StorageClass
 metadata:
   name: {{ .Values.storageclass.name }}
-provisioner: kubernetes.io/rbd
+provisioner: {{ .Values.storageclass.provisioner }}
 parameters:
   monitors: {{ tuple "ceph_mon" "internal" "mon" . | include "helm-toolkit.endpoints.host_and_port_endpoint_uri_lookup" }}
   adminId: {{ .Values.storageclass.admin_id }}
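With the StorageClass now pointing at the external provisioner, a PersistentVolumeClaim that references it should be served without the controller-manager needing the rbd binary. A minimal test claim, sketched under the assumption that the class keeps its default name ``general``:

::

    cat <<EOF | kubectl create -f -
    kind: PersistentVolumeClaim
    apiVersion: v1
    metadata:
      name: rbd-provisioner-test
    spec:
      storageClassName: general
      accessModes:
        - ReadWriteOnce
      resources:
        requests:
          storage: 1Gi
    EOF
    kubectl get pvc rbd-provisioner-test   # expect STATUS to reach Bound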
@@ -16,10 +16,12 @@ manifests_enabled:
   storage_secrets: true
   client_secrets: true
   deployment: true
+  rbd_provisioner: true

 replicas:
   rgw: 3
   mon_check: 1
+  rbd_provisioner: 2

 service:
   mon:
@@ -30,6 +32,7 @@ images:
   dep_check: docker.io/kolla/ubuntu-source-kubernetes-entrypoint:4.0.0
   daemon: quay.io/attcomdev/ceph-daemon:tag-build-master-jewel-ubuntu-16.04
   ceph_config_helper: docker.io/port/ceph-config-helper:v1.6.8
+  rbd_provisioner: quay.io/external_storage/rbd-provisioner:v0.1.1
   pull_policy: "IfNotPresent"

 labels:
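On nodes with slow or restricted registry access, the provisioner image can be pre-pulled in the same way other images are pulled elsewhere in this repo's scripts; a hedged sketch:

::

    sudo docker pull quay.io/external_storage/rbd-provisioner:v0.1.1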
@@ -94,6 +97,13 @@ pod:
       limits:
         memory: "50Mi"
         cpu: "500m"
+    rbd_provisioner:
+      requests:
+        memory: "5Mi"
+        cpu: "250m"
+      limits:
+        memory: "50Mi"
+        cpu: "500m"
     jobs:
       bootstrap:
         limits:
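These requests and limits follow the chart's existing pod.resources layout; whether they are actually applied is governed by the chart's resource-enforcement settings, which are not shown in this hunk. A hedged override sketch at install time:

::

    helm install ./ceph --namespace=ceph --name=ceph \
      --set pod.resources.rbd_provisioner.limits.memory=128Mi \
      --set pod.resources.rbd_provisioner.limits.cpu=1000m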
@@ -219,6 +229,10 @@ dependencies:
     services:
       - service: ceph_mon
         endpoint: internal
+  rbd_provisioner:
+    jobs:
+      - service: ceph_mon
+        endpoint: internal

 ceph:
   enabled:
@@ -249,6 +263,7 @@ bootstrap:
 # class definition externally
 storageclass:
   provision_storage_class: true
+  provisioner: ceph.com/rbd
   name: general
   monitors: null
   pool: rbd
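Because both the provisioner name and the external Deployment are driven by values, operators whose controller-manager image does include the rbd binary can keep the in-tree behaviour; a hedged sketch:

::

    helm install ./ceph --namespace=ceph --name=ceph \
      --set storageclass.provisioner=kubernetes.io/rbd \
      --set manifests_enabled.rbd_provisioner=false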
@@ -59,38 +59,11 @@ procedure is opinionated *only to standardize the deployment process for
 users and developers*, and to limit questions to a known working
 deployment. Instructions will expand as the project becomes more mature.

-Kube Controller Manager
+KubeADM Deployment
 -----------------------

-This guide assumes you will be using Ceph to fulfill the
-PersistentVolumeClaims that will be made against your Kubernetes cluster.
-In order to use Ceph, you will need to leverage a custom Kubernetes
-Controller with the necessary
-`RDB <http://docs.ceph.com/docs/jewel/rbd/rbd/>`__ utilities. For your
-convenience, we are maintaining this along with the Openstack-Helm
-project. If you would like to check the current
-`tags <https://quay.io/repository/attcomdev/kube-controller-manager?tab=tags>`__
-or the
-`security <https://quay.io/repository/attcomdev/kube-controller-manager/image/eedc2bf21cca5647a26e348ee3427917da8b17c25ead38e832e1ed7c2ef1b1fd?tab=vulnerabilities>`__
-of these pre-built containers, you may view them at `our public Quay
-container
-registry <https://quay.io/repository/attcomdev/kube-controller-manager?tab=tags>`__.
-If you would prefer to build this container yourself, or add any
-additional packages, you are free to use our GitHub
-`dockerfiles <https://github.com/att-comdev/dockerfiles/tree/master/kube-controller-manager>`__
-repository to do so.

-To replace the Kube Controller Manager, run the following commands
-on every node in your cluster before executing ``kubeadm init``:

-::

-    export CEPH_KUBE_CONTROLLER_MANAGER_IMAGE=quay.io/attcomdev/kube-controller-manager:v1.6.8
-    export BASE_KUBE_CONTROLLER_MANAGER_IMAGE=gcr.io/google_containers/kube-controller-manager-amd64:v1.6.8
-    sudo docker pull ${CEPH_KUBE_CONTROLLER_MANAGER_IMAGE}
-    sudo docker tag ${CEPH_KUBE_CONTROLLER_MANAGER_IMAGE} ${BASE_KUBE_CONTROLLER_MANAGER_IMAGE}

-Afterwards, you can ``kubeadm init`` as such:
+Once the dependencies are installed, bringing up a ``kubeadm`` environment
+should just require a single command on the master node:

 ::

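The single ``kubeadm init`` command itself sits outside this hunk's context and is not reproduced here. Once it completes, a quick health check before installing any charts might look like the following sketch:

::

    kubectl get nodes
    kubectl get pods -n kube-system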
@@ -204,24 +177,22 @@ completed.
 Installing Ceph Host Requirements
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

-You need to ensure that ``ceph-common`` or equivalent is
-installed on each of our hosts. Using our Ubuntu example:
+You need to ensure that ``ceph-common`` or equivalent is installed on each of
+our hosts. Using our Ubuntu example:

 ::

     sudo apt-get install ceph-common -y

-Kube Controller Manager DNS Resolution
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+Kubernetes Node DNS Resolution
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

-You will need to allow the Kubernetes Controller to use the
-Kubernetes service DNS server, and add the Kubernetes search suffix
-to the controller's resolv.conf. As of now, the Kubernetes controller
-only mirrors the host's ``resolv.conf``. This is not sufficient if you
-want the controller to know how to correctly resolve container service
-endpoints.
+For each of the nodes to know how to reach Ceph endpoints, each host much also
+have an entry for ``kube-dns``. Since we are using Ubuntu for our example, place
+these changes in ``/etc/network/interfaces`` to ensure they remain after reboot.

-First, find out what the IP Address of your ``kube-dns`` deployment is:
+To do this you will first need to find out what the IP Address of your
+``kube-dns`` deployment is:

 ::

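The lookup command itself is context that falls outside this hunk; it is typically a service listing along these lines (an editorial sketch, not part of the diff):

::

    kubectl get svc --namespace=kube-system kube-dns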
@@ -230,26 +201,6 @@ First, find out what the IP Address of your ``kube-dns`` deployment is:
     kube-dns   10.96.0.10   <none>   53/UDP,53/TCP   1d
     admin@kubenode01:~$

-Then update the controller manager configuration to match:

-::

-    admin@kubenode01:~$ CONTROLLER_MANAGER_POD=$(kubectl get -n kube-system pods -l component=kube-controller-manager --no-headers -o name | head -1 | awk -F '/' '{ print $NF }')
-    admin@kubenode01:~$ kubectl exec -n kube-system ${CONTROLLER_MANAGER_POD} -- sh -c "cat > /etc/resolv.conf <<EOF
-    nameserver 10.96.0.10
-    nameserver 8.8.8.8
-    search cluster.local svc.cluster.local
-    EOF"

-Kubernetes Node DNS Resolution
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

-For each of the nodes to know exactly how to communicate with Ceph (and
-thus MariaDB) endpoints, each host much also have an entry for
-``kube-dns``. Since we are using Ubuntu for our example, place these
-changes in ``/etc/network/interfaces`` to ensure they remain after
-reboot.

 Now we are ready to continue with the Openstack-Helm installation.

 Openstack-Helm Preparation
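Once the node-level DNS entries are in place, a quick way to verify that a host can resolve in-cluster endpoints is to query ``kube-dns`` directly; the service name and namespace below are assumptions for illustration, not part of this commit:

::

    nslookup ceph-mon.ceph.svc.cluster.local 10.96.0.10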
@@ -39,12 +39,6 @@ if [ "x$PVC_BACKEND" == "xceph" ]; then
   kubectl label nodes ceph-mon=enabled --all
   kubectl label nodes ceph-osd=enabled --all
   kubectl label nodes ceph-mds=enabled --all
-  CONTROLLER_MANAGER_POD=$(kubectl get -n kube-system pods -l component=kube-controller-manager --no-headers -o name | awk -F '/' '{ print $NF; exit }')
-  kubectl exec -n kube-system ${CONTROLLER_MANAGER_POD} -- sh -c "cat > /etc/resolv.conf <<EOF
-nameserver 10.96.0.10
-nameserver ${UPSTREAM_DNS}
-search cluster.local svc.cluster.local
-EOF"

   if [ "x$INTEGRATION" == "xmulti" ]; then
     SUBNET_RANGE="$(find_multi_subnet_range)"
@@ -79,6 +73,7 @@ EOF"
   helm install --namespace=openstack ${WORK_DIR}/ceph --name=ceph-openstack-config \
     --set manifests_enabled.storage_secrets=false \
     --set manifests_enabled.deployment=false \
+    --set manifests_enabled.rbd_provisioner=false \
     --set ceph.namespace=ceph \
     --set network.public=$osd_public_network \
     --set network.cluster=$osd_cluster_network
@@ -19,8 +19,4 @@ source ${WORK_DIR}/tools/gate/funcs/kube.sh
 kubeadm_aio_reqs_install
 sudo docker pull ${KUBEADM_IMAGE} || kubeadm_aio_build

-if [ "x$PVC_BACKEND" == "xceph" ]; then
-  ceph_kube_controller_manager_replace
-fi

 kubeadm_aio_launch
@@ -23,8 +23,6 @@ export SERVICE_TEST_TIMEOUT=${SERVICE_TEST_TIMEOUT:="600"}

 export KUBECONFIG=${HOME}/.kubeadm-aio/admin.conf
 export KUBEADM_IMAGE=openstackhelm/kubeadm-aio:${KUBE_VERSION}
-export BASE_KUBE_CONTROLLER_MANAGER_IMAGE=gcr.io/google_containers/kube-controller-manager-amd64:${KUBE_VERSION}
-export CEPH_KUBE_CONTROLLER_MANAGER_IMAGE=quay.io/attcomdev/kube-controller-manager:${KUBE_VERSION}

 export LOOPBACK_CREATE=${LOOPBACK_CREATE:="false"}
 export LOOPBACK_DEVS=${LOOPBACK_DEVS:="3"}