diff --git a/doc/source/contributor/development-environment-devstack.rst b/doc/source/contributor/development-environment-devstack.rst index d667689e..a458f615 100644 --- a/doc/source/contributor/development-environment-devstack.rst +++ b/doc/source/contributor/development-environment-devstack.rst @@ -106,9 +106,8 @@ Kubernetes Integration By default, Qinling uses Kubernetes as its orchestrator backend, so a k8s all-in-one environment (and some other related tools, e.g. kubectl) is also -setup during devstack installation. +set up during devstack installation. -The idea and most of the scripts come from -`OpenStack-Helm `_ -project originally, but may be probably changed as the project evolving in -future. +The Qinling devstack script uses `kubeadm `_ +to install Kubernetes; refer to ``tools/gate/kubeadm/setup_gate.sh`` for more +details about the Qinling devstack installation. diff --git a/qinling_tempest_plugin/functions/test_python_process_limit.py b/qinling_tempest_plugin/functions/test_python_process_limit.py index 29f82a4f..e04419b5 100644 --- a/qinling_tempest_plugin/functions/test_python_process_limit.py +++ b/qinling_tempest_plugin/functions/test_python_process_limit.py @@ -17,24 +17,22 @@ import resource import time -def main(number=128, **kwargs): - for name, desc in [ - ('RLIMIT_NPROC', 'number of processes'), - ]: - limit_num = getattr(resource, name) - soft, hard = resource.getrlimit(limit_num) - print('Maximum %-25s (%-15s) : %20s %20s' % (desc, name, soft, hard)) +def main(number=10, **kwargs): + soft, hard = resource.getrlimit(resource.RLIMIT_NPROC) + print('(soft, hard): %20s %20s' % (soft, hard)) + + # Set a small soft limit inside the function so the test is not affected + # by whatever limit is already configured outside. + resource.setrlimit(resource.RLIMIT_NPROC, (number, hard)) processes = [] - - for i in range(0, number): + for i in range(0, number+1): p = Process( target=_sleep, args=(i,) ) p.start() processes.append(p) - for p in processes: p.join() diff --git a/qinling_tempest_plugin/tests/api/test_executions.py b/qinling_tempest_plugin/tests/api/test_executions.py index 5df22f3e..0e371557 100644 --- a/qinling_tempest_plugin/tests/api/test_executions.py +++ b/qinling_tempest_plugin/tests/api/test_executions.py @@ -212,6 +212,7 @@ class ExecutionsTest(base.BaseQinlingTest): self.assertEqual('failed', body['status']) result = jsonutils.loads(body['result']) + self.assertNotIn('error', result) self.assertIn( 'Too many open files', result['output'] ) @@ -229,6 +230,7 @@ class ExecutionsTest(base.BaseQinlingTest): self.assertEqual('failed', body['status']) result = jsonutils.loads(body['result']) + self.assertNotIn('error', result) self.assertIn( 'too much resource consumption', result['output'] ) diff --git a/tools/gate/dump_logs.sh b/tools/gate/dump_logs.sh deleted file mode 100644 index 97c82a06..00000000 --- a/tools/gate/dump_logs.sh +++ /dev/null @@ -1,97 +0,0 @@ -#!/bin/bash -set +xe - -# if we can't find kubectl, fail immediately because it is likely -# the whitespace linter fails - no point to collect logs. -if ! type "kubectl" &> /dev/null; then - exit $1 -fi - -echo "Capturing logs from environment."
-mkdir -p ${LOGS_DIR}/k8s/etc -sudo cp -a /etc/kubernetes ${LOGS_DIR}/k8s/etc -sudo chmod 777 --recursive ${LOGS_DIR}/* - -mkdir -p ${LOGS_DIR}/k8s -for OBJECT_TYPE in nodes \ - namespace \ - storageclass; do - kubectl get ${OBJECT_TYPE} -o yaml > ${LOGS_DIR}/k8s/${OBJECT_TYPE}.yaml -done -kubectl describe nodes > ${LOGS_DIR}/k8s/nodes.txt -for OBJECT_TYPE in svc \ - pods \ - jobs \ - deployments \ - daemonsets \ - statefulsets \ - configmaps \ - secrets; do - kubectl get --all-namespaces ${OBJECT_TYPE} -o yaml > \ - ${LOGS_DIR}/k8s/${OBJECT_TYPE}.yaml -done - -mkdir -p ${LOGS_DIR}/k8s/pods -kubectl get pods -a --all-namespaces -o json | jq -r \ - '.items[].metadata | .namespace + " " + .name' | while read line; do - NAMESPACE=$(echo $line | awk '{print $1}') - NAME=$(echo $line | awk '{print $2}') - kubectl get --namespace $NAMESPACE pod $NAME -o json | jq -r \ - '.spec.containers[].name' | while read line; do - CONTAINER=$(echo $line | awk '{print $1}') - kubectl logs $NAME --namespace $NAMESPACE -c $CONTAINER > \ - ${LOGS_DIR}/k8s/pods/$NAMESPACE-$NAME-$CONTAINER.txt - done -done - -mkdir -p ${LOGS_DIR}/k8s/svc -kubectl get svc -o json --all-namespaces | jq -r \ - '.items[].metadata | .namespace + " " + .name' | while read line; do - NAMESPACE=$(echo $line | awk '{print $1}') - NAME=$(echo $line | awk '{print $2}') - kubectl describe svc $NAME --namespace $NAMESPACE > \ - ${LOGS_DIR}/k8s/svc/$NAMESPACE-$NAME.txt -done - -mkdir -p ${LOGS_DIR}/k8s/pvc -kubectl get pvc -o json --all-namespaces | jq -r \ - '.items[].metadata | .namespace + " " + .name' | while read line; do - NAMESPACE=$(echo $line | awk '{print $1}') - NAME=$(echo $line | awk '{print $2}') - kubectl describe pvc $NAME --namespace $NAMESPACE > \ - ${LOGS_DIR}/k8s/pvc/$NAMESPACE-$NAME.txt -done - -mkdir -p ${LOGS_DIR}/k8s/rbac -for OBJECT_TYPE in clusterroles \ - roles \ - clusterrolebindings \ - rolebindings; do - kubectl get ${OBJECT_TYPE} -o yaml > ${LOGS_DIR}/k8s/rbac/${OBJECT_TYPE}.yaml -done - -mkdir -p ${LOGS_DIR}/k8s/descriptions -for NAMESPACE in $(kubectl get namespaces -o name | awk -F '/' '{ print $NF }') ; do - for OBJECT in $(kubectl get all --show-all -n $NAMESPACE -o name) ; do - OBJECT_TYPE=$(echo $OBJECT | awk -F '/' '{ print $1 }') - OBJECT_NAME=$(echo $OBJECT | awk -F '/' '{ print $2 }') - mkdir -p ${LOGS_DIR}/k8s/descriptions/${NAMESPACE}/${OBJECT_TYPE} - kubectl describe -n $NAMESPACE $OBJECT > ${LOGS_DIR}/k8s/descriptions/${NAMESPACE}/$OBJECT_TYPE/$OBJECT_NAME.txt - done -done - -NODE_NAME=$(hostname) -mkdir -p ${LOGS_DIR}/nodes/${NODE_NAME} -echo "${NODE_NAME}" > ${LOGS_DIR}/nodes/master.txt -sudo docker logs kubelet 2> ${LOGS_DIR}/nodes/${NODE_NAME}/kubelet.txt -sudo docker logs kubeadm-aio 2>&1 > ${LOGS_DIR}/nodes/${NODE_NAME}/kubeadm-aio.txt -sudo docker images --digests --no-trunc --all > ${LOGS_DIR}/nodes/${NODE_NAME}/images.txt -sudo du -h --max-depth=1 /var/lib/docker | sort -hr > ${LOGS_DIR}/nodes/${NODE_NAME}/docker-size.txt -sudo iptables-save > ${LOGS_DIR}/nodes/${NODE_NAME}/iptables.txt -sudo ip a > ${LOGS_DIR}/nodes/${NODE_NAME}/ip.txt -sudo route -n > ${LOGS_DIR}/nodes/${NODE_NAME}/routes.txt -sudo arp -a > ${LOGS_DIR}/nodes/${NODE_NAME}/arp.txt -cat /etc/resolv.conf > ${LOGS_DIR}/nodes/${NODE_NAME}/resolv.conf -sudo lshw > ${LOGS_DIR}/nodes/${NODE_NAME}/hardware.txt - -exit $1 \ No newline at end of file diff --git a/tools/gate/funcs/common.sh b/tools/gate/funcs/common.sh deleted file mode 100644 index 9f5218a3..00000000 --- a/tools/gate/funcs/common.sh +++ /dev/null @@ -1,55 +0,0 @@ 
-#!/bin/bash -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -function base_install { - if [ "x$HOST_OS" == "xubuntu" ]; then - sudo apt-get update -y - sudo apt-get install -y --no-install-recommends \ - iproute2 \ - iptables \ - ipcalc \ - nmap \ - lshw \ - screen - elif [ "x$HOST_OS" == "xcentos" ]; then - sudo yum install -y \ - epel-release - # ipcalc is in the initscripts package - sudo yum install -y \ - iproute \ - iptables \ - initscripts \ - nmap \ - lshw - elif [ "x$HOST_OS" == "xfedora" ]; then - sudo dnf install -y \ - iproute \ - iptables \ - ipcalc \ - nmap \ - lshw - fi -} - -function create_k8s_screen { - # Starts a proxy to the Kubernetes API server in a screen session - sudo screen -S kube_proxy -X quit || true - sudo screen -dmS kube_proxy && sudo screen -S kube_proxy -X screen -t kube_proxy - sudo screen -S kube_proxy -p kube_proxy -X stuff 'kubectl proxy --accept-hosts=".*" --address="0.0.0.0"\n' -} - -function gate_base_setup { - # Install base requirements - base_install -} diff --git a/tools/gate/funcs/kube.sh b/tools/gate/funcs/kube.sh deleted file mode 100644 index 764356e8..00000000 --- a/tools/gate/funcs/kube.sh +++ /dev/null @@ -1,157 +0,0 @@ -#!/bin/bash -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -function kube_wait_for_pods { - # From Kolla-Kubernetes, orginal authors Kevin Fox & Serguei Bezverkhi - # Default wait timeout is 180 seconds - set +x - end=$(date +%s) - if ! [ -z $2 ]; then - end=$((end + $2)) - else - end=$((end + 180)) - fi - while true; do - kubectl get pods --namespace=$1 -o json | jq -r \ - '.items[].status.phase' | grep Pending > /dev/null && \ - PENDING=True || PENDING=False - query='.items[]|select(.status.phase=="Running")' - query="$query|.status.containerStatuses[].ready" - kubectl get pods --namespace=$1 -o json | jq -r "$query" | \ - grep false > /dev/null && READY="False" || READY="True" - kubectl get jobs -o json --namespace=$1 | jq -r \ - '.items[] | .spec.completions == .status.succeeded' | \ - grep false > /dev/null && JOBR="False" || JOBR="True" - [ $PENDING == "False" -a $READY == "True" -a $JOBR == "True" ] && \ - break || true - sleep 1 - now=$(date +%s) - [ $now -gt $end ] && echo "containers failed to start." && \ - kubectl get pods --namespace $1 -o wide && exit -1 - done - set -x -} - -function kube_wait_for_nodes { - # Default wait timeout is 180 seconds - set +x - end=$(date +%s) - if ! 
[ -z $2 ]; then - end=$((end + $2)) - else - end=$((end + 180)) - fi - while true; do - NUMBER_OF_NODES_EXPECTED=$1 - NUMBER_OF_NODES=$(kubectl get nodes --no-headers -o name | wc -l) - [ $NUMBER_OF_NODES -eq $NUMBER_OF_NODES_EXPECTED ] && \ - NODES_ONLINE="True" || NODES_ONLINE="False" - while read SUB_NODE; do - echo $SUB_NODE | grep -q ^Ready && NODES_READY="True" || NODES_READY="False" - done < <(kubectl get nodes --no-headers | awk '{ print $2 }') - [ $NODES_ONLINE == "True" -a $NODES_READY == "True" ] && \ - break || true - sleep 5 - now=$(date +%s) - [ $now -gt $end ] && echo "Nodes Failed to be ready in time." && \ - kubectl get nodes -o wide && exit -1 - done - set -x -} - -function kubeadm_aio_reqs_install { - GET_DOCKER="True" - if [ "x$HOST_OS" == "xubuntu" ]; then - sudo apt-get install -y --no-install-recommends -qq \ - jq - elif [ "x$HOST_OS" == "xcentos" ]; then - sudo yum install -y \ - epel-release - sudo yum install -y \ - docker-latest \ - jq - GET_DOCKER="False" - sudo cp -f /usr/lib/systemd/system/docker-latest.service /etc/systemd/system/docker.service - sudo sed -i "s|/var/lib/docker-latest|/var/lib/docker|g" /etc/systemd/system/docker.service - sudo sed -i 's/^OPTIONS/#OPTIONS/g' /etc/sysconfig/docker-latest - sudo sed -i "s|^MountFlags=slave|MountFlags=shared|g" /etc/systemd/system/docker.service - sudo sed -i "/--seccomp-profile/,+1 d" /etc/systemd/system/docker.service - echo "DOCKER_STORAGE_OPTIONS=--storage-driver=overlay" | sudo tee /etc/sysconfig/docker-latest-storage - sudo setenforce 0 || true - sudo systemctl daemon-reload - sudo systemctl restart docker - elif [ "x$HOST_OS" == "xfedora" ]; then - GET_DOCKER="False" - sudo dnf install -y \ - docker-latest \ - jq - sudo cp -f /usr/lib/systemd/system/docker-latest.service /etc/systemd/system/docker.service - sudo sed -i "s|/var/lib/docker-latest|/var/lib/docker|g" /etc/systemd/system/docker.service - echo "DOCKER_STORAGE_OPTIONS=--storage-driver=overlay2" | sudo tee /etc/sysconfig/docker-latest-storage - sudo setenforce 0 || true - sudo systemctl daemon-reload - sudo systemctl restart docker - fi - - # Install docker and kubectl - TMP_DIR=$(mktemp -d) - - if [ ! 
-x /usr/local/bin/kubectl ]; then - curl -sSL https://storage.googleapis.com/kubernetes-release/release/${KUBE_VERSION}/bin/linux/amd64/kubectl -o ${TMP_DIR}/kubectl - chmod +x ${TMP_DIR}/kubectl - sudo mv ${TMP_DIR}/kubectl /usr/local/bin/kubectl - fi - - if [ "$GET_DOCKER" == "True" ]; then - curl -fsSL get.docker.com -o ${TMP_DIR}/get-docker.sh - sudo sh ${TMP_DIR}/get-docker.sh - fi - - rm -rf ${TMP_DIR} -} - -function kubeadm_aio_build { - sudo docker build --pull -t ${KUBEADM_IMAGE} tools/kubeadm-aio -} - -function kubeadm_aio_launch { - ${WORK_DIR}/tools/kubeadm-aio/kubeadm-aio-launcher.sh - - rm -rf ${HOME}/.kube - mkdir -p ${HOME}/.kube - cat ${KUBECONFIG} > ${HOME}/.kube/config - kube_wait_for_pods kube-system 300 - kube_wait_for_pods default 300 -} - -function kubeadm_aio_clean { - sudo docker rm -f kubeadm-aio || true - sudo docker rm -f kubelet || true - sudo docker ps -aq | xargs -r -l1 -P16 sudo docker rm -f - sudo rm -rfv \ - /etc/cni/net.d \ - /etc/kubernetes \ - /var/lib/etcd \ - /var/etcd \ - /var/lib/kubelet/* \ - /var/lib/nova \ - ${HOME}/.kubeadm-aio/admin.conf \ - /var/lib/openstack-helm \ - /var/lib/nfs-provisioner || true -} - -function ceph_kube_controller_manager_replace { - sudo docker pull ${CEPH_KUBE_CONTROLLER_MANAGER_IMAGE} - sudo docker tag ${CEPH_KUBE_CONTROLLER_MANAGER_IMAGE} ${BASE_KUBE_CONTROLLER_MANAGER_IMAGE} -} \ No newline at end of file diff --git a/tools/gate/funcs/network.sh b/tools/gate/funcs/network.sh deleted file mode 100755 index e3dfbdaf..00000000 --- a/tools/gate/funcs/network.sh +++ /dev/null @@ -1,88 +0,0 @@ -#!/bin/bash -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -function net_default_iface { - sudo ip -4 route list 0/0 | awk '{ print $5; exit }' -} - -function net_default_host_addr { - sudo ip addr | awk "/inet / && /$(net_default_iface)/{print \$2; exit }" -} - -function net_default_host_ip { - echo $(net_default_host_addr) | awk -F '/' '{ print $1; exit }' -} - -function net_resolv_pre_kube { - sudo cp -f /etc/resolv.conf /etc/resolv-pre-kube.conf - sudo rm -f /etc/resolv.conf - cat << EOF | sudo tee /etc/resolv.conf -nameserver ${UPSTREAM_DNS} -EOF -} - -function net_resolv_post_kube { - sudo cp -f /etc/resolv-pre-kube.conf /etc/resolv.conf -} - -function net_hosts_pre_kube { - sudo cp -f /etc/hosts /etc/hosts-pre-kube - sudo sed -i "/$(hostname)/d" /etc/hosts - sudo sed -i "/127.0.0.1/d" /etc/hosts - sudo sed -i "1 i 127.0.0.1 localhost" /etc/hosts - - host_ip=$(net_default_host_ip) - echo "${host_ip} $(hostname)" | sudo tee -a /etc/hosts -} - -function net_hosts_post_kube { - sudo cp -f /etc/hosts-pre-kube /etc/hosts -} - -function find_subnet_range { - if [ "x$HOST_OS" == "xubuntu" ]; then - ipcalc $(net_default_host_addr) | awk '/^Network/ { print $2 }' - else - eval $(ipcalc --network --prefix $(net_default_host_addr)) - echo "$NETWORK/$PREFIX" - fi -} - -function find_multi_subnet_range { - : ${PRIMARY_NODE_IP:="$(cat /etc/nodepool/primary_node | tail -1)"} - : ${SUB_NODE_IPS:="$(cat /etc/nodepool/sub_nodes)"} - NODE_IPS="${PRIMARY_NODE_IP} ${SUB_NODE_IPS}" - NODE_IP_UNSORTED=$(mktemp --suffix=.txt) - for NODE_IP in $NODE_IPS; do - echo $NODE_IP >> ${NODE_IP_UNSORTED} - done - NODE_IP_SORTED=$(mktemp --suffix=.txt) - sort -V ${NODE_IP_UNSORTED} > ${NODE_IP_SORTED} - rm -f ${NODE_IP_UNSORTED} - FIRST_IP_SUBNET=$(ipcalc "$(head -n 1 ${NODE_IP_SORTED})/24" | awk '/^Network/ { print $2 }') - LAST_IP_SUBNET=$(ipcalc "$(tail -n 1 ${NODE_IP_SORTED})/24" | awk '/^Network/ { print $2 }') - rm -f ${NODE_IP_SORTED} - function ip_diff { - echo $(($(echo $LAST_IP_SUBNET | awk -F '.' "{ print \$$1}") - $(echo $FIRST_IP_SUBNET | awk -F '.' "{ print \$$1}"))) - } - for X in {1..4}; do - if ! [ "$(ip_diff $X)" -eq "0" ]; then - SUBMASK=$(((($X - 1 )) * 8)) - break - elif [ $X -eq "4" ]; then - SUBMASK=24 - fi - done - echo ${FIRST_IP_SUBNET%/*}/${SUBMASK} -} \ No newline at end of file diff --git a/tools/gate/kubeadm_aio.sh b/tools/gate/kubeadm_aio.sh deleted file mode 100644 index 6f24c136..00000000 --- a/tools/gate/kubeadm_aio.sh +++ /dev/null @@ -1,25 +0,0 @@ -#!/bin/bash -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -set -ex -: ${WORK_DIR:="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"} -source ${WORK_DIR}/tools/gate/vars.sh -source ${WORK_DIR}/tools/gate/funcs/network.sh -source ${WORK_DIR}/tools/gate/funcs/kube.sh - -kubeadm_aio_reqs_install - -# Re-use the docker image pre-built by openstack-helm team. 
-sudo docker pull ${KUBEADM_IMAGE} || kubeadm_aio_build - -kubeadm_aio_launch diff --git a/tools/gate/setup_gate.sh b/tools/gate/setup_gate.sh deleted file mode 100755 index 2a468799..00000000 --- a/tools/gate/setup_gate.sh +++ /dev/null @@ -1,41 +0,0 @@ -#!/bin/bash -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -set -ex -export WORK_DIR=$(pwd) -source ${WORK_DIR}/tools/gate/vars.sh -source ${WORK_DIR}/tools/gate/funcs/common.sh -source ${WORK_DIR}/tools/gate/funcs/network.sh - -# Setup the logging location: by default use the working dir as the root. -rm -rf ${LOGS_DIR} || true -mkdir -p ${LOGS_DIR} - -function dump_logs () { - bash ${WORK_DIR}/tools/gate/dump_logs.sh -} -trap 'dump_logs "$?"' ERR - -# Do the basic node setup for running the gate -gate_base_setup - -# We setup the network for pre kube here, to enable cluster restarts on -# development machines -net_resolv_pre_kube -net_hosts_pre_kube - -# Setup the K8s Cluster -bash ${WORK_DIR}/tools/gate/kubeadm_aio.sh - -# Starts a proxy to the Kubernetes API server in a screen session -create_k8s_screen diff --git a/tools/gate/vars.sh b/tools/gate/vars.sh deleted file mode 100755 index c862356f..00000000 --- a/tools/gate/vars.sh +++ /dev/null @@ -1,42 +0,0 @@ -#!/bin/bash -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -# Set work dir if not already done -: ${WORK_DIR:="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"} - -# Set logs directory -export LOGS_DIR=${LOGS_DIR:-"${WORK_DIR}/logs"} - -# Get Host OS -source /etc/os-release -export HOST_OS=${HOST_OS:="${ID}"} - -# Set versions of K8s to use -export KUBE_VERSION=${KUBE_VERSION:-"v1.7.3"} -export KUBEADM_IMAGE_VERSION=${KUBEADM_IMAGE_VERSION:-"v1.7.3"} - -# Set K8s-AIO options -export KUBECONFIG=${KUBECONFIG:="${HOME}/.kubeadm-aio/admin.conf"} -export KUBEADM_IMAGE=${KUBEADM_IMAGE:="openstackhelm/kubeadm-aio:${KUBEADM_IMAGE_VERSION}"} - -# Set K8s network options -export CNI_POD_CIDR=${CNI_POD_CIDR:="192.168.0.0/16"} -export KUBE_CNI=${KUBE_CNI:="calico"} - -# Set Upstream DNS -export UPSTREAM_DNS=${UPSTREAM_DNS:-"8.8.8.8"} - -# Set gate script timeouts -export SERVICE_LAUNCH_TIMEOUT=${SERVICE_LAUNCH_TIMEOUT:="600"} -export SERVICE_TEST_TIMEOUT=${SERVICE_TEST_TIMEOUT:="600"} diff --git a/tools/kubeadm-aio/Dockerfile b/tools/kubeadm-aio/Dockerfile deleted file mode 100644 index ed34098f..00000000 --- a/tools/kubeadm-aio/Dockerfile +++ /dev/null @@ -1,88 +0,0 @@ -FROM ubuntu:16.04 -MAINTAINER pete.birley@att.com - -ENV KUBE_VERSION=v1.6.8 \ - CNI_VERSION=v0.6.0-rc2 \ - container="docker" \ - DEBIAN_FRONTEND="noninteractive" - -RUN set -x \ - && TMP_DIR=$(mktemp --directory) \ - && cd ${TMP_DIR} \ - && apt-get update \ - && apt-get install -y --no-install-recommends \ - apt-transport-https \ - ca-certificates \ - curl \ - dbus \ - make \ - git \ - vim \ - jq \ -# Add Kubernetes repo - && curl -sSL https://packages.cloud.google.com/apt/doc/apt-key.gpg | apt-key add - \ - && echo "deb http://apt.kubernetes.io/ kubernetes-xenial main" > /etc/apt/sources.list.d/kubernetes.list \ - && apt-get update \ - && apt-get install -y --no-install-recommends \ - docker.io \ - iptables \ - kubectl \ - kubelet \ - kubernetes-cni \ -# Install Kubeadm without running postinstall script as it expects systemd to be running. - && apt-get download kubeadm \ - && dpkg --unpack kubeadm*.deb \ - && mv /var/lib/dpkg/info/kubeadm.postinst /opt/kubeadm.postinst \ - && dpkg --configure kubeadm \ - && apt-get install -yf kubeadm \ - && mkdir -p /etc/kubernetes/manifests \ -# Install kubectl: - && curl -sSL https://dl.k8s.io/${KUBE_VERSION}/kubernetes-client-linux-amd64.tar.gz | tar -zxv --strip-components=1 \ - && mv ${TMP_DIR}/client/bin/kubectl /usr/bin/kubectl \ - && chmod +x /usr/bin/kubectl \ -# Install kubelet & kubeadm binaries: -# (portdirect) We do things in this weird way to let us use the deps and systemd -# units from the packages in the .deb repo. - && curl -sSL https://dl.k8s.io/${KUBE_VERSION}/kubernetes-server-linux-amd64.tar.gz | tar -zxv --strip-components=1 \ - && mv ${TMP_DIR}/server/bin/kubelet /usr/bin/kubelet \ - && chmod +x /usr/bin/kubelet \ - && mv ${TMP_DIR}/server/bin/kubeadm /usr/bin/kubeadm \ - && chmod +x /usr/bin/kubeadm \ -# Install CNI: - && CNI_BIN_DIR=/opt/cni/bin \ - && mkdir -p ${CNI_BIN_DIR} \ - && cd ${CNI_BIN_DIR} \ - && curl -sSL https://github.com/containernetworking/plugins/releases/download/$CNI_VERSION/cni-plugins-amd64-$CNI_VERSION.tgz | tar -zxv --strip-components=1 \ - && cd ${TMP_DIR} \ -# Move kubelet binary as we will run containerised - && mv /usr/bin/kubelet /usr/bin/kubelet-real \ -# Install utils for PVC provisioners - && apt-get install -y --no-install-recommends \ - nfs-common \ - ceph-common \ - kmod \ -# Tweak Systemd units and targets for running in a container - && find /lib/systemd/system/sysinit.target.wants/ ! 
-name 'systemd-tmpfiles-setup.service' -type l -exec rm -fv {} + \ - && rm -fv \ - /lib/systemd/system/multi-user.target.wants/* \ - /etc/systemd/system/*.wants/* \ - /lib/systemd/system/local-fs.target.wants/* \ - /lib/systemd/system/sockets.target.wants/*udev* \ - /lib/systemd/system/sockets.target.wants/*initctl* \ - /lib/systemd/system/basic.target.wants/* \ -# Clean up apt cache - && rm -rf /var/lib/apt/lists/* \ -# Clean up tmp dir - && cd / \ - && rm -rf ${TMP_DIR} - -# Load assets into place, setup startup target & units -COPY ./assets/ / -RUN set -x \ - && ln -s /usr/lib/systemd/system/container-up.target /etc/systemd/system/default.target \ - && mkdir -p /etc/systemd/system/container-up.target.wants \ - && ln -s /usr/lib/systemd/system/kubeadm-aio.service /etc/systemd/system/container-up.target.wants/kubeadm-aio.service - -VOLUME /sys/fs/cgroup - -CMD /kubeadm-aio diff --git a/tools/kubeadm-aio/README.rst b/tools/kubeadm-aio/README.rst deleted file mode 100644 index a8d79605..00000000 --- a/tools/kubeadm-aio/README.rst +++ /dev/null @@ -1,110 +0,0 @@ -Kubeadm AIO Container -===================== - -This container builds a small AIO Kubeadm based Kubernetes deployment -for Development and Gating use. - -Instructions ------------- - -OS Specific Host setup: -~~~~~~~~~~~~~~~~~~~~~~~ - -Ubuntu: -^^^^^^^ - -From a freshly provisioned Ubuntu 16.04 LTS host run: - -.. code:: bash - - sudo apt-get update -y - sudo apt-get install -y \ - docker.io \ - nfs-common \ - git \ - make - -OS Independent Host setup: -~~~~~~~~~~~~~~~~~~~~~~~~~~ - -You should install the ``kubectl`` and ``helm`` binaries: - -.. code:: bash - - KUBE_VERSION=v1.6.8 - HELM_VERSION=v2.5.1 - - TMP_DIR=$(mktemp -d) - curl -sSL https://storage.googleapis.com/kubernetes-release/release/${KUBE_VERSION}/bin/linux/amd64/kubectl -o ${TMP_DIR}/kubectl - chmod +x ${TMP_DIR}/kubectl - sudo mv ${TMP_DIR}/kubectl /usr/local/bin/kubectl - curl -sSL https://storage.googleapis.com/kubernetes-helm/helm-${HELM_VERSION}-linux-amd64.tar.gz | tar -zxv --strip-components=1 -C ${TMP_DIR} - sudo mv ${TMP_DIR}/helm /usr/local/bin/helm - rm -rf ${TMP_DIR} - -And clone the OpenStack-Helm repo: - -.. code:: bash - - git clone https://git.openstack.org/openstack/openstack-helm - -Build the AIO environment (optional) -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -A known good image is published to dockerhub on a fairly regular basis, but if -you wish to build your own image, from the root directory of the OpenStack-Helm -repo run: - -.. code:: bash - - export KUBEADM_IMAGE=openstackhelm/kubeadm-aio:v1.6.8 - sudo docker build --pull -t ${KUBEADM_IMAGE} tools/kubeadm-aio - -Deploy the AIO environment -~~~~~~~~~~~~~~~~~~~~~~~~~~ - -To launch the environment run: - -.. code:: bash - - export KUBEADM_IMAGE=openstackhelm/kubeadm-aio:v1.6.8 - export KUBE_VERSION=v1.6.8 - ./tools/kubeadm-aio/kubeadm-aio-launcher.sh - export KUBECONFIG=${HOME}/.kubeadm-aio/admin.conf - -Once this has run without errors, you should hopefully have a Kubernetes single -node environment running, with Helm, Calico, appropriate RBAC rules and node -labels to get developing. - -Prior to launching you can also optionally set the following environment -variables to control aspects of the CNI used: - -.. 
code:: bash - - export KUBE_CNI=calico # or "canal" "weave" "flannel" - export CNI_POD_CIDR=192.168.0.0/16 - -If you wish to use this environment as the primary Kubernetes environment on -your host you may run the following, but note that this will wipe any previous -client configuration you may have. - -.. code:: bash - - mkdir -p ${HOME}/.kube - cat ${HOME}/.kubeadm-aio/admin.conf > ${HOME}/.kube/config - -If you wish to create dummy network devices for Neutron to manage there -is a helper script that can set them up for you: - -.. code:: bash - - sudo docker exec kubelet /usr/bin/openstack-helm-aio-network-prep - -Logs -~~~~ - -You can get the logs from your ``kubeadm-aio`` container by running: - -.. code:: bash - - sudo docker logs -f kubeadm-aio diff --git a/tools/kubeadm-aio/assets/etc/kube-cni b/tools/kubeadm-aio/assets/etc/kube-cni deleted file mode 100644 index 2f1a9ce3..00000000 --- a/tools/kubeadm-aio/assets/etc/kube-cni +++ /dev/null @@ -1,2 +0,0 @@ -KUBE_CNI=calico -CNI_POD_CIDR=192.168.0.0/16 diff --git a/tools/kubeadm-aio/assets/etc/kube-role b/tools/kubeadm-aio/assets/etc/kube-role deleted file mode 100644 index 804a47a7..00000000 --- a/tools/kubeadm-aio/assets/etc/kube-role +++ /dev/null @@ -1,3 +0,0 @@ -# If KUBE_ROLE is set 'master' kubeadm-aio will set this node up to be a master -# node, otherwise if 'worker', will join an existing cluster. -KUBE_ROLE=master diff --git a/tools/kubeadm-aio/assets/etc/kube-version b/tools/kubeadm-aio/assets/etc/kube-version deleted file mode 100644 index a353de8b..00000000 --- a/tools/kubeadm-aio/assets/etc/kube-version +++ /dev/null @@ -1,3 +0,0 @@ -# If KUBE_VERSION is set 'default' kubeadm will use the default version of K8s -# otherwise the version specified here will be used. -KUBE_VERSION=default diff --git a/tools/kubeadm-aio/assets/etc/kubeadm-join-command-args b/tools/kubeadm-aio/assets/etc/kubeadm-join-command-args deleted file mode 100644 index c5813a89..00000000 --- a/tools/kubeadm-aio/assets/etc/kubeadm-join-command-args +++ /dev/null @@ -1 +0,0 @@ -KUBEADM_JOIN_ARGS="no_command_supplied" diff --git a/tools/kubeadm-aio/assets/etc/kubeadm.conf b/tools/kubeadm-aio/assets/etc/kubeadm.conf deleted file mode 100644 index af630535..00000000 --- a/tools/kubeadm-aio/assets/etc/kubeadm.conf +++ /dev/null @@ -1,4 +0,0 @@ -apiVersion: kubeadm.k8s.io/v1alpha1 -kind: MasterConfiguration -apiServerExtraArgs: - runtime-config: "batch/v2alpha1=true" diff --git a/tools/kubeadm-aio/assets/etc/kubeapi-device b/tools/kubeadm-aio/assets/etc/kubeapi-device deleted file mode 100644 index a9118c4c..00000000 --- a/tools/kubeadm-aio/assets/etc/kubeapi-device +++ /dev/null @@ -1,3 +0,0 @@ -# If KUBE_BIND_DEV is set to 'autodetect' we will use kubeadm's autodetect logic -# otherwise use the device specified to find the IP address to bind to. 
-KUBE_BIND_DEV=autodetect diff --git a/tools/kubeadm-aio/assets/etc/kubelet-container b/tools/kubeadm-aio/assets/etc/kubelet-container deleted file mode 100644 index 557545e4..00000000 --- a/tools/kubeadm-aio/assets/etc/kubelet-container +++ /dev/null @@ -1,3 +0,0 @@ -# If KUBELET_CONTAINER is set 'to_this' one we will not attempt to launch a new -# container for the kubelet process, otherwise use the image tag specified -KUBELET_CONTAINER=this_one diff --git a/tools/kubeadm-aio/assets/kubeadm-aio b/tools/kubeadm-aio/assets/kubeadm-aio deleted file mode 100755 index f9009bca..00000000 --- a/tools/kubeadm-aio/assets/kubeadm-aio +++ /dev/null @@ -1,54 +0,0 @@ -#!/bin/bash - -# Copyright 2017 The Openstack-Helm Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -set -xe - -echo 'Checking cgroups' -if ls -dZ /sys/fs/cgroup | grep -q :svirt_sandbox_file_t: ; then - echo 'Invocation error: use -v /sys/fs/cgroup:/sys/fs/cgroup:ro parameter to docker run.' - exit 1 -fi - -echo 'Setting up K8s version to deploy' -: ${KUBE_VERSION:="default"} -sed -i "s|KUBE_VERSION=.*|KUBE_VERSION=${KUBE_VERSION}|g" /etc/kube-version - -echo 'Setting up device to use for kube-api' -: ${KUBE_BIND_DEV:="autodetect"} -sed -i "s|KUBE_BIND_DEV=.*|KUBE_BIND_DEV=${KUBE_BIND_DEV}|g" /etc/kubeapi-device - -echo 'Setting up container image to use for kubelet' -: ${KUBELET_CONTAINER:="this_one"} -sed -i "s|KUBELET_CONTAINER=.*|KUBELET_CONTAINER=${KUBELET_CONTAINER}|g" /etc/kubelet-container - -echo 'Setting whether this node is a master, or slave, K8s node' -: ${KUBE_ROLE:="master"} -sed -i "s|KUBE_ROLE=.*|KUBE_ROLE=${KUBE_ROLE}|g" /etc/kube-role - -echo 'Setting any kubeadm join commands' -: ${KUBEADM_JOIN_ARGS:="no_command_supplied"} -sed -i "s|KUBEADM_JOIN_ARGS=.*|KUBEADM_JOIN_ARGS=\"${KUBEADM_JOIN_ARGS}\"|g" /etc/kubeadm-join-command-args - -echo 'Setting CNI pod CIDR' -: ${CNI_POD_CIDR:="192.168.0.0/16"} -sed -i "s|192.168.0.0/16|${CNI_POD_CIDR}|g" /opt/cni-manifests/*.yaml -sed -i "s|CNI_POD_CIDR=.*|CNI_POD_CIDR=\"${CNI_POD_CIDR}\"|g" /etc/kube-cni - -echo 'Setting CNI ' -: ${KUBE_CNI:="calico"} -sed -i "s|KUBE_CNI=.*|KUBE_CNI=\"${KUBE_CNI}\"|g" /etc/kube-cni - -echo 'Starting Systemd' -exec /bin/systemd --system diff --git a/tools/kubeadm-aio/assets/opt/cni-manifests/calico.yaml b/tools/kubeadm-aio/assets/opt/cni-manifests/calico.yaml deleted file mode 100644 index 67a53675..00000000 --- a/tools/kubeadm-aio/assets/opt/cni-manifests/calico.yaml +++ /dev/null @@ -1,365 +0,0 @@ -# Calico Version v2.1.4 -# http://docs.projectcalico.org/v2.1/releases#v2.1.4 -# This manifest includes the following component versions: -# calico/node:v1.1.3 -# calico/cni:v1.7.0 -# calico/kube-policy-controller:v0.5.4 - -# This ConfigMap is used to configure a self-hosted Calico installation. -kind: ConfigMap -apiVersion: v1 -metadata: - name: calico-config - namespace: kube-system -data: - # The location of your etcd cluster. This uses the Service clusterIP - # defined below. 
- etcd_endpoints: "http://10.96.232.136:6666" - - # Configure the Calico backend to use. - calico_backend: "bird" - - # The CNI network configuration to install on each node. - cni_network_config: |- - { - "name": "k8s-pod-network", - "type": "calico", - "etcd_endpoints": "__ETCD_ENDPOINTS__", - "log_level": "info", - "ipam": { - "type": "calico-ipam" - }, - "policy": { - "type": "k8s", - "k8s_api_root": "https://__KUBERNETES_SERVICE_HOST__:__KUBERNETES_SERVICE_PORT__", - "k8s_auth_token": "__SERVICEACCOUNT_TOKEN__" - }, - "kubernetes": { - "kubeconfig": "/etc/cni/net.d/__KUBECONFIG_FILENAME__" - } - } - ---- - -# This manifest installs the Calico etcd on the kubeadm master. This uses a DaemonSet -# to force it to run on the master even when the master isn't schedulable, and uses -# nodeSelector to ensure it only runs on the master. -apiVersion: extensions/v1beta1 -kind: DaemonSet -metadata: - name: calico-etcd - namespace: kube-system - labels: - k8s-app: calico-etcd -spec: - template: - metadata: - labels: - k8s-app: calico-etcd - annotations: - # Mark this pod as a critical add-on; when enabled, the critical add-on scheduler - # reserves resources for critical add-on pods so that they can be rescheduled after - # a failure. This annotation works in tandem with the toleration below. - scheduler.alpha.kubernetes.io/critical-pod: '' - spec: - # Only run this pod on the master. - tolerations: - - key: node-role.kubernetes.io/master - effect: NoSchedule - # Allow this pod to be rescheduled while the node is in "critical add-ons only" mode. - # This, along with the annotation above marks this pod as a critical add-on. - - key: CriticalAddonsOnly - operator: Exists - nodeSelector: - node-role.kubernetes.io/master: "" - hostNetwork: true - containers: - - name: calico-etcd - image: gcr.io/google_containers/etcd:2.2.1 - env: - - name: CALICO_ETCD_IP - valueFrom: - fieldRef: - fieldPath: status.podIP - command: ["/bin/sh","-c"] - args: ["/usr/local/bin/etcd --name=calico --data-dir=/var/etcd/calico-data --advertise-client-urls=http://$CALICO_ETCD_IP:6666 --listen-client-urls=http://0.0.0.0:6666 --listen-peer-urls=http://0.0.0.0:6667"] - volumeMounts: - - name: var-etcd - mountPath: /var/etcd - volumes: - - name: var-etcd - hostPath: - path: /var/etcd - ---- - -# This manfiest installs the Service which gets traffic to the Calico -# etcd. -apiVersion: v1 -kind: Service -metadata: - labels: - k8s-app: calico-etcd - name: calico-etcd - namespace: kube-system -spec: - # Select the calico-etcd pod running on the master. - selector: - k8s-app: calico-etcd - # This ClusterIP needs to be known in advance, since we cannot rely - # on DNS to get access to etcd. - clusterIP: 10.96.232.136 - ports: - - port: 6666 - ---- - -# This manifest installs the calico/node container, as well -# as the Calico CNI plugins and network config on -# each master and worker node in a Kubernetes cluster. -kind: DaemonSet -apiVersion: extensions/v1beta1 -metadata: - name: calico-node - namespace: kube-system - labels: - k8s-app: calico-node -spec: - selector: - matchLabels: - k8s-app: calico-node - template: - metadata: - labels: - k8s-app: calico-node - annotations: - # Mark this pod as a critical add-on; when enabled, the critical add-on scheduler - # reserves resources for critical add-on pods so that they can be rescheduled after - # a failure. This annotation works in tandem with the toleration below. 
- scheduler.alpha.kubernetes.io/critical-pod: '' - spec: - hostNetwork: true - tolerations: - - key: node-role.kubernetes.io/master - effect: NoSchedule - # Allow this pod to be rescheduled while the node is in "critical add-ons only" mode. - # This, along with the annotation above marks this pod as a critical add-on. - - key: CriticalAddonsOnly - operator: Exists - serviceAccountName: calico-cni-plugin - containers: - # Runs calico/node container on each Kubernetes node. This - # container programs network policy and routes on each - # host. - - name: calico-node - image: quay.io/calico/node:v1.1.3 - env: - # The location of the Calico etcd cluster. - - name: ETCD_ENDPOINTS - valueFrom: - configMapKeyRef: - name: calico-config - key: etcd_endpoints - # Enable BGP. Disable to enforce policy only. - - name: CALICO_NETWORKING_BACKEND - valueFrom: - configMapKeyRef: - name: calico-config - key: calico_backend - # Disable file logging so `kubectl logs` works. - - name: CALICO_DISABLE_FILE_LOGGING - value: "true" - # Set Felix endpoint to host default action to ACCEPT. - - name: FELIX_DEFAULTENDPOINTTOHOSTACTION - value: "ACCEPT" - # Configure the IP Pool from which Pod IPs will be chosen. - - name: CALICO_IPV4POOL_CIDR - value: "192.168.0.0/16" - - name: CALICO_IPV4POOL_IPIP - value: "always" - # Disable IPv6 on Kubernetes. - - name: FELIX_IPV6SUPPORT - value: "false" - # Set Felix logging to "info" - - name: FELIX_LOGSEVERITYSCREEN - value: "info" - # Auto-detect the BGP IP address. - - name: IP - value: "" - securityContext: - privileged: true - resources: - requests: - cpu: 250m - volumeMounts: - - mountPath: /lib/modules - name: lib-modules - readOnly: true - - mountPath: /var/run/calico - name: var-run-calico - readOnly: false - # This container installs the Calico CNI binaries - # and CNI network config file on each node. - - name: install-cni - image: quay.io/calico/cni:v1.7.0 - command: ["/install-cni.sh"] - env: - # The location of the Calico etcd cluster. - - name: ETCD_ENDPOINTS - valueFrom: - configMapKeyRef: - name: calico-config - key: etcd_endpoints - # The CNI network config to install on each node. - - name: CNI_NETWORK_CONFIG - valueFrom: - configMapKeyRef: - name: calico-config - key: cni_network_config - volumeMounts: - - mountPath: /host/opt/cni/bin - name: cni-bin-dir - - mountPath: /host/etc/cni/net.d - name: cni-net-dir - volumes: - # Used by calico/node. - - name: lib-modules - hostPath: - path: /lib/modules - - name: var-run-calico - hostPath: - path: /var/run/calico - # Used to install CNI. - - name: cni-bin-dir - hostPath: - path: /opt/cni/bin - - name: cni-net-dir - hostPath: - path: /etc/cni/net.d - ---- - -# This manifest deploys the Calico policy controller on Kubernetes. -# See https://github.com/projectcalico/k8s-policy -apiVersion: extensions/v1beta1 -kind: Deployment -metadata: - name: calico-policy-controller - namespace: kube-system - labels: - k8s-app: calico-policy -spec: - # The policy controller can only have a single active instance. - replicas: 1 - strategy: - type: Recreate - template: - metadata: - name: calico-policy-controller - namespace: kube-system - labels: - k8s-app: calico-policy-controller - annotations: - # Mark this pod as a critical add-on; when enabled, the critical add-on scheduler - # reserves resources for critical add-on pods so that they can be rescheduled after - # a failure. This annotation works in tandem with the toleration below. 
- scheduler.alpha.kubernetes.io/critical-pod: '' - spec: - # The policy controller must run in the host network namespace so that - # it isn't governed by policy that would prevent it from working. - hostNetwork: true - tolerations: - - key: node-role.kubernetes.io/master - effect: NoSchedule - # Allow this pod to be rescheduled while the node is in "critical add-ons only" mode. - # This, along with the annotation above marks this pod as a critical add-on. - - key: CriticalAddonsOnly - operator: Exists - serviceAccountName: calico-policy-controller - containers: - - name: calico-policy-controller - image: quay.io/calico/kube-policy-controller:v0.5.4 - env: - # The location of the Calico etcd cluster. - - name: ETCD_ENDPOINTS - valueFrom: - configMapKeyRef: - name: calico-config - key: etcd_endpoints - # The location of the Kubernetes API. Use the default Kubernetes - # service for API access. - - name: K8S_API - value: "https://kubernetes.default:443" - # Since we're running in the host namespace and might not have KubeDNS - # access, configure the container's /etc/hosts to resolve - # kubernetes.default to the correct service clusterIP. - - name: CONFIGURE_ETC_HOSTS - value: "true" ---- -apiVersion: rbac.authorization.k8s.io/v1beta1 -kind: ClusterRoleBinding -metadata: - name: calico-cni-plugin -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: calico-cni-plugin -subjects: -- kind: ServiceAccount - name: calico-cni-plugin - namespace: kube-system ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1beta1 -metadata: - name: calico-cni-plugin - namespace: kube-system -rules: - - apiGroups: [""] - resources: - - pods - - nodes - verbs: - - get ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: calico-cni-plugin - namespace: kube-system ---- -apiVersion: rbac.authorization.k8s.io/v1beta1 -kind: ClusterRoleBinding -metadata: - name: calico-policy-controller -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: calico-policy-controller -subjects: -- kind: ServiceAccount - name: calico-policy-controller - namespace: kube-system ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1beta1 -metadata: - name: calico-policy-controller - namespace: kube-system -rules: - - apiGroups: - - "" - - extensions - resources: - - pods - - namespaces - - networkpolicies - verbs: - - watch - - list ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: calico-policy-controller - namespace: kube-system diff --git a/tools/kubeadm-aio/assets/opt/cni-manifests/canal.yaml b/tools/kubeadm-aio/assets/opt/cni-manifests/canal.yaml deleted file mode 100644 index aa37ac05..00000000 --- a/tools/kubeadm-aio/assets/opt/cni-manifests/canal.yaml +++ /dev/null @@ -1,329 +0,0 @@ -# Calico Roles -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1beta1 -metadata: - name: canal - namespace: kube-system -rules: - - apiGroups: [""] - resources: - - namespaces - verbs: - - get - - list - - watch - - apiGroups: [""] - resources: - - pods/status - verbs: - - update - - apiGroups: [""] - resources: - - pods - verbs: - - get - - list - - watch - - apiGroups: [""] - resources: - - nodes - verbs: - - get - - list - - update - - watch - - apiGroups: ["extensions"] - resources: - - thirdpartyresources - verbs: - - create - - get - - list - - watch - - apiGroups: ["extensions"] - resources: - - networkpolicies - verbs: - - get - - list - - watch - - apiGroups: ["projectcalico.org"] - resources: - - globalconfigs - verbs: - - create - - get - - list 
- - update - - watch - - apiGroups: ["projectcalico.org"] - resources: - - ippools - verbs: - - create - - delete - - get - - list - - update - - watch ---- -# Flannel roles -# Pulled from https://github.com/coreos/flannel/blob/master/Documentation/kube-flannel-rbac.yml -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1beta1 -metadata: - name: flannel -rules: - - apiGroups: - - "" - resources: - - pods - verbs: - - get - - apiGroups: - - "" - resources: - - nodes - verbs: - - list - - watch - - apiGroups: - - "" - resources: - - nodes/status - verbs: - - patch ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1beta1 -metadata: - name: flannel -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: flannel -subjects: -- kind: ServiceAccount - name: canal - namespace: kube-system ---- -apiVersion: rbac.authorization.k8s.io/v1beta1 -kind: ClusterRoleBinding -metadata: - name: canal -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: canal -subjects: -- kind: ServiceAccount - name: canal - namespace: kube-system ---- -# This ConfigMap can be used to configure a self-hosted Canal installation. -kind: ConfigMap -apiVersion: v1 -metadata: - name: canal-config - namespace: kube-system -data: - # The interface used by canal for host <-> host communication. - # If left blank, then the interface is chosen using the node's - # default route. - canal_iface: "" - - # Whether or not to masquerade traffic to destinations not within - # the pod network. - masquerade: "true" - - # The CNI network configuration to install on each node. - cni_network_config: |- - { - "name": "k8s-pod-network", - "type": "calico", - "log_level": "info", - "datastore_type": "kubernetes", - "hostname": "__KUBERNETES_NODE_NAME__", - "ipam": { - "type": "host-local", - "subnet": "usePodCidr" - }, - "policy": { - "type": "k8s", - "k8s_auth_token": "__SERVICEACCOUNT_TOKEN__" - }, - "kubernetes": { - "k8s_api_root": "https://__KUBERNETES_SERVICE_HOST__:__KUBERNETES_SERVICE_PORT__", - "kubeconfig": "__KUBECONFIG_FILEPATH__" - } - } - - # Flannel network configuration. Mounted into the flannel container. - net-conf.json: | - { - "Network": "192.168.0.0/16", - "Backend": { - "Type": "vxlan" - } - } ---- -# This manifest installs the calico/node container, as well -# as the Calico CNI plugins and network config on -# each master and worker node in a Kubernetes cluster. -kind: DaemonSet -apiVersion: extensions/v1beta1 -metadata: - name: canal - namespace: kube-system - labels: - k8s-app: canal -spec: - selector: - matchLabels: - k8s-app: canal - template: - metadata: - labels: - k8s-app: canal - annotations: - scheduler.alpha.kubernetes.io/critical-pod: '' - spec: - hostNetwork: true - serviceAccountName: canal - tolerations: - # Allow the pod to run on the master. This is required for - # the master to communicate with pods. - - key: node-role.kubernetes.io/master - effect: NoSchedule - # Mark the pod as a critical add-on for rescheduling. - - key: "CriticalAddonsOnly" - operator: "Exists" - containers: - # Runs calico/node container on each Kubernetes node. This - # container programs network policy and routes on each - # host. - - name: calico-node - image: quay.io/calico/node:v1.2.1 - env: - # Use Kubernetes API as the backing datastore. - - name: DATASTORE_TYPE - value: "kubernetes" - # Enable felix logging. 
- - name: FELIX_LOGSEVERITYSYS - value: "info" - # Period, in seconds, at which felix re-applies all iptables state - - name: FELIX_IPTABLESREFRESHINTERVAL - value: "60" - # Disable IPV6 support in Felix. - - name: FELIX_IPV6SUPPORT - value: "false" - # Don't enable BGP. - - name: CALICO_NETWORKING_BACKEND - value: "none" - # Disable file logging so `kubectl logs` works. - - name: CALICO_DISABLE_FILE_LOGGING - value: "true" - - name: WAIT_FOR_DATASTORE - value: "true" - # No IP address needed. - - name: IP - value: "" - - name: HOSTNAME - valueFrom: - fieldRef: - fieldPath: spec.nodeName - # Set Felix endpoint to host default action to ACCEPT. - - name: FELIX_DEFAULTENDPOINTTOHOSTACTION - value: "ACCEPT" - securityContext: - privileged: true - resources: - requests: - cpu: 250m - volumeMounts: - - mountPath: /lib/modules - name: lib-modules - readOnly: true - - mountPath: /var/run/calico - name: var-run-calico - readOnly: false - # This container installs the Calico CNI binaries - # and CNI network config file on each node. - - name: install-cni - image: quay.io/calico/cni:v1.8.3 - command: ["/install-cni.sh"] - env: - # The CNI network config to install on each node. - - name: CNI_NETWORK_CONFIG - valueFrom: - configMapKeyRef: - name: canal-config - key: cni_network_config - - name: KUBERNETES_NODE_NAME - valueFrom: - fieldRef: - fieldPath: spec.nodeName - volumeMounts: - - mountPath: /host/opt/cni/bin - name: cni-bin-dir - - mountPath: /host/etc/cni/net.d - name: cni-net-dir - # This container runs flannel using the kube-subnet-mgr backend - # for allocating subnets. - - name: kube-flannel - image: quay.io/coreos/flannel:v0.8.0 - command: [ "/opt/bin/flanneld", "--ip-masq", "--kube-subnet-mgr" ] - securityContext: - privileged: true - env: - - name: POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - - name: POD_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: FLANNELD_IFACE - valueFrom: - configMapKeyRef: - name: canal-config - key: canal_iface - - name: FLANNELD_IP_MASQ - valueFrom: - configMapKeyRef: - name: canal-config - key: masquerade - volumeMounts: - - name: run - mountPath: /run - - name: flannel-cfg - mountPath: /etc/kube-flannel/ - volumes: - # Used by calico/node. - - name: lib-modules - hostPath: - path: /lib/modules - - name: var-run-calico - hostPath: - path: /var/run/calico - # Used to install CNI. - - name: cni-bin-dir - hostPath: - path: /opt/cni/bin - - name: cni-net-dir - hostPath: - path: /etc/cni/net.d - # Used by flannel. 
- - name: run - hostPath: - path: /run - - name: flannel-cfg - configMap: - name: canal-config ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: canal - namespace: kube-system diff --git a/tools/kubeadm-aio/assets/opt/cni-manifests/flannel.yaml b/tools/kubeadm-aio/assets/opt/cni-manifests/flannel.yaml deleted file mode 100644 index 299c0bc9..00000000 --- a/tools/kubeadm-aio/assets/opt/cni-manifests/flannel.yaml +++ /dev/null @@ -1,94 +0,0 @@ -#https://raw.githubusercontent.com/coreos/flannel/v0.8.0/Documentation/kube-flannel.yml ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: flannel - namespace: kube-system ---- -kind: ConfigMap -apiVersion: v1 -metadata: - name: kube-flannel-cfg - namespace: kube-system - labels: - tier: node - app: flannel -data: - cni-conf.json: | - { - "name": "cbr0", - "type": "flannel", - "delegate": { - "isDefaultGateway": true - } - } - net-conf.json: | - { - "Network": "192.168.0.0/16", - "Backend": { - "Type": "vxlan" - } - } ---- -apiVersion: extensions/v1beta1 -kind: DaemonSet -metadata: - name: kube-flannel-ds - namespace: kube-system - labels: - tier: node - app: flannel -spec: - template: - metadata: - labels: - tier: node - app: flannel - spec: - hostNetwork: true - nodeSelector: - beta.kubernetes.io/arch: amd64 - tolerations: - - key: node-role.kubernetes.io/master - operator: Exists - effect: NoSchedule - serviceAccountName: flannel - containers: - - name: kube-flannel - image: quay.io/coreos/flannel:v0.8.0-amd64 - command: [ "/opt/bin/flanneld", "--ip-masq", "--kube-subnet-mgr" ] - securityContext: - privileged: true - env: - - name: POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - - name: POD_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - volumeMounts: - - name: run - mountPath: /run - - name: flannel-cfg - mountPath: /etc/kube-flannel/ - - name: install-cni - image: quay.io/coreos/flannel:v0.8.0-amd64 - command: [ "/bin/sh", "-c", "set -e -x; cp -f /etc/kube-flannel/cni-conf.json /etc/cni/net.d/10-flannel.conf; while true; do sleep 3600; done" ] - volumeMounts: - - name: cni - mountPath: /etc/cni/net.d - - name: flannel-cfg - mountPath: /etc/kube-flannel/ - volumes: - - name: run - hostPath: - path: /run - - name: cni - hostPath: - path: /etc/cni/net.d - - name: flannel-cfg - configMap: - name: kube-flannel-cfg diff --git a/tools/kubeadm-aio/assets/opt/cni-manifests/weave.yaml b/tools/kubeadm-aio/assets/opt/cni-manifests/weave.yaml deleted file mode 100644 index 1a43da39..00000000 --- a/tools/kubeadm-aio/assets/opt/cni-manifests/weave.yaml +++ /dev/null @@ -1,187 +0,0 @@ -# curl --location "https://cloud.weave.works/k8s/net?k8s-version=$(kubectl version | base64 | tr -d '\n')&env.WEAVE_MTU=1337&env.IPALLOC_RANGE=192.168.0.0/16 -apiVersion: v1 -kind: List -items: - - apiVersion: v1 - kind: ServiceAccount - metadata: - name: weave-net - annotations: - cloud.weave.works/launcher-info: |- - { - "server-version": "master-c3b4969", - "original-request": { - "url": 
"/k8s/v1.6/net.yaml?k8s-version=Q2xpZW50IFZlcnNpb246IHZlcnNpb24uSW5mb3tNYWpvcjoiMSIsIE1pbm9yOiI2IiwgR2l0VmVyc2lvbjoidjEuNi43IiwgR2l0Q29tbWl0OiIwOTUxMzZjMzA3OGNjZjg4N2I5MDM0YjdjZTU5OGEwYTFmYWZmNzY5IiwgR2l0VHJlZVN0YXRlOiJjbGVhbiIsIEJ1aWxkRGF0ZToiMjAxNy0wNy0wNVQxNjo1MTo1NloiLCBHb1ZlcnNpb246ImdvMS43LjYiLCBDb21waWxlcjoiZ2MiLCBQbGF0Zm9ybToibGludXgvYW1kNjQifQpTZXJ2ZXIgVmVyc2lvbjogdmVyc2lvbi5JbmZve01ham9yOiIxIiwgTWlub3I6IjYiLCBHaXRWZXJzaW9uOiJ2MS42LjciLCBHaXRDb21taXQ6IjA5NTEzNmMzMDc4Y2NmODg3YjkwMzRiN2NlNTk4YTBhMWZhZmY3NjkiLCBHaXRUcmVlU3RhdGU6ImNsZWFuIiwgQnVpbGREYXRlOiIyMDE3LTA3LTA1VDE2OjQwOjQyWiIsIEdvVmVyc2lvbjoiZ28xLjcuNiIsIENvbXBpbGVyOiJnYyIsIFBsYXRmb3JtOiJsaW51eC9hbWQ2NCJ9Cg==&env.WEAVE_MTU=1337&env.IPALLOC_RANGE=192.168.0.0/16", - "date": "Sun Jul 30 2017 02:48:47 GMT+0000 (UTC)" - }, - "email-address": "support@weave.works" - } - labels: - name: weave-net - namespace: kube-system - - apiVersion: rbac.authorization.k8s.io/v1beta1 - kind: ClusterRole - metadata: - name: weave-net - annotations: - cloud.weave.works/launcher-info: |- - { - "server-version": "master-c3b4969", - "original-request": { - "url": "/k8s/v1.6/net.yaml?k8s-version=Q2xpZW50IFZlcnNpb246IHZlcnNpb24uSW5mb3tNYWpvcjoiMSIsIE1pbm9yOiI2IiwgR2l0VmVyc2lvbjoidjEuNi43IiwgR2l0Q29tbWl0OiIwOTUxMzZjMzA3OGNjZjg4N2I5MDM0YjdjZTU5OGEwYTFmYWZmNzY5IiwgR2l0VHJlZVN0YXRlOiJjbGVhbiIsIEJ1aWxkRGF0ZToiMjAxNy0wNy0wNVQxNjo1MTo1NloiLCBHb1ZlcnNpb246ImdvMS43LjYiLCBDb21waWxlcjoiZ2MiLCBQbGF0Zm9ybToibGludXgvYW1kNjQifQpTZXJ2ZXIgVmVyc2lvbjogdmVyc2lvbi5JbmZve01ham9yOiIxIiwgTWlub3I6IjYiLCBHaXRWZXJzaW9uOiJ2MS42LjciLCBHaXRDb21taXQ6IjA5NTEzNmMzMDc4Y2NmODg3YjkwMzRiN2NlNTk4YTBhMWZhZmY3NjkiLCBHaXRUcmVlU3RhdGU6ImNsZWFuIiwgQnVpbGREYXRlOiIyMDE3LTA3LTA1VDE2OjQwOjQyWiIsIEdvVmVyc2lvbjoiZ28xLjcuNiIsIENvbXBpbGVyOiJnYyIsIFBsYXRmb3JtOiJsaW51eC9hbWQ2NCJ9Cg==&env.WEAVE_MTU=1337&env.IPALLOC_RANGE=192.168.0.0/16", - "date": "Sun Jul 30 2017 02:48:47 GMT+0000 (UTC)" - }, - "email-address": "support@weave.works" - } - labels: - name: weave-net - rules: - - apiGroups: - - '' - resources: - - pods - - namespaces - - nodes - verbs: - - get - - list - - watch - - apiGroups: - - extensions - resources: - - networkpolicies - verbs: - - get - - list - - watch - - apiVersion: rbac.authorization.k8s.io/v1beta1 - kind: ClusterRoleBinding - metadata: - name: weave-net - annotations: - cloud.weave.works/launcher-info: |- - { - "server-version": "master-c3b4969", - "original-request": { - "url": "/k8s/v1.6/net.yaml?k8s-version=Q2xpZW50IFZlcnNpb246IHZlcnNpb24uSW5mb3tNYWpvcjoiMSIsIE1pbm9yOiI2IiwgR2l0VmVyc2lvbjoidjEuNi43IiwgR2l0Q29tbWl0OiIwOTUxMzZjMzA3OGNjZjg4N2I5MDM0YjdjZTU5OGEwYTFmYWZmNzY5IiwgR2l0VHJlZVN0YXRlOiJjbGVhbiIsIEJ1aWxkRGF0ZToiMjAxNy0wNy0wNVQxNjo1MTo1NloiLCBHb1ZlcnNpb246ImdvMS43LjYiLCBDb21waWxlcjoiZ2MiLCBQbGF0Zm9ybToibGludXgvYW1kNjQifQpTZXJ2ZXIgVmVyc2lvbjogdmVyc2lvbi5JbmZve01ham9yOiIxIiwgTWlub3I6IjYiLCBHaXRWZXJzaW9uOiJ2MS42LjciLCBHaXRDb21taXQ6IjA5NTEzNmMzMDc4Y2NmODg3YjkwMzRiN2NlNTk4YTBhMWZhZmY3NjkiLCBHaXRUcmVlU3RhdGU6ImNsZWFuIiwgQnVpbGREYXRlOiIyMDE3LTA3LTA1VDE2OjQwOjQyWiIsIEdvVmVyc2lvbjoiZ28xLjcuNiIsIENvbXBpbGVyOiJnYyIsIFBsYXRmb3JtOiJsaW51eC9hbWQ2NCJ9Cg==&env.WEAVE_MTU=1337&env.IPALLOC_RANGE=192.168.0.0/16", - "date": "Sun Jul 30 2017 02:48:47 GMT+0000 (UTC)" - }, - "email-address": "support@weave.works" - } - labels: - name: weave-net - roleRef: - kind: ClusterRole - name: weave-net - apiGroup: rbac.authorization.k8s.io - subjects: - - kind: ServiceAccount - name: weave-net - namespace: kube-system - - apiVersion: extensions/v1beta1 - kind: DaemonSet - metadata: - name: weave-net - 
annotations: - cloud.weave.works/launcher-info: |- - { - "server-version": "master-c3b4969", - "original-request": { - "url": "/k8s/v1.6/net.yaml?k8s-version=Q2xpZW50IFZlcnNpb246IHZlcnNpb24uSW5mb3tNYWpvcjoiMSIsIE1pbm9yOiI2IiwgR2l0VmVyc2lvbjoidjEuNi43IiwgR2l0Q29tbWl0OiIwOTUxMzZjMzA3OGNjZjg4N2I5MDM0YjdjZTU5OGEwYTFmYWZmNzY5IiwgR2l0VHJlZVN0YXRlOiJjbGVhbiIsIEJ1aWxkRGF0ZToiMjAxNy0wNy0wNVQxNjo1MTo1NloiLCBHb1ZlcnNpb246ImdvMS43LjYiLCBDb21waWxlcjoiZ2MiLCBQbGF0Zm9ybToibGludXgvYW1kNjQifQpTZXJ2ZXIgVmVyc2lvbjogdmVyc2lvbi5JbmZve01ham9yOiIxIiwgTWlub3I6IjYiLCBHaXRWZXJzaW9uOiJ2MS42LjciLCBHaXRDb21taXQ6IjA5NTEzNmMzMDc4Y2NmODg3YjkwMzRiN2NlNTk4YTBhMWZhZmY3NjkiLCBHaXRUcmVlU3RhdGU6ImNsZWFuIiwgQnVpbGREYXRlOiIyMDE3LTA3LTA1VDE2OjQwOjQyWiIsIEdvVmVyc2lvbjoiZ28xLjcuNiIsIENvbXBpbGVyOiJnYyIsIFBsYXRmb3JtOiJsaW51eC9hbWQ2NCJ9Cg==&env.WEAVE_MTU=1337&env.IPALLOC_RANGE=192.168.0.0/16", - "date": "Sun Jul 30 2017 02:48:47 GMT+0000 (UTC)" - }, - "email-address": "support@weave.works" - } - labels: - name: weave-net - namespace: kube-system - spec: - template: - metadata: - labels: - name: weave-net - spec: - containers: - - name: weave - command: - - /home/weave/launch.sh - env: - - name: WEAVE_MTU - value: '1337' - - name: IPALLOC_RANGE - value: 192.168.0.0/16 - - name: HOSTNAME - valueFrom: - fieldRef: - apiVersion: v1 - fieldPath: spec.nodeName - image: 'weaveworks/weave-kube:2.0.1' - imagePullPolicy: Always - livenessProbe: - httpGet: - host: 127.0.0.1 - path: /status - port: 6784 - initialDelaySeconds: 30 - resources: - requests: - cpu: 10m - securityContext: - privileged: true - volumeMounts: - - name: weavedb - mountPath: /weavedb - - name: cni-bin - mountPath: /host/opt - - name: cni-bin2 - mountPath: /host/home - - name: cni-conf - mountPath: /host/etc - - name: dbus - mountPath: /host/var/lib/dbus - - name: lib-modules - mountPath: /lib/modules - - name: weave-npc - env: - - name: HOSTNAME - valueFrom: - fieldRef: - apiVersion: v1 - fieldPath: spec.nodeName - image: 'weaveworks/weave-npc:2.0.1' - imagePullPolicy: Always - resources: - requests: - cpu: 10m - securityContext: - privileged: true - hostNetwork: true - hostPID: true - restartPolicy: Always - securityContext: - seLinuxOptions: {} - serviceAccountName: weave-net - tolerations: - - effect: NoSchedule - operator: Exists - volumes: - - name: weavedb - hostPath: - path: /var/lib/weave - - name: cni-bin - hostPath: - path: /opt - - name: cni-bin2 - hostPath: - path: /home - - name: cni-conf - hostPath: - path: /etc - - name: dbus - hostPath: - path: /var/lib/dbus - - name: lib-modules - hostPath: - path: /lib/modules - updateStrategy: - type: RollingUpdate diff --git a/tools/kubeadm-aio/assets/opt/nfs-provisioner/deployment.yaml b/tools/kubeadm-aio/assets/opt/nfs-provisioner/deployment.yaml deleted file mode 100644 index 73ec6ba1..00000000 --- a/tools/kubeadm-aio/assets/opt/nfs-provisioner/deployment.yaml +++ /dev/null @@ -1,73 +0,0 @@ -kind: Service -apiVersion: v1 -metadata: - name: nfs-provisioner - labels: - app: nfs-provisioner -spec: - ports: - - name: nfs - port: 2049 - - name: mountd - port: 20048 - - name: rpcbind - port: 111 - - name: rpcbind-udp - port: 111 - protocol: UDP - selector: - app: nfs-provisioner ---- -kind: Deployment -apiVersion: apps/v1beta1 -metadata: - name: nfs-provisioner -spec: - replicas: 1 - strategy: - type: Recreate - template: - metadata: - labels: - app: nfs-provisioner - spec: - containers: - - name: nfs-provisioner - image: quay.io/kubernetes_incubator/nfs-provisioner:v1.0.7 - ports: - - name: nfs - containerPort: 2049 - - 
name: mountd - containerPort: 20048 - - name: rpcbind - containerPort: 111 - - name: rpcbind-udp - containerPort: 111 - protocol: UDP - securityContext: - capabilities: - add: - - DAC_READ_SEARCH - - SYS_RESOURCE - args: - - "-provisioner=example.com/nfs" - - "-grace-period=10" - env: - - name: POD_IP - valueFrom: - fieldRef: - fieldPath: status.podIP - - name: SERVICE_NAME - value: nfs-provisioner - - name: POD_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - imagePullPolicy: "IfNotPresent" - volumeMounts: - - name: export-volume - mountPath: /export - volumes: - - name: export-volume - hostPath: - path: /var/lib/nfs-provisioner diff --git a/tools/kubeadm-aio/assets/opt/nfs-provisioner/storageclass.yaml b/tools/kubeadm-aio/assets/opt/nfs-provisioner/storageclass.yaml deleted file mode 100644 index 35a297f1..00000000 --- a/tools/kubeadm-aio/assets/opt/nfs-provisioner/storageclass.yaml +++ /dev/null @@ -1,5 +0,0 @@ -kind: StorageClass -apiVersion: storage.k8s.io/v1 -metadata: - name: general -provisioner: example.com/nfs diff --git a/tools/kubeadm-aio/assets/opt/rbac/dev.yaml b/tools/kubeadm-aio/assets/opt/rbac/dev.yaml deleted file mode 100644 index 77b5313c..00000000 --- a/tools/kubeadm-aio/assets/opt/rbac/dev.yaml +++ /dev/null @@ -1,15 +0,0 @@ -apiVersion: rbac.authorization.k8s.io/v1alpha1 -kind: ClusterRoleBinding -metadata: - name: cluster-admin -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: cluster-admin -subjects: -- kind: Group - name: system:masters -- kind: Group - name: system:authenticated -- kind: Group - name: system:unauthenticated diff --git a/tools/kubeadm-aio/kubeadm-aio-launcher.sh b/tools/kubeadm-aio/kubeadm-aio-launcher.sh deleted file mode 100755 index d637040e..00000000 --- a/tools/kubeadm-aio/kubeadm-aio-launcher.sh +++ /dev/null @@ -1,94 +0,0 @@ -#!/bin/bash - -# Copyright 2017 The Openstack-Helm Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-set -xe - -# Setup shared mounts for kubelet -sudo mkdir -p /var/lib/kubelet -sudo mount --bind /var/lib/kubelet /var/lib/kubelet -sudo mount --make-shared /var/lib/kubelet - -# Cleanup any old deployment -sudo docker rm -f kubeadm-aio || true -sudo docker rm -f kubelet || true -sudo docker ps -aq | xargs -r -l1 -P16 sudo docker rm -f -sudo rm -rfv \ - /etc/cni/net.d \ - /etc/kubernetes \ - /var/lib/etcd \ - /var/etcd \ - /var/lib/kubelet/* \ - ${HOME}/.kubeadm-aio/admin.conf || true - -: ${KUBE_CNI:="calico"} -: ${CNI_POD_CIDR:="192.168.0.0/16"} - -# Launch Container, refer to: -# https://docs.docker.com/engine/reference/run/ -sudo docker run \ - -dt \ - --name=kubeadm-aio \ - --net=host \ - --security-opt=seccomp:unconfined \ - --cap-add=SYS_ADMIN \ - --tmpfs=/run \ - --tmpfs=/run/lock \ - --volume=/etc/machine-id:/etc/machine-id:ro \ - --volume=${HOME}:${HOME}:rw \ - --volume=${HOME}/.kubeadm-aio:/root:rw \ - --volume=/etc/kubernetes:/etc/kubernetes:rw \ - --volume=/sys/fs/cgroup:/sys/fs/cgroup:ro \ - --volume=/var/run/docker.sock:/run/docker.sock \ - --env KUBELET_CONTAINER=${KUBEADM_IMAGE} \ - --env KUBE_VERSION=${KUBE_VERSION} \ - --env KUBE_CNI=${KUBE_CNI} \ - --env CNI_POD_CIDR=${CNI_POD_CIDR} \ - ${KUBEADM_IMAGE} - -echo "Waiting for kubeconfig" -set +x -end=$(($(date +%s) + 600)) -READY="False" -while true; do - if [ -f ${HOME}/.kubeadm-aio/admin.conf ]; then - READY="True" - fi - [ $READY == "True" ] && break || true - sleep 1 - now=$(date +%s) - [ $now -gt $end ] && \ - echo "KubeADM did not generate kubectl config in time" && \ - sudo docker logs kubeadm-aio && exit -1 -done -set -x - -# Set perms of kubeconfig and set env-var -sudo chown $(id -u):$(id -g) ${HOME}/.kubeadm-aio/admin.conf -export KUBECONFIG=${HOME}/.kubeadm-aio/admin.conf - -echo "Waiting for node to be ready before continuing" -set +x -end=$(($(date +%s) + 600)) -READY="False" -while true; do - READY=$(kubectl get nodes --no-headers=true | awk "{ print \$2 }" | head -1) - [ "$READY" == "Ready" ] && break || true - sleep 1 - now=$(date +%s) - [ $now -gt $end ] && \ - echo "Kube node did not register as ready in time" && \ - sudo docker logs kubeadm-aio && exit -1 -done -set -x