chore(gate): adds and fixes zuul jobs

This patch adds in a tekton zuul job and fixes/adjusts the existing
linting and building, which currently does not perform the advertised
checks. This is the final patch of the chain of patches in order:

0. https://review.opendev.org/#/c/759865/ : tekton-pipelines
1. https://review.opendev.org/#/c/759598/ : tekton-triggers
2. https://review.opendev.org/#/c/759764/ : tekton-dashboard

Change-Id: Ie7bd9efd42fd13fe8e5e83f290f72ed00ba9dea1
Signed-off-by: Tin Lam <tin@irrational.io>
This commit is contained in:
Tin Lam 2020-11-08 17:50:27 -06:00
parent 16b697c9bb
commit de63cdf6d4
40 changed files with 875 additions and 156 deletions

1
.gitignore vendored Normal file
View File

@ -0,0 +1 @@
**.tgz

View File

@ -1,5 +1,3 @@
# Copyright 2017 The Openstack-Helm Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
@ -17,6 +15,9 @@ SHELL := /bin/bash
TASK := build
EXCLUDES := playbooks roles doc tests tools logs tmp zuul.d releasenotes
# FIXME(lamt): Excluding the dex-aio as there are linting errors and should be
# fixed in follow on patch. Once that's done, this can be removed.
EXCLUDES += dex-aio
CHARTS := $(filter-out $(EXCLUDES), $(patsubst %/.,%,$(wildcard */.)))
.PHONY: $(EXCLUDES) $(CHARTS)
@ -52,8 +53,5 @@ pull-all-images:
pull-images:
@./tools/pull-images.sh $(filter-out $@,$(MAKECMDGOALS))
#dev-deploy:
# @./tools/gate/devel/start.sh $(filter-out $@,$(MAKECMDGOALS))
%:
@:

View File

@ -47,7 +47,7 @@ spec:
openssl req -new -key /var/run/airshipit.org/tls/key/tls.key -out ${TMP_KEY}/server.csr -batch -subj "/commonName={{ .Values.params.endpoints.hostname }}"
openssl x509 -req -days 365 -in ${TMP_KEY}/server.csr -CA /etc/kubernetes/pki/ca.crt -CAkey /etc/kubernetes/pki/ca.key -set_serial $(printf '%(%s)T\n' -1) -out /var/run/airshipit.org/tls/crt/tls.crt
openssl verify -purpose sslserver -CAfile /etc/kubernetes/pki/ca.crt /var/run/airshipit.org/tls/crt/tls.crt
cp -v /etc/kubernetes/pki/ca.crt /var/run/airshipit.org/tls/ca-crt/ca.crt
cp -v /etc/kubernetes/pki/ca.crt /var/run/airshipit.org/tls/ca-crt/ca.crt
cp -v /etc/kubernetes/pki/ca.crt /var/run/airshipit.org/tls/ca-crt/ca-certificates.crt
chmod 0444 /var/run/airshipit.org/tls/ca-crt/*.crt /var/run/airshipit.org/tls/crt/tls.crt /var/run/airshipit.org/tls/key/tls.key
volumeMounts:

View File

@ -17,7 +17,7 @@ images:
name: mintel/dex-k8s-authenticator
repo: docker.io
tls_init:
tag: latest
tag: latest
name: metal3-io/ironic
repo: quay.io
pull:
@ -149,11 +149,11 @@ config:
location / { # the default location redirects to https
return 301 https://$server_name:{{ .Values.params.endpoints.port.https }}$request_uri;
}
location = /ca.crt {
location = /ca.crt {
alias /usr/share/nginx/html/ca.crt;
}
}
server {
listen 443 ssl;
server_name {{ .Values.params.endpoints.hostname }};
@ -161,11 +161,11 @@ config:
ssl_certificate /var/run/secrets/airshipit.org/tls/crt/tls.crt;
ssl_certificate_key /var/run/secrets/airshipit.org/tls/key/tls.key;
location = / {
location = / {
return 301 /ui/;
}
location = /ca.crt {
location = /ca.crt {
alias /usr/share/nginx/html/ca.crt;
}
@ -185,5 +185,3 @@ config:
}
}

View File

@ -0,0 +1,21 @@
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Leverage existing Zuul and OpenDev infra for k8s setup
---
- name: Set up requirements for k8s
hosts: primary
vars_files:
- vars.yaml
roles:
- role: clear-firewall
- role: ensure-pip

View File

@ -1,5 +1,3 @@
# Copyright 2017 The Openstack-Helm Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
@ -11,15 +9,8 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
---
- hosts: primary
vars_files:
- vars.yaml
vars:
work_dir: "{{ zuul.project.src_dir }}"
gather_facts: True
roles:
- build-helm-packages
tags:
- build-helm-packages

View File

@ -1,5 +1,3 @@
# Copyright 2017 The Openstack-Helm Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
@ -11,10 +9,23 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
---
- hosts: primary
vars_files:
- vars.yaml
vars:
work_dir: "{{ zuul.project.src_dir }}"
roles:
- role: setup-helm
- role: ensure-chart-testing
- name: chart-testing
chart_testing_options: "--chart-dirs=. --validate-maintainers=false"
zuul_work_dir: "{{ work_dir }}/charts"
- role: build-helm-packages
tasks:
- name: Execute a Whitespace Linter check
command: find . -not -path "*/\.*" -not -path "*/doc/build/*" -not -name "*.tgz" -type f -exec egrep -l " +$" {} \;
register: result
failed_when: result.stdout != ""
- name: Check for trailing whitespaces
command: git grep -E -l -I " +$"
register: result
failed_when: result.stdout != ""
args:
chdir: "{{ zuul.project.src_dir }}"

View File

@ -0,0 +1,30 @@
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
---
- hosts: primary
tasks:
- name: Create run artifacts
file:
path: "/tmp/artifacts"
state: directory
- name: Run gate script
include_role:
name: run-script
vars:
script: "{{ item }}"
loop: "{{ gate_scripts }}"
- name: Download artifacts to executor
synchronize:
src: "/tmp/artifacts"
dest: "{{ zuul.executor.log_root }}/{{ inventory_hostname }}"
mode: pull
ignore_errors: True

View File

@ -0,0 +1,22 @@
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
---
- hosts: all
vars_files:
- vars.yaml
vars:
logs_dir: "/tmp/logs"
roles:
- role: ensure-jq
- role: gather-logs
tags:
- gather-logs

View File

@ -1,5 +1,3 @@
# Copyright 2017 The Openstack-Helm Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
@ -11,5 +9,5 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
null: null
version:
helm: 3.3.4

View File

@ -9,10 +9,8 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
- include: setup-helm-serve.yaml
- name: build all charts in repo
---
- name: Build all charts in repo
make:
chdir: "{{ work_dir }}"
chdir: "{{ work_dir }}/charts"
target: all

View File

@ -1,89 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
- block:
- name: check if correct version of helm client already installed
shell: "set -e; [ \"x$($(type -p helm) version --client --short | awk '{ print $NF }' | awk -F '+' '{ print $1 }')\" == \"x${HELM_VERSION}\" ] || exit 1"
environment:
HELM_VERSION: "{{ version.helm }}"
args:
executable: /bin/bash
register: need_helm
ignore_errors: True
- name: install helm client
when: need_helm is failed
become_user: root
shell: |
TMP_DIR=$(mktemp -d)
curl -sSL ${GOOGLE_HELM_REPO_URL}/helm-${HELM_VERSION}-linux-amd64.tar.gz | tar -zxv --strip-components=1 -C ${TMP_DIR}
sudo mv ${TMP_DIR}/helm /usr/bin/helm
rm -rf ${TMP_DIR}
environment:
HELM_VERSION: "{{ version.helm }}"
GOOGLE_HELM_REPO_URL: "{{ url.google_helm_repo }}"
args:
executable: /bin/bash
- name: setting up helm client
command: helm init --client-only --skip-refresh
- block:
- name: checking if local helm server is running
shell: curl -s 127.0.0.1:8879 | grep -q 'Helm Repository'
args:
executable: /bin/bash
register: helm_server_running
ignore_errors: True
- name: getting current host user name
when: helm_server_running is failed
shell: id -un
args:
executable: /bin/bash
register: helm_server_user
- name: moving systemd unit into place for helm server
when: helm_server_running is failed
become: yes
become_user: root
template:
src: helm-serve.service.j2
dest: /etc/systemd/system/helm-serve.service
mode: 0640
- name: starting helm serve service
when: helm_server_running is failed
become: yes
become_user: root
systemd:
state: restarted
daemon_reload: yes
name: helm-serve
enabled: yes
- name: wait for helm server to be ready
shell: curl -s 127.0.0.1:8879 | grep -q 'Helm Repository'
args:
executable: /bin/bash
register: wait_for_helm_server
until: wait_for_helm_server.rc == 0
retries: 120
delay: 5
- block:
- name: checking if helm 'stable' repo is present
shell: helm repo list | grep -q "^stable"
args:
executable: /bin/bash
register: helm_stable_repo_present
ignore_errors: True
- name: remove helm 'stable' repo when exists
when: helm_stable_repo_present is succeeded
command: helm repo remove stable
- name: adding helm local repo
command: helm repo add local http://localhost:8879/charts

View File

@ -1,11 +0,0 @@
[Unit]
Description=Helm Server
After=network.target
[Service]
User={{ helm_server_user.stdout }}
Restart=always
ExecStart=/usr/bin/helm serve
[Install]
WantedBy=multi-user.target

View File

@ -1,5 +1,3 @@
# Copyright 2017 The Openstack-Helm Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
@ -11,8 +9,9 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
version:
helm: v2.14.1
url:
google_helm_repo: https://storage.googleapis.com/kubernetes-helm
---
- name: Install jq
become: yes
apt:
name: jq
state: present

View File

@ -0,0 +1,28 @@
---
- name: Create directories for logs
file:
path: "{{ logs_dir }}/{{ item }}"
state: directory
with_items:
- objects/cluster
- objects/namespaced
- pod-logs
- name: Gather kubernetes logs
shell: |
set -xe;
./tools/gate/gather-pod-logs.sh
./tools/gate/gather-objects.sh
environment:
LOGDIR: "{{ logs_dir }}"
args:
chdir: "{{ zuul.project.src_dir }}"
- name: Upload the logs
synchronize:
mode: pull
src: "{{ logs_dir }}"
dest: "{{ zuul.executor.log_root }}/{{ inventory_hostname }}"
owner: no
group: no
ignore_errors: True

View File

@ -0,0 +1,20 @@
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
---
- name: "Run script {{ script }}"
shell: |
set -xe;
{{ script_path }}
vars:
script_path: "{{ script }}"
args:
chdir: "{{ zuul.project.src_dir }}"

View File

@ -0,0 +1,29 @@
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# NOTE(lamt): We should be using zuul's ensure-helm zuul-job instead of
# re-inventing the wheel here, but that job relies on Helm 2, and has a step to
# "helm init" which is removed in Helm 3. We will just use the key
# part from the zuul-job until that is resolved.
---
- name: Download Helm
unarchive:
remote_src: true
src: "https://get.helm.sh/helm-v{{ version.helm }}-linux-amd64.tar.gz"
dest: /tmp
- name: Install Helm
become: true
copy:
remote_src: true
src: /tmp/linux-amd64/helm
dest: /usr/local/bin/helm
mode: '0755'

164
tools/gate/deploy-k8s.sh Executable file
View File

@ -0,0 +1,164 @@
#!/bin/bash
# Gate helper: installs Docker, minikube, kubectl and Helm, then brings up a
# single-node Kubernetes cluster (minikube "none" driver) with Calico for CI.
set -ex
# Tool versions; each may be overridden from the environment.
: ${HELM_VERSION:="v3.4.1"}
: ${KUBE_VERSION:="v1.19.2"}
: ${MINIKUBE_VERSION:="v1.15.1"}
: ${CALICO_VERSION:="v3.12"}
# Proxy settings default to empty so the script works with or without a proxy.
: "${HTTP_PROXY:=""}"
: "${HTTPS_PROXY:=""}"
# Keep apt/debconf fully non-interactive in CI.
export DEBCONF_NONINTERACTIVE_SEEN=true
export DEBIAN_FRONTEND=noninteractive
# Note: Including fix from https://review.opendev.org/c/openstack/openstack-helm-infra/+/763619/
# Raise the systemd default memlock limit (per the linked review) and re-exec
# systemd so the new default takes effect without a reboot.
echo "DefaultLimitMEMLOCK=16384" | sudo tee -a /etc/systemd/system.conf
sudo systemctl daemon-reexec
# Rebuild /etc/resolv.conf so the host resolves cluster services via the
# in-cluster DNS service IP (10.96.0.10), with upstream fallbacks appended.
function configure_resolvconf {
# Setup resolv.conf to use the k8s api server, which is required for the
# kubelet to resolve cluster services.
sudo mv /etc/resolv.conf /etc/resolv.conf.backup
# Create symbolic link to the resolv.conf file managed by systemd-resolved, as
# the kubelet.resolv-conf extra-config flag is automatically executed by the
# minikube start command, regardless of being passed in here
sudo ln -s /run/systemd/resolve/resolv.conf /etc/resolv.conf
sudo bash -c "echo 'nameserver 10.96.0.10' >> /etc/resolv.conf"
# NOTE(drewwalters96): Use the Google DNS servers to prevent local addresses in
# the resolv.conf file unless using a proxy, then use the existing DNS servers,
# as custom DNS nameservers are commonly required when using a proxy server.
if [ -z "${HTTP_PROXY}" ]; then
sudo bash -c "echo 'nameserver 8.8.8.8' >> /etc/resolv.conf"
sudo bash -c "echo 'nameserver 8.8.4.4' >> /etc/resolv.conf"
else
# Carry over whatever nameservers the original resolv.conf listed.
sed -ne "s/nameserver //p" /etc/resolv.conf.backup | while read -r ns; do
sudo bash -c "echo 'nameserver ${ns}' >> /etc/resolv.conf"
done
fi
sudo bash -c "echo 'search svc.cluster.local cluster.local' >> /etc/resolv.conf"
sudo bash -c "echo 'options ndots:5 timeout:1 attempts:1' >> /etc/resolv.conf"
# The backup is no longer needed once its nameservers have been copied over.
sudo rm /etc/resolv.conf.backup
}
# NOTE: Clean Up hosts file — replace the loopback entries with the standard
# alias sets so hostname resolution is deterministic on the gate node.
# Fixed: a space was missing between "localhost4" and "localhost4.localdomain4"
# (matches the upstream openstack-helm-infra deploy-k8s script).
sudo sed -i '/^127.0.0.1/c\127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4' /etc/hosts
sudo sed -i '/^::1/c\::1 localhost6 localhost6.localdomain6' /etc/hosts
# Install required packages for K8s on host
# Ceph apt key + repository.
# NOTE(review): the ceph repo is added here but no ceph package appears in the
# install list below — possibly vestigial; confirm before removing.
wget -q -O- 'https://download.ceph.com/keys/release.asc' | sudo apt-key add -
RELEASE_NAME=$(grep 'CODENAME' /etc/lsb-release | awk -F= '{print $2}')
sudo add-apt-repository "deb https://download.ceph.com/debian-nautilus/ ${RELEASE_NAME} main"
. /etc/os-release
# Docker upstream apt key + repository.
curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
sudo apt-key fingerprint 0EBFCD88
sudo add-apt-repository \
"deb [arch=amd64] https://download.docker.com/linux/ubuntu \
$(lsb_release -cs) \
stable"
sudo -E apt-get update
sudo -E apt-get install -y \
docker-ce \
docker-ce-cli \
containerd.io \
socat \
jq \
util-linux \
nfs-common \
bridge-utils \
iptables \
conntrack \
libffi-dev
configure_resolvconf
# Prepare tmpfs for etcd
sudo mkdir -p /data
sudo mount -t tmpfs -o size=512m tmpfs /data
# Install minikube and kubectl
URL="https://storage.googleapis.com"
sudo -E curl -sSLo /usr/local/bin/minikube "${URL}"/minikube/releases/"${MINIKUBE_VERSION}"/minikube-linux-amd64
sudo -E curl -sSLo /usr/local/bin/kubectl "${URL}"/kubernetes-release/release/"${KUBE_VERSION}"/bin/linux/amd64/kubectl
sudo -E chmod +x /usr/local/bin/minikube
sudo -E chmod +x /usr/local/bin/kubectl
# Install Helm
TMP_DIR=$(mktemp -d)
sudo -E bash -c \
"curl -sSL https://get.helm.sh/helm-${HELM_VERSION}-linux-amd64.tar.gz | tar -zxv --strip-components=1 -C ${TMP_DIR}"
sudo -E mv "${TMP_DIR}"/helm /usr/local/bin/helm
rm -rf "${TMP_DIR}"
# NOTE: Deploy kubernetes using minikube. A CNI that supports network policy is
# required for validation; use calico for simplicity.
sudo -E minikube config set kubernetes-version "${KUBE_VERSION}"
# vm-driver "none" runs the cluster directly on the host instead of in a VM.
sudo -E minikube config set vm-driver none
sudo -E minikube config set embed-certs true
export CHANGE_MINIKUBE_NONE_USER=true
export MINIKUBE_IN_STYLE=false
sudo -E minikube start \
--docker-env HTTP_PROXY="${HTTP_PROXY}" \
--docker-env HTTPS_PROXY="${HTTPS_PROXY}" \
--docker-env NO_PROXY="${NO_PROXY},10.96.0.0/12" \
--network-plugin=cni \
--extra-config=controller-manager.allocate-node-cidrs=true \
--extra-config=controller-manager.cluster-cidr=192.168.0.0/16
minikube addons list
curl https://docs.projectcalico.org/"${CALICO_VERSION}"/manifests/calico.yaml -o /tmp/calico.yaml
kubectl apply -f /tmp/calico.yaml
# Note: Patch calico daemonset to enable Prometheus metrics and annotations
tee /tmp/calico-node.yaml << EOF
spec:
template:
metadata:
annotations:
prometheus.io/scrape: "true"
prometheus.io/port: "9091"
spec:
containers:
- name: calico-node
env:
- name: FELIX_PROMETHEUSMETRICSENABLED
value: "true"
- name: FELIX_PROMETHEUSMETRICSPORT
value: "9091"
EOF
kubectl -n kube-system patch daemonset calico-node --patch "$(cat /tmp/calico-node.yaml)"
kubectl -n kube-system set env daemonset/calico-node FELIX_IGNORELOOSERPF=true
kubectl get pod -A
kubectl -n kube-system get pod -l k8s-app=kube-dns
# NOTE: Wait for dns to be running. Poll every 10s for up to 240s; bail out
# with a failure if coredns never appears.
END=$(($(date +%s) + 240))
until kubectl --namespace=kube-system \
get pods -l k8s-app=kube-dns --no-headers -o name | grep -q "^pod/coredns"; do
NOW=$(date +%s)
[ "${NOW}" -gt "${END}" ] && exit 1
echo "still waiting for dns"
sleep 10
done
kubectl -n kube-system wait --timeout=240s --for=condition=Ready pods -l k8s-app=kube-dns
# Remove stable repo, if present, to improve build time
helm repo remove stable || true
# Add labels to the core namespaces
kubectl label --overwrite namespace default name=default
kubectl label --overwrite namespace kube-system name=kube-system
kubectl label --overwrite namespace kube-public name=kube-public

70
tools/gate/gather-objects.sh Executable file
View File

@ -0,0 +1,70 @@
#!/bin/bash
# Gate log collector: dumps every cluster-scoped and namespaced Kubernetes
# object (as yaml plus "kubectl describe" text) under ${LOGDIR}/objects/.
# Requires kubectl access and a LOGDIR environment variable set by the caller.
# -u/-x only (no -e): collection is best-effort and should not abort early.
set -ux
# Number of concurrent kubectl invocations at each pipeline stage.
export PARALLELISM_FACTOR=2
# Cluster-scoped resource kinds to collect (comma-separated).
export OBJECT_TYPE=node,clusterrole,clusterrolebinding,storageclass,namespace,crd
# Namespaced resource kinds to collect (comma-separated).
export NS_OBJECT_TYPE=configmaps,cronjobs,daemonsets,deployment,endpoints,ingresses,jobs,networkpolicies,pods,podsecuritypolicies,persistentvolumeclaims,rolebindings,roles,secrets,serviceaccounts,services,statefulsets
# Print every namespace name, one per line.
function get_namespaces {
kubectl get namespaces -o name | awk -F '/' '{ print $NF }'
}
# Expand the comma-separated OBJECT_TYPE list to one kind per line.
function list_objects {
printf ${OBJECT_TYPE} | xargs -d ',' -I {} -P${PARALLELISM_FACTOR} -n1 bash -c 'echo "$@"' _ {}
}
# For a kind in $1, print "<kind> <name>" for each object of that kind.
function name_objects {
export OBJECT=$1
kubectl get ${OBJECT} -o name | xargs -L1 -I {} -P${PARALLELISM_FACTOR} -n1 bash -c 'echo "${OBJECT} ${1#*/}"' _ {}
}
# Dump one cluster-scoped object ("<kind> <name>" in $1) as yaml + describe
# text under ${LOGDIR}/objects/cluster/<kind>/.
function get_objects {
input=($1)
export OBJECT=${input[0]}
export NAME=${input[1]#*/}
echo "${OBJECT}/${NAME}"
DIR="${LOGDIR}/objects/cluster/${OBJECT}"
mkdir -p ${DIR}
kubectl get ${OBJECT} ${NAME} -o yaml > "${DIR}/${NAME}.yaml"
kubectl describe ${OBJECT} ${NAME} > "${DIR}/${NAME}.txt"
}
# For a namespace in $1, print "<namespace> <kind>" for each namespaced kind.
function list_namespaced_objects {
export NAMESPACE=$1
printf ${NS_OBJECT_TYPE} | xargs -d ',' -I {} -P${PARALLELISM_FACTOR} -n1 bash -c 'echo "${NAMESPACE} $@"' _ {}
}
# For "<namespace> <kind>" in $1, print "<namespace> <kind> <name>" per object.
function name_namespaced_objects {
input=($1)
export NAMESPACE=${input[0]}
export OBJECT=${input[1]}
kubectl get -n ${NAMESPACE} ${OBJECT} -o name | xargs -L1 -I {} -P${PARALLELISM_FACTOR} -n1 bash -c 'echo "${NAMESPACE} ${OBJECT} $@"' _ {}
}
# Dump one namespaced object ("<ns> <kind> <name>" in $1) as yaml + describe
# text under ${LOGDIR}/objects/namespaced/<ns>/<kind>/.
function get_namespaced_objects () {
input=($1)
export NAMESPACE=${input[0]}
export OBJECT=${input[1]}
export NAME=${input[2]#*/}
echo "${NAMESPACE}/${OBJECT}/${NAME}"
DIR="${LOGDIR}/objects/namespaced/${NAMESPACE}/${OBJECT}"
mkdir -p ${DIR}
kubectl get -n ${NAMESPACE} ${OBJECT} ${NAME} -o yaml > "${DIR}/${NAME}.yaml"
kubectl describe -n ${NAMESPACE} ${OBJECT} ${NAME} > "${DIR}/${NAME}.txt"
}
# Export the helpers so the bash subshells spawned by xargs can call them.
export -f list_objects
export -f name_objects
export -f get_objects
export -f list_namespaced_objects
export -f name_namespaced_objects
export -f get_namespaced_objects
# Cluster-scoped collection: kinds -> names -> dump, in parallel.
list_objects | \
xargs -r -n1 -P${PARALLELISM_FACTOR} -I {} bash -c 'name_objects "$@"' _ {} | \
xargs -r -n1 -P${PARALLELISM_FACTOR} -I {} bash -c 'get_objects "$@"' _ {}
# Namespaced collection: namespaces -> kinds -> names -> dump, in parallel.
get_namespaces | \
xargs -r -n1 -P${PARALLELISM_FACTOR} -I {} bash -c 'list_namespaced_objects "$@"' _ {} | \
xargs -r -n1 -P${PARALLELISM_FACTOR} -I {} bash -c 'name_namespaced_objects "$@"' _ {} | \
xargs -r -n1 -P${PARALLELISM_FACTOR} -I {} bash -c 'get_namespaced_objects "$@"' _ {}

33
tools/gate/gather-pod-logs.sh Executable file
View File

@ -0,0 +1,33 @@
#!/bin/bash
# Gate log collector: writes the log of every container (including init
# containers) of every pod to ${LOGDIR}/pod-logs/<namespace>/<pod>/<container>.txt.
# Requires kubectl, jq and a LOGDIR environment variable set by the caller.
# -u/-x only (no -e): collection is best-effort and should not abort early.
set -ux
# Number of concurrent kubectl invocations at each pipeline stage.
export PARALLELISM_FACTOR=2
# Print every namespace name, one per line.
function get_namespaces {
kubectl get namespaces -o name | awk -F '/' '{ print $NF }'
}
# For a namespace in $1, print "<namespace> <pod>" for each pod in it.
function get_pods () {
NAMESPACE=$1
kubectl get pods -n ${NAMESPACE} -o name | awk -F '/' '{ print $NF }' | xargs -L1 -P${PARALLELISM_FACTOR} -I {} echo ${NAMESPACE} {}
}
export -f get_pods
# For "<namespace> <pod>" in $1, dump each container's log to its own file.
function get_pod_logs () {
NAMESPACE=${1% *}
POD=${1#* }
# jq pulls the container names from the pod spec; initContainers may be absent.
INIT_CONTAINERS=$(kubectl get pod $POD -n ${NAMESPACE} -o json | jq -r '.spec.initContainers[]?.name')
CONTAINERS=$(kubectl get pod $POD -n ${NAMESPACE} -o json | jq -r '.spec.containers[].name')
for CONTAINER in ${INIT_CONTAINERS} ${CONTAINERS}; do
echo "${NAMESPACE}/${POD}/${CONTAINER}"
mkdir -p "${LOGDIR}/pod-logs/${NAMESPACE}/${POD}"
kubectl logs ${POD} -n ${NAMESPACE} -c ${CONTAINER} > "${LOGDIR}/pod-logs/${NAMESPACE}/${POD}/${CONTAINER}.txt"
done
}
export -f get_pod_logs
# namespaces -> pods -> per-container logs, in parallel.
get_namespaces | \
xargs -r -n 1 -P ${PARALLELISM_FACTOR} -I {} bash -c 'get_pods "$@"' _ {} | \
xargs -r -n 2 -P ${PARALLELISM_FACTOR} -I {} bash -c 'get_pod_logs "$@"' _ {}

View File

@ -0,0 +1 @@
../deploy-k8s.sh

View File

@ -0,0 +1,9 @@
#!/bin/bash
# Gate step: install the Harbor chart into its own namespace, wait for all
# pods to become ready, then run the chart's Helm tests.
set -eux
NS="harbor"
kubectl create ns $NS
helm upgrade --install harbor ./charts/harbor --namespace $NS
kubectl wait --for=condition=ready pod --timeout=600s --namespace $NS --all
helm test harbor -n $NS
# Final pod listing for the job logs.
kubectl --namespace $NS get pod

View File

@ -0,0 +1 @@
../deploy-k8s.sh

View File

@ -0,0 +1,15 @@
#!/bin/bash
# Gate step: install the three Tekton charts (pipelines, triggers, dashboard)
# into a shared namespace and wait for all pods to become ready.
set -eux
NS="tekton-pipelines"
kubectl create ns $NS
for ele in tekton-pipelines tekton-triggers tekton-dashboard; do
helm upgrade --install $ele ./charts/$ele --namespace $NS
done
kubectl wait --for=condition=ready pod --timeout=120s --namespace $NS --all
# Final pod listing for the job logs.
kubectl --namespace $NS get pod

65
tools/gate/tekton/300-test.sh Executable file
View File

@ -0,0 +1,65 @@
#!/bin/bash
# Gate step: apply the Tekton Triggers example resources, fire a sample
# GitHub-style webhook at the EventListener service, and verify that the
# resulting PipelineRun succeeds.
set -eux
NS="tekton-pipelines"
# retry <cmd...>: run a command up to 3 times, sleeping 10s between attempts.
# Exits the whole script with status 1 when all attempts fail (intentional:
# the gate should fail hard).
function retry {
local n=1
local max=3
local delay=10
while true; do
"$@" && break || {
if [[ $n -lt $max ]]; then
(( n++ ))
sleep $delay
else
echo "failed after $n attempts." >&2
exit 1
fi
}
done
}
# RBAC, service account and secret used by the example EventListener.
kubectl -n $NS apply -f ./tools/gate/tekton/yaml/role-resources/secret.yaml
kubectl -n $NS apply -f ./tools/gate/tekton/yaml/role-resources/serviceaccount.yaml
kubectl -n $NS apply -f ./tools/gate/tekton/yaml/role-resources/clustertriggerbinding-roles
kubectl -n $NS apply -f ./tools/gate/tekton/yaml/role-resources/triggerbinding-roles
# Trigger CRs are retried — presumably to tolerate the Tekton webhook/CRD
# machinery still coming up right after chart install; confirm if flakes stop.
retry kubectl -n $NS apply -f ./tools/gate/tekton/yaml/triggertemplates/triggertemplate.yaml
retry kubectl -n $NS apply -f ./tools/gate/tekton/yaml/triggerbindings/triggerbinding.yaml
retry kubectl -n $NS apply -f ./tools/gate/tekton/yaml/triggerbindings/triggerbinding-message.yaml
retry kubectl -n $NS apply -f ./tools/gate/tekton/yaml/eventlisteners/eventlistener.yaml
# Snapshot the created resources for the job logs.
kubectl -n $NS get svc
kubectl -n $NS get pod
kubectl -n $NS get triggerbinding
kubectl -n $NS get triggertemplate
kubectl -n $NS wait --for=condition=Ready pod --timeout=120s --all
# Install the pipeline
kubectl -n $NS apply -f ./tools/gate/tekton/yaml/example-pipeline.yaml
kubectl -n $NS wait --for=condition=Ready pod --timeout=120s --all
kubectl get po -A
# Trigger the sample github pipeline
# The EventListener service is named "el-listener"; grab its ClusterIP.
SVCIP=$(kubectl -n $NS get svc --no-headers | grep el-listener | awk '{print $3}')
curl -X POST \
http://$SVCIP:8080 \
-H 'Content-Type: application/json' \
-H 'X-Hub-Signature: sha1=2da37dcb9404ff17b714ee7a505c384758ddeb7b' \
-d '{
"repository":
{
"url": "https://github.com/tektoncd/triggers.git"
}
}'
# Ensure the run is successful
kubectl -n $NS wait --for=condition=Succeeded pipelineruns --timeout=120s --all
# Check the pipeline runs
kubectl -n $NS get pipelinerun

View File

@ -0,0 +1,14 @@
---
apiVersion: triggers.tekton.dev/v1alpha1
kind: EventListener
metadata:
name: listener
spec:
serviceAccountName: tekton-triggers-example-sa
triggers:
- name: foo-trig
bindings:
- ref: pipeline-binding
- ref: message-binding
template:
ref: pipeline-template

View File

@ -0,0 +1,103 @@
apiVersion: tekton.dev/v1beta1
kind: Task
metadata:
name: say-hello
spec:
params:
- name: contenttype
description: The Content-Type of the event
type: string
resources:
inputs:
- name: git-source
type: git
steps:
- name: say-hi
image: bash
command: ["bash", "-c"]
args:
- echo -e 'Hello Triggers!\nContent-Type is $(params.contenttype)'
---
apiVersion: tekton.dev/v1beta1
kind: Task
metadata:
name: say-message
spec:
params:
- name: message
description: The message to print
default: This is the default message
type: string
resources:
inputs:
- name: git-source
type: git
steps:
- name: say-message
image: bash
command: ["bash", "-c"]
args:
- echo '$(params.message)'
---
apiVersion: tekton.dev/v1beta1
kind: Task
metadata:
name: say-bye
spec:
resources:
inputs:
- name: git-source
type: git
steps:
- name: say-bye
image: bash
command: ["bash", "-c"]
args:
- echo 'Goodbye Triggers!'
---
apiVersion: tekton.dev/v1beta1
kind: Pipeline
metadata:
name: simple-pipeline
spec:
params:
- name: message
description: The message to print
default: This is the default message
type: string
- name: contenttype
description: The Content-Type of the event
type: string
resources:
- name: git-source
type: git
tasks:
- name: say-hello
taskRef:
name: say-hello
params:
- name: contenttype
value: $(params.contenttype)
resources:
inputs:
- name: git-source
resource: git-source
- name: say-message
runAfter: [say-hello]
taskRef:
name: say-message
params:
- name: message
value: $(params.message)
resources:
inputs:
- name: git-source
resource: git-source
- name: say-bye
runAfter: [say-message]
taskRef:
name: say-bye
resources:
inputs:
- name: git-source
resource: git-source

View File

@ -0,0 +1,12 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: tekton-triggers-example-clusterbinding
subjects:
- kind: ServiceAccount
name: tekton-triggers-example-sa
namespace: tekton-pipelines
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: tekton-triggers-example-clusterrole

View File

@ -0,0 +1,17 @@
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: tekton-triggers-example-clusterrole
rules:
# Permissions for every EventListener deployment to function
- apiGroups: ["triggers.tekton.dev"]
resources: ["clustertriggerbindings", "eventlisteners", "triggerbindings", "triggertemplates", "triggers"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
# secrets are only needed for GitHub/GitLab interceptors
resources: ["configmaps", "secrets"]
verbs: ["get", "list", "watch"]
# Permissions to create resources in associated TriggerTemplates
- apiGroups: ["tekton.dev"]
resources: ["pipelineruns", "pipelineresources", "taskruns"]
verbs: ["create"]

View File

@ -0,0 +1,7 @@
apiVersion: v1
kind: Secret
metadata:
name: tekton-triggers-example-secret
type: Opaque
stringData:
secretToken: "1234567"

View File

@ -0,0 +1,6 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: tekton-triggers-example-sa
secrets:
- name: tekton-triggers-example-secret

View File

@ -0,0 +1,11 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: tekton-triggers-example-binding
subjects:
- kind: ServiceAccount
name: tekton-triggers-example-sa
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: tekton-triggers-example-minimal

View File

@ -0,0 +1,20 @@
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: tekton-triggers-example-minimal
rules:
# Permissions for every EventListener deployment to function
- apiGroups: ["triggers.tekton.dev"]
resources: ["eventlisteners", "triggerbindings", "triggertemplates", "triggers"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
# secrets are only needed for GitHub/GitLab interceptors
resources: ["configmaps", "secrets"]
verbs: ["get", "list", "watch"]
# Permissions to create resources in associated TriggerTemplates
- apiGroups: ["tekton.dev"]
resources: ["pipelineruns", "pipelineresources", "taskruns"]
verbs: ["create"]
- apiGroups: [""]
resources: ["serviceaccounts"]
verbs: ["impersonate"]

View File

@ -0,0 +1,8 @@
apiVersion: triggers.tekton.dev/v1alpha1
kind: TriggerBinding
metadata:
name: message-binding
spec:
params:
- name: message
value: Hello from the Triggers EventListener!

View File

@ -0,0 +1,12 @@
apiVersion: triggers.tekton.dev/v1alpha1
kind: TriggerBinding
metadata:
name: pipeline-binding
spec:
params:
- name: gitrevision
value: $(body.head_commit.id)
- name: gitrepositoryurl
value: $(body.repository.url)
- name: contenttype
value: $(header.Content-Type)

View File

@ -0,0 +1,38 @@
apiVersion: triggers.tekton.dev/v1alpha1
kind: TriggerTemplate
metadata:
name: pipeline-template
spec:
params:
- name: gitrevision
description: The git revision
default: master
- name: gitrepositoryurl
description: The git repository url
- name: message
description: The message to print
default: This is the default message
- name: contenttype
description: The Content-Type of the event
resourcetemplates:
- apiVersion: tekton.dev/v1beta1
kind: PipelineRun
metadata:
generateName: simple-pipeline-run-
spec:
pipelineRef:
name: simple-pipeline
params:
- name: message
value: $(tt.params.message)
- name: contenttype
value: $(tt.params.contenttype)
resources:
- name: git-source
resourceSpec:
type: git
params:
- name: revision
value: $(tt.params.gitrevision)
- name: url
value: $(tt.params.gitrepositoryurl)

View File

@ -0,0 +1,35 @@
#!/bin/bash
# Convenience script: deploy Harbor plus the three Tekton charts into one
# namespace and wait for all pods to become ready.
set -ex
# NOTE(lamt): Runs this script in the root directory of this repo.
# Both knobs may be overridden from the environment.
: ${NAMESPACE:="tekton-pipelines"}
: ${CHART_ROOT_PATH:="./charts"}
# deploys a Kubernetes cluster
# (intentionally left commented out — uncomment to bring up a cluster first)
# ./tools/gate/deploy-k8s.sh
# creates namespace ("|| true" makes the script idempotent on re-runs)
kubectl create namespace $NAMESPACE || true
# TODO(lamt): Needs an PV/C provider - NFS
# deploys harbor
helm upgrade --install harbor ${CHART_ROOT_PATH}/harbor \
--namespace=$NAMESPACE \
${EXTRA_HELM_ARGS_TEKTON_HARBOR}
# deploys tekton
helm upgrade --install tekton-pipelines ${CHART_ROOT_PATH}/tekton-pipelines \
--namespace=$NAMESPACE \
${EXTRA_HELM_ARGS_TEKTON_PIPELINES}
helm upgrade --install tekton-triggers ${CHART_ROOT_PATH}/tekton-triggers \
--namespace=$NAMESPACE \
${EXTRA_HELM_ARGS_TEKTON_TRIGGERS}
helm upgrade --install tekton-dashboard ${CHART_ROOT_PATH}/tekton-dashboard \
--namespace=$NAMESPACE \
${EXTRA_HELM_ARGS_TEKTON_DASHBOARD}
# waits for the pods to get ready
kubectl --namespace $NAMESPACE wait --for=condition=ready pod --timeout=600s --all

View File

@ -11,23 +11,36 @@
# limitations under the License.
- job:
name: airship-charts-lint
name: airship-charts-lint-and-build
run: playbooks/airship-zuul-linter.yaml
description: Lint Helm charts
nodeset: airship-charts-single-node
- job:
name: airship-charts-build
run: playbooks/airship-charts-build.yaml
description: Build Helm charts
name: airship-harbor-tekton
timeout: 7200
pre-run:
- playbooks/airship-setup.yaml
run: playbooks/gate-runner.yaml
post-run: playbooks/gather-logs.yaml
description: Runs Tekton charts
nodeset: airship-charts-single-node
vars:
collect_kubernetes_logs_namespace: tekton-pipelines
gate_scripts:
- ./tools/gate/tekton/100-deploy-k8s.sh
- ./tools/gate/tekton/200-install.sh
- ./tools/gate/tekton/300-test.sh
- ./tools/gate/harbor/200-install.sh
- job:
name: airship-charts-upload-git-mirror
parent: upload-git-mirror
description: Mirrors airship/charts to airshipit/charts
nodeset: airship-charts-single-node
vars:
git_mirror_repository: airshipit/charts
secrets:
- name: git_mirror_credentials
secret: airship-charts-airshipit-github-secret
pass-to-parent: true

22
zuul.d/nodesets.yaml Normal file
View File

@ -0,0 +1,22 @@
---
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
- nodeset:
name: airship-charts-single-node
nodes:
- name: primary
label: ubuntu-bionic
groups:
- name: primary
nodes:
- primary

View File

@ -13,13 +13,12 @@
- project:
check:
jobs:
- airship-charts-lint
- airship-charts-build
- airship-charts-lint-and-build
- airship-harbor-tekton
gate:
jobs:
- airship-charts-lint
- airship-charts-build
- airship-charts-lint-and-build
- airship-harbor-tekton
post:
jobs:
- airship-charts-upload-git-mirror