Replace existing operator tasks with the new dhall function

This change replaces the existing Ansible tasks with a dhall
function that generates all the Kubernetes objects. The operator
now converts the CR spec to a dhall `Input`, then applies the
function output to the cluster. Follow-up changes demonstrate how
runtime operations can be performed around that function.
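
For illustration, the reconciliation step conceptually evaluates an
expression like the one below (a minimal sketch only: the import
paths and secret names are assumptions, not the operator's actual
code).

```dhall
-- Sketch: apply the resources function to an Input built from a CR spec.
-- Paths and secret names are illustrative assumptions.
let Schemas = ./conf/zuul/input.dhall

let resources = ./conf/zuul/resources.dhall

in  resources
      Schemas.Input::{
      , name = "example-zuul"
      , executor = Schemas.Executor::{
        , ssh_key = Schemas.UserSecret::{ secretName = "executor-ssh-key" }
        }
      , scheduler = Schemas.Scheduler::{
        , config = Schemas.UserSecret::{ secretName = "zuul-tenant-config" }
        }
      , launcher = Schemas.Launcher::{
        , config = Schemas.UserSecret::{ secretName = "nodepool-launcher-config" }
        }
      , connections = Schemas.Connections.default
      }
```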

This change updates the zuul-ci_v1alpha1_zuul_cr.yaml file with
the actual CR defined in the zuul specification so that it can
be used in the functional tests.

Depends-On: https://review.opendev.org/702753
Change-Id: Iea51bccf90def6e827d2c5846ad6a7e4c86a5bc1

@ -1,18 +1,26 @@
- job:
description: |
Test that zuul-operator works in Kubernetes (currently debugging use)
description: Operator integration tests
name: zuul-operator-functional
abstract: true
run: playbooks/zuul-operator-functional/run.yaml
post-run: playbooks/zuul-operator-functional/post.yaml
requires: docker-image
vars:
# We disable userland-proxy so that the scheduler deployment can connect to the gearman service
# see: https://github.com/eclipse/che/issues/8134
docker_userland_proxy: false
- job:
description: Operator integration tests with Kubernetes
name: zuul-operator-functional-k8s
pre-run: playbooks/zuul-operator-functional-k8s/pre.yaml
run: playbooks/zuul-operator-functional-k8s/run.yaml
post-run: playbooks/zuul-operator-functional-k8s/post.yaml
nodeset: ubuntu-xenial
required-projects:
- github.com/pravega/zookeeper-operator
- zuul/zuul-operator
parent: zuul-operator-functional
pre-run: playbooks/zuul-operator-functional/pre-k8s.yaml
nodeset: ubuntu-bionic
vars:
namespace: 'default'
- job:
description: |
Builds the Zuul operator image taken from the buildset registry
description: Image and buildset registry job
name: zuul-operator-build-image
parent: opendev-build-docker-image
allowed-projects: zuul/zuul-operator
@ -27,10 +35,10 @@
check:
jobs:
- zuul-operator-build-image
# - zuul-operator-functional-k8s:
# dependencies: zuul-operator-build-image
- zuul-operator-functional-k8s:
dependencies: zuul-operator-build-image
gate:
jobs:
- zuul-operator-build-image
# - zuul-operator-functional-k8s:
# dependencies: zuul-operator-build-image
- zuul-operator-functional-k8s:
dependencies: zuul-operator-build-image

@ -1,2 +1,8 @@
image:
podman build -f build/Dockerfile -t docker.io/zuul/zuul-operator .
install:
kubectl apply -f deploy/crds/zuul-ci_v1alpha1_zuul_crd.yaml -f deploy/rbac.yaml -f deploy/operator.yaml
deploy-cr:
kubectl apply -f deploy/crds/zuul-ci_v1alpha1_zuul_cr.yaml

@ -1,128 +1,44 @@
A Zuul Operator PoC
=======
Zuul Operator
=============
## Requirements:
* [OKD](https://github.com/openshift/origin/releases/download/v3.11.0/openshift-origin-client-tools-v3.11.0-0cbc58b-linux-64bit.tar.gz)
* [SDK](https://github.com/operator-framework/operator-sdk#quick-start)
* [Zookeeper Operator](https://github.com/pravega/zookeeper-operator#install-the-operator)
* [Postgresql Operator](https://operatorhub.io/operator/alpha/postgres-operator.v3.5.0)
## Prepare cluster
## Build the image
```shell
sudo -i oc cluster up
sudo chown root:fedora /var/run/docker.sock
oc login -u developer -p dev
docker login -u developer -p $(oc whoami -t) $(oc registry info)
# Log as admin to install crd
sudo cat /root/.kube/config > ~/.kube/config
oc login -u system:admin
oc project default
```
## Install Postgresql Operator
Follow [install instruction](https://crunchydata.github.io/postgres-operator/stable/installation/),
basically:
```
vi ./pv/crunchy-pv.json # set volume size and pv number
oc apply -f ./pv/crunchy-pv.json
oc apply -f ./deploy/cluster-rbac.yaml
oc apply -f ./deploy/rbac.yaml
./deploy/deploy.sh
$ make image
```
## Install Zookeeper Operator
## Install the operator
```shell
oc create -f https://raw.githubusercontent.com/pravega/zookeeper-operator/master/deploy/crds/zookeeper_v1beta1_zookeepercluster_crd.yaml
oc create -f https://raw.githubusercontent.com/pravega/zookeeper-operator/master/deploy/default_ns/rbac.yaml
oc create -f https://raw.githubusercontent.com/pravega/zookeeper-operator/master/deploy/default_ns/operator.yaml
```
## Install Zuul Operator
```shell
operator-sdk build 172.30.1.1:5000/myproject/zuul-operator:latest
docker push 172.30.1.1:5000/myproject/zuul-operator:latest
oc create -f deploy/crds/zuul-ci_v1alpha1_zuul_crd.yaml
oc create -f deploy/rbac.yaml
oc create -f deploy/operator.yaml
$ make install
kubectl apply -f deploy/crds/zuul-ci_v1alpha1_zuul_crd.yaml -f deploy/rbac.yaml -f deploy/operator.yaml
```
Look for the operator pod and check its output
```shell
$ oc get pods
$ kubectl get pods
NAME READY STATUS RESTARTS AGE
zuul-operator-c64756f66-rbdmg 2/2 Running 0 3s
$ oc logs zuul-operator-c64756f66-rbdmg -c operator
...
$ kubectl logs zuul-operator-c64756f66-rbdmg -c operator
[...]
{"level":"info","ts":1554197305.5853095,"logger":"cmd","msg":"Go Version: go1.10.3"}
{"level":"info","ts":1554197305.5854425,"logger":"cmd","msg":"Go OS/Arch: linux/amd64"}
{"level":"info","ts":1554197305.5854564,"logger":"cmd","msg":"Version of operator-sdk: v0.6.0"}
{"level":"info","ts":1554197305.5855,"logger":"cmd","msg":"Watching namespace.","Namespace":"default"}
...
[...]
```
## Usage
```
$ oc apply -f - <<EOF
apiVersion: zuul-ci.org/v1alpha1
$ kubectl apply -f - <<EOF
apiVersion: operator.zuul-ci.org/v1alpha1
kind: Zuul
metadata:
name: example-zuul
spec:
# Optional user-provided ssh key
#sshsecretename: ""
# Optional user-provided clouds.yaml
#cloudssecretname: ""
# Optional user-provided kube/config
#kubesecretname: ""
merger:
min: 0
max: 10
executor:
min: 1
max: 5
web:
min: 1
launcher:
min: 1
connections: []
tenants:
- tenant:
name: demo
source: {}
EOF
zuul.zuul-ci.org/example-zuul created
$ oc get zuul
NAME AGE
example-zuul 10s
# Get zuul public key
$ oc get secret example-ssh-secret-pub -o "jsonpath={.data.id_rsa\.pub}" | base64 -d
ssh-rsa AAAAB3Nza...
$ oc get pods
NAME READY STATUS RESTARTS AGE
example-zuul-executor-696f969c4-6cpjv 1/1 Running 0 8s
example-zuul-launcher-5974789746-wbwpv 1/1 Running 0 9s
example-zuul-pg-5dfc477bff-8426l 1/1 Running 0 30s
example-zuul-scheduler-77b6cf7967-ksh64 1/1 Running 0 11s
example-zuul-web-5f744f89c9-qjp9l 1/1 Running 0 6s
example-zuul-zk-0 1/1 Running 0 22s
$ oc get svc example-zuul-web
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
example-zuul-web ClusterIP 172.30.209.181 <none> 80/TCP 41s
$ curl 172.30.209.181/api/tenants
[{"name": "demo", "projects": 0, "queue": 0}]
```

@ -1,58 +0,0 @@
---
# Default cr spec
meta:
name: demozuul
tenants:
- tenant:
name: demo
source: {}
connections: []
providers: []
labels:
- name: okd-fedora
min-ready: 1
launcher:
min: 1
merger:
min: 0
max: 5
executor:
min: 1
max: 5
web:
min: 1
max: 1
namespace: "{{ meta.namespace|default('default') }}"
state: "present"
zuul_app_name: "zuul"
zuul_cluster_name: "{{ meta.name }}"
sshsecretname: "{{ zuul_cluster_name }}-ssh-secret"
kubesecretname: "{{ zuul_cluster_name }}-kube-secret"
cloudssecretname: "{{ zuul_cluster_name }}-clouds-secret"
zuul_version: "latest" #"3.7.1"
nodepool_version: "latest"
# Use local image for https://review.openstack.org/650246
#zuul_image_name_base: "docker.io/zuul/zuul"
#nodepool_image_name_base: "docker.io/zuul/nodepool"
zuul_image_name_base: "172.30.1.1:5000/myproject/zuul"
nodepool_image_name_base: "172.30.1.1:5000/myproject/nodepool"
zuul_image_name:
scheduler: "{{ zuul_image_name_base }}-scheduler:{{ zuul_version }}"
merger: "{{ zuul_image_name_base }}-merger:{{ zuul_version }}"
executor: "{{ zuul_image_name_base }}-executor:{{ zuul_version }}"
web: "{{ zuul_image_name_base }}-web:{{ zuul_version }}"
launcher: "{{ nodepool_image_name_base }}-launcher:{{ nodepool_version }}"
zuul_service_account_name: "zuul-operator"
zuul_image_pull_policy: "IfNotPresent"
zuul_configmap_name: "{{ zuul_cluster_name }}-config"
zk_cluster_name: "{{ zuul_cluster_name }}-zk"
zk_api_version: "zookeeper.pravega.io/v1beta1"
pg_cluster_name: "{{ zuul_cluster_name }}-pg"
pg_cr_kind: "Pgcluster"
pg_api_version: "cr.client-go.k8s.io/v1"

@ -1,237 +0,0 @@
---
- name: Create Postgresql Credential
when: not zuul_pg_user
block:
- name: Create k8s secret
k8s:
state: "{{ state }}"
definition:
apiVersion: v1
kind: Secret
metadata:
labels:
pg-database: "{{ pg_cluster_name }}"
app: "{{ zuul_app_name }}"
zuul_cluster: "{{ zuul_cluster_name }}"
name: "{{ pg_cluster_name }}-zuul-secret"
namespace: "{{ namespace }}"
type: Opaque
data:
password: UE5xOEVFVTBxTQ==
username: dGVzdHVzZXI=
- name: Set fact
set_fact:
zuul_pg_user:
- username: dGVzdHVzZXI=
password: UE5xOEVFVTBxTQ==
- name: Create ssh key
when: not zuul_ssh_key
block:
- name: Create ssh key
command: "ssh-keygen -f /opt/ansible/ssh-{{ zuul_cluster_name }} -m PEM -t rsa -N '' -C zuul"
args:
creates: "/opt/ansible/ssh-{{ zuul_cluster_name }}"
- name: Create ssh secret
k8s:
state: "{{ state }}"
definition:
apiVersion: v1
kind: Secret
metadata:
labels:
app: "{{ zuul_app_name }}"
zuul_cluster: "{{ zuul_cluster_name }}"
name: "{{ sshsecretname }}"
namespace: "{{ namespace }}"
type: Opaque
stringData:
id_rsa: |
{{lookup('file', '/opt/ansible/ssh-' + zuul_cluster_name) }}
- name: Create ssh pub secret
k8s:
state: "{{ state }}"
definition:
apiVersion: v1
kind: Secret
metadata:
labels:
app: "{{ zuul_app_name }}"
zuul_cluster: "{{ zuul_cluster_name }}"
name: "{{ sshsecretname }}-pub"
namespace: "{{ namespace }}"
type: Opaque
stringData:
id_rsa.pub: |
{{lookup('file', '/opt/ansible/ssh-' + zuul_cluster_name + '.pub') }}
# TODO: cleanup key file from operator pod
- name: Create cloud config
when: not zuul_clouds_secret
k8s:
state: "{{ state }}"
definition:
apiVersion: v1
kind: Secret
metadata:
labels:
app: "{{ zuul_app_name }}"
zuul_cluster: "{{ zuul_cluster_name }}"
name: "{{ cloudssecretname }}"
namespace: "{{ namespace }}"
type: Opaque
stringData:
clouds.yaml: |
cache:
expiration:
server: 5
port: 5
floating-ip: 5
- name: Create kube config
when: not zuul_kube_secret
k8s:
state: "{{ state }}"
definition:
apiVersion: v1
kind: Secret
metadata:
labels:
app: "{{ zuul_app_name }}"
zuul_cluster: "{{ zuul_cluster_name }}"
name: "{{ kubesecretname }}"
namespace: "{{ namespace }}"
type: Opaque
stringData:
config: |
apiVersion: v1
clusters: []
contexts: []
- name: Create the scheduler configmap
k8s:
state: "{{ state }}"
definition:
kind: ConfigMap
apiVersion: v1
metadata:
name: "{{ zuul_configmap_name }}-scheduler"
namespace: "{{ namespace }}"
labels:
app: "{{ zuul_app_name }}"
zuul_cluster: "{{ zuul_cluster_name }}"
data:
"zuul.conf": |
[gearman]
server=localhost
port=4730
[zookeeper]
hosts={{ zk_cluster_name }}-client:2181
[gearman_server]
start=true
[scheduler]
tenant_config=/etc/zuul/main.yaml
[connection sqlreporter]
driver=sql
dburi=postgresql://{{ zuul_pg_user[0]["username"] | b64decode }}:{{ zuul_pg_user[0]["password"] | b64decode }}@{{ pg_cluster_name }}/zuul
{% for connection in connections %}
[connection {{ connection["name"] }}]
{% if connection["driver"] == "gerrit" %}
sshkey=/var/lib/zuul/ssh-secret/id_rsa
{% endif %}
{% for k, v in connection.items() %}{% if k != "name" %}
{{ k }}={{ v }}
{% endif %}{% endfor %}
{% endfor %}
"main.yaml": |
{{ tenants|to_yaml|regex_replace('(config|untrusted)_projects:', '\1-projects:') }}
register: scheduler_config
- name: Register if tenant config changed
set_fact:
tenant_config_updated: >-
{% if (scheduler_config is changed and
scheduler_config.diff|default(None) and
'main.yaml' in scheduler_config.diff[-1][1]) %}True
{% endif %}
- debug:
msg: "Tenant config is updated"
when: tenant_config_updated
- name: Create the zuul service configmap
k8s:
state: "{{ state }}"
definition:
kind: ConfigMap
apiVersion: v1
metadata:
name: "{{ zuul_configmap_name }}"
namespace: "{{ namespace }}"
labels:
app: "{{ zuul_app_name }}"
zuul_cluster: "{{ zuul_cluster_name }}"
data:
"zuul.conf": |
[gearman]
server={{ zuul_cluster_name }}-scheduler
port=4730
[zookeeper]
hosts={{ zk_cluster_name }}-client:2181
[web]
listen_address=0.0.0.0
port=9000
[executor]
# TODO: add secret map for executor ssh key
private_key_file=/var/lib/zuul/ssh-secret/id_rsa
[connection sqlreporter]
driver=sql
dburi=postgresql://{{ zuul_pg_user[0]["username"] | b64decode }}:{{ zuul_pg_user[0]["password"] | b64decode }}@{{ pg_cluster_name }}/zuul
{% for connection in connections %}
[connection {{ connection["name"] }}]
{% if connection["driver"] == "gerrit" %}
sshkey=/var/lib/zuul/ssh-secret/id_rsa
{% endif %}
{% for k, v in connection.items() %}{% if k != "name" %}
{{ k }}={{ v }}
{% endif %}{% endfor %}
{% endfor %}
- name: Create the nodepool configmap
k8s:
state: "{{ state }}"
definition:
kind: ConfigMap
apiVersion: v1
metadata:
name: "{{ zuul_configmap_name }}-nodepool"
namespace: "{{ namespace }}"
labels:
app: "{{ zuul_app_name }}"
zuul_cluster: "{{ zuul_cluster_name }}"
data:
"nodepool.yaml": |
{{ ({'labels': labels})|to_yaml }}
{{ ({'providers': providers})|to_yaml }}
webapp:
port: 8006
zookeeper-servers:
- host: {{ zk_cluster_name }}-client
port: 2181
register: nodepool_config

@ -1,67 +0,0 @@
- name: Get autoscale count
autoscale_gearman:
service: "{{ deployment_name }}"
gearman: "{{ gearman_service.spec.clusterIP|default(None) }}"
min: "{{ deployment_conf.min|default(0) }}"
max: "{{ deployment_conf.max|default(1) }}"
register: autoscale
when: gearman_service is defined
# TODO: ensure graceful scale-down of service's replicas
- name: Create Deployment
k8s:
state: "{{ state }}"
definition:
kind: "Deployment"
apiVersion: "extensions/v1beta1"
metadata:
name: "{{ zuul_cluster_name }}-{{ deployment_name }}"
namespace: "{{ namespace }}"
labels:
app: "{{ zuul_app_name }}"
zuul_cluster: "{{ zuul_cluster_name }}"
annotations:
configHash: ""
spec:
replicas: "{{ autoscale.count|default(deployment_conf.min) }}"
selector:
matchLabels:
app: "{{ zuul_cluster_name }}-{{ deployment_name }}"
zuul_cluster: "{{ zuul_cluster_name }}"
template:
metadata:
name: "{{ zuul_cluster_name }}-{{ deployment_name }}"
labels:
app: "{{ zuul_cluster_name }}-{{ deployment_name }}"
zuul_cluster: "{{ zuul_cluster_name }}"
spec:
containers:
- name: "{{ zuul_cluster_name }}-{{ deployment_name }}"
image: "{{ zuul_image_name[deployment_name] }}"
imagePullPolicy: "{{ zuul_image_pull_policy }}"
ports: "{{ deployment_ports|default([]) }}"
env:
- name: CONFIG_CHECKSUM
value: "{{ scheduler_config.result.data | checksum }}"
volumeMounts:
- mountPath: "/etc/zuul"
name: zuul-config-volume
readOnly: true
- mountPath: "/var/lib/zuul"
name: zuul-data-volume
- mountPath: "/var/lib/zuul/ssh-secret/"
name: zuul-ssh-key
command:
- "/uid_entrypoint"
- "zuul-{{ deployment_name }}"
- "-d"
volumes:
- name: zuul-config-volume
configMap:
name: "{{ deployment_config|default(zuul_configmap_name) }}"
- name: zuul-data-volume
emptyDir: {}
- name: zuul-ssh-key
secret:
secretName: "{{ sshsecretname }}"
defaultMode: 256

@ -1,72 +0,0 @@
# TODO:
- name: Get autoscale count
# TODO: look for replicas count in zk requests list
# autoscale_zk:
# service: {{ deployment_name }}
# zkhost: "{{ zk_cluster_name }}-client:2181"
# min: {{ deployment_conf.min|default(0) }}
# register: autoscale
set_fact:
autoscale:
count: "{{ deployment_conf.min|default(0) }}"
- name: Create Deployment
k8s:
state: "{{ state }}"
definition:
kind: "Deployment"
apiVersion: "extensions/v1beta1"
metadata:
name: "{{ zuul_cluster_name }}-{{ deployment_name }}"
namespace: "{{ namespace }}"
labels:
app: "{{ zuul_app_name }}"
zuul_cluster: "{{ zuul_cluster_name }}"
annotations:
configHash: ""
spec:
replicas: "{{ autoscale.count }}"
selector:
matchLabels:
app: "{{ zuul_cluster_name }}-{{ deployment_name }}"
zuul_cluster: "{{ zuul_cluster_name }}"
template:
metadata:
name: "{{ zuul_cluster_name }}-{{ deployment_name }}"
labels:
app: "{{ zuul_cluster_name }}-{{ deployment_name }}"
zuul_cluster: "{{ zuul_cluster_name }}"
spec:
containers:
- name: "{{ zuul_cluster_name }}-{{ deployment_name }}"
image: "{{ zuul_image_name[deployment_name] }}"
imagePullPolicy: "{{ zuul_image_pull_policy }}"
env:
- name: CONFIG_CHECKSUM
value: "{{ nodepool_config.result.data | checksum }}"
volumeMounts:
- mountPath: "/etc/nodepool"
name: nodepool-config-volume
readOnly: true
- mountPath: "/var/lib/nodepool"
name: nodepool-data-volume
- mountPath: "/var/lib/nodepool/.kube"
name: nodepool-kube-volume
- mountPath: "/var/lib/nodepool/.config/openstack"
name: nodepool-clouds-volume
command:
- "/uid_entrypoint"
- "nodepool-{{ deployment_name }}"
- "-d"
volumes:
- name: nodepool-config-volume
configMap:
name: "{{ zuul_configmap_name }}-nodepool"
- name: nodepool-data-volume
emptyDir: {}
- name: nodepool-kube-volume
secret:
secretName: "{{ kubesecretname }}"
- name: nodepool-clouds-volume
secret:
secretName: "{{ cloudssecretname }}"

@ -1,18 +0,0 @@
- name: Create Service
k8s:
state: "{{ state }}"
definition:
kind: Service
apiVersion: v1
metadata:
name: "{{ zuul_cluster_name }}-{{ service_name }}"
namespace: "{{ namespace }}"
labels:
app: "{{ zuul_app_name }}"
zuul_cluster: "{{ zuul_cluster_name }}"
spec:
type: ClusterIP
selector:
app: "{{ zuul_cluster_name }}-{{ service_name }}"
zuul_cluster: "{{ zuul_cluster_name }}"
ports: "{{ service_ports }}"

@ -1,83 +0,0 @@
---
- name: Nodepool Deployment
vars:
deployment_name: launcher
deployment_conf: "{{ launcher }}"
include_tasks: "./create_nodepool_deployment.yaml"
- name: Save queue
include_tasks: "./queue_save.yaml"
when:
- scheduler_config is changed
- zuul_scheduler_pod
- name: Scheduler Deployment
vars:
deployment_name: scheduler
deployment_ports:
- containerPort: 4730
protocol: "TCP"
deployment_config: "{{ zuul_configmap_name }}-scheduler"
deployment_conf:
min: 1
include_tasks: "./create_deployment.yaml"
register: sched_deployment
- name: Scheduler service
vars:
service_name: scheduler
service_ports:
- name: "gearman"
port: 4730
protocol: TCP
include_tasks: "./create_service.yaml"
- name: Wait for Service
set_fact:
gearman_service: "{{ lookup('k8s', api_version='v1', kind='Service', namespace=namespace, resource_name=zuul_cluster_name + '-scheduler') }}"
until: gearman_service
retries: 5
delay: 10
- name: Reload scheduler
include_tasks: "./reload_scheduler.yaml"
when:
- sched_deployment is not changed
- tenant_config_updated
- name: Merger Deployment
vars:
deployment_name: merger
deployment_conf: "{{ merger }}"
include_tasks: "./create_deployment.yaml"
- name: Executor Deployment
vars:
deployment_name: executor
deployment_conf: "{{ executor }}"
include_tasks: "./create_deployment.yaml"
- name: Web Deployment
vars:
deployment_name: web
deployment_conf: "{{ web }}"
deployment_ports:
- containerPort: 9000
protocol: "TCP"
include_tasks: "./create_deployment.yaml"
- name: Web Service
vars:
service_name: web
service_ports:
- name: "web"
port: 80
protocol: TCP
targetPort: 9000
include_tasks: "./create_service.yaml"
- name: Load queue
include_tasks: "./queue_load.yaml"
when:
- scheduler_config is changed
- zuul_scheduler_pod

@ -1,3 +0,0 @@
---
- name: Load scheduler queue
debug: msg="TODO..."

@ -1,3 +0,0 @@
---
- name: Dump scheduler queue
debug: msg="TODO..."

@ -1,3 +0,0 @@
---
- name: Add scheduler pod to the inventory
debug: msg="TODO..."

@ -1,125 +0,0 @@
- name: Postgresql Secret
k8s:
definition:
apiVersion: v1
kind: Secret
metadata:
labels:
pg-database: "{{ pg_cluster_name }}"
app: "{{ zuul_app_name }}"
zuul_cluster: "{{ zuul_cluster_name }}"
name: "{{ pg_cluster_name }}-postgres-secret"
namespace: "{{ namespace }}"
type: Opaque
data:
password: M3pBeXpmMThxQg==
username: cG9zdGdyZXM=
- name: Postgresql Primary User
k8s:
definition:
apiVersion: v1
kind: Secret
metadata:
labels:
pg-database: "{{ pg_cluster_name }}"
app: "{{ zuul_app_name }}"
zuul_cluster: "{{ zuul_cluster_name }}"
name: "{{ pg_cluster_name }}-primaryuser-secret"
namespace: "{{ namespace }}"
type: Opaque
data:
password: d0ZvYWlRZFhPTQ==
username: cHJpbWFyeXVzZXI=
- name: Postgresql Deployment
k8s:
definition:
apiVersion: "{{ pg_api_version }}"
kind: "{{ pg_cr_kind }}"
metadata:
labels:
archive: 'false'
archive-timeout: '60'
crunchy-pgbadger: 'false'
crunchy_collect: 'false'
current-primary: "{{ pg_cluster_name }}"
deployment-name: "{{ pg_cluster_name }}"
name: "{{ pg_cluster_name }}"
pg-cluster: "{{ pg_cluster_name }}"
pgo-backrest: 'false'
pgo-version: 3.5.2
primary: 'true'
app: "{{ zuul_app_name }}"
zuul_cluster: "{{ zuul_cluster_name }}"
name: "{{ pg_cluster_name }}"
namespace: "{{ namespace }}"
spec:
ArchiveStorage:
accessmode: ''
fsgroup: ''
matchLabels: ''
name: ''
size: ''
storageclass: ''
storagetype: ''
supplementalgroups: ''
BackrestStorage:
accessmode: ReadWriteMany
fsgroup: ''
matchLabels: ''
name: ''
size: 1G
storageclass: ''
storagetype: create
supplementalgroups: ''
ContainerResources:
limitscpu: ''
limitsmemory: ''
requestscpu: ''
requestsmemory: ''
PrimaryStorage:
accessmode: ReadWriteMany
fsgroup: ''
matchLabels: ''
name: "{{ pg_cluster_name }}"
size: 1G
storageclass: ''
storagetype: create
supplementalgroups: ''
ReplicaStorage:
accessmode: ReadWriteMany
fsgroup: ''
matchLabels: ''
name: ''
size: 1G
storageclass: ''
storagetype: create
supplementalgroups: ''
backuppath: ''
backuppvcname: ''
ccpimage: crunchy-postgres
ccpimagetag: centos7-11.2-2.3.1
clustername: "{{ pg_cluster_name }}"
customconfig: ''
database: zuul
name: "{{ pg_cluster_name }}"
nodename: ''
policies: ''
port: '5432'
primaryhost: "{{ pg_cluster_name }}"
primarysecretname: "{{ pg_cluster_name }}-primaryuser-secret"
replicas: '0'
rootsecretname: "{{ pg_cluster_name }}-postgres-secret"
secretfrom: ''
status: ''
strategy: '1'
user: zuul
userlabels:
archive: 'false'
archive-timeout: '60'
crunchy-pgbadger: 'false'
crunchy_collect: 'false'
pgo-backrest: 'false'
pgo-version: 3.5.2
usersecretname: "{{ pg_cluster_name }}-zuul-secret"

@ -1,14 +0,0 @@
- name: Zookeeper Deployment
k8s:
definition:
apiVersion: "{{ zk_api_version }}"
kind: "ZookeeperCluster"
metadata:
name: "{{ zk_cluster_name }}"
namespace: "{{ namespace }}"
labels:
app: "{{ zuul_app_name }}"
zuul_cluster: "{{ zuul_cluster_name }}"
spec:
size: 1
version: "3.5.3-beta"

@ -1,42 +0,0 @@
---
- set_fact:
label_selector_value: "zuul_cluster={{ zuul_cluster_name }},app={{ zuul_app_name }}"
sched_selector_value: "zuul_cluster={{ zuul_cluster_name }},app={{ zuul_cluster_name }}-scheduler"
pg_user_query: "[?metadata.name=='{{ pg_cluster_name }}-zuul-secret'].data"
ssh_key_query: "[?metadata.name=='{{ sshsecretname }}'].data"
- name: lookup k8s secrets
set_fact:
secrets_lookup: "{{ lookup('k8s', api_version='v1', kind='Secret', namespace=namespace, label_selector=label_selector_value) }}"
- name: lookup cluster secret
set_fact:
zuul_pg_user: "{{ secrets_lookup | json_query(pg_user_query) }}"
zuul_ssh_key: "{{ lookup('k8s', api_version='v1', kind='Secret', namespace=namespace, resource_name=sshsecretname) }}"
zuul_clouds_secret: "{{ lookup('k8s', api_version='v1', kind='Secret', namespace=namespace, resource_name=cloudssecretname) }}"
zuul_kube_secret: "{{ lookup('k8s', api_version='v1', kind='Secret', namespace=namespace, resource_name=kubesecretname) }}"
- name: lookup k8s postgres cr
set_fact:
pg_cr_lookup: "{{ lookup('k8s', api_version=pg_api_version, kind=pg_cr_kind, namespace=namespace, resource_name=pg_cluster_name) }}"
- name: lookup k8s zookeeper cr
set_fact:
zk_cr_lookup: "{{ lookup('k8s', api_version=zk_api_version, kind='ZookeeperCluster', namespace=namespace, resource_name=zk_cluster_name) }}"
- name: lookup scheduler pod
set_fact:
zuul_scheduler_pod: "{{ lookup('k8s', api_version='v1', kind='Pod', namespace=namespace, label_selector=sched_selector_value) }}"
- name: lookup k8s Zuul deployment
set_fact:
zuul_deployment_lookup: "{{ lookup('k8s', api_version='extensions/v1beta1', kind='Deployment', namespace=namespace, resource_name=zuul_cluster_name) }}"
- name: get currently deployed Zuul image name
set_fact:
current_deployed_image: "{{ zuul_deployment_lookup.spec.template.spec.containers['name'=='zuul'].image }}"
when: zuul_deployment_lookup.spec is defined
- debug:
msg: "Zuul Version has CHANGED to '{{ zuul_version }}' - Currently at {{ current_deployed_image }}"
when: (current_deployed_image is defined) and (current_deployed_image != zuul_image_name)

@ -1,19 +0,0 @@
---
- hosts: localhost
gather_facts: no
tasks:
- debug: msg="Running Zuul Operator Playbook"
- name: Show the env
command: env
- import_role:
name: get_status
- import_role:
name: create_config
- import_role:
name: deploy_pg
when: (pg_cr_lookup|length==0)
- import_role:
name: deploy_zk
when: (zk_cr_lookup|length==0)
- import_role:
name: deploy

@ -36,9 +36,4 @@ RUN echo 'let Prelude = ~/conf/Prelude.dhall let Kubernetes = ~/conf/Kubernetes.
# Copy ansible operator requirements
COPY watches.yaml ${HOME}/watches.yaml
COPY ansible/zuul.yaml ${HOME}/zuul.yaml
COPY ansible/group_vars/ ${HOME}/group_vars/
COPY ansible/roles/ ${HOME}/roles/
COPY build/uid_entrypoint.sh /uid_entrypoint
COPY roles ${HOME}/roles

@ -1,8 +0,0 @@
#!/bin/sh
if ! whoami &> /dev/null; then
if [ -w /etc/passwd ]; then
echo "${USER_NAME:-default}:x:$(id -u):0:${USER_NAME:-default} user:${HOME}:/sbin/nologin" >> /etc/passwd
fi
fi
exec "$@"

@ -0,0 +1,137 @@
{- Zuul CR spec as dhall schemas
> Note: in dhall, a record with this structure:
> { Type = { foo : Text }, default = { foo = "bar" } }
> is called a `schema` and can be used to set default values:
> https://docs.dhall-lang.org/references/Built-in-types.html#id133
The `Schemas` record contains schemas for the CR spec attributes.
The `Input` record is the Zuul CR spec schema.
-}
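{- For illustration: with such a schema, the record completion operator `::`
   fills in the defaults, e.g.

       Schemas.Merger::{ git_user_name = Some "Zuul" }

   expands to a full `Schemas.Merger.Type` record where `image`, `count` and
   `git_user_email` keep their `None` defaults.
-}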
let UserSecret = { secretName : Text, key : Optional Text }
let Gerrit =
{ name : Text
, server : Optional Text
, user : Text
, baseurl : Text
, sshkey : UserSecret
}
let GitHub = { name : Text, app_id : Natural, app_key : UserSecret }
let Mqtt =
{ name : Text
, server : Text
, user : Optional Text
, password : Optional UserSecret
}
let Git = { name : Text, baseurl : Text }
let Schemas =
{ Merger =
{ Type =
{ image : Optional Text
, count : Optional Natural
, git_user_email : Optional Text
, git_user_name : Optional Text
}
, default =
{ image = None Text
, count = None Natural
, git_user_email = None Text
, git_user_name = None Text
}
}
, Executor =
{ Type =
{ image : Optional Text
, count : Optional Natural
, ssh_key : UserSecret
}
, default = { image = None Text, count = None Natural }
}
, Web =
{ Type =
{ image : Optional Text
, count : Optional Natural
, status_url : Optional Text
}
, default =
{ image = None Text
, count = None Natural
, status_url = None Text
}
}
, Scheduler =
{ Type =
{ image : Optional Text
, count : Optional Natural
, config : UserSecret
}
, default = { image = None Text, count = None Natural }
}
, Launcher =
{ Type = { image : Optional Text, config : UserSecret }
, default = { image = None Text }
}
, Connections =
{ Type =
{ gerrits : Optional (List Gerrit)
, githubs : Optional (List GitHub)
, mqtts : Optional (List Mqtt)
, gits : Optional (List Git)
}
, default =
{ gerrits = None (List Gerrit)
, githubs = None (List GitHub)
, mqtts = None (List Mqtt)
, gits = None (List Git)
}
}
, ExternalConfigs =
{ Type =
{ openstack : Optional UserSecret
, kubernetes : Optional UserSecret
, amazon : Optional UserSecret
}
, default =
{ openstack = None UserSecret
, kubernetes = None UserSecret
, amazon = None UserSecret
}
}
, UserSecret = { Type = UserSecret, default = { key = None Text } }
, Gerrit = { Type = Gerrit }
, GitHub = { Type = GitHub }
, Mqtt = { Type = Mqtt }
, Git = { Type = Git }
}
let Input =
{ Type =
{ name : Text
, merger : Schemas.Merger.Type
, executor : Schemas.Executor.Type
, web : Schemas.Web.Type
, scheduler : Schemas.Scheduler.Type
, launcher : Schemas.Launcher.Type
, database : Optional UserSecret
, zookeeper : Optional UserSecret
, external_config : Schemas.ExternalConfigs.Type
, connections : Schemas.Connections.Type
}
, default =
{ database = None UserSecret
, zookeeper = None UserSecret
, external_config = Schemas.ExternalConfigs.default
, merger = Schemas.Merger.default
, web = Schemas.Web.default
}
}
in Schemas // { Input = Input }

@ -0,0 +1,802 @@
{- Zuul CR Kubernetes resources
The evaluation of this file is a function that takes the CR input as an argument
and returns the list of Kubernetes objects.
-}
let Prelude = ../Prelude.dhall
let Kubernetes = ../Kubernetes.dhall
let Schemas = ./input.dhall
let Input = Schemas.Input.Type
let UserSecret = Schemas.UserSecret.Type
let Label = { mapKey : Text, mapValue : Text }
let Labels = List Label
let EnvSecret = { name : Text, secret : Text, key : Text }
let File = { path : Text, content : Text }
let Volume =
{ Type = { name : Text, dir : Text, files : List File }
, default = { files = [] : List File }
}
let {- A high-level description of a component such as the scheduler or the launcher
-} Component =
{ Type =
{ name : Text
, count : Natural
, container : Kubernetes.Container.Type
, data-dir : List Volume.Type
, volumes : List Volume.Type
, claim-size : Natural
}
, default =
{ data-dir = [] : List Volume.Type
, volumes = [] : List Volume.Type
, claim-size = 0
}
}
let {- The Kubernetes resources of a Component
-} KubernetesComponent =
{ Type =
{ Service : Optional Kubernetes.Service.Type
, Deployment : Optional Kubernetes.Deployment.Type
, StatefulSet : Optional Kubernetes.StatefulSet.Type
}
, default =
{ Service = None Kubernetes.Service.Type
, Deployment = None Kubernetes.Deployment.Type
, StatefulSet = None Kubernetes.StatefulSet.Type
}
}
let DefaultText =
\(value : Optional Text)
-> \(default : Text)
-> Optional/fold Text value Text (\(some : Text) -> some) default
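{- For example, DefaultText acts as a plain "value or fallback":

       DefaultText (Some "main.yaml") "default.yaml" = "main.yaml"

       DefaultText (None Text) "default.yaml" = "default.yaml"
-}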
let DefaultKey =
\(secret : Optional UserSecret)
-> \(default : Text)
-> Optional/fold
UserSecret
secret
Text
(\(some : UserSecret) -> DefaultText some.key default)
default
let newlineSep = Prelude.Text.concatSep "\n"
let {- This function renders the zuul.conf
-} mkZuulConf =
\(input : Input)
-> \(zk-hosts : Text)
-> \(default-db-password : Text)
-> let {- This is a high-level helper. It takes:
* a Connection type such as `Schemas.Gerrit.Type`,
* an Optional List of that type
* a function that goes from that type to a zuul.conf text blob
Then it returns a text blob for all the connections
-} mkConns =
\(type : Type)
-> \(list : Optional (List type))
-> \(f : type -> Text)
-> newlineSep
( Optional/fold
(List type)
list
(List Text)
(Prelude.List.map type Text f)
([] : List Text)
)
let merger-email =
DefaultText
input.merger.git_user_email
"${input.name}@localhost"
let merger-user = DefaultText input.merger.git_user_name "Zuul"
let executor-key-name =
DefaultText input.executor.ssh_key.key "id_rsa"
let sched-config = DefaultText input.scheduler.config.key "main.yaml"
let web-url = DefaultText input.web.status_url "http://web:9000"
let extra-kube-path = "/etc/nodepool-kubernetes/"
let db-uri =
Optional/fold
UserSecret
input.database
Text
(\(some : UserSecret) -> "%(ZUUL_DB_URI)")
"postgresql://zuul:${default-db-password}@db/zuul"
let gerrits-conf =
mkConns
Schemas.Gerrit.Type
input.connections.gerrits
( \(gerrit : Schemas.Gerrit.Type)
-> let key = DefaultText gerrit.sshkey.key "id_rsa"
let server = DefaultText gerrit.server gerrit.name
in ''
[connection ${gerrit.name}]
driver=gerrit
server=${server}
sshkey=/etc/zuul-gerrit-${gerrit.name}/${key}
user=${gerrit.user}
baseurl=${gerrit.baseurl}
''
)
let githubs-conf =
mkConns
Schemas.GitHub.Type
input.connections.githubs
( \(github : Schemas.GitHub.Type)
-> let key = DefaultText github.app_key.key "github_rsa"
in ''
[connection ${github.name}]
driver=github
server=github.com
app_id=${Natural/show github.app_id}
app_key=/etc/zuul-github-${github.name}/${key}
''
)
let gits-conf =
mkConns
Schemas.Git.Type
input.connections.gits
( \(git : Schemas.Git.Type)
-> ''
[connection ${git.name}]
driver=git
baseurl=${git.baseurl}
''
)
let mqtts-conf =
mkConns
Schemas.Mqtt.Type
input.connections.mqtts
( \(mqtt : Schemas.Mqtt.Type)
-> let user =
Optional/fold
Text
mqtt.user
Text
(\(some : Text) -> "user=${some}")
""
let password =
Optional/fold
UserSecret
mqtt.password
Text
( \(password : UserSecret)
-> "password=%(ZUUL_MQTT_PASSWORD)"
)
""
in ''
[connection ${mqtt.name}]
driver=mqtt
server=${mqtt.server}
${user}
${password}
''
)
in ''
[gearman]
server=scheduler
[gearman_server]
start=true
[zookeeper]
hosts=${zk-hosts}
[merger]
git_user_email=${merger-email}
git_user_name=${merger-user}
[scheduler]
tenant_config=/etc/zuul-scheduler/${sched-config}
[web]
listen_address=0.0.0.0
root=${web-url}
[executor]
private_key_file=/etc/zuul-executor/${executor-key-name}
manage_ansible=false
[connection "sql"]
driver=sql
dburi=${db-uri}
''
++ gits-conf
++ gerrits-conf
++ githubs-conf
++ mqtts-conf
in \(input : Input)
-> let app-labels =
[ { mapKey = "app.kubernetes.io/name", mapValue = input.name }
, { mapKey = "app.kubernetes.io/instance", mapValue = input.name }
, { mapKey = "app.kubernetes.io/part-of", mapValue = "zuul" }
]
let component-label =
\(name : Text)
-> app-labels
# [ { mapKey = "app.kubernetes.io/component"
, mapValue = name
}
]
let mkObjectMeta =
\(name : Text)
-> \(labels : Labels)
-> Kubernetes.ObjectMeta::{ name = name, labels = labels }
let mkSelector =
\(labels : Labels)
-> Kubernetes.LabelSelector::{ matchLabels = labels }
let mkService =
\(name : Text)
-> \(port-name : Text)
-> \(port : Natural)
-> let labels = component-label name
in Kubernetes.Service::{
, metadata = mkObjectMeta name labels
, spec = Some Kubernetes.ServiceSpec::{
, type = Some "ClusterIP"
, selector = labels
, ports =
[ Kubernetes.ServicePort::{
, name = Some port-name
, protocol = Some "TCP"
, targetPort = Some
(Kubernetes.IntOrString.String port-name)
, port = port
}
]
}
}
let mkVolumeEmptyDir =
Prelude.List.map
Volume.Type
Kubernetes.Volume.Type
( \(volume : Volume.Type)
-> Kubernetes.Volume::{
, name = volume.name
, emptyDir = Kubernetes.EmptyDirVolumeSource::{
, medium = Some ""
}
}
)
let mkVolumeSecret =
Prelude.List.map
Volume.Type
Kubernetes.Volume.Type
( \(volume : Volume.Type)
-> Kubernetes.Volume::{
, name = volume.name
, secret = Some Kubernetes.SecretVolumeSource::{
, secretName = Some volume.name
}
}
)
let mkPodTemplateSpec =
\(component : Component.Type)
-> \(labels : Labels)
-> Kubernetes.PodTemplateSpec::{
, metadata = mkObjectMeta component.name labels
, spec = Some Kubernetes.PodSpec::{
, volumes =
mkVolumeSecret component.volumes
# mkVolumeEmptyDir component.data-dir
, containers = [ component.container ]
, automountServiceAccountToken = Some False
}
}
let mkStatefulSet =
\(component : Component.Type)
-> let labels = component-label component.name
let component-name = input.name ++ "-" ++ component.name
let claim =
if Natural/isZero component.claim-size
then [] : List Kubernetes.PersistentVolumeClaim.Type
else [ Kubernetes.PersistentVolumeClaim::{
, apiVersion = ""
, kind = ""
, metadata =
mkObjectMeta component-name ([] : Labels)
, spec = Some Kubernetes.PersistentVolumeClaimSpec::{
, accessModes = [ "ReadWriteOnce" ]
, resources = Some Kubernetes.ResourceRequirements::{
, requests =
toMap
{ storage =
Natural/show
component.claim-size
++ "Gi"
}
}
}
}
]
in Kubernetes.StatefulSet::{
, metadata = mkObjectMeta component-name labels
, spec = Some Kubernetes.StatefulSetSpec::{
, serviceName = component.name
, replicas = Some component.count
, selector = mkSelector labels
, template = mkPodTemplateSpec component labels
, volumeClaimTemplates = claim
}
}
let mkDeployment =
\(component : Component.Type)
-> let labels = component-label component.name
let component-name = input.name ++ "-" ++ component.name
in Kubernetes.Deployment::{
, metadata = mkObjectMeta component-name labels
, spec = Some Kubernetes.DeploymentSpec::{
, replicas = Some component.count
, selector = mkSelector labels
, template = mkPodTemplateSpec component labels
}
}
let mkEnvVarValue =
Prelude.List.map
Label
Kubernetes.EnvVar.Type
( \(env : Label)
-> Kubernetes.EnvVar::{
, name = env.mapKey
, value = Some env.mapValue
}
)
let mkEnvVarSecret =
Prelude.List.map
EnvSecret
Kubernetes.EnvVar.Type
( \(env : EnvSecret)
-> Kubernetes.EnvVar::{
, name = env.name
, valueFrom = Kubernetes.EnvVarSource::{
, secretKeyRef = Some Kubernetes.SecretKeySelector::{
, key = env.key
, name = Some env.secret
}
}
}
)
let mkVolumeMount =
Prelude.List.map
Volume.Type
Kubernetes.VolumeMount.Type
( \(volume : Volume.Type)
-> Kubernetes.VolumeMount::{
, name = volume.name
, mountPath = volume.dir
}
)
let mkSecret =
\(volume : Volume.Type)
-> Kubernetes.Resource.Secret
Kubernetes.Secret::{
, metadata = Kubernetes.ObjectMeta::{ name = volume.name }
, stringData =
Prelude.List.map
File
{ mapKey : Text, mapValue : Text }
( \(config : File)
-> { mapKey = config.path
, mapValue = config.content
}
)
volume.files
}
let zk-hosts =
Optional/fold
UserSecret
input.zookeeper
Text
(\(some : UserSecret) -> "%(ZUUL_ZK_HOSTS)")
"zk"
let zk-hosts-secret-env =
Optional/fold
UserSecret
input.zookeeper
(List Kubernetes.EnvVar.Type)
( \(some : UserSecret)
-> mkEnvVarSecret
[ { name = "ZUUL_ZK_HOSTS"
, secret = some.secretName
, key = DefaultText some.key "hosts"
}
]
)
([] : List Kubernetes.EnvVar.Type)
let org = "docker.io/zuul"
let version = "latest"
let image = \(name : Text) -> "${org}/${name}:${version}"
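-- For example, `image "zuul-executor"` evaluates to
-- "docker.io/zuul/zuul-executor:latest" (the component name here is purely
-- illustrative).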
let etc-nodepool =
Volume::{
, name = input.name ++ "-secret-nodepool"
, dir = "/etc/nodepool"
, files =
[ { path = "nodepool.yaml"
, content =
''
zookeeper-servers:
- host: ${zk-hosts}
port: 2181
webapp:
port: 5000
''
}
]
}
let {- TODO: generate random password -} default-db-password =
"super-secret"
let etc-zuul =
Volume::{
, name = input.name ++ "-secret-zuul"
, dir = "/etc/zuul"
, files =
[ { path = "zuul.conf"
, content = mkZuulConf input zk-hosts default-db-password
}
]
}
let Components =
{ Backend =
let db-volumes =
[ Volume::{ name = "pg-data", dir = "/var/lib/pg/" } ]
let zk-volumes =
[ Volume::{
, name = "zk-log"
, dir = "/var/log/zookeeper/"
}
, Volume::{
, name = "zk-dat"
, dir = "/var/lib/zookeeper/"
}
]
in { Database =
Optional/fold
UserSecret
input.database
KubernetesComponent.Type
( \(some : UserSecret)
-> KubernetesComponent.default
)
KubernetesComponent::{
, Service = Some (mkService "db" "pg" 5432)
, StatefulSet = Some
( mkStatefulSet
Component::{
, name = "db"
, count = 1
, data-dir = db-volumes
, claim-size = 1
, container = Kubernetes.Container::{
, name = "db"
, image = Some
"docker.io/library/postgres:12.1"
, imagePullPolicy = Some "IfNotPresent"
, ports =
[ Kubernetes.ContainerPort::{
, name = Some "pg"
, containerPort = 5432
}
]
, env =
mkEnvVarValue
( toMap
{ POSTGRES_USER = "zuul"