Replace existing operator tasks with the new dhall function

This change replaces the existing tasks with a dhall function that
generates all the Kubernetes objects. The operator now converts
the CR spec to a dhall `Input`, then applies the function's
output to the cluster. Follow-up changes demonstrate how
runtime operations can be performed around that function.

This change updates the zuul-ci_v1alpha1_zuul_cr.yaml file with
the actual CR defined in the zuul specification so that it can
be used in the functional tests.

Depends-On: https://review.opendev.org/702753
Change-Id: Iea51bccf90def6e827d2c5846ad6a7e4c86a5bc1
Tristan Cacqueray 2020-01-11 18:56:14 +00:00
parent bdf18a00b8
commit 2937272624
37 changed files with 1460 additions and 975 deletions
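The reconcile flow introduced here can be reproduced by hand with the dhall tooling the role wraps; a minimal sketch using the commands documented in playbooks/files/cr_spec.yaml below (run from the repository root, the spec file is the one used by the functional job):

```shell
# Convert a CR spec to the dhall Input type, evaluate the resources function,
# then apply the resulting object list; this mirrors what the json_to_dhall /
# dhall_to_json modules and the k8s task do inside the zuul role.
INPUT=$(yaml-to-dhall "(./conf/zuul/input.dhall).Input.Type" < playbooks/files/cr_spec.yaml)
dhall-to-yaml --omit-empty --explain <<< "(./conf/zuul/resources.dhall ($INPUT)).List" | kubectl apply -f -
```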


@@ -1,18 +1,26 @@
 - job:
-    description: |
-      Test that zuul-operator works in Kubernetes (currently debugging use)
-    name: zuul-operator-functional-k8s
-    pre-run: playbooks/zuul-operator-functional-k8s/pre.yaml
-    run: playbooks/zuul-operator-functional-k8s/run.yaml
-    post-run: playbooks/zuul-operator-functional-k8s/post.yaml
-    nodeset: ubuntu-xenial
-    required-projects:
-      - github.com/pravega/zookeeper-operator
-      - zuul/zuul-operator
+    description: Operator integration tests
+    name: zuul-operator-functional
+    abstract: true
+    run: playbooks/zuul-operator-functional/run.yaml
+    post-run: playbooks/zuul-operator-functional/post.yaml
+    requires: docker-image
+    vars:
+      # We disable userland-proxy to enable scheduler deployement to connect to the gearman service
+      # see: https://github.com/eclipse/che/issues/8134
+      docker_userland_proxy: false

 - job:
-    description: |
-      Build's Zuul operator image taken from buildset registry
+    description: Operator integration tests with Kubernetes
+    name: zuul-operator-functional-k8s
+    parent: zuul-operator-functional
+    pre-run: playbooks/zuul-operator-functional/pre-k8s.yaml
+    nodeset: ubuntu-bionic
+    vars:
+      namespace: 'default'
+
+- job:
+    description: Image and buildset registry job
     name: zuul-operator-build-image
     parent: opendev-build-docker-image
     allowed-projects: zuul/zuul-operator
@@ -27,10 +35,10 @@
     check:
       jobs:
         - zuul-operator-build-image
-        # - zuul-operator-functional-k8s:
-        #     dependencies: zuul-operator-build-image
+        - zuul-operator-functional-k8s:
+            dependencies: zuul-operator-build-image
     gate:
       jobs:
         - zuul-operator-build-image
-        # - zuul-operator-functional-k8s:
-        #     dependencies: zuul-operator-build-image
+        - zuul-operator-functional-k8s:
+            dependencies: zuul-operator-build-image


@@ -1,2 +1,8 @@
 image:
     podman build -f build/Dockerfile -t docker.io/zuul/zuul-operator .
+
+install:
+    kubectl apply -f deploy/crds/zuul-ci_v1alpha1_zuul_crd.yaml -f deploy/rbac.yaml -f deploy/operator.yaml
+
+deploy-cr:
+    kubectl apply -f deploy/crds/zuul-ci_v1alpha1_zuul_cr.yaml
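Assuming a cluster is reachable and the user-provided secrets referenced by the sample CR already exist, the new targets are meant to be chained (a sketch):

```shell
make image      # build docker.io/zuul/zuul-operator with podman
make install    # create the CRD, the RBAC rules and the operator deployment
make deploy-cr  # apply the sample CR from deploy/crds/zuul-ci_v1alpha1_zuul_cr.yaml
```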

README.md

@@ -1,128 +1,44 @@
-A Zuul Operator PoC
-=======
+Zuul Operator
+=============

-## Requirements:
-
-* [OKD](https://github.com/openshift/origin/releases/download/v3.11.0/openshift-origin-client-tools-v3.11.0-0cbc58b-linux-64bit.tar.gz)
-* [SDK](https://github.com/operator-framework/operator-sdk#quick-start)
-* [Zookeeper Operator](https://github.com/pravega/zookeeper-operator#install-the-operator)
-* [Postgresql Operator](https://operatorhub.io/operator/alpha/postgres-operator.v3.5.0)
-
-## Prepare cluster
+## Build the image

 ```shell
-sudo -i oc cluster up
-sudo chown root:fedora /var/run/docker.sock
-oc login -u developer -p dev
-docker login -u developer -p $(oc whoami -t) $(oc registry info)
-# Log as admin to install crd
-sudo cat /root/.kube/config > ~/.kube/config
-oc login -u system:admin
-oc project default
+$ make image
 ```

-## Install Postgress Operator
+## Install the operator

-Follow [install instruction](https://crunchydata.github.io/postgres-operator/stable/installation/),
-basically:
-```
-vi ./pv/crunchy-pv.json # set volume size and pv number
-oc apply -f ./pv/crunchy-pv.json
-oc apply -f ./deploy/cluster-rbac.yaml
-oc apply -f ./deploy/rbac.yaml
-./deploy/deploy.sh
-```
-
-## Install Zookeeper Operator
-
 ```shell
-oc create -f https://raw.githubusercontent.com/pravega/zookeeper-operator/master/deploy/crds/zookeeper_v1beta1_zookeepercluster_crd.yaml
-oc create -f https://raw.githubusercontent.com/pravega/zookeeper-operator/master/deploy/default_ns/rbac.yaml
-oc create -f https://raw.githubusercontent.com/pravega/zookeeper-operator/master/deploy/default_ns/operator.yaml
-```
-
-## Install Zuul Operator
-
-```shell
-operator-sdk build 172.30.1.1:5000/myproject/zuul-operator:latest
-docker push 172.30.1.1:5000/myproject/zuul-operator:latest
-oc create -f deploy/crds/zuul-ci_v1alpha1_zuul_crd.yaml
-oc create -f deploy/rbac.yaml
-oc create -f deploy/operator.yaml
+$ make install
+kubectl apply -f deploy/crds/zuul-ci_v1alpha1_zuul_crd.yaml -f deploy/rbac.yaml -f deploy/operator.yaml
 ```

 Look for operator pod and check it's output

 ```shell
-$ oc get pods
+$ kubectl get pods
 NAME                            READY   STATUS    RESTARTS   AGE
 zuul-operator-c64756f66-rbdmg   2/2     Running   0          3s
-$ oc logs zuul-operator-c64756f66-rbdmg -c operator
-...
+$ kubectl logs zuul-operator-c64756f66-rbdmg -c operator
+[...]
 {"level":"info","ts":1554197305.5853095,"logger":"cmd","msg":"Go Version: go1.10.3"}
 {"level":"info","ts":1554197305.5854425,"logger":"cmd","msg":"Go OS/Arch: linux/amd64"}
 {"level":"info","ts":1554197305.5854564,"logger":"cmd","msg":"Version of operator-sdk: v0.6.0"}
 {"level":"info","ts":1554197305.5855,"logger":"cmd","msg":"Watching namespace.","Namespace":"default"}
-...
+[...]
 ```

 ## Usage

 ```
-$ oc apply -f - <<EOF
-apiVersion: zuul-ci.org/v1alpha1
+$ kubectl apply -f - <<EOF
+apiVersion: operator.zuul-ci.org/v1alpha1
 kind: Zuul
 metadata:
   name: example-zuul
 spec:
-  # Optional user-provided ssh key
-  #sshsecretename: ""
-  # Optional user-provided clouds.yaml
-  #cloudssecretname: ""
-  # Optional user-provided kube/config
-  #kubesecretname: ""
-  merger:
-    min: 0
-    max: 10
-  executor:
-    min: 1
-    max: 5
-  web:
-    min: 1
-  launcher:
-    min: 1
-  connections: []
-  tenants:
-    - tenant:
-        name: demo
-        source: {}
 EOF
 zuul.zuul-ci.org/example-zuul created
-
-$ oc get zuul
-NAME           AGE
-example-zuul   10s
-
-# Get zuul public key
-$ oc get secret example-ssh-secret-pub -o "jsonpath={.data.id_rsa\.pub}" | base64 -d
-ssh-rsa AAAAB3Nza...
-
-$ oc get pods
-NAME                                      READY   STATUS    RESTARTS   AGE
-example-zuul-executor-696f969c4-6cpjv     1/1     Running   0          8s
-example-zuul-launcher-5974789746-wbwpv    1/1     Running   0          9s
-example-zuul-pg-5dfc477bff-8426l          1/1     Running   0          30s
-example-zuul-scheduler-77b6cf7967-ksh64   1/1     Running   0          11s
-example-zuul-web-5f744f89c9-qjp9l         1/1     Running   0          6s
-example-zuul-zk-0                         1/1     Running   0          22s
-
-$ oc get svc example-zuul-web
-NAME               TYPE        CLUSTER-IP       EXTERNAL-IP   PORT(S)   AGE
-example-zuul-web   ClusterIP   172.30.209.181   <none>        80/TCP    41s
-
-$ curl 172.30.209.181/api/tenants
-[{"name": "demo", "projects": 0, "queue": 0}]
 ```


@@ -1,58 +0,0 @@
---
# Default cr spec
meta:
name: demozuul
tenants:
- tenant:
name: demo
source: {}
connections: []
providers: []
labels:
- name: okd-fedora
min-ready: 1
launcher:
min: 1
merger:
min: 0
max: 5
executor:
min: 1
max: 5
web:
min: 1
max: 1
namespace: "{{ meta.namespace|default('default') }}"
state: "present"
zuul_app_name: "zuul"
zuul_cluster_name: "{{ meta.name }}"
sshsecretname: "{{ zuul_cluster_name }}-ssh-secret"
kubesecretname: "{{ zuul_cluster_name }}-kube-secret"
cloudssecretname: "{{ zuul_cluster_name }}-clouds-secret"
zuul_version: "latest" #"3.7.1"
nodepool_version: "latest"
# Use local image for https://review.openstack.org/650246
#zuul_image_name_base: "docker.io/zuul/zuul"
#nodepool_image_name_base: "docker.io/zuul/nodepool"
zuul_image_name_base: "172.30.1.1:5000/myproject/zuul"
nodepool_image_name_base: "172.30.1.1:5000/myproject/nodepool"
zuul_image_name:
scheduler: "{{ zuul_image_name_base }}-scheduler:{{ zuul_version }}"
merger: "{{ zuul_image_name_base }}-merger:{{ zuul_version }}"
executor: "{{ zuul_image_name_base }}-executor:{{ zuul_version }}"
web: "{{ zuul_image_name_base }}-web:{{ zuul_version }}"
launcher: "{{ nodepool_image_name_base }}-launcher:{{ nodepool_version }}"
zuul_service_account_name: "zuul-operator"
zuul_image_pull_policy: "IfNotPresent"
zuul_configmap_name: "{{ zuul_cluster_name }}-config"
zk_cluster_name: "{{ zuul_cluster_name }}-zk"
zk_api_version: "zookeeper.pravega.io/v1beta1"
pg_cluster_name: "{{ zuul_cluster_name }}-pg"
pg_cr_kind: "Pgcluster"
pg_api_version: "cr.client-go.k8s.io/v1"


@@ -1,237 +0,0 @@
---
- name: Create Postgresql Credential
when: not zuul_pg_user
block:
- name: Create k8s secret
k8s:
state: "{{ state }}"
definition:
apiVersion: v1
kind: Secret
metadata:
labels:
pg-database: "{{ pg_cluster_name }}"
app: "{{ zuul_app_name }}"
zuul_cluster: "{{ zuul_cluster_name }}"
name: "{{ pg_cluster_name }}-zuul-secret"
namespace: "{{ namespace }}"
type: Opaque
data:
password: UE5xOEVFVTBxTQ==
username: dGVzdHVzZXI=
- name: Set fact
set_fact:
zuul_pg_user:
- username: dGVzdHVzZXI=
password: UE5xOEVFVTBxTQ==
- name: Create ssh key
when: not zuul_ssh_key
block:
- name: Create ssh key
command: "ssh-keygen -f /opt/ansible/ssh-{{ zuul_cluster_name }} -m PEM -t rsa -N '' -C zuul"
args:
creates: "/opt/ansible/ssh-{{ zuul_cluster_name }}"
- name: Create ssh secret
k8s:
state: "{{ state }}"
definition:
apiVersion: v1
kind: Secret
metadata:
labels:
app: "{{ zuul_app_name }}"
zuul_cluster: "{{ zuul_cluster_name }}"
name: "{{ sshsecretname }}"
namespace: "{{ namespace }}"
type: Opaque
stringData:
id_rsa: |
{{lookup('file', '/opt/ansible/ssh-' + zuul_cluster_name) }}
- name: Create ssh pub secret
k8s:
state: "{{ state }}"
definition:
apiVersion: v1
kind: Secret
metadata:
labels:
app: "{{ zuul_app_name }}"
zuul_cluster: "{{ zuul_cluster_name }}"
name: "{{ sshsecretname }}-pub"
namespace: "{{ namespace }}"
type: Opaque
stringData:
id_rsa.pub: |
{{lookup('file', '/opt/ansible/ssh-' + zuul_cluster_name + '.pub') }}
# TODO: cleanup key file from operator pod
- name: Create cloud config
when: not zuul_clouds_secret
k8s:
state: "{{ state }}"
definition:
apiVersion: v1
kind: Secret
metadata:
labels:
app: "{{ zuul_app_name }}"
zuul_cluster: "{{ zuul_cluster_name }}"
name: "{{ cloudssecretname }}"
namespace: "{{ namespace }}"
type: Opaque
stringData:
clouds.yaml: |
cache:
expiration:
server: 5
port: 5
floating-ip: 5
- name: Create kube config
when: not zuul_kube_secret
k8s:
state: "{{ state }}"
definition:
apiVersion: v1
kind: Secret
metadata:
labels:
app: "{{ zuul_app_name }}"
zuul_cluster: "{{ zuul_cluster_name }}"
name: "{{ kubesecretname }}"
namespace: "{{ namespace }}"
type: Opaque
stringData:
config: |
apiVersion: v1
clusters: []
contexts: []
- name: Create the scheduler configmap
k8s:
state: "{{ state }}"
definition:
kind: ConfigMap
apiVersion: v1
metadata:
name: "{{ zuul_configmap_name }}-scheduler"
namespace: "{{ namespace }}"
labels:
app: "{{ zuul_app_name }}"
zuul_cluster: "{{ zuul_cluster_name }}"
data:
"zuul.conf": |
[gearman]
server=localhost
port=4730
[zookeeper]
hosts={{ zk_cluster_name }}-client:2181
[gearman_server]
start=true
[scheduler]
tenant_config=/etc/zuul/main.yaml
[connection sqlreporter]
driver=sql
dburi=postgresql://{{ zuul_pg_user[0]["username"] | b64decode }}:{{ zuul_pg_user[0]["password"] | b64decode }}@{{ pg_cluster_name }}/zuul
{% for connection in connections %}
[connection {{ connection["name"] }}]
{% if connection["driver"] == "gerrit" %}
sshkey=/var/lib/zuul/ssh-secret/id_rsa
{% endif %}
{% for k, v in connection.items() %}{% if k != "name" %}
{{ k }}={{ v }}
{% endif %}{% endfor %}
{% endfor %}
"main.yaml": |
{{ tenants|to_yaml|regex_replace('(config|untrusted)_projects:', '\1-projects:') }}
register: scheduler_config
- name: Register if tenant config changed
set_fact:
tenant_config_updated: >-
{% if (scheduler_config is changed and
scheduler_config.diff|default(None) and
'main.yaml' in scheduler_config.diff[-1][1]) %}True
{% endif %}
- debug:
msg: "Tenant config is updated"
when: tenant_config_updated
- name: Create the zuul service configmap
k8s:
state: "{{ state }}"
definition:
kind: ConfigMap
apiVersion: v1
metadata:
name: "{{ zuul_configmap_name }}"
namespace: "{{ namespace }}"
labels:
app: "{{ zuul_app_name }}"
zuul_cluster: "{{ zuul_cluster_name }}"
data:
"zuul.conf": |
[gearman]
server={{ zuul_cluster_name }}-scheduler
port=4730
[zookeeper]
hosts={{ zk_cluster_name }}-client:2181
[web]
listen_address=0.0.0.0
port=9000
[executor]
# TODO: add secret map for executor ssh key
private_key_file=/var/lib/zuul/ssh-secret/id_rsa
[connection sqlreporter]
driver=sql
dburi=postgresql://{{ zuul_pg_user[0]["username"] | b64decode }}:{{ zuul_pg_user[0]["password"] | b64decode }}@{{ pg_cluster_name }}/zuul
{% for connection in connections %}
[connection {{ connection["name"] }}]
{% if connection["driver"] == "gerrit" %}
sshkey=/var/lib/zuul/ssh-secret/id_rsa
{% endif %}
{% for k, v in connection.items() %}{% if k != "name" %}
{{ k }}={{ v }}
{% endif %}{% endfor %}
{% endfor %}
- name: Create the nodepool configmap
k8s:
state: "{{ state }}"
definition:
kind: ConfigMap
apiVersion: v1
metadata:
name: "{{ zuul_configmap_name }}-nodepool"
namespace: "{{ namespace }}"
labels:
app: "{{ zuul_app_name }}"
zuul_cluster: "{{ zuul_cluster_name }}"
data:
"nodepool.yaml": |
{{ ({'labels': labels})|to_yaml }}
{{ ({'providers': providers})|to_yaml }}
webapp:
port: 8006
zookeeper-servers:
- host: {{ zk_cluster_name }}-client
port: 2181
register: nodepool_config


@@ -1,67 +0,0 @@
- name: Get autoscale count
autoscale_gearman:
service: "{{ deployment_name }}"
gearman: "{{ gearman_service.spec.clusterIP|default(None) }}"
min: "{{ deployment_conf.min|default(0) }}"
max: "{{ deployment_conf.max|default(1) }}"
register: autoscale
when: gearman_service is defined
# TODO: ensure graceful scale-down of service's replicas
- name: Create Deployment
k8s:
state: "{{ state }}"
definition:
kind: "Deployment"
apiVersion: "extensions/v1beta1"
metadata:
name: "{{ zuul_cluster_name }}-{{ deployment_name }}"
namespace: "{{ namespace }}"
labels:
app: "{{ zuul_app_name }}"
zuul_cluster: "{{ zuul_cluster_name }}"
annotations:
configHash: ""
spec:
replicas: "{{ autoscale.count|default(deployment_conf.min) }}"
selector:
matchLabels:
app: "{{ zuul_cluster_name }}-{{ deployment_name }}"
zuul_cluster: "{{ zuul_cluster_name }}"
template:
metadata:
name: "{{ zuul_cluster_name }}-{{ deployment_name }}"
labels:
app: "{{ zuul_cluster_name }}-{{ deployment_name }}"
zuul_cluster: "{{ zuul_cluster_name }}"
spec:
containers:
- name: "{{ zuul_cluster_name }}-{{ deployment_name }}"
image: "{{ zuul_image_name[deployment_name] }}"
imagePullPolicy: "{{ zuul_image_pull_policy }}"
ports: "{{ deployment_ports|default([]) }}"
env:
- name: CONFIG_CHECKSUM
value: "{{ scheduler_config.result.data | checksum }}"
volumeMounts:
- mountPath: "/etc/zuul"
name: zuul-config-volume
readOnly: true
- mountPath: "/var/lib/zuul"
name: zuul-data-volume
- mountPath: "/var/lib/zuul/ssh-secret/"
name: zuul-ssh-key
command:
- "/uid_entrypoint"
- "zuul-{{ deployment_name }}"
- "-d"
volumes:
- name: zuul-config-volume
configMap:
name: "{{ deployment_config|default(zuul_configmap_name) }}"
- name: zuul-data-volume
emptyDir: {}
- name: zuul-ssh-key
secret:
secretName: "{{ sshsecretname }}"
defaultMode: 256


@@ -1,72 +0,0 @@
# TODO:
- name: Get autoscale count
# TODO: look for replicas count in zk requests list
# autoscale_zk:
# service: {{ deployment_name }}
# zkhost: "{{ zk_cluster_name }}-client:2181"
# min: {{ deployment_conf.min|default(0) }}
# register: autoscale
set_fact:
autoscale:
count: "{{ deployment_conf.min|default(0) }}"
- name: Create Deployment
k8s:
state: "{{ state }}"
definition:
kind: "Deployment"
apiVersion: "extensions/v1beta1"
metadata:
name: "{{ zuul_cluster_name }}-{{ deployment_name }}"
namespace: "{{ namespace }}"
labels:
app: "{{ zuul_app_name }}"
zuul_cluster: "{{ zuul_cluster_name }}"
annotations:
configHash: ""
spec:
replicas: "{{ autoscale.count }}"
selector:
matchLabels:
app: "{{ zuul_cluster_name }}-{{ deployment_name }}"
zuul_cluster: "{{ zuul_cluster_name }}"
template:
metadata:
name: "{{ zuul_cluster_name }}-{{ deployment_name }}"
labels:
app: "{{ zuul_cluster_name }}-{{ deployment_name }}"
zuul_cluster: "{{ zuul_cluster_name }}"
spec:
containers:
- name: "{{ zuul_cluster_name }}-{{ deployment_name }}"
image: "{{ zuul_image_name[deployment_name] }}"
imagePullPolicy: "{{ zuul_image_pull_policy }}"
env:
- name: CONFIG_CHECKSUM
value: "{{ nodepool_config.result.data | checksum }}"
volumeMounts:
- mountPath: "/etc/nodepool"
name: nodepool-config-volume
readOnly: true
- mountPath: "/var/lib/nodepool"
name: nodepool-data-volume
- mountPath: "/var/lib/nodepool/.kube"
name: nodepool-kube-volume
- mountPath: "/var/lib/nodepool/.config/openstack"
name: nodepool-clouds-volume
command:
- "/uid_entrypoint"
- "nodepool-{{ deployment_name }}"
- "-d"
volumes:
- name: nodepool-config-volume
configMap:
name: "{{ zuul_configmap_name }}-nodepool"
- name: nodepool-data-volume
emptyDir: {}
- name: nodepool-kube-volume
secret:
secretName: "{{ kubesecretname }}"
- name: nodepool-clouds-volume
secret:
secretName: "{{ cloudssecretname }}"


@@ -1,18 +0,0 @@
- name: Create Service
k8s:
state: "{{ state }}"
definition:
kind: Service
apiVersion: v1
metadata:
name: "{{ zuul_cluster_name }}-{{ service_name }}"
namespace: "{{ namespace }}"
labels:
app: "{{ zuul_app_name }}"
zuul_cluster: "{{ zuul_cluster_name }}"
spec:
type: ClusterIP
selector:
app: "{{ zuul_cluster_name }}-{{ service_name }}"
zuul_cluster: "{{ zuul_cluster_name }}"
ports: "{{ service_ports }}"


@@ -1,83 +0,0 @@
---
- name: Nodepool Deployment
vars:
deployment_name: launcher
deployment_conf: "{{ launcher }}"
include_tasks: "./create_nodepool_deployment.yaml"
- name: Save queue
include_tasks: "./queue_save.yaml"
when:
- scheduler_config is changed
- zuul_scheduler_pod
- name: Scheduler Deployment
vars:
deployment_name: scheduler
deployment_ports:
- containerPort: 4730
protocol: "TCP"
deployment_config: "{{ zuul_configmap_name }}-scheduler"
deployment_conf:
min: 1
include_tasks: "./create_deployment.yaml"
register: sched_deployment
- name: Scheduler service
vars:
service_name: scheduler
service_ports:
- name: "gearman"
port: 4730
protocol: TCP
include_tasks: "./create_service.yaml"
- name: Wait for Service
set_fact:
gearman_service: "{{ lookup('k8s', api_version='v1', kind='Service', namespace=namespace, resource_name=zuul_cluster_name + '-scheduler') }}"
until: gearman_service
retries: 5
delay: 10
- name: Reload scheduler
include_tasks: "./reload_scheduler.yaml"
when:
- sched_deployment is not changed
- tenant_config_updated
- name: Merger Deployment
vars:
deployment_name: merger
deployment_conf: "{{ merger }}"
include_tasks: "./create_deployment.yaml"
- name: Executor Deployment
vars:
deployment_name: executor
deployment_conf: "{{ executor }}"
include_tasks: "./create_deployment.yaml"
- name: Web Deployment
vars:
deployment_name: web
deployment_conf: "{{ web }}"
deployment_ports:
- containerPort: 9000
protocol: "TCP"
include_tasks: "./create_deployment.yaml"
- name: Web Service
vars:
service_name: web
service_ports:
- name: "web"
port: 80
protocol: TCP
targetPort: 9000
include_tasks: "./create_service.yaml"
- name: Load queue
include_tasks: "./queue_load.yaml"
when:
- scheduler_config is changed
- zuul_scheduler_pod


@@ -1,3 +0,0 @@
---
- name: Load scheduler queue
debug: msg="TODO..."


@@ -1,3 +0,0 @@
---
- name: Dump scheduler queue
debug: msg="TODO..."


@@ -1,3 +0,0 @@
---
- name: Add scheduler pod to the inventory
debug: msg="TODO..."


@@ -1,125 +0,0 @@
- name: Postgresql Secret
k8s:
definition:
apiVersion: v1
kind: Secret
metadata:
labels:
pg-database: "{{ pg_cluster_name }}"
app: "{{ zuul_app_name }}"
zuul_cluster: "{{ zuul_cluster_name }}"
name: "{{ pg_cluster_name }}-postgres-secret"
namespace: "{{ namespace }}"
type: Opaque
data:
password: M3pBeXpmMThxQg==
username: cG9zdGdyZXM=
- name: Postgresql Primary User
k8s:
definition:
apiVersion: v1
kind: Secret
metadata:
labels:
pg-database: "{{ pg_cluster_name }}"
app: "{{ zuul_app_name }}"
zuul_cluster: "{{ zuul_cluster_name }}"
name: "{{ pg_cluster_name }}-primaryuser-secret"
namespace: "{{ namespace }}"
type: Opaque
data:
password: d0ZvYWlRZFhPTQ==
username: cHJpbWFyeXVzZXI=
- name: Postgresql Deployment
k8s:
definition:
apiVersion: "{{ pg_api_version }}"
kind: "{{ pg_cr_kind }}"
metadata:
labels:
archive: 'false'
archive-timeout: '60'
crunchy-pgbadger: 'false'
crunchy_collect: 'false'
current-primary: "{{ pg_cluster_name }}"
deployment-name: "{{ pg_cluster_name }}"
name: "{{ pg_cluster_name }}"
pg-cluster: "{{ pg_cluster_name }}"
pgo-backrest: 'false'
pgo-version: 3.5.2
primary: 'true'
app: "{{ zuul_app_name }}"
zuul_cluster: "{{ zuul_cluster_name }}"
name: "{{ pg_cluster_name }}"
namespace: "{{ namespace }}"
spec:
ArchiveStorage:
accessmode: ''
fsgroup: ''
matchLabels: ''
name: ''
size: ''
storageclass: ''
storagetype: ''
supplementalgroups: ''
BackrestStorage:
accessmode: ReadWriteMany
fsgroup: ''
matchLabels: ''
name: ''
size: 1G
storageclass: ''
storagetype: create
supplementalgroups: ''
ContainerResources:
limitscpu: ''
limitsmemory: ''
requestscpu: ''
requestsmemory: ''
PrimaryStorage:
accessmode: ReadWriteMany
fsgroup: ''
matchLabels: ''
name: "{{ pg_cluster_name }}"
size: 1G
storageclass: ''
storagetype: create
supplementalgroups: ''
ReplicaStorage:
accessmode: ReadWriteMany
fsgroup: ''
matchLabels: ''
name: ''
size: 1G
storageclass: ''
storagetype: create
supplementalgroups: ''
backuppath: ''
backuppvcname: ''
ccpimage: crunchy-postgres
ccpimagetag: centos7-11.2-2.3.1
clustername: "{{ pg_cluster_name }}"
customconfig: ''
database: zuul
name: "{{ pg_cluster_name }}"
nodename: ''
policies: ''
port: '5432'
primaryhost: "{{ pg_cluster_name }}"
primarysecretname: "{{ pg_cluster_name }}-primaryuser-secret"
replicas: '0'
rootsecretname: "{{ pg_cluster_name }}-postgres-secret"
secretfrom: ''
status: ''
strategy: '1'
user: zuul
userlabels:
archive: 'false'
archive-timeout: '60'
crunchy-pgbadger: 'false'
crunchy_collect: 'false'
pgo-backrest: 'false'
pgo-version: 3.5.2
usersecretname: "{{ pg_cluster_name }}-zuul-secret"


@@ -1,14 +0,0 @@
- name: Zookeeper Deployment
k8s:
definition:
apiVersion: "{{ zk_api_version }}"
kind: "ZookeeperCluster"
metadata:
name: "{{ zk_cluster_name }}"
namespace: "{{ namespace }}"
labels:
app: "{{ zuul_app_name }}"
zuul_cluster: "{{ zuul_cluster_name }}"
spec:
size: 1
version: "3.5.3-beta"


@@ -1,42 +0,0 @@
---
- set_fact:
label_selector_value: "zuul_cluster={{ zuul_cluster_name }},app={{ zuul_app_name }}"
sched_selector_value: "zuul_cluster={{ zuul_cluster_name }},app={{ zuul_cluster_name }}-scheduler"
pg_user_query: "[?metadata.name=='{{ pg_cluster_name }}-zuul-secret'].data"
ssh_key_query: "[?metadata.name=='{{ sshsecretname }}'].data"
- name: lookup k8s secrets
set_fact:
secrets_lookup: "{{ lookup('k8s', api_version='v1', kind='Secret', namespace=namespace, label_selector=label_selector_value) }}"
- name: lookup cluster secret
set_fact:
zuul_pg_user: "{{ secrets_lookup | json_query(pg_user_query) }}"
zuul_ssh_key: "{{ lookup('k8s', api_version='v1', kind='Secret', namespace=namespace, resource_name=sshsecretname) }}"
zuul_clouds_secret: "{{ lookup('k8s', api_version='v1', kind='Secret', namespace=namespace, resource_name=cloudssecretname) }}"
zuul_kube_secret: "{{ lookup('k8s', api_version='v1', kind='Secret', namespace=namespace, resource_name=kubesecretname) }}"
- name: lookup k8s postgres cr
set_fact:
pg_cr_lookup: "{{ lookup('k8s', api_version=pg_api_version, kind=pg_cr_kind, namespace=namespace, resource_name=pg_cluster_name) }}"
- name: lookup k8s zookeeper cr
set_fact:
zk_cr_lookup: "{{ lookup('k8s', api_version=zk_api_version, kind='ZookeeperCluster', namespace=namespace, resource_name=zk_cluster_name) }}"
- name: lookup scheduler pod
set_fact:
zuul_scheduler_pod: "{{ lookup('k8s', api_version='v1', kind='Pod', namespace=namespace, label_selector=sched_selector_value) }}"
- name: lookup k8s Zuul deployment
set_fact:
zuul_deployment_lookup: "{{ lookup('k8s', api_version='extensions/v1beta1', kind='Deployment', namespace=namespace, resource_name=zuul_cluster_name) }}"
- name: get currently deployed Zuul image name
set_fact:
current_deployed_image: "{{ zuul_deployment_lookup.spec.template.spec.containers['name'=='zuul'].image }}"
when: zuul_deployment_lookup.spec is defined
- debug:
msg: "Zuul Version has CHANGED to '{{ zuul_version }}' - Currently at {{ current_deployed_image }}"
when: (current_deployed_image is defined) and (current_deployed_image != zuul_image_name)


@@ -1,19 +0,0 @@
---
- hosts: localhost
gather_facts: no
tasks:
- debug: msg="Running Zuul Operator Playbook"
- name: Show the env
command: env
- import_role:
name: get_status
- import_role:
name: create_config
- import_role:
name: deploy_pg
when: (pg_cr_lookup|length==0)
- import_role:
name: deploy_zk
when: (zk_cr_lookup|length==0)
- import_role:
name: deploy


@@ -36,9 +36,4 @@ RUN echo 'let Prelude = ~/conf/Prelude.dhall let Kubernetes = ~/conf/Kubernetes.
 # Copy ansible operator requirements
 COPY watches.yaml ${HOME}/watches.yaml
+COPY roles ${HOME}/roles
-COPY ansible/zuul.yaml ${HOME}/zuul.yaml
-COPY ansible/group_vars/ ${HOME}/group_vars/
-COPY ansible/roles/ ${HOME}/roles/
-COPY build/uid_entrypoint.sh /uid_entrypoint


@@ -1,8 +0,0 @@
#!/bin/sh
if ! whoami &> /dev/null; then
if [ -w /etc/passwd ]; then
echo "${USER_NAME:-default}:x:$(id -u):0:${USER_NAME:-default} user:${HOME}:/sbin/nologin" >> /etc/passwd
fi
fi
exec "$@"

conf/zuul/input.dhall

@@ -0,0 +1,137 @@
{- Zuul CR spec as a dhall schemas
> Note: in dhall, a record with such structure:
> { Type = { foo : Text }, default = { foo = "bar" }}
> is named a `schemas` and it can be used to set default value:
> https://docs.dhall-lang.org/references/Built-in-types.html#id133
The `Schemas` record contains schemas for the CR spec attributes.
The `Input` record is the Zuul CR spec schema.
-}
let UserSecret = { secretName : Text, key : Optional Text }
let Gerrit =
{ name : Text
, server : Optional Text
, user : Text
, baseurl : Text
, sshkey : UserSecret
}
let GitHub = { name : Text, app_id : Natural, app_key : UserSecret }
let Mqtt =
{ name : Text
, server : Text
, user : Optional Text
, password : Optional UserSecret
}
let Git = { name : Text, baseurl : Text }
let Schemas =
{ Merger =
{ Type =
{ image : Optional Text
, count : Optional Natural
, git_user_email : Optional Text
, git_user_name : Optional Text
}
, default =
{ image = None Text
, count = None Natural
, git_user_email = None Text
, git_user_name = None Text
}
}
, Executor =
{ Type =
{ image : Optional Text
, count : Optional Natural
, ssh_key : UserSecret
}
, default = { image = None Text, count = None Natural }
}
, Web =
{ Type =
{ image : Optional Text
, count : Optional Natural
, status_url : Optional Text
}
, default =
{ image = None Text
, count = None Natural
, status_url = None Text
}
}
, Scheduler =
{ Type =
{ image : Optional Text
, count : Optional Natural
, config : UserSecret
}
, default = { image = None Text, count = None Natural }
}
, Launcher =
{ Type = { image : Optional Text, config : UserSecret }
, default = { image = None Text }
}
, Connections =
{ Type =
{ gerrits : Optional (List Gerrit)
, githubs : Optional (List GitHub)
, mqtts : Optional (List Mqtt)
, gits : Optional (List Git)
}
, default =
{ gerrits = None (List Gerrit)
, githubs = None (List GitHub)
, mqtts = None (List Mqtt)
, gits = None (List Git)
}
}
, ExternalConfigs =
{ Type =
{ openstack : Optional UserSecret
, kubernetes : Optional UserSecret
, amazon : Optional UserSecret
}
, default =
{ openstack = None UserSecret
, kubernetes = None UserSecret
, amazon = None UserSecret
}
}
, UserSecret = { Type = UserSecret, default = { key = None Text } }
, Gerrit = { Type = Gerrit }
, GitHub = { Type = GitHub }
, Mqtt = { Type = Mqtt }
, Git = { Type = Git }
}
let Input =
{ Type =
{ name : Text
, merger : Schemas.Merger.Type
, executor : Schemas.Executor.Type
, web : Schemas.Web.Type
, scheduler : Schemas.Scheduler.Type
, launcher : Schemas.Launcher.Type
, database : Optional UserSecret
, zookeeper : Optional UserSecret
, external_config : Schemas.ExternalConfigs.Type
, connections : Schemas.Connections.Type
}
, default =
{ database = None UserSecret
, zookeeper = None UserSecret
, external_config = Schemas.ExternalConfigs.default
, merger = Schemas.Merger.default
, web = Schemas.Web.default
}
}
in Schemas // { Input = Input }
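As a quick illustration of how these schemas are meant to be consumed, the record-completion operator fills in every optional attribute, so a spec only has to provide the fields without defaults; a sketch run from the repository root (the secret names follow the sample CR and are purely illustrative):

```shell
dhall-to-yaml --omit-empty <<< '
  let Schemas = ./conf/zuul/input.dhall

  in  Schemas.Input::{
        name = "zuul",
        scheduler = Schemas.Scheduler::{ config = Schemas.UserSecret::{ secretName = "zuul-yaml-conf" } },
        executor = Schemas.Executor::{ ssh_key = Schemas.UserSecret::{ secretName = "executor-ssh-key" } },
        launcher = Schemas.Launcher::{ config = Schemas.UserSecret::{ secretName = "nodepool-yaml-conf" } },
        connections = Schemas.Connections.default
      }'
```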

conf/zuul/resources.dhall

@@ -0,0 +1,802 @@
{- Zuul CR kubernetes resources
The evaluation of that file is a function that takes the cr inputs as an argument,
and returns the list of kubernetes of objects
-}
let Prelude = ../Prelude.dhall
let Kubernetes = ../Kubernetes.dhall
let Schemas = ./input.dhall
let Input = Schemas.Input.Type
let UserSecret = Schemas.UserSecret.Type
let Label = { mapKey : Text, mapValue : Text }
let Labels = List Label
let EnvSecret = { name : Text, secret : Text, key : Text }
let File = { path : Text, content : Text }
let Volume =
{ Type = { name : Text, dir : Text, files : List File }
, default = { files = [] : List File }
}
let {- A high level description of a component such as the scheduler or the launcher
-} Component =
{ Type =
{ name : Text
, count : Natural
, container : Kubernetes.Container.Type
, data-dir : List Volume.Type
, volumes : List Volume.Type
, claim-size : Natural
}
, default =
{ data-dir = [] : List Volume.Type
, volumes = [] : List Volume.Type
, claim-size = 0
}
}
let {- The Kubernetes resources of a Component
-} KubernetesComponent =
{ Type =
{ Service : Optional Kubernetes.Service.Type
, Deployment : Optional Kubernetes.Deployment.Type
, StatefulSet : Optional Kubernetes.StatefulSet.Type
}
, default =
{ Service = None Kubernetes.Service.Type
, Deployment = None Kubernetes.Deployment.Type
, StatefulSet = None Kubernetes.StatefulSet.Type
}
}
let DefaultText =
\(value : Optional Text)
-> \(default : Text)
-> Optional/fold Text value Text (\(some : Text) -> some) default
let DefaultKey =
\(secret : Optional UserSecret)
-> \(default : Text)
-> Optional/fold
UserSecret
secret
Text
(\(some : UserSecret) -> DefaultText some.key default)
default
let newlineSep = Prelude.Text.concatSep "\n"
let {- This method renders the zuul.conf
-} mkZuulConf =
\(input : Input)
-> \(zk-hosts : Text)
-> \(default-db-password : Text)
-> let {- This is a high level method. It takes:
* a Connection type such as `Schemas.Gerrit.Type`,
* an Optional List of that type
* a function that goes from that type to a zuul.conf text blob
Then it returns a text blob for all the connections
-} mkConns =
\(type : Type)
-> \(list : Optional (List type))
-> \(f : type -> Text)
-> newlineSep
( Optional/fold
(List type)
list
(List Text)
(Prelude.List.map type Text f)
([] : List Text)
)
let merger-email =
DefaultText
input.merger.git_user_email
"${input.name}@localhost"
let merger-user = DefaultText input.merger.git_user_name "Zuul"
let executor-key-name =
DefaultText input.executor.ssh_key.key "id_rsa"
let sched-config = DefaultText input.scheduler.config.key "main.yaml"
let web-url = DefaultText input.web.status_url "http://web:9000"
let extra-kube-path = "/etc/nodepool-kubernetes/"
let db-uri =
Optional/fold
UserSecret
input.database
Text
(\(some : UserSecret) -> "%(ZUUL_DB_URI)")
"postgresql://zuul:${default-db-password}@db/zuul"
let gerrits-conf =
mkConns
Schemas.Gerrit.Type
input.connections.gerrits
( \(gerrit : Schemas.Gerrit.Type)
-> let key = DefaultText gerrit.sshkey.key "id_rsa"
let server = DefaultText gerrit.server gerrit.name
in ''
[connection ${gerrit.name}]
driver=gerrit
server=${server}
sshkey=/etc/zuul-gerrit-${gerrit.name}/${key}
user=${gerrit.user}
baseurl=${gerrit.baseurl}
''
)
let githubs-conf =
mkConns
Schemas.GitHub.Type
input.connections.githubs
( \(github : Schemas.GitHub.Type)
-> let key = DefaultText github.app_key.key "github_rsa"
in ''
[connection ${github.name}]
driver=github
server=github.com
app_id={github.app_id}
app_key=/etc/zuul-github-${github.name}/${key}
''
)
let gits-conf =
mkConns
Schemas.Git.Type
input.connections.gits
( \(git : Schemas.Git.Type)
-> ''
[connection ${git.name}]
driver=git
baseurl=${git.baseurl}
''
)
let mqtts-conf =
mkConns
Schemas.Mqtt.Type
input.connections.mqtts
( \(mqtt : Schemas.Mqtt.Type)
-> let user =
Optional/fold
Text
mqtt.user
Text
(\(some : Text) -> "user=${some}")
""
let password =
Optional/fold
UserSecret
mqtt.password
Text
( \(password : UserSecret)
-> "password=%(ZUUL_MQTT_PASSWORD)"
)
""
in ''
[connection ${mqtt.name}]
driver=mqtt
server=${mqtt.server}
${user}
${password}
''
)
in ''
[gearman]
server=scheduler
[gearman_server]
start=true
[zookeeper]
hosts=${zk-hosts}
[merger]
git_user_email=${merger-email}
git_user_name=${merger-user}
[scheduler]
tenant_config=/etc/zuul-scheduler/${sched-config}
[web]
listen_address=0.0.0.0
root=${web-url}
[executor]
private_key_file=/etc/zuul-executor/${executor-key-name}
manage_ansible=false
[connection "sql"]
driver=sql
dburi=${db-uri}
''
++ gits-conf
++ gerrits-conf
++ githubs-conf
++ mqtts-conf
in \(input : Input)
-> let app-labels =
[ { mapKey = "app.kubernetes.io/name", mapValue = input.name }
, { mapKey = "app.kubernetes.io/instance", mapValue = input.name }
, { mapKey = "app.kubernetes.io/part-of", mapValue = "zuul" }
]
let component-label =
\(name : Text)
-> app-labels
# [ { mapKey = "app.kubernetes.io/component"
, mapValue = name
}
]
let mkObjectMeta =
\(name : Text)
-> \(labels : Labels)
-> Kubernetes.ObjectMeta::{ name = name, labels = labels }
let mkSelector =
\(labels : Labels)
-> Kubernetes.LabelSelector::{ matchLabels = labels }
let mkService =
\(name : Text)
-> \(port-name : Text)
-> \(port : Natural)
-> let labels = component-label name
in Kubernetes.Service::{
, metadata = mkObjectMeta name labels
, spec = Some Kubernetes.ServiceSpec::{
, type = Some "ClusterIP"
, selector = labels
, ports =
[ Kubernetes.ServicePort::{
, name = Some port-name
, protocol = Some "TCP"
, targetPort = Some
(Kubernetes.IntOrString.String port-name)
, port = port
}
]
}
}
let mkVolumeEmptyDir =
Prelude.List.map
Volume.Type
Kubernetes.Volume.Type
( \(volume : Volume.Type)
-> Kubernetes.Volume::{
, name = volume.name
, emptyDir = Kubernetes.EmptyDirVolumeSource::{
, medium = Some ""
}
}
)
let mkVolumeSecret =
Prelude.List.map
Volume.Type
Kubernetes.Volume.Type
( \(volume : Volume.Type)
-> Kubernetes.Volume::{
, name = volume.name
, secret = Some Kubernetes.SecretVolumeSource::{
, secretName = Some volume.name
}
}
)
let mkPodTemplateSpec =
\(component : Component.Type)
-> \(labels : Labels)
-> Kubernetes.PodTemplateSpec::{
, metadata = mkObjectMeta component.name labels
, spec = Some Kubernetes.PodSpec::{
, volumes =
mkVolumeSecret component.volumes
# mkVolumeEmptyDir component.data-dir
, containers = [ component.container ]
, automountServiceAccountToken = Some False
}
}
let mkStatefulSet =
\(component : Component.Type)
-> let labels = component-label component.name
let component-name = input.name ++ "-" ++ component.name
let claim =
if Natural/isZero component.claim-size
then [] : List Kubernetes.PersistentVolumeClaim.Type
else [ Kubernetes.PersistentVolumeClaim::{
, apiVersion = ""
, kind = ""
, metadata =
mkObjectMeta component-name ([] : Labels)
, spec = Some Kubernetes.PersistentVolumeClaimSpec::{
, accessModes = [ "ReadWriteOnce" ]
, resources = Some Kubernetes.ResourceRequirements::{
, requests =
toMap
{ storage =
Natural/show
component.claim-size
++ "Gi"
}
}
}
}
]
in Kubernetes.StatefulSet::{
, metadata = mkObjectMeta component-name labels
, spec = Some Kubernetes.StatefulSetSpec::{
, serviceName = component.name
, replicas = Some component.count
, selector = mkSelector labels
, template = mkPodTemplateSpec component labels
, volumeClaimTemplates = claim
}
}
let mkDeployment =
\(component : Component.Type)
-> let labels = component-label component.name
let component-name = input.name ++ "-" ++ component.name
in Kubernetes.Deployment::{
, metadata = mkObjectMeta component-name labels
, spec = Some Kubernetes.DeploymentSpec::{
, replicas = Some component.count
, selector = mkSelector labels
, template = mkPodTemplateSpec component labels
}
}
let mkEnvVarValue =
Prelude.List.map
Label
Kubernetes.EnvVar.Type
( \(env : Label)
-> Kubernetes.EnvVar::{
, name = env.mapKey
, value = Some env.mapValue
}
)
let mkEnvVarSecret =
Prelude.List.map
EnvSecret
Kubernetes.EnvVar.Type
( \(env : EnvSecret)
-> Kubernetes.EnvVar::{
, name = env.name
, valueFrom = Kubernetes.EnvVarSource::{
, secretKeyRef = Some Kubernetes.SecretKeySelector::{
, key = env.key
, name = Some env.secret
}
}
}
)
let mkVolumeMount =
Prelude.List.map
Volume.Type
Kubernetes.VolumeMount.Type
( \(volume : Volume.Type)
-> Kubernetes.VolumeMount::{
, name = volume.name
, mountPath = volume.dir
}
)
let mkSecret =
\(volume : Volume.Type)
-> Kubernetes.Resource.Secret
Kubernetes.Secret::{
, metadata = Kubernetes.ObjectMeta::{ name = volume.name }
, stringData =
Prelude.List.map
File
{ mapKey : Text, mapValue : Text }
( \(config : File)
-> { mapKey = config.path
, mapValue = config.content
}
)
volume.files
}
let zk-hosts =
Optional/fold
UserSecret
input.zookeeper
Text
(\(some : UserSecret) -> "%(ZUUL_ZK_HOSTS)")
"zk"
let zk-hosts-secret-env =
Optional/fold
UserSecret
input.zookeeper
(List Kubernetes.EnvVar.Type)
( \(some : UserSecret)
-> mkEnvVarSecret
[ { name = "ZUUL_ZK_HOSTS"
, secret = some.secretName
, key = DefaultText some.key "hosts"
}
]
)
([] : List Kubernetes.EnvVar.Type)
let org = "docker.io/zuul"
let version = "latest"
let image = \(name : Text) -> "${org}/${name}:${version}"
let etc-nodepool =
Volume::{
, name = input.name ++ "-secret-nodepool"
, dir = "/etc/nodepool"
, files =
[ { path = "nodepool.yaml"
, content =
''
zookeeper-servers:
- host: ${zk-hosts}
port: 2181
webapp:
port: 5000
''
}
]
}
let {- TODO: generate random password -} default-db-password =
"super-secret"
let etc-zuul =
Volume::{
, name = input.name ++ "-secret-zuul"
, dir = "/etc/zuul"
, files =
[ { path = "zuul.conf"
, content = mkZuulConf input zk-hosts default-db-password
}
]
}
let Components =
{ Backend =
let db-volumes =
[ Volume::{ name = "pg-data", dir = "/var/lib/pg/" } ]
let zk-volumes =
[ Volume::{
, name = "zk-log"
, dir = "/var/log/zookeeper/"
}
, Volume::{
, name = "zk-dat"
, dir = "/var/lib/zookeeper/"
}
]
in { Database =
Optional/fold
UserSecret
input.database
KubernetesComponent.Type
( \(some : UserSecret)
-> KubernetesComponent.default
)
KubernetesComponent::{
, Service = Some (mkService "db" "pg" 5432)
, StatefulSet = Some
( mkStatefulSet
Component::{
, name = "db"
, count = 1
, data-dir = db-volumes
, claim-size = 1
, container = Kubernetes.Container::{
, name = "db"
, image = Some
"docker.io/library/postgres:12.1"
, imagePullPolicy = Some "IfNotPresent"
, ports =
[ Kubernetes.ContainerPort::{
, name = Some "pg"
, containerPort = 5432
}
]
, env =
mkEnvVarValue
( toMap
{ POSTGRES_USER = "zuul"
, POSTGRES_PASSWORD =
default-db-password
, PGDATA = "/var/lib/pg/data"
}
)
, volumeMounts = mkVolumeMount db-volumes
}
}
)
}
, ZooKeeper =
Optional/fold
UserSecret
input.zookeeper
KubernetesComponent.Type
( \(some : UserSecret)
-> KubernetesComponent.default
)
KubernetesComponent::{
, Service = Some (mkService "zk" "zk" 2181)
, StatefulSet = Some
( mkStatefulSet
Component::{
, name = "zk"
, count = 1
, data-dir = zk-volumes
, claim-size = 1
, container = Kubernetes.Container::{
, name = "zk"
, image = Some
"docker.io/library/zookeeper"
, imagePullPolicy = Some "IfNotPresent"
, ports =
[ Kubernetes.ContainerPort::{
, name = Some "zk"
, containerPort = 2181
}
]
, volumeMounts = mkVolumeMount zk-volumes
}
}
)
}
}
, Zuul =
let zuul-image =
\(name : Text) -> Some (image ("zuul-" ++ name))
let zuul-env =
mkEnvVarValue (toMap { HOME = "/var/lib/zuul" })
let db-uri-secret-env =
Optional/fold
UserSecret
input.database
(List Kubernetes.EnvVar.Type)
( \(some : UserSecret)
-> mkEnvVarSecret
[ { name = "ZUUL_DB_URI"
, secret = some.secretName
, key = DefaultText some.key "db_uri"
}
]
)
([] : List Kubernetes.EnvVar.Type)
let zuul-data-dir =
[ Volume::{ name = "zuul-data", dir = "/var/lib/zuul" }
]
let sched-config =
Volume::{
, name = input.scheduler.config.secretName
, dir = "/etc/zuul-scheduler"
}
let executor-ssh-key =
Volume::{
, name = input.executor.ssh_key.secretName
, dir = "/etc/zuul-executor"
}
let conn-keys = [] : List Volume.Type
let web-volumes = [ etc-zuul ]
let merger-volumes = [ etc-zuul ]
let scheduler-volumes = [ etc-zuul, sched-config ] # conn-keys
let executor-volumes =
[ etc-zuul, executor-ssh-key ] # conn-keys
in { Scheduler = KubernetesComponent::{
, Service = Some (mkService "scheduler" "gearman" 4730)
, StatefulSet = Some
( mkStatefulSet
Component::{
, name = "scheduler"
, count = 1
, data-dir = zuul-data-dir
, volumes = scheduler-volumes
, claim-size = 5
, container = Kubernetes.Container::{
, name = "scheduler"
, image = zuul-image "scheduler"
, args = [ "zuul-scheduler", "-d" ]
, imagePullPolicy = Some "IfNotPresent"
, ports =
[ Kubernetes.ContainerPort::{
, name = Some "gearman"
, containerPort = 4730
}
]
, env =
zuul-env
# db-uri-secret-env
# zk-hosts-secret-env
, volumeMounts =
mkVolumeMount
(scheduler-volumes # zuul-data-dir)
}
}
)
}
, Executor = KubernetesComponent::{
, Service = Some (mkService "executor" "finger" 7900)
, StatefulSet = Some
( mkStatefulSet
Component::{
, name = "executor"
, count = 1
, data-dir = zuul-data-dir
, volumes = executor-volumes
, claim-size = 0
, container = Kubernetes.Container::{
, name = "executor"
, image = zuul-image "executor"
, args = [ "zuul-executor", "-d" ]
, imagePullPolicy = Some "IfNotPresent"
, ports =
[ Kubernetes.ContainerPort::{
, name = Some "finger"
, containerPort = 7900
}
]
, env = zuul-env
, volumeMounts =
mkVolumeMount
(executor-volumes # zuul-data-dir)
, securityContext = Some Kubernetes.SecurityContext::{
, privileged = Some True
}
}
}
)
}
, Web = KubernetesComponent::{
, Service = Some (mkService "web" "api" 9000)
, Deployment = Some
( mkDeployment
Component::{
, name = "web"
, count = 1
, data-dir = zuul-data-dir
, volumes = web-volumes
, container = Kubernetes.Container::{
, name = "web"
, image = zuul-image "web"
, args = [ "zuul-web", "-d" ]
, imagePullPolicy = Some "IfNotPresent"
, ports =
[ Kubernetes.ContainerPort::{
, name = Some "api"
, containerPort = 9000
}
]
, env = zuul-env
, volumeMounts =
mkVolumeMount
(web-volumes # zuul-data-dir)
}
}
)
}
, Merger = KubernetesComponent::{
, Deployment = Some
( mkDeployment
Component::{
, name = "merger"
, count = 1
, data-dir = zuul-data-dir
, volumes = merger-volumes
, container = Kubernetes.Container::{
, name = "merger"
, image = zuul-image "merger"
, args = [ "zuul-merger", "-d" ]
, imagePullPolicy = Some "IfNotPresent"
, env = zuul-env
, volumeMounts =
mkVolumeMount
(merger-volumes # zuul-data-dir)
}
}
)
}
}
}
let {- This function transforms the different types into the Kubernetes.Resource
union to enable using them inside a single List array
-} mkUnion =
\(component : KubernetesComponent.Type)
-> let empty = [] : List Kubernetes.Resource
in Optional/fold
Kubernetes.Service.Type
component.Service
(List Kubernetes.Resource)
( \(some : Kubernetes.Service.Type)
-> [ Kubernetes.Resource.Service some ]
)
empty
# Optional/fold
Kubernetes.StatefulSet.Type
component.StatefulSet
(List Kubernetes.Resource)
( \(some : Kubernetes.StatefulSet.Type)
-> [ Kubernetes.Resource.StatefulSet some ]
)
empty
# Optional/fold
Kubernetes.Deployment.Type
component.Deployment
(List Kubernetes.Resource)
( \(some : Kubernetes.Deployment.Type)
-> [ Kubernetes.Resource.Deployment some ]
)
empty
in { Components = Components
, List =
{ apiVersion = "v1"
, kind = "List"
, items =
[ mkSecret etc-zuul, mkSecret etc-nodepool ]
# mkUnion Components.Backend.Database
# mkUnion Components.Backend.ZooKeeper
# mkUnion Components.Zuul.Scheduler
# mkUnion Components.Zuul.Executor
# mkUnion Components.Zuul.Web
# mkUnion Components.Zuul.Merger
}
}
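As the comment in playbooks/files/cr_spec.yaml below also shows, individual components returned by this function can be rendered on their own, which is convenient when debugging a single piece of the deployment (a sketch, run from the repository root):

```shell
INPUT=$(yaml-to-dhall "(./conf/zuul/input.dhall).Input.Type" < playbooks/files/cr_spec.yaml)
# Render only the scheduler Service and StatefulSet instead of the whole List.
dhall-to-yaml --omit-empty --explain <<< "(./conf/zuul/resources.dhall ($INPUT)).Components.Zuul.Scheduler"
```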


@@ -1,18 +1,26 @@
 apiVersion: operator.zuul-ci.org/v1alpha1
 kind: Zuul
 metadata:
-  name: example-zuul
+  name: zuul
 spec:
-  merger:
-    min: 0
-    max: 10
   executor:
-    min: 1
-    max: 5
-  web:
-    min: 1
-  connections: []
-  tenants:
-    - tenant:
-        name: demo
-        source: {}
+    count: 1
+    ssh_key:
+      secretName: executor-ssh-key
+  merger:
+    count: 1
+  scheduler:
+    config:
+      secretName: zuul-yaml-conf
+  launcher:
+    config:
+      secretName: nodepool-yaml-conf
+  connections:
+    gits:
+      - baseurl: https://opendev.org
+        name: opendev.org
+  external_config:
+    kubernetes:
+      secretName: nodepool-kube-config
+      key: kube.config


@@ -16,18 +16,18 @@ spec:
       containers:
         - name: ansible
           command:
-          - /uid_entrypoint
           - /usr/local/bin/ao-logs
           - /tmp/ansible-operator/runner
           - stdout
-          # TODO: use a public name
-          image: "zuul/zuul-operator"
+          image: "docker.io/zuul/zuul-operator"
+          imagePullPolicy: "IfNotPresent"
           volumeMounts:
           - mountPath: /tmp/ansible-operator/runner
             name: runner
             readOnly: true
         - name: operator
-          image: "zuul/zuul-operator"
+          image: "docker.io/zuul/zuul-operator"
+          imagePullPolicy: "IfNotPresent"
           volumeMounts:
           - mountPath: /tmp/ansible-operator/runner
             name: runner


@@ -8,7 +8,6 @@ metadata:
 apiVersion: rbac.authorization.k8s.io/v1
 kind: Role
 metadata:
-  creationTimestamp: null
   name: zuul-operator
 rules:
 - apiGroups:
@@ -16,29 +15,36 @@ rules:
   resources:
   - pods
   - services
+  - services/finalizers
   - endpoints
   - persistentvolumeclaims
   - events
   - configmaps
   - secrets
+  - ingresses
   verbs:
-  - '*'
-- apiGroups:
-  - ""
-  resources:
-  - namespaces
-  verbs:
+  - create
+  - delete
   - get
+  - list
+  - patch
+  - update
+  - watch
 - apiGroups:
   - apps
+  - extensions
   resources:
   - deployments
   - daemonsets
   - replicasets
   - statefulsets
   verbs:
-  - '*'
+  - create
+  - delete
+  - get
+  - list
+  - patch
+  - update
+  - watch
 - apiGroups:
   - monitoring.coreos.com
   resources:
@@ -54,19 +60,31 @@ rules:
   - deployments/finalizers
   verbs:
   - update
+- apiGroups:
+  - ""
+  resources:
+  - pods
+  verbs:
+  - get
+- apiGroups:
+  - apps
+  resources:
+  - replicasets
+  - deployments
+  verbs:
+  - get
 - apiGroups:
   - operator.zuul-ci.org
   resources:
   - '*'
   verbs:
-  - '*'
-- apiGroups:
-  - cr.client-go.k8s.io
-  resources:
-  - '*'
-  verbs:
-  - '*'
+  - create
+  - delete
+  - get
+  - list
+  - patch
+  - update
+  - watch
 ---
@@ -81,15 +99,3 @@ roleRef:
   kind: Role
   name: zuul-operator
   apiGroup: rbac.authorization.k8s.io
----
-kind: RoleBinding
-apiVersion: rbac.authorization.k8s.io/v1
-metadata:
-  name: zuul-operator-zookeeper
-subjects:
-- kind: ServiceAccount
-  name: zuul-operator
-roleRef:
-  kind: Role
-  name: zookeeper-operator
-  apiGroup: rbac.authorization.k8s.io


@@ -0,0 +1,30 @@
# Render kubernetes resources using:
# INPUT=$(yaml-to-dhall "(./conf/zuul/input.dhall).Input.Type" < playbooks/files/cr_spec.yaml)
# dhall-to-yaml --omit-empty --explain <<< "(./conf/zuul/resources.dhall ($INPUT)).Components.Zuul.Scheduler"
# Or
# dhall-to-yaml --omit-empty --explain <<< "(./conf/zuul/resources.dhall ($INPUT)).List"
executor:
count: 1
ssh_key:
secretName: executor-ssh-key
merger:
count: 1
scheduler:
config:
secretName: zuul-yaml-conf
launcher:
config:
secretName: nodepool-yaml-conf
connections:
gits:
- baseurl: https://opendev.org
name: opendev.org
external_config:
kubernetes:
secretName: nodepool-kube-config
key: kube.config
# extra
name: zuul
web: {}


@@ -0,0 +1,76 @@
# Run operator role locally, without the operator-framework using:
# ansible-playbook playbooks/files/local.yaml
# Add '-e k8s_state=absent' to remove resources
- hosts: localhost
gather_facts: no
vars:
zuul_app_path: ../../conf/zuul
meta:
name: zuul
namespace: default
spec:
executor:
count: 1
ssh_key:
secretName: executor-ssh-key
merger:
count: 1
scheduler:
config:
secretName: zuul-yaml-conf
launcher:
config:
secretName: nodepool-yaml-conf
connections:
gits:
- baseurl: https://opendev.org
name: opendev.org
pre_tasks:
- name: "Create necessary secrets"
k8s:
namespace: "{{ meta.namespace }}"
definition:
apiVersion: v1
kind: Secret
metadata:
name: "{{ item.name }}"
stringData:
id_rsa: "{{ item.content }}"
main.yaml: "{{ item.content }}"
nodepool.yaml: "{{ item.content }}"
loop:
- name: executor-ssh-key
file: id_rsa
content: "{{ lookup('file', '~/.ssh/id_rsa') }}"
- name: zuul-yaml-conf
file: main.yaml
content: |
- tenant:
name: local
source:
opendev.org:
config-projects:
- zuul/zuul-base-jobs
untrusted-projects:
- zuul/zuul-jobs
- name: nodepool-yaml-conf
file: nodepool.yaml
content: |
labels:
- name: pod-centos
min-ready: 1
providers:
- name: kube-cluster
driver: openshiftpods
context: local
max-pods: 15
pools:
- name: default
labels:
- name: pod-centos
image: quay.io/software-factory/pod-centos-7
python-path: /bin/python2
roles:
- zuul

playbooks/files/roles (symbolic link)

@@ -0,0 +1 @@
../../roles/


@@ -1,15 +0,0 @@
- name: View Status of K8s Pods
hosts: all
tasks:
- name: Get pods
command: kubectl get pods
- name: Get logs from ansible container
command: kubectl logs -l name=zuul-operator -c ansible
- name: Get logs from operator container
command: kubectl logs -l name=zuul-operator -c operator
- name: Log all Events
command: kubectl get events


@@ -1,38 +0,0 @@
- name: install and start zuul operator
hosts: all
tasks:
- name: Set Operator SDK Release Version fact
set_fact:
RELEASE_VERSION: v0.8.1
- name: Setup CRD
command: kubectl create -f deploy/crds/zuul-ci_v1alpha1_zuul_crd.yaml
args:
chdir: "{{ zuul.projects['opendev.org/zuul/zuul-operator'].src_dir }}"
- name: Setup rbac
command: kubectl create -f deploy/rbac.yaml
args:
chdir: "{{ zuul.projects['opendev.org/zuul/zuul-operator'].src_dir }}"
- name: Setup Operator
command: kubectl create -f deploy/operator.yaml
args:
chdir: "{{ zuul.projects['opendev.org/zuul/zuul-operator'].src_dir }}"
- name: Deploy Zookeeper operator
command: kubectl create -f {{ item }}
args:
chdir: "{{ zuul.projects['github.com/pravega/zookeeper-operator'].src_dir }}/deploy"
loop:
- crds/zookeeper_v1beta1_zookeepercluster_crd.yaml
- default_ns/rbac.yaml
- default_ns/operator.yaml
- name: wait for pods to come up
command: kubectl get pods -o json
register: kubectl_get_pods
until: kubectl_get_pods.stdout|from_json|json_query('items[*].status.phase')|unique == ["Running"]
retries: 30
delay: 10


@@ -0,0 +1,21 @@
- hosts: all
roles:
- collect-container-logs
post_tasks:
- name: Describe resources
command: "kubectl describe {{ item }}"
loop:
- pods
- deployments
- statefulsets
- services
- secrets
- configmaps
- name: Grab scheduler logs
command: "kubectl logs statefulset/zuul-scheduler"
ignore_errors: yes
- name: Grab executor logs
command: "kubectl logs statefulset/zuul-executor"
ignore_errors: yes


@@ -1,10 +1,14 @@
 - name: start kubernetes and install all dependencies
   hosts: all
+  pre_tasks:
+    - name: Install openshift client for k8s tasks
+      command: python3 -m pip install --user openshift
   roles:
-    - role: bindep
     - role: clear-firewall
     - role: install-kubernetes
+      vars:
+        minikube_dns_resolvers:
+          - '1.1.1.1'
+          - '8.8.8.8'
     - role: use-buildset-registry
       buildset_registry_docker_user: root
-      docker_version: 18.06.1~ce~3-0~ubuntu


@@ -0,0 +1,115 @@
- name: install and start zuul operator
hosts: all
tasks:
- name: Setup CRD
command: make install
args:
chdir: "{{ zuul.projects['opendev.org/zuul/zuul-operator'].src_dir }}"
- name: Generate executor ssh key
command: ssh-keygen -t rsa -m PEM -N '' -f 'id_rsa' -q -C 'zuul-executor'
args:
creates: id_rsa
# Note: Using lookup(file) is somehow failing with 'NoneType' object has no attribute 'startswith'
- name: Read generated ssh key
command: cat id_rsa
register: _ssh_key
- name: Setup user provided secrets
k8s:
namespace: "{{ namespace }}"
definition:
apiVersion: v1
kind: Secret
metadata:
name: "{{ item.name }}"
stringData: "{{ item.data }}"
loop:
- name: executor-ssh-key
data:
id_rsa: "{{ _ssh_key.stdout }}"
- name: zuul-yaml-conf
data:
main.yaml: |
- tenant:
name: local
source:
opendev.org:
config-projects:
- zuul/zuul-base-jobs
untrusted-projects:
- zuul/zuul-jobs
- name: nodepool-yaml-conf
data:
nodepool.yaml: |
labels:
- name: pod-centos
min-ready: 1
providers:
- name: kube-cluster
driver: openshiftpods
context: local
max-pods: 15
pools:
- name: default
labels:
- name: pod-centos
image: quay.io/software-factory/pod-centos-7
python-path: /bin/python2
- name: nodepool-kube-config
data:
kube.config: |
apiVersion: v1
kind: Config
preferences: {}
clusters:
- cluster:
server: https://10.43.0.1:8043
insecure-skip-tls-verify: true
name: local
users:
- name: nodepool
user:
token: test-token
contexts:
- context:
cluster: local
user: nodepool
namespace: default
name: local
current-context: local
- name: Deploy CR
command: make deploy-cr
args:
chdir: "{{ zuul.projects['opendev.org/zuul/zuul-operator'].src_dir }}"
- name: Wait maximum 4 minutes for the scheduler pod
shell: |
for idx in $(seq 24); do
date;
for res in statefulsets deployments pods; do echo == $res ==; kubectl get $res; done
kubectl get pod zuul-scheduler-0 2> /dev/null && break || :
sleep 10;
done
- name: Wait 2 minutes for the scheduler pod to be ready
command: kubectl wait --for=condition=Ready --timeout=120s pod/zuul-scheduler-0
- name: Wait 4 minutes for scheduler to settle
command: kubectl logs pod/zuul-scheduler-0
register: _scheduler_log
until: "'Full reconfiguration complete' in _scheduler_log.stdout"
delay: 10
retries: 24
- name: Wait 2 minutes for the executor pod to be ready
command: kubectl wait --for=condition=Ready --timeout=120s pod/zuul-executor-0
- name: Wait an extra 2 minutes for the services to settle
pause:
minutes: 2
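Condensed into shell, the functional job above boils down to roughly the following steps (secret creation omitted, timeouts as in the tasks):

```shell
make install && make deploy-cr
kubectl wait --for=condition=Ready --timeout=120s pod/zuul-scheduler-0
kubectl logs pod/zuul-scheduler-0 | grep 'Full reconfiguration complete'
kubectl wait --for=condition=Ready --timeout=120s pod/zuul-executor-0
```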


@@ -0,0 +1,8 @@
zuul_name: "{{ meta.name | default('zuul') }}"
namespace: "{{ meta.namespace | default('default') }}"
state: "{{ k8s_state | default('present') }}"
zuul_app_path: "/opt/ansible/conf/zuul"
# Here we use zuul_spec to get un-modified cr
# see: https://github.com/operator-framework/operator-sdk/issues/1770
raw_spec: "{{ vars['_operator_zuul-ci_org_zuul_spec'] | default(spec) }}"


@@ -0,0 +1,61 @@
#!/usr/bin/env python3
# Copyright 2020 Red Hat, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import argparse
import json
import subprocess
import sys
from typing import Any
from ansible.module_utils.basic import AnsibleModule # type: ignore
def run(expression: str) -> Any:
proc = subprocess.Popen(
['dhall-to-json', '--omit-empty', '--explain'], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = proc.communicate(expression.encode('utf-8'))
if stderr:
return dict(failed=True, msg=stderr.decode('utf-8'))
result = dict(result=json.loads(stdout.decode('utf-8')))
result['changed'] = True
return result
def ansible_main():
module = AnsibleModule(
argument_spec=dict(
expression=dict(required=True, type='str'),
)
)
p = module.params
result = run(p['expression'])
if result.get('failed'):
module.fail_json(msg="Dhall expression failed:" + result['msg'])
module.exit_json(**result)
def cli_main():
parser = argparse.ArgumentParser()
parser.add_argument('expression')
args = parser.parse_args()
print(run(args.expression))
if __name__ == '__main__':
if sys.stdin.isatty():
cli_main()
else:
ansible_main()
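When started from a terminal the module doubles as a small CLI (a tty on stdin selects cli_main), which is handy for checking an expression outside of Ansible; the library path is illustrative since the diff does not show where the file lives:

```shell
# Evaluate a dhall expression with dhall-to-json and print the result dict.
python3 roles/zuul/library/dhall_to_json.py '{ zuul = { scheduler = { count = 1 } } }'
```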


@@ -0,0 +1,65 @@
#!/usr/bin/env python3
# Copyright 2020 Red Hat, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import argparse
import subprocess
import sys
from typing import List
from ansible.module_utils.basic import AnsibleModule # type: ignore
def pread(args: List[str], stdin: str) -> str:
proc = subprocess.Popen(args, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = proc.communicate(stdin.encode('utf-8'))
if stderr:
raise RuntimeError("Command failed: " + stderr.decode('utf-8'))
return stdout.decode('utf-8')
def run(schema: str, json_input: str) -> str:
return pread(['json-to-dhall', '--plain', schema], json_input)
def ansible_main():
module = AnsibleModule(
argument_spec=dict(
schema=dict(required=True, type='str'),
json=dict(required=True, type='str'),
)
)
p = module.params
try:
module.exit_json(changed=True, result=run(p['schema'], p['json']))
except Exception as e:
module.fail_json(msg="Dhall expression failed:" + str(e))
def cli_main():
parser = argparse.ArgumentParser()
parser.add_argument('schema')
parser.add_argument('--json')
parser.add_argument('--file')
args = parser.parse_args()
if args.file:
import yaml, json
args.json = json.dumps(yaml.safe_load(open(args.file)))
print(run(args.schema, args.json))
if __name__ == '__main__':
if sys.stdin.isatty():
cli_main()
else:
ansible_main()
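Likewise, this module can be run from a terminal to verify that a spec matches the dhall schema before the operator sees it; --file accepts YAML and converts it to JSON internally (the library path is again illustrative):

```shell
python3 roles/zuul/library/json_to_dhall.py \
    '(./conf/zuul/input.dhall).Input.Type' --file playbooks/files/cr_spec.yaml
```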


@@ -0,0 +1,32 @@
# TODO: Generate tls cert secret
- name: Convert spec to template input
json_to_dhall:
schema: "({{ zuul_app_path }}/input.dhall).Input.Type"
json: "{{ rspec | to_json }}"
vars:
rspec:
name: "{{ zuul_name }}"
merger: "{{ raw_spec['merger'] | default({}) }}"
executor: "{{ raw_spec['executor'] | default({}) }}"
web: "{{ raw_spec['web'] | default({}) }}"
scheduler: "{{ raw_spec['scheduler'] | default({}) }}"
launcher: "{{ raw_spec['launcher'] | default({}) }}"
external_config: "{{ raw_spec['external_config'] | default({}) }}"
connections: "{{ raw_spec['connections'] | default({}) }}"
register: _cr_input
- name: Convert expression to kubernetes objects
dhall_to_json:
expression: "{{ zuul_app_path }}/resources.dhall {{ _cr_input.result }}"
register: _json
- name: Apply objects
k8s:
state: "{{ state }}"
namespace: "{{ namespace }}"
definition: "{{ item }}"
apply: yes
loop: "{{ _json.result['List']['items'] }}"
# TODO: Patch services when their configuration changed


@@ -2,4 +2,4 @@
 - version: v1alpha1
   group: operator.zuul-ci.org
   kind: Zuul
-  playbook: /opt/ansible/zuul.yaml
+  role: /opt/ansible/roles/zuul