Replace existing operator tasks with the new application

This change glues the previous changes together to implement the operator.

Change-Id: Iea51bccf90def6e827d2c5846ad6a7e4c86a5bc1
Tristan Cacqueray 2020-01-11 18:56:14 +00:00
parent 66a14fa01d
commit d5ed840bb4
32 changed files with 389 additions and 958 deletions

View File

@@ -5,10 +5,10 @@
pre-run: playbooks/zuul-operator-functional-k8s/pre.yaml
run: playbooks/zuul-operator-functional-k8s/run.yaml
post-run: playbooks/zuul-operator-functional-k8s/post.yaml
nodeset: ubuntu-xenial
nodeset: ubuntu-bionic
required-projects:
- github.com/pravega/zookeeper-operator
- zuul/zuul-operator
requires: docker-image
- job:
description: |
@@ -27,10 +27,10 @@
check:
jobs:
- zuul-operator-build-image
# - zuul-operator-functional-k8s:
# dependencies: zuul-operator-build-image
# gate:
# jobs:
# - zuul-operator-build-image
# - zuul-operator-functional-k8s:
# dependencies: zuul-operator-build-image
- zuul-operator-functional-k8s:
dependencies: zuul-operator-build-image
gate:
jobs:
- zuul-operator-build-image
- zuul-operator-functional-k8s:
dependencies: zuul-operator-build-image

View File

@@ -1,6 +1,12 @@
build:
podman build -f build/Dockerfile -t docker.io/zuul/zuul-operator .
install:
kubectl apply -f deploy/crds/zuul-ci_v1alpha1_zuul_crd.yaml -f deploy/rbac.yaml -f deploy/operator.yaml
deploy-cr:
kubectl apply -f deploy/crds/zuul-ci_v1alpha1_zuul_cr.yaml
# Generate demo deployments
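Taken together, the targets above give a quick local iteration loop. A minimal sketch, assuming podman, a reachable cluster, and a configured kubeconfig (the README below refers to the build step as `make build-image`):

```shell
# Hedged local workflow built from the Makefile targets shown above
make build            # podman build of docker.io/zuul/zuul-operator
make install          # apply the CRD, RBAC and operator deployment
make deploy-cr        # apply the demo Zuul custom resource
kubectl get pods -w   # watch the operator and Zuul pods come up
```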

112
README.md
View File

@@ -1,128 +1,44 @@
A Zuul Operator PoC
=======
Zuul Operator
=============
## Requirements:
* [OKD](https://github.com/openshift/origin/releases/download/v3.11.0/openshift-origin-client-tools-v3.11.0-0cbc58b-linux-64bit.tar.gz)
* [SDK](https://github.com/operator-framework/operator-sdk#quick-start)
* [Zookeeper Operator](https://github.com/pravega/zookeeper-operator#install-the-operator)
* [Postgresql Operator](https://operatorhub.io/operator/alpha/postgres-operator.v3.5.0)
## Prepare cluster
## Build the image
```shell
sudo -i oc cluster up
sudo chown root:fedora /var/run/docker.sock
oc login -u developer -p dev
docker login -u developer -p $(oc whoami -t) $(oc registry info)
# Log in as admin to install the CRD
sudo cat /root/.kube/config > ~/.kube/config
oc login -u system:admin
oc project default
$ make build-image
```
## Install Postgres Operator
Follow the [install instructions](https://crunchydata.github.io/postgres-operator/stable/installation/),
basically:
```
vi ./pv/crunchy-pv.json # set volume size and pv number
oc apply -f ./pv/crunchy-pv.json
oc apply -f ./deploy/cluster-rbac.yaml
oc apply -f ./deploy/rbac.yaml
./deploy/deploy.sh
```
## Install Zookeeper Operator
## Install the operator
```shell
oc create -f https://raw.githubusercontent.com/pravega/zookeeper-operator/master/deploy/crds/zookeeper_v1beta1_zookeepercluster_crd.yaml
oc create -f https://raw.githubusercontent.com/pravega/zookeeper-operator/master/deploy/default_ns/rbac.yaml
oc create -f https://raw.githubusercontent.com/pravega/zookeeper-operator/master/deploy/default_ns/operator.yaml
```
## Install Zuul Operator
```shell
operator-sdk build 172.30.1.1:5000/myproject/zuul-operator:latest
docker push 172.30.1.1:5000/myproject/zuul-operator:latest
oc create -f deploy/crds/zuul-ci_v1alpha1_zuul_crd.yaml
oc create -f deploy/rbac.yaml
oc create -f deploy/operator.yaml
$ make install
kubectl apply -f deploy/crds/zuul-ci_v1alpha1_zuul_crd.yaml -f deploy/rbac.yaml -f deploy/operator.yaml
```
Look for the operator pod and check its output
```shell
$ oc get pods
$ kubectl get pods
NAME READY STATUS RESTARTS AGE
zuul-operator-c64756f66-rbdmg 2/2 Running 0 3s
$ oc logs zuul-operator-c64756f66-rbdmg -c operator
...
$ kubectl logs zuul-operator-c64756f66-rbdmg -c operator
[...]
{"level":"info","ts":1554197305.5853095,"logger":"cmd","msg":"Go Version: go1.10.3"}
{"level":"info","ts":1554197305.5854425,"logger":"cmd","msg":"Go OS/Arch: linux/amd64"}
{"level":"info","ts":1554197305.5854564,"logger":"cmd","msg":"Version of operator-sdk: v0.6.0"}
{"level":"info","ts":1554197305.5855,"logger":"cmd","msg":"Watching namespace.","Namespace":"default"}
...
[...]
```
## Usage
```
$ oc apply -f - <<EOF
apiVersion: zuul-ci.org/v1alpha1
$ kubectl apply -f - <<EOF
apiVersion: operator.zuul-ci.org/v1alpha1
kind: Zuul
metadata:
name: example-zuul
spec:
# Optional user-provided ssh key
#sshsecretename: ""
# Optional user-provided clouds.yaml
#cloudssecretname: ""
# Optional user-provided kube/config
#kubesecretname: ""
merger:
min: 0
max: 10
executor:
min: 1
max: 5
web:
min: 1
launcher:
min: 1
connections: []
tenants:
- tenant:
name: demo
source: {}
EOF
zuul.zuul-ci.org/example-zuul created
$ oc get zuul
NAME AGE
example-zuul 10s
# Get zuul public key
$ oc get secret example-ssh-secret-pub -o "jsonpath={.data.id_rsa\.pub}" | base64 -d
ssh-rsa AAAAB3Nza...
$ oc get pods
NAME READY STATUS RESTARTS AGE
example-zuul-executor-696f969c4-6cpjv 1/1 Running 0 8s
example-zuul-launcher-5974789746-wbwpv 1/1 Running 0 9s
example-zuul-pg-5dfc477bff-8426l 1/1 Running 0 30s
example-zuul-scheduler-77b6cf7967-ksh64 1/1 Running 0 11s
example-zuul-web-5f744f89c9-qjp9l 1/1 Running 0 6s
example-zuul-zk-0 1/1 Running 0 22s
$ oc get svc example-zuul-web
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
example-zuul-web ClusterIP 172.30.209.181 <none> 80/TCP 41s
$ curl 172.30.209.181/api/tenants
[{"name": "demo", "projects": 0, "queue": 0}]
```

View File

@@ -1,58 +0,0 @@
---
# Default cr spec
meta:
name: demozuul
tenants:
- tenant:
name: demo
source: {}
connections: []
providers: []
labels:
- name: okd-fedora
min-ready: 1
launcher:
min: 1
merger:
min: 0
max: 5
executor:
min: 1
max: 5
web:
min: 1
max: 1
namespace: "{{ meta.namespace|default('default') }}"
state: "present"
zuul_app_name: "zuul"
zuul_cluster_name: "{{ meta.name }}"
sshsecretname: "{{ zuul_cluster_name }}-ssh-secret"
kubesecretname: "{{ zuul_cluster_name }}-kube-secret"
cloudssecretname: "{{ zuul_cluster_name }}-clouds-secret"
zuul_version: "latest" #"3.7.1"
nodepool_version: "latest"
# Use local image for https://review.openstack.org/650246
#zuul_image_name_base: "docker.io/zuul/zuul"
#nodepool_image_name_base: "docker.io/zuul/nodepool"
zuul_image_name_base: "172.30.1.1:5000/myproject/zuul"
nodepool_image_name_base: "172.30.1.1:5000/myproject/nodepool"
zuul_image_name:
scheduler: "{{ zuul_image_name_base }}-scheduler:{{ zuul_version }}"
merger: "{{ zuul_image_name_base }}-merger:{{ zuul_version }}"
executor: "{{ zuul_image_name_base }}-executor:{{ zuul_version }}"
web: "{{ zuul_image_name_base }}-web:{{ zuul_version }}"
launcher: "{{ nodepool_image_name_base }}-launcher:{{ nodepool_version }}"
zuul_service_account_name: "zuul-operator"
zuul_image_pull_policy: "IfNotPresent"
zuul_configmap_name: "{{ zuul_cluster_name }}-config"
zk_cluster_name: "{{ zuul_cluster_name }}-zk"
zk_api_version: "zookeeper.pravega.io/v1beta1"
pg_cluster_name: "{{ zuul_cluster_name }}-pg"
pg_cr_kind: "Pgcluster"
pg_api_version: "cr.client-go.k8s.io/v1"

View File

@@ -1,237 +0,0 @@
---
- name: Create Postgresql Credential
when: not zuul_pg_user
block:
- name: Create k8s secret
k8s:
state: "{{ state }}"
definition:
apiVersion: v1
kind: Secret
metadata:
labels:
pg-database: "{{ pg_cluster_name }}"
app: "{{ zuul_app_name }}"
zuul_cluster: "{{ zuul_cluster_name }}"
name: "{{ pg_cluster_name }}-zuul-secret"
namespace: "{{ namespace }}"
type: Opaque
data:
password: UE5xOEVFVTBxTQ==
username: dGVzdHVzZXI=
- name: Set fact
set_fact:
zuul_pg_user:
- username: dGVzdHVzZXI=
password: UE5xOEVFVTBxTQ==
- name: Create ssh key
when: not zuul_ssh_key
block:
- name: Create ssh key
command: "ssh-keygen -f /opt/ansible/ssh-{{ zuul_cluster_name }} -m PEM -t rsa -N '' -C zuul"
args:
creates: "/opt/ansible/ssh-{{ zuul_cluster_name }}"
- name: Create ssh secret
k8s:
state: "{{ state }}"
definition:
apiVersion: v1
kind: Secret
metadata:
labels:
app: "{{ zuul_app_name }}"
zuul_cluster: "{{ zuul_cluster_name }}"
name: "{{ sshsecretname }}"
namespace: "{{ namespace }}"
type: Opaque
stringData:
id_rsa: |
{{lookup('file', '/opt/ansible/ssh-' + zuul_cluster_name) }}
- name: Create ssh pub secret
k8s:
state: "{{ state }}"
definition:
apiVersion: v1
kind: Secret
metadata:
labels:
app: "{{ zuul_app_name }}"
zuul_cluster: "{{ zuul_cluster_name }}"
name: "{{ sshsecretname }}-pub"
namespace: "{{ namespace }}"
type: Opaque
stringData:
id_rsa.pub: |
{{lookup('file', '/opt/ansible/ssh-' + zuul_cluster_name + '.pub') }}
# TODO: cleanup key file from operator pod
- name: Create cloud config
when: not zuul_clouds_secret
k8s:
state: "{{ state }}"
definition:
apiVersion: v1
kind: Secret
metadata:
labels:
app: "{{ zuul_app_name }}"
zuul_cluster: "{{ zuul_cluster_name }}"
name: "{{ cloudssecretname }}"
namespace: "{{ namespace }}"
type: Opaque
stringData:
clouds.yaml: |
cache:
expiration:
server: 5
port: 5
floating-ip: 5
- name: Create kube config
when: not zuul_kube_secret
k8s:
state: "{{ state }}"
definition:
apiVersion: v1
kind: Secret
metadata:
labels:
app: "{{ zuul_app_name }}"
zuul_cluster: "{{ zuul_cluster_name }}"
name: "{{ kubesecretname }}"
namespace: "{{ namespace }}"
type: Opaque
stringData:
config: |
apiVersion: v1
clusters: []
contexts: []
- name: Create the scheduler configmap
k8s:
state: "{{ state }}"
definition:
kind: ConfigMap
apiVersion: v1
metadata:
name: "{{ zuul_configmap_name }}-scheduler"
namespace: "{{ namespace }}"
labels:
app: "{{ zuul_app_name }}"
zuul_cluster: "{{ zuul_cluster_name }}"
data:
"zuul.conf": |
[gearman]
server=localhost
port=4730
[zookeeper]
hosts={{ zk_cluster_name }}-client:2181
[gearman_server]
start=true
[scheduler]
tenant_config=/etc/zuul/main.yaml
[connection sqlreporter]
driver=sql
dburi=postgresql://{{ zuul_pg_user[0]["username"] | b64decode }}:{{ zuul_pg_user[0]["password"] | b64decode }}@{{ pg_cluster_name }}/zuul
{% for connection in connections %}
[connection {{ connection["name"] }}]
{% if connection["driver"] == "gerrit" %}
sshkey=/var/lib/zuul/ssh-secret/id_rsa
{% endif %}
{% for k, v in connection.items() %}{% if k != "name" %}
{{ k }}={{ v }}
{% endif %}{% endfor %}
{% endfor %}
"main.yaml": |
{{ tenants|to_yaml|regex_replace('(config|untrusted)_projects:', '\1-projects:') }}
register: scheduler_config
- name: Register if tenant config changed
set_fact:
tenant_config_updated: >-
{% if (scheduler_config is changed and
scheduler_config.diff|default(None) and
'main.yaml' in scheduler_config.diff[-1][1]) %}True
{% endif %}
- debug:
msg: "Tenant config is updated"
when: tenant_config_updated
- name: Create the zuul service configmap
k8s:
state: "{{ state }}"
definition:
kind: ConfigMap
apiVersion: v1
metadata:
name: "{{ zuul_configmap_name }}"
namespace: "{{ namespace }}"
labels:
app: "{{ zuul_app_name }}"
zuul_cluster: "{{ zuul_cluster_name }}"
data:
"zuul.conf": |
[gearman]
server={{ zuul_cluster_name }}-scheduler
port=4730
[zookeeper]
hosts={{ zk_cluster_name }}-client:2181
[web]
listen_address=0.0.0.0
port=9000
[executor]
# TODO: add secret map for executor ssh key
private_key_file=/var/lib/zuul/ssh-secret/id_rsa
[connection sqlreporter]
driver=sql
dburi=postgresql://{{ zuul_pg_user[0]["username"] | b64decode }}:{{ zuul_pg_user[0]["password"] | b64decode }}@{{ pg_cluster_name }}/zuul
{% for connection in connections %}
[connection {{ connection["name"] }}]
{% if connection["driver"] == "gerrit" %}
sshkey=/var/lib/zuul/ssh-secret/id_rsa
{% endif %}
{% for k, v in connection.items() %}{% if k != "name" %}
{{ k }}={{ v }}
{% endif %}{% endfor %}
{% endfor %}
- name: Create the nodepool configmap
k8s:
state: "{{ state }}"
definition:
kind: ConfigMap
apiVersion: v1
metadata:
name: "{{ zuul_configmap_name }}-nodepool"
namespace: "{{ namespace }}"
labels:
app: "{{ zuul_app_name }}"
zuul_cluster: "{{ zuul_cluster_name }}"
data:
"nodepool.yaml": |
{{ ({'labels': labels})|to_yaml }}
{{ ({'providers': providers})|to_yaml }}
webapp:
port: 8006
zookeeper-servers:
- host: {{ zk_cluster_name }}-client
port: 2181
register: nodepool_config

View File

@@ -1,67 +0,0 @@
- name: Get autoscale count
autoscale_gearman:
service: "{{ deployment_name }}"
gearman: "{{ gearman_service.spec.clusterIP|default(None) }}"
min: "{{ deployment_conf.min|default(0) }}"
max: "{{ deployment_conf.max|default(1) }}"
register: autoscale
when: gearman_service is defined
# TODO: ensure graceful scale-down of service's replicas
- name: Create Deployment
k8s:
state: "{{ state }}"
definition:
kind: "Deployment"
apiVersion: "extensions/v1beta1"
metadata:
name: "{{ zuul_cluster_name }}-{{ deployment_name }}"
namespace: "{{ namespace }}"
labels:
app: "{{ zuul_app_name }}"
zuul_cluster: "{{ zuul_cluster_name }}"
annotations:
configHash: ""
spec:
replicas: "{{ autoscale.count|default(deployment_conf.min) }}"
selector:
matchLabels:
app: "{{ zuul_cluster_name }}-{{ deployment_name }}"
zuul_cluster: "{{ zuul_cluster_name }}"
template:
metadata:
name: "{{ zuul_cluster_name }}-{{ deployment_name }}"
labels:
app: "{{ zuul_cluster_name }}-{{ deployment_name }}"
zuul_cluster: "{{ zuul_cluster_name }}"
spec:
containers:
- name: "{{ zuul_cluster_name }}-{{ deployment_name }}"
image: "{{ zuul_image_name[deployment_name] }}"
imagePullPolicy: "{{ zuul_image_pull_policy }}"
ports: "{{ deployment_ports|default([]) }}"
env:
- name: CONFIG_CHECKSUM
value: "{{ scheduler_config.result.data | checksum }}"
volumeMounts:
- mountPath: "/etc/zuul"
name: zuul-config-volume
readOnly: true
- mountPath: "/var/lib/zuul"
name: zuul-data-volume
- mountPath: "/var/lib/zuul/ssh-secret/"
name: zuul-ssh-key
command:
- "/uid_entrypoint"
- "zuul-{{ deployment_name }}"
- "-d"
volumes:
- name: zuul-config-volume
configMap:
name: "{{ deployment_config|default(zuul_configmap_name) }}"
- name: zuul-data-volume
emptyDir: {}
- name: zuul-ssh-key
secret:
secretName: "{{ sshsecretname }}"
defaultMode: 256

View File

@@ -1,72 +0,0 @@
# TODO:
- name: Get autoscale count
# TODO: look for replicas count in zk requests list
# autoscale_zk:
# service: {{ deployment_name }}
# zkhost: "{{ zk_cluster_name }}-client:2181"
# min: {{ deployment_conf.min|default(0) }}
# register: autoscale
set_fact:
autoscale:
count: "{{ deployment_conf.min|default(0) }}"
- name: Create Deployment
k8s:
state: "{{ state }}"
definition:
kind: "Deployment"
apiVersion: "extensions/v1beta1"
metadata:
name: "{{ zuul_cluster_name }}-{{ deployment_name }}"
namespace: "{{ namespace }}"
labels:
app: "{{ zuul_app_name }}"
zuul_cluster: "{{ zuul_cluster_name }}"
annotations:
configHash: ""
spec:
replicas: "{{ autoscale.count }}"
selector:
matchLabels:
app: "{{ zuul_cluster_name }}-{{ deployment_name }}"
zuul_cluster: "{{ zuul_cluster_name }}"
template:
metadata:
name: "{{ zuul_cluster_name }}-{{ deployment_name }}"
labels:
app: "{{ zuul_cluster_name }}-{{ deployment_name }}"
zuul_cluster: "{{ zuul_cluster_name }}"
spec:
containers:
- name: "{{ zuul_cluster_name }}-{{ deployment_name }}"
image: "{{ zuul_image_name[deployment_name] }}"
imagePullPolicy: "{{ zuul_image_pull_policy }}"
env:
- name: CONFIG_CHECKSUM
value: "{{ nodepool_config.result.data | checksum }}"
volumeMounts:
- mountPath: "/etc/nodepool"
name: nodepool-config-volume
readOnly: true
- mountPath: "/var/lib/nodepool"
name: nodepool-data-volume
- mountPath: "/var/lib/nodepool/.kube"
name: nodepool-kube-volume
- mountPath: "/var/lib/nodepool/.config/openstack"
name: nodepool-clouds-volume
command:
- "/uid_entrypoint"
- "nodepool-{{ deployment_name }}"
- "-d"
volumes:
- name: nodepool-config-volume
configMap:
name: "{{ zuul_configmap_name }}-nodepool"
- name: nodepool-data-volume
emptyDir: {}
- name: nodepool-kube-volume
secret:
secretName: "{{ kubesecretname }}"
- name: nodepool-clouds-volume
secret:
secretName: "{{ cloudssecretname }}"

View File

@@ -1,18 +0,0 @@
- name: Create Service
k8s:
state: "{{ state }}"
definition:
kind: Service
apiVersion: v1
metadata:
name: "{{ zuul_cluster_name }}-{{ service_name }}"
namespace: "{{ namespace }}"
labels:
app: "{{ zuul_app_name }}"
zuul_cluster: "{{ zuul_cluster_name }}"
spec:
type: ClusterIP
selector:
app: "{{ zuul_cluster_name }}-{{ service_name }}"
zuul_cluster: "{{ zuul_cluster_name }}"
ports: "{{ service_ports }}"

View File

@@ -1,83 +0,0 @@
---
- name: Nodepool Deployment
vars:
deployment_name: launcher
deployment_conf: "{{ launcher }}"
include_tasks: "./create_nodepool_deployment.yaml"
- name: Save queue
include_tasks: "./queue_save.yaml"
when:
- scheduler_config is changed
- zuul_scheduler_pod
- name: Scheduler Deployment
vars:
deployment_name: scheduler
deployment_ports:
- containerPort: 4730
protocol: "TCP"
deployment_config: "{{ zuul_configmap_name }}-scheduler"
deployment_conf:
min: 1
include_tasks: "./create_deployment.yaml"
register: sched_deployment
- name: Scheduler service
vars:
service_name: scheduler
service_ports:
- name: "gearman"
port: 4730
protocol: TCP
include_tasks: "./create_service.yaml"
- name: Wait for Service
set_fact:
gearman_service: "{{ lookup('k8s', api_version='v1', kind='Service', namespace=namespace, resource_name=zuul_cluster_name + '-scheduler') }}"
until: gearman_service
retries: 5
delay: 10
- name: Reload scheduler
include_tasks: "./reload_scheduler.yaml"
when:
- sched_deployment is not changed
- tenant_config_updated
- name: Merger Deployment
vars:
deployment_name: merger
deployment_conf: "{{ merger }}"
include_tasks: "./create_deployment.yaml"
- name: Executor Deployment
vars:
deployment_name: executor
deployment_conf: "{{ executor }}"
include_tasks: "./create_deployment.yaml"
- name: Web Deployment
vars:
deployment_name: web
deployment_conf: "{{ web }}"
deployment_ports:
- containerPort: 9000
protocol: "TCP"
include_tasks: "./create_deployment.yaml"
- name: Web Service
vars:
service_name: web
service_ports:
- name: "web"
port: 80
protocol: TCP
targetPort: 9000
include_tasks: "./create_service.yaml"
- name: Load queue
include_tasks: "./queue_load.yaml"
when:
- scheduler_config is changed
- zuul_scheduler_pod

View File

@@ -1,3 +0,0 @@
---
- name: Load scheduler queue
debug: msg="TODO..."

View File

@@ -1,3 +0,0 @@
---
- name: Dump scheduler queue
debug: msg="TODO..."

View File

@@ -1,3 +0,0 @@
---
- name: Add scheduler pod to the inventory
debug: msg="TODO..."

View File

@@ -1,125 +0,0 @@
- name: Postgresql Secret
k8s:
definition:
apiVersion: v1
kind: Secret
metadata:
labels:
pg-database: "{{ pg_cluster_name }}"
app: "{{ zuul_app_name }}"
zuul_cluster: "{{ zuul_cluster_name }}"
name: "{{ pg_cluster_name }}-postgres-secret"
namespace: "{{ namespace }}"
type: Opaque
data:
password: M3pBeXpmMThxQg==
username: cG9zdGdyZXM=
- name: Postgresql Primary User
k8s:
definition:
apiVersion: v1
kind: Secret
metadata:
labels:
pg-database: "{{ pg_cluster_name }}"
app: "{{ zuul_app_name }}"
zuul_cluster: "{{ zuul_cluster_name }}"
name: "{{ pg_cluster_name }}-primaryuser-secret"
namespace: "{{ namespace }}"
type: Opaque
data:
password: d0ZvYWlRZFhPTQ==
username: cHJpbWFyeXVzZXI=
- name: Postgresql Deployment
k8s:
definition:
apiVersion: "{{ pg_api_version }}"
kind: "{{ pg_cr_kind }}"
metadata:
labels:
archive: 'false'
archive-timeout: '60'
crunchy-pgbadger: 'false'
crunchy_collect: 'false'
current-primary: "{{ pg_cluster_name }}"
deployment-name: "{{ pg_cluster_name }}"
name: "{{ pg_cluster_name }}"
pg-cluster: "{{ pg_cluster_name }}"
pgo-backrest: 'false'
pgo-version: 3.5.2
primary: 'true'
app: "{{ zuul_app_name }}"
zuul_cluster: "{{ zuul_cluster_name }}"
name: "{{ pg_cluster_name }}"
namespace: "{{ namespace }}"
spec:
ArchiveStorage:
accessmode: ''
fsgroup: ''
matchLabels: ''
name: ''
size: ''
storageclass: ''
storagetype: ''
supplementalgroups: ''
BackrestStorage:
accessmode: ReadWriteMany
fsgroup: ''
matchLabels: ''
name: ''
size: 1G
storageclass: ''
storagetype: create
supplementalgroups: ''
ContainerResources:
limitscpu: ''
limitsmemory: ''
requestscpu: ''
requestsmemory: ''
PrimaryStorage:
accessmode: ReadWriteMany
fsgroup: ''
matchLabels: ''
name: "{{ pg_cluster_name }}"
size: 1G
storageclass: ''
storagetype: create
supplementalgroups: ''
ReplicaStorage:
accessmode: ReadWriteMany
fsgroup: ''
matchLabels: ''
name: ''
size: 1G
storageclass: ''
storagetype: create
supplementalgroups: ''
backuppath: ''
backuppvcname: ''
ccpimage: crunchy-postgres
ccpimagetag: centos7-11.2-2.3.1
clustername: "{{ pg_cluster_name }}"
customconfig: ''
database: zuul
name: "{{ pg_cluster_name }}"
nodename: ''
policies: ''
port: '5432'
primaryhost: "{{ pg_cluster_name }}"
primarysecretname: "{{ pg_cluster_name }}-primaryuser-secret"
replicas: '0'
rootsecretname: "{{ pg_cluster_name }}-postgres-secret"
secretfrom: ''
status: ''
strategy: '1'
user: zuul
userlabels:
archive: 'false'
archive-timeout: '60'
crunchy-pgbadger: 'false'
crunchy_collect: 'false'
pgo-backrest: 'false'
pgo-version: 3.5.2
usersecretname: "{{ pg_cluster_name }}-zuul-secret"

View File

@@ -1,14 +0,0 @@
- name: Zookeeper Deployment
k8s:
definition:
apiVersion: "{{ zk_api_version }}"
kind: "ZookeeperCluster"
metadata:
name: "{{ zk_cluster_name }}"
namespace: "{{ namespace }}"
labels:
app: "{{ zuul_app_name }}"
zuul_cluster: "{{ zuul_cluster_name }}"
spec:
size: 1
version: "3.5.3-beta"

View File

@@ -1,42 +0,0 @@
---
- set_fact:
label_selector_value: "zuul_cluster={{ zuul_cluster_name }},app={{ zuul_app_name }}"
sched_selector_value: "zuul_cluster={{ zuul_cluster_name }},app={{ zuul_cluster_name }}-scheduler"
pg_user_query: "[?metadata.name=='{{ pg_cluster_name }}-zuul-secret'].data"
ssh_key_query: "[?metadata.name=='{{ sshsecretname }}'].data"
- name: lookup k8s secrets
set_fact:
secrets_lookup: "{{ lookup('k8s', api_version='v1', kind='Secret', namespace=namespace, label_selector=label_selector_value) }}"
- name: lookup cluster secret
set_fact:
zuul_pg_user: "{{ secrets_lookup | json_query(pg_user_query) }}"
zuul_ssh_key: "{{ lookup('k8s', api_version='v1', kind='Secret', namespace=namespace, resource_name=sshsecretname) }}"
zuul_clouds_secret: "{{ lookup('k8s', api_version='v1', kind='Secret', namespace=namespace, resource_name=cloudssecretname) }}"
zuul_kube_secret: "{{ lookup('k8s', api_version='v1', kind='Secret', namespace=namespace, resource_name=kubesecretname) }}"
- name: lookup k8s postgres cr
set_fact:
pg_cr_lookup: "{{ lookup('k8s', api_version=pg_api_version, kind=pg_cr_kind, namespace=namespace, resource_name=pg_cluster_name) }}"
- name: lookup k8s zookeeper cr
set_fact:
zk_cr_lookup: "{{ lookup('k8s', api_version=zk_api_version, kind='ZookeeperCluster', namespace=namespace, resource_name=zk_cluster_name) }}"
- name: lookup scheduler pod
set_fact:
zuul_scheduler_pod: "{{ lookup('k8s', api_version='v1', kind='Pod', namespace=namespace, label_selector=sched_selector_value) }}"
- name: lookup k8s Zuul deployment
set_fact:
zuul_deployment_lookup: "{{ lookup('k8s', api_version='extensions/v1beta1', kind='Deployment', namespace=namespace, resource_name=zuul_cluster_name) }}"
- name: get currently deployed Zuul image name
set_fact:
current_deployed_image: "{{ zuul_deployment_lookup.spec.template.spec.containers['name'=='zuul'].image }}"
when: zuul_deployment_lookup.spec is defined
- debug:
msg: "Zuul Version has CHANGED to '{{ zuul_version }}' - Currently at {{ current_deployed_image }}"
when: (current_deployed_image is defined) and (current_deployed_image != zuul_image_name)

View File

@@ -1,19 +0,0 @@
---
- hosts: localhost
gather_facts: no
tasks:
- debug: msg="Running Zuul Operator Playbook"
- name: Show the env
command: env
- import_role:
name: get_status
- import_role:
name: create_config
- import_role:
name: deploy_pg
when: (pg_cr_lookup|length==0)
- import_role:
name: deploy_zk
when: (zk_cr_lookup|length==0)
- import_role:
name: deploy

View File

@@ -25,9 +25,4 @@ RUN echo 'let Prelude = ~/conf/Prelude.dhall let Kubernetes = ~/conf/Kubernetes.
# Copy ansible operator requirements
COPY watches.yaml ${HOME}/watches.yaml
COPY ansible/zuul.yaml ${HOME}/zuul.yaml
COPY ansible/group_vars/ ${HOME}/group_vars/
COPY ansible/roles/ ${HOME}/roles/
COPY build/uid_entrypoint.sh /uid_entrypoint
COPY roles ${HOME}/roles

View File

@@ -1,8 +0,0 @@
#!/bin/sh
if ! whoami &> /dev/null; then
if [ -w /etc/passwd ]; then
echo "${USER_NAME:-default}:x:$(id -u):0:${USER_NAME:-default} user:${HOME}:/sbin/nologin" >> /etc/passwd
fi
fi
exec "$@"

View File

@@ -1,18 +1,134 @@
# User provided configuration
apiVersion: v1
kind: List
items:
- apiVersion: v1
kind: Secret
metadata:
name: executor-ssh-key
stringData:
id_rsa: |
-----BEGIN OPENSSH PRIVATE KEY-----
b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAAAlwAAAAdzc2gtcn
NhAAAAAwEAAQAAAIEAxETTnagYWAfJK5b4k8DYtoWnDu+Uwvfn7ZJSzSt9YJMU8nP02fIJ
tZ3A9WoV7tppNjzsT3kV/h1efagL0wEXqETjClYh+DQ0IKRrJiGZwD+JgJo0zP0gTuoga6
T0CkxJcraCrUpKJcN7sCEtb4rLbNCJDeZcJT7TE4Ce4SdskCcAAAIQOyrkbTsq5G0AAAAH
c3NoLXJzYQAAAIEAxETTnagYWAfJK5b4k8DYtoWnDu+Uwvfn7ZJSzSt9YJMU8nP02fIJtZ
3A9WoV7tppNjzsT3kV/h1efagL0wEXqETjClYh+DQ0IKRrJiGZwD+JgJo0zP0gTuoga6T0
CkxJcraCrUpKJcN7sCEtb4rLbNCJDeZcJT7TE4Ce4SdskCcAAAADAQABAAAAgENeGIDR9O
EqcvuaS2Jz3C7yO0BeLeKqMxqRkghIeDWQ5qy97npsoLaNGnFiY3rjzx9F1BsHRew+anOZ
Hc3kXxjSAKUJn2VO7MYWBdv8J+C9HGDyOhorvCWpwzTUaFMQn+QNIJ+y0A/AZKg7X9oZZn
0HgGQe7Zeam6vVX6HIrCVJAAAAQQCC7vzohHa7a4Yl/+O7XeV60taTE4siKhCmCXcVhIwt
98dRhFD4n7c3s4UTRABApNppVhdABMCGHlG/MSOf6o94AAAAQQDirDR46I7oMQ0OL4pPqp
1A1n21Kq8KYTw5KG7zpQOqaD2FN7vR8ugUe6vEa9eNptYA2tb25LHmEnvcC79qsCw9AAAA
QQDdqZgN/s2zo+FPm3cEFrUMxn5l9IBSSrRwy/BFC8wMDvjAZHW6HycCQ97l41HNudNX3S
a56gV9hNojMrdOHsAzAAAAFmZlZG9yYUBmZWRvcmEucmRvY2xvdWQBAgME
-----END OPENSSH PRIVATE KEY-----
- apiVersion: v1
kind: Secret
metadata:
name: zuul-yaml-conf
stringData:
main.yaml: |
- tenant:
name: local
source:
opendev.org:
config-projects:
- zuul/zuul-base-jobs
untrusted-projects:
- zuul/zuul-jobs
- apiVersion: v1
kind: Secret
metadata:
name: nodepool-yaml-conf
stringData:
nodepool.yaml: |
labels:
- name: pod-centos
min-ready: 1
providers:
- name: kube-cluster
driver: openshiftpods
context: local
max-pods: 15
pools:
- name: default
labels:
- name: pod-centos
image: quay.io/software-factory/pod-centos-7
python-path: /bin/python2
- apiVersion: v1
kind: Secret
metadata:
name: nodepool-kube-config
stringData:
kube.config: |
apiVersion: v1
kind: Config
preferences: {}
clusters:
- cluster:
server: https://10.43.0.1:8043
insecure-skip-tls-verify: true
name: local
users:
- name: nodepool
user:
ca.crt: |
-----BEGIN CERTIFICATE-----
MIIDPzCCAiegAwIBAgIUPa2ZqA4O9lLf8RUnfPSn7iGmPWswDQYJKoZIhvcNAQEL
BQAwLzELMAkGA1UEBhMCRlIxEzARBgNVBAoMClNpbHZlckt1YmUxCzAJBgNVBAsM
AjQyMB4XDTIwMDEwNjE3NTU0NVoXDTMwMDEwMzE3NTU0NVowLzELMAkGA1UEBhMC
RlIxEzARBgNVBAoMClNpbHZlckt1YmUxCzAJBgNVBAsMAjQyMIIBIjANBgkqhkiG
9w0BAQEFAAOCAQ8AMIIBCgKCAQEAlv5kLyjcBBnLIsZfBdQ/24dmZp3VGAXMYk8F
K8uiAORI4sHv7dcrT2seGQloaFHgduBEkkMOdHXvrAPptaFF6JWq5GnmwyEm2M5b
rRpYhgKD5BCOzTFS9IAgkVEyk7FAU+iHzFtQ2X1ktp6/59tlXACvefgFVDsQBsLb
9F549R89D8Di+SHYRT9lhRdZEwNmkIIP7HQaSWCd0EEjlDrK0NDrsilumt2gmo3k
24e/EVoKrAr8LOs9oP0w4Jzke1b41Jaax6P/iGs7TZWWB/YamYHWrz8bMglR19hV
Ozsdm5H0tgipTsl8Ozs0xQzTTjLI+axE6/+dogOxPCFzbrLlHQIDAQABo1MwUTAd
BgNVHQ4EFgQU0+sDEB0XYWICzTEZpXVu8Kxrgp4wHwYDVR0jBBgwFoAU0+sDEB0X
YWICzTEZpXVu8Kxrgp4wDwYDVR0TAQH/BAUwAwEB/zANBgkqhkiG9w0BAQsFAAOC
AQEAPWLVegnP5bhQ3zOkeY7+32uBIgN6DKtqyOsatj9xk6qEUk1syctyHfEGd7Uf
APtTDwJmPBCUsDeZZ4LX4CFc+6UWw4sKH04iSP1dGUEwWQCLTavP8WTIFSnI1hO1
1Pbj5iOVdCQerWkf9+cVPDCEQQtzYact6sbQnBHArSTpsyCMWeBribQ4Cz9Wbkrv
w92Dy/qosmiJ38b4ueaasyPJ7hyWIQvNDL+4doPGB7j9sgU+Kcwf6UxJViQGACJG
FDa1rKSfn0llBt5+oXB0e+1t3AUKJFieHuLyqbNTEkdlzDuJKW9kxSZhjetWGPQ3
EgliTqJgLjtNK7CjLotVN5k/QA==
-----END CERTIFICATE-----
token: eyJhbGciOiJSUzI1NiIsImtpZCI6InNPNGRNT3dFZGwzQzYxSm4tWC1DeWRieE4wUTRIc3dpWThrN1E5Mk8wS2cifQ.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJkZWZhdWx0Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZWNyZXQubmFtZSI6Im5vZGVwb29sLXRva2VuLTVsbjdsIiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZXJ2aWNlLWFjY291bnQubmFtZSI6Im5vZGVwb29sIiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZXJ2aWNlLWFjY291bnQudWlkIjoiYWRlMDZkNTUtMzEwZi00OWNiLWJlZGUtMjllOTMzMWExYmNhIiwic3ViIjoic3lzdGVtOnNlcnZpY2VhY2NvdW50OmRlZmF1bHQ6bm9kZXBvb2wifQ.xLi7vlNzMI7E8fqKNJ6VH-CxdzCw1--IEQ4QVD8dm3JdYMRrQT4uFeNnhrxT7gqUf_vV2B821W3ALmpDvJW9EzMGWshGGWsnNlUJ7w0T9tFBd-fsAgZeomBvq_yzJt7Ud7Ug7ysiR2_DMUNQynPavl-D3fS5SMDSHjm_vMKYYF78ds2SCFu85AJOG0Xu4_1Dzd2r0GihfXMc7aAxvtVba4g38_d1oZjmSWddNE4loHxocmAxfFohNl_hui22zAOM1Z4xlZ7w8MCuTTLfKIExSAaAsfZb1yGZ4eN35wfLBaGHCscCjZl4ctos92WeWyzEjs901AIg3BqqlbR7tSEdLg
contexts:
- context:
cluster: local
user: nodepool
namespace: default
name: local
current-context: local
apiVersion: v1
---
apiVersion: operator.zuul-ci.org/v1alpha1
kind: Zuul
metadata:
name: example-zuul
name: zuul
spec:
merger:
min: 0
max: 10
executor:
min: 1
max: 5
web:
min: 1
connections: []
tenants:
- tenant:
name: demo
source: {}
count: 1
ssh_key:
secretName: executor-ssh-key
merger:
count: 1
scheduler:
config:
secretName: zuul-yaml-conf
launcher:
config:
secretName: nodepool-yaml-conf
connections:
gits:
- baseurl: https://opendev.org
name: opendev.org
external_config:
kubernetes:
secretName: nodepool-kube-config
key: kube.config

View File

@@ -16,18 +16,18 @@ spec:
containers:
- name: ansible
command:
- /uid_entrypoint
- /usr/local/bin/ao-logs
- /tmp/ansible-operator/runner
- stdout
# TODO: use a public name
image: "zuul/zuul-operator"
image: "docker.io/zuul/zuul-operator"
imagePullPolicy: "IfNotPresent"
volumeMounts:
- mountPath: /tmp/ansible-operator/runner
name: runner
readOnly: true
- name: operator
image: "zuul/zuul-operator"
image: "docker.io/zuul/zuul-operator"
imagePullPolicy: "IfNotPresent"
volumeMounts:
- mountPath: /tmp/ansible-operator/runner
name: runner

View File

@@ -8,7 +8,6 @@ metadata:
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
creationTimestamp: null
name: zuul-operator
rules:
- apiGroups:
@@ -16,29 +15,35 @@ rules:
resources:
- pods
- services
- services/finalizers
- endpoints
- persistentvolumeclaims
- events
- configmaps
- secrets
verbs:
- '*'
- apiGroups:
- ""
resources:
- namespaces
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- apps
- extensions
resources:
- deployments
- daemonsets
- replicasets
- statefulsets
verbs:
- '*'
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- monitoring.coreos.com
resources:
@@ -54,19 +59,31 @@ rules:
- deployments/finalizers
verbs:
- update
- apiGroups:
- ""
resources:
- pods
verbs:
- get
- apiGroups:
- apps
resources:
- replicasets
- deployments
verbs:
- get
- apiGroups:
- operator.zuul-ci.org
resources:
- '*'
verbs:
- '*'
- apiGroups:
- cr.client-go.k8s.io
resources:
- '*'
verbs:
- '*'
- create
- delete
- get
- list
- patch
- update
- watch
---
@@ -81,15 +98,3 @@ roleRef:
kind: Role
name: zuul-operator
apiGroup: rbac.authorization.k8s.io
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: zuul-operator-zookeeper
subjects:
- kind: ServiceAccount
name: zuul-operator
roleRef:
kind: Role
name: zookeeper-operator
apiGroup: rbac.authorization.k8s.io

View File

@@ -1,15 +1,3 @@
- name: View Status of K8s Pods
hosts: all
tasks:
- name: Get pods
command: kubectl get pods
- name: Get logs from ansible container
command: kubectl logs -l name=zuul-operator -c ansible
- name: Get logs from operator container
command: kubectl logs -l name=zuul-operator -c operator
- name: Log all Events
command: kubectl get events
- hosts: all
roles:
- collect-container-logs

View File

@@ -1,10 +1,11 @@
- name: start kubernetes and install all dependencies
hosts: all
roles:
- role: bindep
- role: clear-firewall
- role: install-kubernetes
vars:
minikube_dns_resolvers:
- '1.1.1.1'
- '8.8.8.8'
- role: use-buildset-registry
buildset_registry_docker_user: root
docker_version: 18.06.1~ce~3-0~ubuntu

View File

@@ -1,38 +1,20 @@
- name: install and start zuul operator
hosts: all
tasks:
- name: Set Operator SDK Release Version fact
set_fact:
RELEASE_VERSION: v0.8.1
- name: Setup CRD
command: kubectl create -f deploy/crds/zuul-ci_v1alpha1_zuul_crd.yaml
command: make install
args:
chdir: "{{ zuul.projects['opendev.org/zuul/zuul-operator'].src_dir }}"
- name: Setup rbac
command: kubectl create -f deploy/rbac.yaml
- name: Deploy CR
command: make deploy-cr
args:
chdir: "{{ zuul.projects['opendev.org/zuul/zuul-operator'].src_dir }}"
- name: Setup Operator
command: kubectl create -f deploy/operator.yaml
args:
chdir: "{{ zuul.projects['opendev.org/zuul/zuul-operator'].src_dir }}"
- name: Deploy Zookeeper operator
command: kubectl create -f {{ item }}
args:
chdir: "{{ zuul.projects['github.com/pravega/zookeeper-operator'].src_dir }}/deploy"
loop:
- crds/zookeeper_v1beta1_zookeepercluster_crd.yaml
- default_ns/rbac.yaml
- default_ns/operator.yaml
- name: wait for pods to come up
command: kubectl get pods -o json
register: kubectl_get_pods
until: kubectl_get_pods.stdout|from_json|json_query('items[*].status.phase')|unique == ["Running"]
retries: 30
delay: 10
- name: Wait 20 minutes and dump pods status periodically
shell: |
for idx in $(seq 1200); do
date;
kubectl get pods;
sleep 1;
done

View File

@@ -0,0 +1,2 @@
namespace: "{{ meta.namespace|default('default') }}"
state: "{{ k8s_state | default('present') }}"

View File

@@ -0,0 +1,61 @@
#!/usr/bin/env python3
# Copyright 2020 Red Hat, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import argparse
import json
import subprocess
import sys
from typing import Any
from ansible.module_utils.basic import AnsibleModule # type: ignore
def run(expression: str) -> Any:
proc = subprocess.Popen(
['dhall-to-json', '--omit-empty', '--explain'], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = proc.communicate(expression.encode('utf-8'))
if stderr:
return dict(failed=True, msg=stderr.decode('utf-8'))
result = dict(result=json.loads(stdout.decode('utf-8')))
result['changed'] = True
return result
def ansible_main():
module = AnsibleModule(
argument_spec=dict(
expression=dict(required=True, type='str'),
)
)
p = module.params
result = run(p['expression'])
if result.get('failed'):
module.fail_json(msg="Dhall expression failed:" + result['msg'])
module.exit_json(**result)
def cli_main():
parser = argparse.ArgumentParser()
parser.add_argument('expression')
args = parser.parse_args()
print(run(args.expression))
if __name__ == '__main__':
if sys.stdin.isatty():
cli_main()
else:
ansible_main()
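When run from a terminal the module falls back to a small CLI, which is handy for debugging expressions by hand. A sketch, assuming the `dhall-to-json` binary is on PATH and the module file is saved as `dhall_to_json.py` (file name assumed from the Ansible module name):

```shell
# Hypothetical manual invocation of the CLI entry point
python3 dhall_to_json.py '{ replicas = 3 }'
# prints roughly: {'result': {'replicas': 3}, 'changed': True}
```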

View File

@@ -0,0 +1,13 @@
---
- name: Convert expression to kubernetes objects
dhall_to_json:
expression: "{{ expression }}"
register: _dhall_output
- name: Apply objects
k8s:
state: "{{ state }}"
namespace: "{{ namespace }}"
definition: "{{ item }}"
apply: yes
loop: "{{ _dhall_output.result['items'] }}"
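The apply loop assumes the evaluated expression yields a Kubernetes List-style record, i.e. something with an `items` field, and applies each element as its own object. A quick way to sanity-check an expression outside Ansible (a sketch, assuming `dhall-to-json` is installed):

```shell
# Hypothetical check that an expression produces the .items list the role iterates over
dhall-to-json --omit-empty <<< '{ items = [ { apiVersion = "v1", kind = "Namespace" } ] }' | python3 -m json.tool
```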

View File

@@ -0,0 +1,7 @@
name: "{{ meta.name | default('zuul') }}"
zuul_app_path: "/opt/ansible/conf/zuul/applications/Zuul.dhall"
kubernetes_deploy_path: "/opt/ansible/conf/operator/deploy/Kubernetes.dhall"
# Here we use zuul_spec to get the unmodified CR
# see: https://github.com/operator-framework/operator-sdk/issues/1770
raw_spec: "{{ vars['_operator_zuul-ci_org_zuul_spec'] }}"

View File

@@ -0,0 +1,65 @@
#!/usr/bin/env python3
# Copyright 2020 Red Hat, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import argparse
import subprocess
import sys
from typing import List
from ansible.module_utils.basic import AnsibleModule # type: ignore
def pread(args: List[str], stdin: str) -> str:
proc = subprocess.Popen(args, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = proc.communicate(stdin.encode('utf-8'))
if stderr:
raise RuntimeError("Command failed: " + stderr.decode('utf-8'))
return stdout.decode('utf-8')
def run(schema: str, json_input: str) -> str:
return pread(['json-to-dhall', '--plain', schema], json_input)
def ansible_main():
module = AnsibleModule(
argument_spec=dict(
schema=dict(required=True, type='str'),
json=dict(required=True, type='str'),
)
)
p = module.params
try:
module.exit_json(changed=True, result=run(p['schema'], p['json']))
except Exception as e:
module.fail_json(msg="Dhall expression failed:" + str(e))
def cli_main():
parser = argparse.ArgumentParser()
parser.add_argument('schema')
parser.add_argument('--json')
parser.add_argument('--file')
args = parser.parse_args()
if args.file:
import yaml, json
args.json = json.dumps(yaml.safe_load(open(args.file)))
print(run(args.schema, args.json))
if __name__ == '__main__':
if sys.stdin.isatty():
cli_main()
else:
ansible_main()
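Like its dhall_to_json counterpart, this module doubles as a CLI when invoked interactively, taking the target Dhall type as a positional argument and the input as `--json` or a YAML `--file`. A sketch, assuming `json-to-dhall` is installed and the file is saved as `json_to_dhall.py`:

```shell
# Hypothetical round-trip of a JSON value into a Dhall expression
python3 json_to_dhall.py 'Natural' --json '3'
# prints roughly: 3
python3 json_to_dhall.py '{ replicas : Natural }' --file spec.yaml   # converts YAML to JSON first
```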

View File

@@ -0,0 +1,26 @@
# TODO: Generate tls cert secret
# TODO: query gearman for build queue size
# TODO: update the executor/merger replica count
- name: Convert spec to template input
json_to_dhall:
schema: "({{ zuul_app_path }}).Input"
json: "{{ spec | to_json }}"
vars:
spec:
name: "{{ meta.name | default('zuul') }}"
merger: "{{ raw_spec['merger'] | default({}) }}"
executor: "{{ raw_spec['executor'] | default({}) }}"
web: "{{ raw_spec['web'] | default({}) }}"
scheduler: "{{ raw_spec['scheduler'] | default({}) }}"
launcher: "{{ raw_spec['launcher'] | default({}) }}"
external_config: "{{ raw_spec['external_config'] | default({}) }}"
connections: "{{ raw_spec['connections'] | default({}) }}"
register: _dhall
- include_role:
name: dhall
vars:
expression: |
{{ kubernetes_deploy_path }}
(({{ zuul_app_path }}).Application {{ _dhall.result }})
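For debugging, the expression handed to the dhall role can be evaluated by hand; a rough sketch, using the paths from the role's vars and a placeholder for the converted spec (this mirrors what the dhall_to_json module runs internally):

```shell
# INPUT is a placeholder for the Dhall record produced by json_to_dhall from the CR spec
INPUT='...'
dhall-to-json --omit-empty --explain <<EOF
/opt/ansible/conf/operator/deploy/Kubernetes.dhall
  ((/opt/ansible/conf/zuul/applications/Zuul.dhall).Application ${INPUT})
EOF
```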

View File

@@ -2,4 +2,4 @@
- version: v1alpha1
group: operator.zuul-ci.org
kind: Zuul
playbook: /opt/ansible/zuul.yaml
role: /opt/ansible/roles/zuul