DNM checking CI for Publish container images to quay.io

Add script for executing tests locally

Depends-On: https://review.opendev.org/c/zuul/zuul-jobs/+/894755

Change-Id: I92936af7c3d374f1598d7eab69fc8d21b3ced610
This commit is contained in:
Daniel Pawlik 2023-09-11 10:33:45 +02:00
parent 1132575d67
commit 0637aa584e
No known key found for this signature in database
5 changed files with 184 additions and 8 deletions

View File

@ -1,5 +1,7 @@
- name: start kubernetes and install all dependencies
hosts: all
vars:
minikube_vm_driver: podman
roles:
- role: clear-firewall
- role: ensure-pip
@ -15,6 +17,125 @@
- role: use-buildset-registry
buildset_registry_docker_user: root
post_tasks:
- name: Install ansible-core
become: true
package:
name: ansible-core
state: present
- name: Create zuul operator image
become: true
vars:
zuul_work_dir: "{{ zuul.projects['opendev.org/zuul/zuul-operator'].src_dir }}"
args:
chdir: "{{ zuul_work_dir | default( '~' + ansible_user + '/zuul-operator') }}"
command: make image
- name: Ensure there is no zuul operator tar
become: true
file:
path: /tmp/zuul-operator.tar
state: absent
- name: Save zuul operator image
become: true
shell: |
podman save --output /tmp/zuul-operator.tar quay.io/zuul-ci/zuul-operator
- name: Set proper permissions for the image
file:
path: /tmp/zuul-operator.tar
state: file
mode: "0644"
- name: Send image to Minikube
shell: >
scp
-o StrictHostKeyChecking=no
-i ~/.minikube/machines/minikube/id_rsa
/tmp/zuul-operator.tar docker@$(/tmp/minikube ip):/tmp/zuul-operator.tar
- name: Get minikube ip address
command: /tmp/minikube ip
register: _minikube_ip
- name: Create playbook to load image inside the Minikube
copy:
content: |
---
- name: Set custom config
hosts: all
tasks:
- name: Build image inside minikube
become: true
shell: |
podman load --input /tmp/zuul-operator.tar
dest: restore-zuul-operator.yaml
- name: Run playbook to restore zuul operator
ansible.builtin.command: >-
ansible-playbook -i {{ _minikube_ip.stdout }},
-u docker
-e "ansible_ssh_private_key_file=~/.minikube/machines/minikube/id_rsa"
-e "ansible_ssh_common_args='-o StrictHostKeyChecking=no'"
restore-zuul-operator.yaml
# https://github.com/containers/image/blob/main/docs/containers-registries.conf.5.md
# Failed to inspect image "zookeeper:3.5.5": rpc error: code = Unknown desc = short-name "zookeeper:3.5.5"
# did not resolve to an alias and no unqualified-search registries are defined in "/etc/containers/registries.conf"
#
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# !!!WARNING!!! Issue is not on the host where minikube is running, but !
# !!! inside the minikube. !
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
#
# tl;dr: connect to minikube:
# - ssh -i ~/.minikube/machines/minikube/id_rsa docker@$(minikube ip)
# and add:
# unqualified-search-registries = ["docker.io", "quay.io"]
# compat_api_enforce_docker_hub = true
# into /etc/containers/registries.conf
- name: Create unqualified-search registries inside Minikube
block:
- name: Install ansible-core
become: true
package:
name: ansible-core
state: present
- name: Create playbook that will be executed inside the Minikube
copy:
content: |
---
- name: Set custom config
hosts: all
become: true
tasks:
- name: Overwrite config
copy:
content: |
unqualified-search-registries = ["docker.io", "quay.io"]
compat_api_enforce_docker_hub = true
dest: /etc/containers/registries.conf
- name: Restart crio
systemd:
name: crio
state: restarted
dest: reconfigure-minikube.yaml
- name: Get minikube ip address
command: /tmp/minikube ip
register: _minikube_ip
- name: Change the systemd parameters env params for Kubelet
ansible.builtin.command: >-
ansible-playbook -i {{ _minikube_ip.stdout }},
-u docker
-e "ansible_ssh_private_key_file=~/.minikube/machines/minikube/id_rsa"
-e "ansible_ssh_common_args='-o StrictHostKeyChecking=no'"
reconfigure-minikube.yaml
- name: Install openshift client for k8s tasks
command: python3 -m pip install --user openshift
- name: Install websocket

View File

@ -1,8 +1,64 @@
---
- name: Create static node image
become: true
args:
chdir: "{{ zuul_work_dir }}/playbooks/zuul-operator-functional/static-node"
command: podman build . -t static-node
chdir: "{{ zuul_work_dir | default('~' + ansible_user + '/zuul-operator') }}/playbooks/zuul-operator-functional/static-node"
command: podman build . -t localhost/static-node:latest
# We cannot build the image inside Minikube when using podman:
# https://github.com/containers/buildah/issues/2175
# In that case, we will use podman save && podman load
# FIXME: add condition to ensure that it is a minikube
- name: Synchronize static-node image with Minikube container
block:
- name: Ensure there is no static node tar
become: true
file:
path: /tmp/static-node.tar
state: absent
- name: Save static-node image
become: true
shell: |
podman save --output /tmp/static-node.tar localhost/static-node
- name: Set proper permissions for the image
file:
path: /tmp/static-node.tar
state: file
mode: "0644"
- name: Send image to Minikube
shell: >
scp
-o StrictHostKeyChecking=no
-i ~/.minikube/machines/minikube/id_rsa
/tmp/static-node.tar docker@$(/tmp/minikube ip):/tmp/static-node.tar
- name: Create playbook to load image inside the Minikube
copy:
content: |
---
- name: Set custom config
hosts: all
tasks:
- name: Build image inside minikube
become: true
shell: |
podman load --input /tmp/static-node.tar
dest: restore-node-image.yaml
- name: Get minikube ip address
command: /tmp/minikube ip
register: _minikube_ip
- name: Run playbook that will restore static node image
ansible.builtin.shell: >
ansible-playbook -i {{ _minikube_ip.stdout }},
-u docker
-e "ansible_ssh_private_key_file=~/.minikube/machines/minikube/id_rsa"
-e "ansible_ssh_common_args='-o StrictHostKeyChecking=no'"
restore-node-image.yaml
- name: Run static node
k8s:
@ -17,7 +73,7 @@
spec:
containers:
- name: node
image: static-node
image: localhost/static-node:latest
imagePullPolicy: Never
lifecycle:
postStart:

View File

@ -12,7 +12,7 @@
register: git_root
- name: get cluster ip
command: microk8s kubectl get node -o json | jq '.items[].status.addresses[] | select(.type=="InternalIP") | .address'
shell: kubectl get node -o json | jq '.items[].status.addresses[] | select(.type=="InternalIP") | .address'
register: _cluster_ip
- name: set cluster ip

View File

@ -98,7 +98,7 @@ class PXC:
obj.delete(propagation_policy="Foreground")
dburi = f'mysql+pymysql://zuul:{zuul_pw}@db-cluster-haproxy/zuul'
dburi = f'mysql+pymysql://zuul:{zuul_pw}@db-cluster-pxc/zuul'
utils.update_secret(self.api, self.namespace, 'zuul-db',
string_data={'dburi': dburi})

View File

@ -7,11 +7,11 @@ spec:
spec:
containers:
- name: mysql
image: percona:8.0
image: docker.io/percona:8.0
command:
- "mysql"
- "-h"
- "db-cluster-haproxy"
- "db-cluster-pxc"
- "-uroot"
- "-p{{ root_password }}"
- "mysql"
@ -19,4 +19,3 @@ spec:
- "create database if not exists zuul; create user if not exists 'zuul'@'%'; alter user 'zuul'@'%' identified by '{{ zuul_password }}'; grant all on zuul.* TO 'zuul'@'%'; flush privileges;"
restartPolicy: Never
backoffLimit: 4