Add a non-voting metal3 CI job

We'll use only one node since we're limited in resources, but that
should suffice for a basic test.

Change-Id: I8e845d46ba0e13027aa1e628b0ad45eb24f9b387
Dmitry Tantsur 2022-11-07 13:39:59 +01:00 committed by Riccardo Pittau
parent 17d97d51ad
commit a6d87a608c
6 changed files with 321 additions and 0 deletions

playbooks/metal3-ci/fetch_kube_logs.yaml (new file)

@@ -0,0 +1,32 @@
---
- name: Create the target directory
file:
path: "{{ logs_management_cluster }}/{{ namespace }}"
state: directory
- name: Fetch pods list
command: kubectl get pods -n "{{ namespace }}" -o json
ignore_errors: true
register: pods_result
- block:
- name: Save the pods list
copy:
dest: "{{ logs_management_cluster }}/{{ namespace }}/pods.yaml"
content: "{{ pods_result.stdout }}"
- name: Set pod names
set_fact:
pods: "{{ pods_result.stdout | from_json | json_query('items[*].metadata.name') }}"
- include_tasks: fetch_pod_logs.yaml
loop: "{{ pods }}"
loop_control:
loop_var: pod
when: pods_result is succeeded
- name: Fetch secrets
shell: |
kubectl get secrets -n "{{ namespace }}" -o yaml \
> "{{ logs_management_cluster }}/{{ namespace }}/secrets.yaml"
ignore_errors: true
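
The "Set pod names" task above relies on json_query, which needs the jmespath Python
library on the node running the playbook. If that dependency is a concern, the same
list can be built with core Jinja2 filters; a minimal sketch, assuming the same
pods_result variable registered above:

- name: Set pod names (jmespath-free sketch)
  set_fact:
    pods: "{{ (pods_result.stdout | from_json)['items'] | map(attribute='metadata.name') | list }}"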

playbooks/metal3-ci/fetch_pod_logs.yaml (new file)

@@ -0,0 +1,24 @@
---
- name: Create the target directory
file:
path: "{{ logs_management_cluster }}/{{ namespace }}/{{ pod }}"
state: directory
- name: Fetch pod information
command: kubectl get pod -n "{{ namespace }}" -o json "{{ pod }}"
register: pod_result
- name: Process pod JSON
set_fact:
pod_json: "{{ pod_result.stdout | from_json }}"
- name: Set container names
set_fact:
containers: "{{ pod_json.spec.containers | map(attribute='name') | list }}"
init_containers: "{{ pod_json.spec.initContainers | default([]) | map(attribute='name') | list }}"
- name: Fetch container logs
shell: |
kubectl logs -n "{{ namespace }}" "{{ pod }}" "{{ item }}" \
> "{{ logs_management_cluster }}/{{ namespace }}/{{ pod }}/{{ item }}.log" 2>&1
loop: "{{ containers + init_containers }}"

playbooks/metal3-ci/post.yaml (new file)

@@ -0,0 +1,194 @@
---
- hosts: all
tasks:
- name: Set the logs root
set_fact:
logs_root: "{{ ansible_user_dir }}/metal3-logs"
- name: Set log locations and containers
set_fact:
logs_before_pivoting: "{{ logs_root }}/before_pivoting"
logs_after_pivoting: "{{ logs_root }}/after_pivoting"
logs_management_cluster: "{{ logs_root }}/management_cluster"
containers:
- dnsmasq
- httpd-infra
- ironic
- ironic-endpoint-keepalived
- ironic-inspector
- ironic-log-watch
- registry
- sushy-tools
- vbmc
namespaces:
- baremetal-operator-system
- capi-system
- metal3
- name: Create log locations
file:
path: "{{ item }}"
state: directory
loop:
- "{{ logs_before_pivoting }}"
- "{{ logs_after_pivoting }}"
- "{{ logs_management_cluster }}"
- "{{ logs_root }}/libvirt"
- "{{ logs_root }}/system"
- name: Check if the logs before pivoting were stored
stat:
path: /tmp/docker
register: before_pivoting_result
- name: Copy logs before pivoting
copy:
src: /tmp/docker/
dest: "{{ logs_before_pivoting }}/"
remote_src: true
when: before_pivoting_result.stat.exists
- name: Set log location for containers (pivoting happened)
set_fact:
container_logs: "{{ logs_after_pivoting }}"
when: before_pivoting_result.stat.exists
- name: Set log location for containers (no pivoting)
set_fact:
container_logs: "{{ logs_before_pivoting }}"
when: not before_pivoting_result.stat.exists
- name: Fetch current container logs
shell: >
docker logs "{{ item }}" > "{{ container_logs }}/{{ item }}.log" 2>&1
become: true
ignore_errors: true
loop: "{{ containers }}"
- name: Fetch libvirt networks
shell: >
virsh net-dumpxml "{{ item }}" > "{{ logs_root }}/libvirt/net-{{ item }}.xml"
become: true
ignore_errors: true
loop:
- baremetal
- provisioning
- name: Fetch libvirt VMs
shell: |
for vm in $(virsh list --name --all); do
virsh dumpxml "$vm" > "{{ logs_root }}/libvirt/vm-$vm.xml"
done
become: true
ignore_errors: true
- name: Fetch system information
shell: "{{ item }} > {{ logs_root }}/system/{{ item | replace(' ', '-') }}.txt"
become: true
ignore_errors: true
loop:
- dmesg
- dpkg -l
- ip addr
- ip route
- iptables -L -v -n
- journalctl -b -o with-unit
- journalctl -u libvirtd
- pip freeze
- docker images
- docker ps --all
- systemctl
- name: Copy libvirt logs
copy:
src: /var/log/libvirt/qemu/
dest: "{{ logs_root }}/libvirt/"
remote_src: true
become: true
- name: Check if we have a cluster
command: kubectl cluster-info
ignore_errors: true
register: kubectl_result
- include_tasks: fetch_kube_logs.yaml
loop: "{{ namespaces }}"
loop_control:
loop_var: namespace
when: kubectl_result is succeeded
- name: Collect kubernetes resources
shell: |
kubectl get "{{ item }}" -A -o yaml > "{{ logs_management_cluster }}/{{ item }}.yaml"
loop:
- baremetalhosts
- clusters
- endpoints
- hostfirmwaresettings
- machines
- metal3ipaddresses
- metal3ippools
- metal3machines
- nodes
- pods
- preprovisioningimages
- services
ignore_errors: true
when: kubectl_result is succeeded
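    # A possible addition (sketch only, same pattern as the task above): cluster events
    # are cheap to collect and often explain why provisioning got stuck.
    # - name: Collect cluster events
    #   shell: |
    #     kubectl get events -A > "{{ logs_management_cluster }}/events.txt"
    #   ignore_errors: true
    #   when: kubectl_result is succeeded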
# FIXME(dtantsur): this is horrible, do something about it
    - name: Fetch kubelet status logs from the master as user metal3
shell: |
ssh -vvv -o StrictHostKeyChecking=accept-new metal3@192.168.111.100 "sudo systemctl status kubelet" > "{{ logs_root }}/kubelet-0-metal3-status.log"
ignore_errors: true
register: kubelet0metal3status
- debug:
var: kubelet0metal3status.stdout_lines
- debug:
var: kubelet0metal3status.stderr_lines
    - name: Fetch kubelet journal logs from the master as user metal3
shell: |
ssh -vvv -o StrictHostKeyChecking=accept-new metal3@192.168.111.100 "sudo journalctl -xeu kubelet" > "{{ logs_root }}/kubelet-0-metal3-journal.log"
ignore_errors: true
register: kubelet0metal3journal
- debug:
var: kubelet0metal3journal.stdout_lines
- debug:
var: kubelet0metal3journal.stderr_lines
    - name: Fetch kubelet status logs from the master as user zuul
shell: |
ssh -vvv -o StrictHostKeyChecking=accept-new zuul@192.168.111.100 "sudo systemctl status kubelet" > "{{ logs_root }}/kubelet-0-zuul-status.log"
ignore_errors: true
register: kubelet0zuulstatus
- debug:
var: kubelet0zuulstatus.stdout_lines
- debug:
var: kubelet0zuulstatus.stderr_lines
    - name: Fetch kubelet journal logs from the master as user zuul
shell: |
ssh -vvv -o StrictHostKeyChecking=accept-new zuul@192.168.111.100 "sudo journalctl -xeu kubelet" > "{{ logs_root }}/kubelet-0-zuul-journal.log"
ignore_errors: true
register: kubelet0zuuljournal
- debug:
var: kubelet0zuuljournal.stdout_lines
- debug:
var: kubelet0zuuljournal.stderr_lines
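    # The FIXME above could eventually be addressed by registering the control plane
    # node in the inventory instead of shelling out to ssh; a rough sketch, assuming
    # the node stays reachable as metal3@192.168.111.100 (host name is hypothetical):
    # - name: Add the control plane node to the inventory
    #   add_host:
    #     name: metal3-master-0
    #     ansible_host: 192.168.111.100
    #     ansible_user: metal3
    #     ansible_ssh_common_args: "-o StrictHostKeyChecking=accept-new"
    # - name: Fetch the kubelet journal as a regular task
    #   command: journalctl -xeu kubelet
    #   become: true
    #   delegate_to: metal3-master-0
    #   ignore_errors: true
    #   register: kubelet_journal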
- name: Copy logs to the zuul location
synchronize:
src: "{{ logs_root }}/"
dest: "{{ zuul.executor.log_root }}/{{ inventory_hostname }}/"
mode: pull
become: true

playbooks/metal3-ci/run.yaml (new file)

@@ -0,0 +1,39 @@
---
- hosts: all
tasks:
- name: Define the metal3 variables
set_fact:
metal3_dev_env_src_dir: '{{ ansible_user_dir }}/metal3-dev-env'
metal3_environment:
CONTROL_PLANE_MACHINE_COUNT: 1
IMAGE_OS: ubuntu
IMAGE_USERNAME: zuul
# NOTE(dtantsur): we don't have enough resources to provision even
# a 2-node cluster, so only provision a control plane node.
NUM_NODES: 2
WORKER_MACHINE_COUNT: 1
# TODO(dtantsur): add metal3-io/metal3-dev-env as a recognized project to
# https://opendev.org/openstack/project-config/src/commit/e15b9cae77bdc243322cee64b3688a2a43dd193c/zuul/main.yaml#L1416
# TODO(dtantsur): replace my fork with the upstream source once all fixes
# merge there.
# TODO(rpittau): move back to dtantsur or metal3-io after we merge the changes
- name: Clone metal3-dev-env
git:
dest: "{{ metal3_dev_env_src_dir }}"
repo: "https://github.com/elfosardo/metal3-dev-env"
version: ironic-ci
- name: Build a metal3 environment
command: make
args:
chdir: "{{ metal3_dev_env_src_dir }}"
environment: "{{ metal3_environment }}"
    # NOTE(rpittau) skip the tests for the time being, they require the presence of
    # 2 nodes, 1 control plane plus 1 worker
# - name: Run metal3 tests
# command: make test
# args:
# chdir: "{{ metal3_dev_env_src_dir }}"
# environment: "{{ metal3_environment }}"

zuul.d/metal3-jobs.yaml (new file)

@@ -0,0 +1,30 @@
- job:
name: metal3-base
abstract: true
description: Base job for metal3-dev-env based ironic jobs.
nodeset: openstack-single-node-jammy
run: playbooks/metal3-ci/run.yaml
post-run: playbooks/metal3-ci/post.yaml
timeout: 10800
required-projects:
- opendev.org/openstack/ironic
- opendev.org/openstack/ironic-inspector
irrelevant-files:
- ^.*\.rst$
- ^api-ref/.*$
- ^doc/.*$
- ^driver-requirements.txt$
- ^install-guide/.*$
- ^ironic/locale/.*$
- ^ironic/tests/.*$
- ^ironic_inspector/locale/.*$
- ^ironic_inspector/test/.*$
- ^releasenotes/.*$
- ^setup.cfg$
- ^test-requirements.txt$
- ^tox.ini$
- job:
name: metal3-integration
description: Run metal3 CI on ironic.
parent: metal3-base

zuul.d/project.yaml

@@ -57,6 +57,8 @@
voting: false
- bifrost-benchmark-ironic:
voting: false
- metal3-integration:
voting: false
gate:
jobs:
- ironic-tox-unit-with-driver-libs
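
Once the job has proven stable, a natural follow-up is to drop the voting: false flag
and add the job to the gate pipeline as well; a sketch of what the relevant part of the
project config might then look like (assuming the gate jobs are listed the same way as
the check jobs):

    check:
      jobs:
        - metal3-integration
    gate:
      jobs:
        - metal3-integration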