[tests] migrate tests to k8s

Migrate tests from microk8s to ck8s.
Bootstrap a controller on a manual cloud, and add ck8s to available
clouds.

Upgrade Juju to 3.5.

Configure the ephemeral device when available, and configure k8s to use
it for local storage.
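
For reviewers, the rough end-to-end flow the new roles drive on a test
node is sketched below; the exact flags and paths live in the roles
added in this change:

    # bootstrap a Juju controller on the node itself via the manual cloud
    juju add-cloud --client manual-cloud -f ~/clouds.yaml
    juju bootstrap --config caas-image-repo="ghcr.io/juju" manual-cloud manual
    # register the k8s snap as a cloud on that controller
    sudo k8s config | juju add-k8s k8s --controller manual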

Change-Id: Ief491f8b339307f0c43d11639336b02d9f6479b4
Signed-off-by: Guillaume Boutry <guillaume.boutry@canonical.com>
Guillaume Boutry 2024-07-15 10:56:59 +02:00
parent 36cf947244
commit 0bda4738e3
22 changed files with 405 additions and 213 deletions

View File

@ -1,3 +1,3 @@
# This file is used to trigger a build.
# Change uuid to trigger a new build.
c3b9c7c9-2bd4-4df1-a1df-89c729b34eb6
b15a0dee-c765-4e23-b950-8eb6f8afc84f

View File

@ -0,0 +1,3 @@
- hosts: all
roles:
- configure-ephemeral

View File

@ -3,7 +3,7 @@
- ensure-tox
- role: use-docker-mirror
when: env_type == 'k8s'
- role: microk8s-cloud
- role: manual-cloud
when: env_type == 'k8s'
- role: lxd-cloud
when: env_type == 'lxd'

View File

@ -16,7 +16,7 @@
apt:
name: nftables
become: true
when: ansible_distribution_release == 'jammy'
when: ansible_distribution_release in ('jammy', 'noble')
- name: lxd snap is installed
snap:
@ -31,7 +31,7 @@
- name: allow packets from lxd bridge
command: nft insert rule filter openstack-INPUT iif lxdbr0 accept
become: true
when: ansible_distribution_release == 'jammy'
when: ansible_distribution_release in ('jammy', 'noble')
- name: current user is in lxd group
user:

View File

@ -1,25 +1,28 @@
- name: collect microk8s inspection report
- name: collect k8s logs
args:
executable: /bin/bash
shell: |
cp /var/snap/microk8s/current/inspection-report-*.tar.gz "{{ zuul.project.src_dir }}/log/"
failed_when: false
- name: collect microk8s logs
args:
executable: /bin/bash
shell: |
snap logs -n all microk8s > "{{ zuul.project.src_dir }}/log/microk8s-logs.txt"
snap logs -n all k8s > "{{ zuul.project.src_dir }}/log/k8s-logs.txt"
become: true
failed_when: false
- name: debug describe pods
- name: get main resources state
args:
executable: /bin/bash
shell: |
k8s kubectl get all --all-namespaces > "{{ zuul.project.src_dir }}/log/k8s-get-all.txt"
become: true
failed_when: false
- name: debug describe resources
args:
executable: /bin/bash
shell: |
set -o pipefail
sudo k8s kubectl describe nodes > {{ zuul.project.src_dir }}/log/describe-nodes.txt
sudo k8s kubectl describe pods -n kube-system > {{ zuul.project.src_dir }}/log/describe-pods-kube-system.txt
MODEL="$(juju models --format=json | jq -r '.models[]["short-name"]' | grep '^zaza-')"
microk8s.kubectl describe -n $MODEL pods > {{ zuul.project.src_dir }}/log/describe-pods.txt
CONTROLLER_MODEL="$(microk8s.kubectl get ns | grep controller | awk '{print $1}')"
microk8s.kubectl describe -n $CONTROLLER_MODEL pods > {{ zuul.project.src_dir }}/log/describe-controller-pods.txt
sudo k8s kubectl describe -n $MODEL pods > {{ zuul.project.src_dir }}/log/describe-pods.txt
CONTROLLER_MODEL="$(sudo k8s kubectl get ns | grep controller | awk '{print $1}')"
sudo k8s kubectl describe -n $CONTROLLER_MODEL pods > {{ zuul.project.src_dir }}/log/describe-controller-pods.txt
exit 0
- name: Collect var logs
args:
@ -30,7 +33,7 @@
UNITS=$(juju status --format oneline | awk '{print $2}' | sed -e 's!:!!' | grep -Ev '^$' | paste -s -d' ')
for UNIT_NAME in $UNITS; do
POD_NAME=$(echo $UNIT_NAME | sed -e 's!/!-!')
CONTAINERS=$(microk8s.kubectl get pods -n $MODEL_NAME $POD_NAME -o jsonpath='{.spec.containers[*].name}' | sed -e 's/charm //')
CONTAINERS=$(sudo k8s kubectl get pods -n $MODEL_NAME $POD_NAME -o jsonpath='{.spec.containers[*].name}' | sed -e 's/charm //')
for CONTAINER in $CONTAINERS; do
juju ssh --container $CONTAINER -m $MODEL_NAME $UNIT_NAME "tar zcf /tmp/logs.tgz /var/log/"
juju scp --container $CONTAINER -m $MODEL_NAME $UNIT_NAME:/tmp/logs.tgz {{ zuul.project.src_dir }}/log/$POD_NAME-$CONTAINER.tgz
@ -42,10 +45,11 @@
shell: |
set -o pipefail
LOG_FOLDER={{ zuul.project.src_dir }}/log/pods/
MODEL_NAME=$(juju models --format=json | jq -r '.models[]["short-name"]' | grep '^zaza-')
mkdir -p $LOG_FOLDER
for pod in $(microk8s.kubectl get pods -n $MODEL_NAME -o=jsonpath='{.items[*].metadata.name}');
sudo k8s kubectl logs -n kube-system deployment/coredns --all-containers > $LOG_FOLDER/coredns.log
MODEL_NAME=$(juju models --format=json | jq -r '.models[]["short-name"]' | grep '^zaza-')
for pod in $(sudo k8s kubectl get pods -n $MODEL_NAME -o=jsonpath='{.items[*].metadata.name}');
do
echo Collecting logs: $pod
microk8s.kubectl logs --ignore-errors -n $MODEL_NAME --all-containers $pod > $LOG_FOLDER/$pod.log
sudo k8s kubectl logs --ignore-errors -n $MODEL_NAME --all-containers $pod > $LOG_FOLDER/$pod.log
done

View File

@ -8,6 +8,10 @@
path: "{{ zuul.project.src_dir }}/log"
state: directory
mode: 0755
- name: collect disk usage
shell: df -h > {{ zuul.project.src_dir }}/log/df.txt
- name: collect mount list
shell: mount > {{ zuul.project.src_dir }}/log/mount.txt
- name: debug logs replay
args:
executable: /bin/bash

View File

@ -0,0 +1,64 @@
- name: Set partition names
ansible.builtin.set_fact:
opt_partition: "{{ ephemeral_device }}1"
- name: Ensure ephemeral device is unmounted
become: true
ansible.posix.mount:
name: "{{ ephemeral_device }}"
state: "{{ item }}"
with_items:
- unmounted
- absent
- name: Get existing partitions
become: true
community.general.parted:
device: "{{ ephemeral_device }}"
unit: MiB
register: ephemeral_partitions
- name: Remove any existing partitions
become: true
community.general.parted:
device: "{{ ephemeral_device }}"
number: "{{ item.num }}"
state: absent
with_items:
- "{{ ephemeral_partitions.partitions }}"
- name: Create new disk label
become: true
community.general.parted:
label: msdos
device: "{{ ephemeral_device }}"
- name: Create opt partition
become: true
community.general.parted:
device: "{{ ephemeral_device }}"
number: 1
state: present
part_start: "0%"
part_end: "100%"
- name: Create /opt filesystem
become: true
community.general.filesystem:
fstype: ext4
# The default ratio is 16384 bytes per inode or so. Reduce that to 8192
# bytes per inode so that we get roughly twice the number of inodes as
# by default. This should still be well above the block size of 4096.
# We do this because we have found in at least a couple locations that
# more inodes is useful and is painful to fix after the fact.
opts: -i 8192
dev: "{{ opt_partition }}"
- name: Add opt to fstab and mount
become: true
ansible.posix.mount:
path: /opt
src: "{{ opt_partition }}"
fstype: ext4
opts: noatime
state: mounted
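
A quick way to sanity-check the result of these tasks on a node (a
manual sketch, not part of the role):

    # confirm /opt is mounted from the ephemeral partition, and that the
    # denser inode layout requested above took effect
    findmnt /opt
    df -h /opt
    df -i /opt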

View File

@ -0,0 +1,15 @@
# From https://opendev.org/openstack/openstack-zuul-jobs/src/commit/7f5a075f3de7eac295094258cbd309a41d8ac798/roles/configure-swap/tasks/main.yaml
# On RAX hosts, we have a small root partition and a large,
# unallocated ephemeral device attached at /dev/xvde
- name: Set ephemeral device if /dev/xvde exists
when: ansible_devices["xvde"] is defined
ansible.builtin.set_fact:
ephemeral_device: "/dev/xvde"
- name: Configure ephemeral device
ansible.builtin.include_tasks: ephemeral.yaml
when: ephemeral_device is defined
- name: Debug the ephemeral_device variable
ansible.builtin.debug:
var: ephemeral_device
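
To see whether a given node actually carries the unallocated device the
role looks for (a sketch; /dev/xvde is only present on RAX-style hosts,
otherwise ephemeral_device stays undefined and the include is skipped):

    lsblk -o NAME,SIZE,TYPE,MOUNTPOINT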

View File

@ -0,0 +1,43 @@
- name: Ensure k8s is bootstrapped
ansible.builtin.include_role:
name: k8s
- name: Juju is installed
community.general.snap:
name: juju
classic: "{{ juju_classic_mode | default(false) }}"
channel: "{{ juju_channel | default('latest/stable') }}"
become: true
- name: Ensure ~/.local/share directory exists
ansible.builtin.file:
path: ~/.local/share
state: directory
mode: '0755'
- name: Juju is bootstrapped on k8s
ansible.builtin.command:
cmd: juju bootstrap --config bootstrap-timeout=600 --config caas-image-repo="ghcr.io/juju" k8s k8s
register: res
retries: 3
delay: 10
until: >
"Bootstrap complete" in res.stderr or
"already exists" in res.stderr
changed_when: '"already exists" not in res.stderr'
failed_when: '"ERROR" in res.stderr and "already exists" not in res.stderr'
- name: Current juju controller is k8s
ansible.builtin.command:
cmd: juju switch k8s
register: res
changed_when: '"no change" not in res.stderr'
- name: Collect snap versions
ansible.builtin.command: snap list
register: snap_out
changed_when: false
- name: Show snap versions
ansible.builtin.debug:
msg: "{{ snap_out.stdout }}"

View File

@ -0,0 +1,5 @@
k8s_channel: latest/stable
k8s_classic_mode: false
k8s_load_balancer_cidr: 10.170.0.248/29
k8s_pod_cidr: 10.1.0.0/16
k8s_host_ip: "{{ ansible_default_ipv4.address }}"

roles/k8s/tasks/main.yaml (new file)
View File

@ -0,0 +1,74 @@
- name: Snapd is installed
ansible.builtin.apt:
name: snapd
become: true
- name: Nftables is installed
ansible.builtin.apt:
name: nftables
become: true
when:
- ansible_distribution_release in ('jammy', 'noble')
- nftables_enabled | default(true) | bool
- name: Allow packets from pod cidr
ansible.builtin.command: nft insert rule filter openstack-INPUT ip saddr {{ k8s_pod_cidr }} accept
become: true
changed_when: false
when:
- ansible_distribution_release in ('jammy', 'noble')
- nftables_enabled | default(true) | bool
- name: Allow packets to pod cidr
ansible.builtin.command: nft insert rule filter openstack-INPUT ip daddr {{ k8s_pod_cidr }} accept
become: true
changed_when: false
when:
- ansible_distribution_release in ('jammy', 'noble')
- nftables_enabled | default(true) | bool
- name: Allow packets to metallb cidr
ansible.builtin.command: nft insert rule filter openstack-INPUT ip daddr {{ k8s_load_balancer_cidr }} accept
become: true
changed_when: false
when:
- ansible_distribution_release in ('jammy', 'noble')
- nftables_enabled | default(true) | bool
- name: Ensure k8s is installed
community.general.snap:
name: k8s
channel: '{{ k8s_channel }}'
classic: '{{ k8s_classic_mode }}'
become: true
- name: Template docker.io registry
ansible.builtin.include_tasks: registry.yaml
when: docker_mirror is defined
vars:
reg_server_name: docker.io
reg_server: https://docker.io
reg_mirror_location: '{{ docker_mirror }}'
- name: Template k8s bootstrap configuration
ansible.builtin.template:
src: k8s-bootstrap.yaml.j2
dest: k8s-bootstrap.yaml
mode: '0644'
become: true
- name: Bootstrap k8s
ansible.builtin.command:
cmd: k8s bootstrap --file k8s-bootstrap.yaml --timeout 300s --address {{ k8s_host_ip }}
become: true
register: res
failed_when: res.rc != 0 and "already part of a cluster" not in res.stderr
changed_when: res.rc == 0 and "Bootstrapped a new Kubernetes cluster" in res.stdout
- name: Wait for k8s readiness
ansible.builtin.command:
cmd: k8s status --wait-ready --timeout 300s
register: res
become: true
changed_when: false
failed_when: 'res.rc != 0 or "status: ready" not in res.stdout'
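
A hedged sketch of confirming the cluster came up as the tasks above
expect (the first command mirrors the readiness check already used in
the role):

    sudo k8s status --wait-ready --timeout 300s
    sudo k8s kubectl get nodes -o wide
    sudo k8s kubectl get pods -n kube-system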

View File

@ -0,0 +1,22 @@
- name: Create hosts dir
ansible.builtin.file:
path: /var/snap/k8s/common/etc/containerd/hosts.d/{{ reg_server_name }}
state: directory
owner: root
group: root
mode: "0770"
become: true
- name: Render registry mirror template
ansible.builtin.template:
src: hosts.j2
dest: /var/snap/k8s/common/etc/containerd/hosts.d/{{ reg_server_name }}/hosts.toml
group: "root"
mode: "0660"
become: true
- name: Display hosts.toml
ansible.builtin.command:
cmd: cat /var/snap/k8s/common/etc/containerd/hosts.d/{{ reg_server_name }}/hosts.toml
changed_when: false
become: true

View File

@ -0,0 +1,4 @@
server = "{{ reg_server }}"
[host."{{ reg_mirror_location }}"]
capabilities = ["pull", "resolve"]

View File

@ -0,0 +1,23 @@
cluster-config:
network:
enabled: true
dns:
enabled: true
upstream-nameservers:
- 8.8.8.8
load-balancer:
enabled: true
cidrs:
- {{ k8s_load_balancer_cidr }}
l2-mode: true
local-storage:
enabled: true
default: true
local-path: /opt/rawfile-storage
ingress:
enabled: false
gateway:
enabled: false
metrics-server:
enabled: false
pod-cidr: {{ k8s_pod_cidr }}
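
Once bootstrapped with this file, the enabled features can be checked
through kubectl (a sketch; the storage class name is whatever the
local-storage feature registers, not something this template sets):

    sudo k8s kubectl get storageclass
    sudo k8s kubectl -n kube-system get svc,deploy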

View File

@ -16,7 +16,7 @@
apt:
name: nftables
become: true
when: ansible_distribution_release == 'jammy'
when: ansible_distribution_release in ('jammy', 'noble')
- name: lxd is installed
snap:
@ -41,7 +41,7 @@
- name: allow packets from lxd bridge
command: nft insert rule filter openstack-INPUT iif lxdbr0 accept
become: true
when: ansible_distribution_release == 'jammy'
when: ansible_distribution_release in ('jammy', 'noble')
- name: lxd is running and ready
command:
@ -50,7 +50,7 @@
- name: juju is installed
snap:
name: juju
classic: "{{ juju_classic_mode | default(true) }}"
classic: "{{ juju_classic_mode | default(false) }}"
channel: "{{ juju_channel | default('latest/stable') }}"
become: true

View File

@ -0,0 +1,101 @@
- name: Snapd is installed
ansible.builtin.apt:
name: snapd
become: true
- name: Nftables is installed
ansible.builtin.apt:
name: nftables
become: true
when:
- ansible_distribution_release in ('jammy', 'noble')
- nftables_enabled | default(true) | bool
- name: Ensure localhost is a trusted ssh host
ansible.builtin.shell:
cmd: ssh-keyscan -H {{ ansible_default_ipv4.address }} >> ~/.ssh/known_hosts
args:
creates: ~/.ssh/known_hosts
- name: Ensure localhost SSH key exists
ansible.builtin.command:
cmd: ssh-keygen -b 4096 -f $HOME/.ssh/id_rsa -t rsa -N ""
args:
creates: ~/.ssh/id_rsa
- name: Ensure ssh public key is added to authorized_keys
ansible.builtin.shell:
cmd: cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys
changed_when: false
- name: Juju is installed
community.general.snap:
name: juju
classic: false
channel: "{{ juju_channel | default('latest/stable') }}"
become: true
- name: Ensure ~/.local/share directory exists
ansible.builtin.file:
path: ~/.local/share
state: directory
mode: '0755'
- name: Ensure the clouds definition is templated
ansible.builtin.template:
src: clouds.yaml.j2
dest: ~/clouds.yaml
mode: '0644'
vars:
host_ip: '{{ ansible_default_ipv4.address }}'
- name: Ensure manual-cloud is added to Juju
ansible.builtin.command:
cmd: juju add-cloud --client manual-cloud -f ~/clouds.yaml
register: res
changed_when: '"already exists" not in res.stderr'
failed_when: '"ERROR" in res.stderr and "already exists" not in res.stderr'
- name: Ensure a juju controller is bootstrapped on manual-cloud
ansible.builtin.command:
cmd: juju bootstrap --config caas-image-repo="ghcr.io/juju" manual-cloud manual
register: res
changed_when: '"already exists" not in res.stderr'
failed_when: '"ERROR" in res.stderr and "already exists" not in res.stderr'
- name: Ensure the current juju controller is manual
ansible.builtin.command:
cmd: juju switch manual
register: res
changed_when: '"no change" not in res.stderr'
- name: Ensure k8s is bootstrapped
ansible.builtin.include_role:
name: k8s
- name: Ensure k8s is defined in juju clouds
ansible.builtin.shell:
cmd: set -o pipefail && sudo k8s config | juju add-k8s k8s --controller manual
executable: /bin/bash
register: res
changed_when: '"already exists" not in res.stderr'
failed_when: '"ERROR" in res.stderr and "already exists" not in res.stderr'
- name: Ensure zaza default cloud is k8s
ansible.builtin.copy:
content: |
cloud: k8s
credential: k8s
dest: '{{ ansible_env.HOME }}/.zaza.yaml'
mode: '0644'
owner: '{{ ansible_user }}'
when: env_type == 'k8s'
- name: Collect snap versions
ansible.builtin.command: snap list
register: snap_out
changed_when: false
- name: Show snap versions
ansible.builtin.debug:
msg: '{{ snap_out.stdout }}'
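
After this role runs, both the manual controller and the k8s cloud
should be registered for the test user; a sketch of a manual check:

    juju clouds --client
    juju controllers
    cat ~/.zaza.yaml   # only written when env_type == 'k8s'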

View File

@ -0,0 +1,4 @@
clouds:
manual-cloud:
type: manual
endpoint: {{ host_ip }}

View File

@ -1,176 +0,0 @@
- name: snapd is installed
apt:
name: snapd
become: true
- name: nftables is installed
apt:
name: nftables
become: true
when: ansible_distribution_release == 'jammy'
- name: allow packets from pod cir
command: nft insert rule filter openstack-INPUT ip saddr 10.1.0.0/16 accept
become: true
when: ansible_distribution_release == 'jammy'
- name: allow packets to pod cir
command: nft insert rule filter openstack-INPUT ip daddr 10.1.0.0/16 accept
become: true
when: ansible_distribution_release == 'jammy'
- name: allow packets to metallb cir
command: nft insert rule filter openstack-INPUT ip daddr 10.170.0.248/29 accept
become: true
when: ansible_distribution_release == 'jammy'
- name: set microk8s related variables
set_fact:
microk8s_group: "{{ 'microk8s' if microk8s_classic_mode | default(true) else 'snap_microk8s' }}"
microk8s_command_escalation: "{{ false if microk8s_classic_mode | default(true) else true }}"
- name: microk8s is installed
snap:
name: microk8s
classic: "{{ microk8s_classic_mode | default(true) }}"
channel: "{{ microk8s_channel | default('latest/stable') }}"
become: true
- name: current user is in microk8s group
user:
name: "{{ ansible_user }}"
groups: "{{ microk8s_group }}"
append: true
become: true
- name: reset ssh connection to apply permissions from new group
meta: reset_connection
- name: microk8s status
block:
- name: microk8s status
command:
cmd: microk8s status --wait-ready --timeout 300
rescue:
- name: microk8s inspect
command:
cmd: microk8s inspect
become: "{{ microk8s_command_escalation }}"
- name: microk8s status
command:
# second chance to get status
cmd: microk8s status
- name: Create docker.io certs dir
when:
- docker_mirror is defined
file:
path: /var/snap/microk8s/current/args/certs.d/docker.io
state: directory
owner: root
group: "{{ microk8s_group }}"
mode: '0770'
- name: Render microk8s registry mirror template
when:
- docker_mirror is defined
template:
src: hosts.j2
dest: /var/snap/microk8s/current/args/certs.d/docker.io/hosts.toml
group: "{{ microk8s_group }}"
vars:
mirror_location: "{{ docker_mirror }}"
server: https://docker.io
- name: Check docker.io hosts.toml
when:
- docker_mirror is defined
command:
cmd: cat /var/snap/microk8s/current/args/certs.d/docker.io/hosts.toml
- name: microk8s is started
command:
cmd: microk8s start
become: "{{ microk8s_command_escalation }}"
- name: microk8s is running and ready
command:
cmd: microk8s status --wait-ready
register: res
failed_when: '"is running" not in res.stdout'
- name: microk8s dns addon is enabled
command:
cmd: microk8s enable dns
register: res
changed_when: '"already enabled" not in res.stdout'
become: "{{ microk8s_command_escalation }}"
- name: microk8s hostpath storage addon is enabled
command:
cmd: microk8s enable hostpath-storage
register: res
changed_when: '"already enabled" not in res.stdout'
become: "{{ microk8s_command_escalation }}"
- name: microk8s metallb addon is enabled
command:
# ip range is an arbitrary choice; may need to be changed later
cmd: microk8s enable metallb:10.170.0.248/29
register: res
changed_when: '"already enabled" not in res.stdout'
become: "{{ microk8s_command_escalation }}"
- name: microk8s addons are ready
command:
cmd: microk8s status --format short
register: res
retries: 18
delay: 10 # 18 * 10 = 3 minutes
until: >
"core/dns: enabled" in res.stdout and
"core/hostpath-storage: enabled" in res.stdout and
"core/metallb: enabled" in res.stdout
changed_when: res.attempts > 1
- name: juju is installed
snap:
name: juju
classic: "{{ juju_classic_mode | default(true) }}"
channel: "{{ juju_channel | default('latest/stable') }}"
become: true
- name: Ensure ~/.local/share directory exist
file:
path: ~/.local/share
state: directory
- name: juju is bootstrapped on microk8s
command:
cmd: juju bootstrap --config bootstrap-timeout=600 --config caas-image-repo="public.ecr.aws/juju" microk8s microk8s
register: res
retries: 3
delay: 10
until: >
"Bootstrap complete" in res.stderr or
"already exists" in res.stderr
failed_when: '"ERROR" in res.stderr and "already exists" not in res.stderr'
- name: run microk8s inspect
command:
cmd: microk8s inspect
become: "{{ microk8s_command_escalation }}"
changed_when: false
- name: current juju controller is microk8s
command:
cmd: juju switch microk8s
register: res
changed_when: '"no change" not in res.stderr'
- name: Collect snap versions
command: snap list
register: snap_out
- name: Show snap versions
debug: msg="{{ snap_out.stdout }}"

View File

@ -1,4 +0,0 @@
server = "{{ server }}"
[host."{{ mirror_location }}"]
capabilities = ["pull", "resolve"]

View File

@ -26,6 +26,6 @@
with_items: "{{ relevant_charm_build_jobs }}"
- name: run smoke tests
command:
ansible.builtin.command:
cmd: "{{ tox_executable }} -e func -- --smoke --test-directory={{ test_dir }}"
chdir: "{{ zuul.project.src_dir }}"

View File

@ -328,6 +328,7 @@
description: |
Zaza smoke test for all the core sunbeam charms.
timeout: 3600
pre-run: playbooks/pre-run-func-test.yaml
run: playbooks/zaza-func-test.yaml
post-run: playbooks/collect-run-data.yaml
dependencies:
@ -379,6 +380,7 @@
description: |
Zaza tests for the tempest-k8s charm.
timeout: 3600
pre-run: playbooks/pre-run-func-test.yaml
run: playbooks/zaza-func-test.yaml
post-run: playbooks/collect-run-data.yaml
dependencies:
@ -422,6 +424,7 @@
Zaza smoke test for all the sunbeam charms that
requires storage/ceph.
timeout: 3600
pre-run: playbooks/pre-run-func-test.yaml
run: playbooks/zaza-func-test.yaml
post-run: playbooks/collect-run-data.yaml
dependencies:
@ -464,6 +467,7 @@
Zaza smoke test for magnum and dependent charms
like heat, octavia, barbican.
timeout: 3600
pre-run: playbooks/pre-run-func-test.yaml
run: playbooks/zaza-func-test.yaml
post-run: playbooks/collect-run-data.yaml
dependencies:
@ -505,6 +509,7 @@
Zaza smoke test for designate, desginate-bind,
keystone-ldap, openstack-exporter charms.
timeout: 3600
pre-run: playbooks/pre-run-func-test.yaml
run: playbooks/zaza-func-test.yaml
post-run: playbooks/collect-run-data.yaml
dependencies:
@ -543,6 +548,7 @@
description: |
Zaza smoke test for sunbeam-machine, sunbeam-clusterd charms.
timeout: 3600
pre-run: playbooks/pre-run-func-test.yaml
run: playbooks/zaza-func-test.yaml
post-run: playbooks/collect-run-data.yaml
dependencies:
@ -911,4 +917,4 @@
- rebuild
secrets:
- charmhub_token
timeout: 3600
timeout: 3600

View File

@ -7,22 +7,22 @@
check:
jobs:
- func-test-core:
nodeset: ubuntu-jammy
nodeset: ubuntu-noble
- func-test-ceph:
nodeset: ubuntu-jammy
nodeset: ubuntu-noble
- func-test-caas:
nodeset: ubuntu-jammy
nodeset: ubuntu-noble
- func-test-misc:
nodeset: ubuntu-jammy
nodeset: ubuntu-noble
- func-test-tempest:
nodeset: ubuntu-jammy
nodeset: ubuntu-noble
voting: false
vars:
juju_channel: 3.4/stable
juju_channel: 3.5/stable
juju_classic_mode: false
env_type: k8s
microk8s_channel: 1.28-strict/stable
microk8s_classic_mode: false
k8s_channel: 1.30-classic/candidate
k8s_classic_mode: true
charmcraft_channel: 2.x/stable
publish_charm: true
publish_channels: