Merge "Fix zuul settings to use latest kuryr-kubernetes"

This commit is contained in:
Zuul 2021-09-16 16:05:12 +00:00 committed by Gerrit Code Review
commit 9cc3cc1a34
4 changed files with 75 additions and 15 deletions

View File

@@ -308,10 +308,7 @@
- openstack/heat
- openstack/horizon
- openstack/keystone
# TODO(ueha): temporarily use the stable/wallaby branch to solve the FT error.
# After confirming that it works on the master branch, modify it.
- name: openstack/kuryr-kubernetes
override-branch: stable/wallaby
- openstack/kuryr-kubernetes
- openstack/mistral
- openstack/neutron
- openstack/nova
@@ -396,10 +393,7 @@
kuryr-kubernetes: https://opendev.org/openstack/kuryr-kubernetes
devstack_services:
etcd3: false
kubelet: true
kubernetes-api: true
kubernetes-controller-manager: true
kubernetes-scheduler: true
kubernetes-master: true
kuryr-daemon: true
kuryr-kubernetes: true
octavia: false
@@ -415,13 +409,12 @@
CELLSV2_SETUP: singleconductor
DATABASE_TYPE: mysql
IS_ZUUL_FT: True
K8S_API_SERVER_IP: "{{ hostvars['controller-k8s']['nodepool']['private_ipv4'] }}"
KEYSTONE_SERVICE_HOST: "{{ hostvars['controller']['nodepool']['private_ipv4'] }}"
KURYR_FORCE_IMAGE_BUILD: true
KURYR_K8S_API_PORT: 8080
KURYR_K8S_API_URL: "http://{{ hostvars['controller-k8s']['nodepool']['private_ipv4'] }}:${KURYR_K8S_API_PORT}"
KURYR_K8S_CLOUD_PROVIDER: false
KURYR_K8S_API_PORT: 6443
KURYR_K8S_API_URL: "https://{{ hostvars['controller-k8s']['nodepool']['private_ipv4'] }}:${KURYR_K8S_API_PORT}"
KURYR_K8S_CONTAINERIZED_DEPLOYMENT: false
KURYR_K8S_MULTI_WORKER_TESTS: false
KURYR_NEUTRON_DEFAULT_SUBNETPOOL_ID: shared-default-subnetpool-v4
MYSQL_HOST: "{{ hostvars['controller']['nodepool']['private_ipv4'] }}"
OCTAVIA_AMP_IMAGE_FILE: "/tmp/test-only-amphora-x64-haproxy-ubuntu-bionic.qcow2"
@@ -480,7 +473,7 @@
$OCTAVIA_CONF:
controller_worker:
amp_active_retries: 9999
kuryr_k8s_api_url: "http://{{ hostvars['controller-k8s']['nodepool']['private_ipv4'] }}:8080"
kuryr_k8s_api_url: "https://{{ hostvars['controller-k8s']['nodepool']['private_ipv4'] }}:6443"
helm_version: "3.5.4"
test_matrix_configs: [neutron]
zuul_work_dir: src/opendev.org/openstack/tacker

View File

@@ -0,0 +1,23 @@
# Bind the cluster-admin ClusterRole to an "admin" ServiceAccount so that
# its token can be used for full-access API authentication (e.g. by Kuryr
# and the Tacker functional tests).
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: admin
  annotations:
    rbac.authorization.kubernetes.io/autoupdate: "true"
roleRef:
  kind: ClusterRole
  name: cluster-admin
  apiGroup: rbac.authorization.k8s.io
subjects:
  - kind: ServiceAccount
    name: admin
    namespace: kube-system
---
# The ServiceAccount referenced above; kube-system places it alongside the
# other cluster-service accounts managed by the addon manager.
apiVersion: v1
kind: ServiceAccount
metadata:
  name: admin
  namespace: kube-system
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile

View File

@@ -66,6 +66,41 @@
when:
- inventory_hostname == 'controller-tacker'
# Create an admin ServiceAccount on the Kubernetes node and capture its
# bearer token so later tasks can substitute it into the VIM config.
# Runs only on controller-k8s when a Kuryr K8s API URL is configured.
- block:
    - name: Copy create_admin_token.yaml
      copy:
        src: "create_admin_token.yaml"
        dest: "/tmp/create_admin_token.yaml"
        # Quoted string: an unquoted 0644 is parsed by YAML as octal int 420,
        # which Ansible would misinterpret as the permission value.
        mode: "0644"
        owner: stack
        group: stack
      become: true
    - name: Create admin ServiceAccount
      command: kubectl create -f /tmp/create_admin_token.yaml
      become: true
      become_user: stack
    - name: Get admin secret name
      # The ServiceAccount's token secret is auto-generated with an
      # "admin-token-" prefix; grep picks it out of the secret list.
      shell: >
        kubectl get secrets -n kube-system -o name
        | grep admin-token
      register: admin_secret_name
      become: true
      become_user: stack
    - name: Get admin token from described secret
      # The token is stored base64-encoded in .data.token; decode it so the
      # registered value is usable directly as a bearer token.
      shell: >
        kubectl get {{ admin_secret_name.stdout }} -n kube-system -o jsonpath="{.data.token}"
        | base64 -d
      register: admin_token
      become: true
      become_user: stack
  when:
    - inventory_hostname == 'controller-k8s'
    - kuryr_k8s_api_url is defined
- block:
- name: Copy tools/test-setup-k8s-vim.sh
copy:
@@ -126,6 +161,16 @@
when:
- p.stat.exists
# Substitute the real admin bearer token (registered on controller-k8s)
# for the "secret_token" placeholder in the sample VIM definition.
- name: Replace k8s auth token in local-k8s-vim.yaml
  replace:
    path: "{{ item }}"
    regexp: "secret_token"
    replace: "{{ hostvars['controller-k8s'].admin_token.stdout }}"
  with_items:
    - "{{ zuul_work_dir }}/tacker/tests/etc/samples/local-k8s-vim.yaml"
  when:
    - p.stat.exists
- name: Replace the config file path in the test-setup-k8s-vim.sh
replace:
path: "{{ zuul_work_dir }}/tools/test-setup-k8s-vim.sh"

View File

@@ -1,6 +1,5 @@
auth_url: "https://127.0.0.1:6443"
username: "admin"
password: "admin"
bearer_token: "secret_token"
project_name: "default"
ssl_ca_cert: None
type: "kubernetes"