diff --git a/.zuul.yaml b/.zuul.yaml
index df06ce6e8..349dd636f 100644
--- a/.zuul.yaml
+++ b/.zuul.yaml
@@ -425,13 +425,10 @@
       - openstack/heat
       - openstack/horizon
       - openstack/keystone
-      - openstack/kuryr-kubernetes
       - openstack/neutron
       - openstack/nova
-      - openstack/octavia
       - openstack/placement
       - openstack/python-barbicanclient
-      - openstack/python-octaviaclient
       - openstack/python-tackerclient
       - openstack/tacker
       - openstack/tacker-horizon
@@ -441,7 +438,6 @@
         barbican: https://opendev.org/openstack/barbican
         heat: https://opendev.org/openstack/heat
         neutron: https://opendev.org/openstack/neutron
-        octavia: https://opendev.org/openstack/octavia
       devstack_services:
         base: false
         c-api: true
@@ -463,11 +459,6 @@
         n-novnc: true
         n-sch: true
         neutron: true
-        o-api: true
-        o-cw: true
-        o-hk: true
-        o-hm: true
-        octavia: true
         placement-api: true
         placement-client: true
         ovn-controller: true
@@ -506,18 +497,15 @@
         devstack_local_conf: {}
         devstack_plugins:
           devstack-plugin-container: https://opendev.org/openstack/devstack-plugin-container
-          kuryr-kubernetes: https://opendev.org/openstack/kuryr-kubernetes
         devstack_services:
           etcd3: false
-          kubernetes-master: true
-          kuryr-daemon: true
-          kuryr-kubernetes: true
-          octavia: false
           ovn-controller: true
           ovn-northd: true
           ovs-vswitchd: true
           ovsdb-server: true
           q-ovn-metadata-agent: true
+          container: true
+          k8s-master: true
         tox_install_siblings: false
     group-vars:
       subnode:
@@ -527,21 +515,10 @@
           IS_ZUUL_FT: True
           K8S_API_SERVER_IP: "{{ hostvars['controller-k8s']['nodepool']['private_ipv4'] }}"
           KEYSTONE_SERVICE_HOST: "{{ hostvars['controller']['nodepool']['private_ipv4'] }}"
-          KURYR_FORCE_IMAGE_BUILD: true
-          KURYR_K8S_API_PORT: 6443
-          KURYR_K8S_API_URL: "https://{{ hostvars['controller-k8s']['nodepool']['private_ipv4'] }}:${KURYR_K8S_API_PORT}"
-          KURYR_K8S_CONTAINERIZED_DEPLOYMENT: false
-          KURYR_NEUTRON_DEFAULT_SUBNETPOOL_ID: shared-default-subnetpool-v4
-          # NOTES:
-          # - In Bobcat cycle, Kubernetes version is updated to 1.26.
-          #   https://blueprints.launchpad.net/tacker/+spec/update-k8s-helm-prometheus
-          KURYR_KUBERNETES_VERSION: 1.26.8
           CONTAINER_ENGINE: crio
-          CRIO_VERSION: 1.26
+          K8S_VERSION: "1.30.5"
+          CRIO_VERSION: "1.30.5"
           MYSQL_HOST: "{{ hostvars['controller']['nodepool']['private_ipv4'] }}"
-          OCTAVIA_AMP_IMAGE_FILE: "/tmp/test-only-amphora-x64-haproxy-ubuntu-bionic.qcow2"
-          OCTAVIA_AMP_IMAGE_NAME: "test-only-amphora-x64-haproxy-ubuntu-bionic"
-          OCTAVIA_AMP_IMAGE_SIZE: 3
           OVS_BRIDGE_MAPPINGS: public:br-ex,mgmtphysnet0:br-infra
           PHYSICAL_NETWORK: mgmtphysnet0
           TACKER_HOST: "{{ hostvars['controller-tacker']['nodepool']['private_ipv4'] }}"
@@ -551,6 +528,7 @@
           Q_ML2_PLUGIN_MECHANISM_DRIVERS: ovn,logger
           # TODO(ueha): Remove this workarround if the Zuul jobs succeed with GLOBAL_VENV=true
           GLOBAL_VENV: false
+          K8S_TOKEN: "9agf12.zsu5uh2m4pzt3qba"
         devstack_services:
           dstat: false
           horizon: false
@@ -576,9 +554,6 @@
           KEYSTONE_SERVICE_HOST: "{{ hostvars['controller']['nodepool']['private_ipv4'] }}"
           L2_AGENT_EXTENSIONS: qos
           MYSQL_HOST: "{{ hostvars['controller']['nodepool']['private_ipv4'] }}"
-          OCTAVIA_AMP_IMAGE_FILE: "/tmp/test-only-amphora-x64-haproxy-ubuntu-bionic.qcow2"
-          OCTAVIA_AMP_IMAGE_NAME: "test-only-amphora-x64-haproxy-ubuntu-bionic"
-          OCTAVIA_AMP_IMAGE_SIZE: 3
           OVS_BRIDGE_MAPPINGS: public:br-ex,mgmtphysnet0:br-infra
           PHYSICAL_NETWORK: mgmtphysnet0
           Q_SERVICE_PLUGIN_CLASSES: ovn-router,neutron.services.qos.qos_plugin.QoSPlugin,qos
@@ -596,15 +571,9 @@
           $NEUTRON_DHCP_CONF:
             DEFAULT:
               enable_isolated_metadata: True
-          $OCTAVIA_CONF:
-            controller_worker:
-              amp_active_retries: 9999
-      kuryr_k8s_api_url: "https://{{ hostvars['controller-k8s']['nodepool']['private_ipv4'] }}:6443"
+      k8s_api_url: "https://{{ hostvars['controller-k8s']['nodepool']['private_ipv4'] }}:6443"
       k8s_ssl_verify: true
-      # NOTES:
-      # - In Bobcat cycle, Helm version is updated to 3.11.
-      #   https://blueprints.launchpad.net/tacker/+spec/update-k8s-helm-prometheus
-      helm_version: "3.11.3"
+      helm_version: "3.15.4"
       test_matrix_configs: [neutron]
       zuul_work_dir: src/opendev.org/openstack/tacker
       zuul_copy_output:
diff --git a/playbooks/devstack/pre.yaml b/playbooks/devstack/pre.yaml
index fb696faf6..7c4c7e944 100644
--- a/playbooks/devstack/pre.yaml
+++ b/playbooks/devstack/pre.yaml
@@ -3,6 +3,7 @@
     - ensure-db-cli-installed
     - setup-k8s-nodes
    - orchestrate-devstack
+    - restart-kubelet-service
    - modify-heat-policy
    - setup-k8s-oidc
    - setup-default-vim
diff --git a/roles/restart-kubelet-service/tasks/main.yaml b/roles/restart-kubelet-service/tasks/main.yaml
new file mode 100644
index 000000000..19da6a632
--- /dev/null
+++ b/roles/restart-kubelet-service/tasks/main.yaml
@@ -0,0 +1,23 @@
+- block:
+    # NOTE: When creating a k8s environment with devstack-plugin-container and
+    # deploying a Pod, the following error occurred - `network: failed to set bridge
+    # addr: "cni0" already has an IP address different from 10.x.x.x` and
+    # the Pod failed to be deployed. As a fix, delete the related interfaces and
+    # restart the kubelet service.
+    - name: k8s interface down
+      shell: ip link set cni0 down && ip link set flannel.1 down
+      become: yes
+
+    - name: k8s interface delete
+      shell: ip link delete cni0 && ip link delete flannel.1
+      become: yes
+
+    - name: kubelet service restart
+      service:
+        name: kubelet
+        state: restarted
+      become: yes
+
+  when:
+    - inventory_hostname == 'controller-k8s'
+    - k8s_api_url is defined
diff --git a/roles/setup-default-vim/tasks/main.yaml b/roles/setup-default-vim/tasks/main.yaml
index 4f685554c..2e1e31289 100644
--- a/roles/setup-default-vim/tasks/main.yaml
+++ b/roles/setup-default-vim/tasks/main.yaml
@@ -94,6 +94,9 @@
         kubectl get {{ admin_secret_name.stdout }} -n kube-system -o jsonpath="{.data.token}" | base64 -d
       register: admin_token
+      until: admin_token.stdout != ""
+      retries: 10
+      delay: 5
       become: yes
       become_user: stack

@@ -115,7 +118,7 @@

   when:
     - inventory_hostname == 'controller-k8s'
-    - kuryr_k8s_api_url is defined
+    - k8s_api_url is defined

 - block:
     - name: Copy tools/test-setup-k8s-vim.sh
@@ -182,7 +185,7 @@
     replace:
       path: "{{ item }}"
       regexp: "https://127.0.0.1:6443"
-      replace: "{{ kuryr_k8s_api_url }}"
+      replace: "{{ k8s_api_url }}"
     with_items:
       - "{{ zuul_work_dir }}/samples/tests/etc/samples/local-k8s-vim.yaml"
       - "{{ zuul_work_dir }}/samples/tests/etc/samples/local-k8s-vim-helm.yaml"
@@ -193,7 +196,7 @@
     replace:
       path: "{{ item }}"
       regexp: "https://127.0.0.1:6443"
-      replace: "{{ kuryr_k8s_api_url }}"
+      replace: "{{ k8s_api_url }}"
     with_items:
       - "{{ zuul_work_dir }}/samples/tests/etc/samples/local-k8s-vim-oidc.yaml"
   when:
@@ -283,7 +286,7 @@

   when:
     - inventory_hostname == 'controller-tacker'
-    - kuryr_k8s_api_url is defined
+    - k8s_api_url is defined

 - block:
     - name: Copy tools/test-setup-mgmt.sh
@@ -329,4 +332,4 @@

   when:
     - inventory_hostname == 'controller-tacker'
-    - kuryr_k8s_api_url is defined
+    - k8s_api_url is defined
diff --git a/roles/setup-k8s-nodes/tasks/main.yaml b/roles/setup-k8s-nodes/tasks/main.yaml
index 3397ae158..05a86d58c 100644
--- a/roles/setup-k8s-nodes/tasks/main.yaml
+++ b/roles/setup-k8s-nodes/tasks/main.yaml
@@ -33,4 +33,4 @@
   become: yes
   when:
     - inventory_hostname == 'controller-k8s'
-    - kuryr_k8s_api_url is defined
+    - k8s_api_url is defined
diff --git a/roles/setup-k8s-oidc/tasks/main.yaml b/roles/setup-k8s-oidc/tasks/main.yaml
index 1feda8bca..1967c95fe 100644
--- a/roles/setup-k8s-oidc/tasks/main.yaml
+++ b/roles/setup-k8s-oidc/tasks/main.yaml
@@ -89,11 +89,14 @@
   ignore_errors: yes

 - name: Wait for k8s apiserver to restart
-  wait_for:
-    host: "{{ hostvars['controller-k8s']['nodepool']['private_ipv4'] }}"
-    port: 6443
-    delay: 30
-    timeout: 180
+  command: >
+    kubectl get pods -n kube-system -l component=kube-apiserver -o jsonpath='{.items[0].status.phase}'
+  register: kube_apiserver_status
+  until: kube_apiserver_status.stdout == "Running"
+  delay: 30
+  timeout: 180
+  become: yes
+  become_user: stack
   ignore_errors: yes

 - name: Create clusterrolebinding on k8s server
diff --git a/roles/setup-multi-tenant-vim/tasks/main.yaml b/roles/setup-multi-tenant-vim/tasks/main.yaml
index 9889ebf32..a5ea032ab 100644
--- a/roles/setup-multi-tenant-vim/tasks/main.yaml
+++ b/roles/setup-multi-tenant-vim/tasks/main.yaml
@@ -151,7 +151,7 @@
       --project {{ os_project_tenant1 }}
       --os-project-domain {{ os_domain_tenant1 }}
       --os-user-domain {{ os_domain_tenant1 }}
-      --endpoint {{ kuryr_k8s_api_url }} --os-disable-cert-verify
+      --endpoint {{ k8s_api_url }} --os-disable-cert-verify
       --k8s-token {{ hostvars['controller-k8s'].admin_token.stdout }}
       -o {{ k8s_vim_conf_path_tenant1 }}
@@ -185,7 +185,7 @@
       --project {{ os_project_tenant2 }}
       --os-project-domain {{ os_domain_tenant2 }}
       --os-user-domain {{ os_domain_tenant2 }}
-      --endpoint {{ kuryr_k8s_api_url }} --os-disable-cert-verify
+      --endpoint {{ k8s_api_url }} --os-disable-cert-verify
       --k8s-token {{ hostvars['controller-k8s'].admin_token.stdout }}
       -o {{ k8s_vim_conf_path_tenant2 }}
@@ -213,4 +213,4 @@

   when:
     - inventory_hostname == 'controller-tacker'
-    - kuryr_k8s_api_url is defined
+    - k8s_api_url is defined