diff --git a/.coveragerc b/.coveragerc deleted file mode 100644 index 01ea475ae..000000000 --- a/.coveragerc +++ /dev/null @@ -1,17 +0,0 @@ -[run] -branch = True -source = kuryr_kubernetes -omit = kuryr_kubernetes/tests/* - - -[report] -ignore_errors = True -exclude_lines = - # Have to re-enable the standard pragma - pragma: no cover - - # Don't complain if tests don't hit defensive assertion code: - raise NotImplementedError - - # Don't complain if non-runnable code isn't run: - if __name__ == .__main__.: diff --git a/.dockerignore b/.dockerignore deleted file mode 100644 index 8a0f61274..000000000 --- a/.dockerignore +++ /dev/null @@ -1,3 +0,0 @@ -.tox -.dockerignore -*.Dockerfile diff --git a/.gitignore b/.gitignore deleted file mode 100644 index 948f2fc65..000000000 --- a/.gitignore +++ /dev/null @@ -1,77 +0,0 @@ -*.py[cod] - -# C extensions -*.so - -# Packages -*.egg -*.egg-info -dist -build -eggs -parts -bin -var -sdist -develop-eggs -lib -lib64 - -# Installer logs -pip-log.txt - -# Unit test / coverage reports -nosetests.xml -cover - -# Translations -*.mo - -# Complexity -output/*.html -output/*/index.html - -# Sphinx -doc/build - -# Files created by releasenotes build -releasenotes/build - -# pbr generates these -AUTHORS -ChangeLog - -# Editors -*~ -*.sw? - -# Hidden directories -!/.coveragerc -!/.gitignore -!/.gitreview -!/.mailmap -!/.pylintrc -!/.testr.conf -!/.stestr.conf -.stestr - -contrib/vagrant/.vagrant - -# Configuration files -etc/kuryr.conf.sample - -# Ignore user specific local.conf settings for vagrant -contrib/vagrant/user_local.conf - -# Log files -*.log - -# devstack-heat -*.pem - -# Binaries from docker images builds -kuryr-cni-bin -kuryr-cni - -# editor tags dir -tags diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml deleted file mode 100644 index 6c7a4dccf..000000000 --- a/.pre-commit-config.yaml +++ /dev/null @@ -1,6 +0,0 @@ -repos: - - repo: https://github.com/pre-commit/pre-commit-hooks - rev: v1.4.0 - hooks: - - id: flake8 - diff --git a/.stestr.conf b/.stestr.conf deleted file mode 100644 index 117657f2e..000000000 --- a/.stestr.conf +++ /dev/null @@ -1,3 +0,0 @@ -[DEFAULT] -test_path=${OS_TEST_PATH:-./kuryr_kubernetes/tests/} -top_dir=./ diff --git a/.zuul.d/base.yaml b/.zuul.d/base.yaml deleted file mode 100644 index 85d8490a8..000000000 --- a/.zuul.d/base.yaml +++ /dev/null @@ -1,259 +0,0 @@ -# Copyright 2018 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -- job: - name: kuryr-kubernetes-base - parent: devstack-tempest - description: | - Base Kuryr Kubernetes tempest job. There are neither Neutron nor Octavia - services; it's meant to be extended.
- required-projects: - - openstack/devstack-plugin-container - - openstack/kuryr-kubernetes - - openstack/kuryr-tempest-plugin - - openstack/tempest - timeout: 10800 - post-run: - - playbooks/copy-k8s-logs.yaml - - playbooks/copy-crio-logs.yaml - host-vars: - controller: - devstack_plugins: - kuryr-kubernetes: https://opendev.org/openstack/kuryr-kubernetes - devstack-plugin-container: https://opendev.org/openstack/devstack-plugin-container - kuryr-tempest-plugin: https://opendev.org/openstack/kuryr-tempest-plugin - vars: - # Default swap size got shrunk to 1 GB, which is way too small for us. - configure_swap_size: 8192 - tempest_test_regex: '^(kuryr_tempest_plugin.tests.)' - # Since we switched the amphora image to focal, the tests started - # requiring more time. - tempest_test_timeout: 2400 - tox_envlist: 'all' - tempest_plugins: - - kuryr-tempest-plugin - devstack_localrc: - CONTAINER_ENGINE: crio - CRIO_VERSION: "1.28" - ENABLE_TLS: true - ETCD_USE_RAMDISK: true - KURYR_ENABLED_HANDLERS: vif,endpoints,service,namespace,pod_label,policy,kuryrnetworkpolicy,kuryrnetwork,kuryrport,kuryrloadbalancer - KURYR_SG_DRIVER: policy - KURYR_SUBNET_DRIVER: namespace - KURYR_SUPPORT_POD_SECURITY: true - devstack_services: - c-api: false - c-bak: false - c-sch: false - c-vol: false - cinder: false - coredns: false - # Need to disable dstat due to bug https://github.com/dstat-real/dstat/pull/162 - dstat: false - etcd3: true - g-api: true - g-reg: true - key: true - kubernetes-master: true - kuryr-daemon: true - kuryr-kubernetes: true - mysql: true - n-api-meta: true - n-api: true - n-cond: true - n-cpu: true - n-sch: true - placement-api: true - placement-client: true - rabbit: true - s-account: false - s-container: false - s-object: false - s-proxy: false - tempest: true - zuul_copy_output: - '{{ devstack_log_dir }}/kubernetes': 'logs' - '{{ devstack_log_dir }}/crio': 'logs' - irrelevant-files: - - ^.*\.rst$ - - ^doc/.*$ - - ^releasenotes/.*$ - - ^contrib/.*$ - -- job: - name: kuryr-kubernetes-base-ovn - parent: kuryr-kubernetes-base - description: Base kuryr-kubernetes job with OVN - required-projects: - - openstack/neutron - timeout: 10800 - post-run: playbooks/copy-k8s-logs.yaml - host-vars: - controller: - devstack_plugins: - neutron: https://opendev.org/openstack/neutron - vars: - network_api_extensions_common: - - tag-ports-during-bulk-creation - devstack_localrc: - KURYR_NEUTRON_DEFAULT_ROUTER: kuryr-router - ML2_L3_PLUGIN: ovn-router,trunk,qos - OVN_BRANCH: v21.06.0 - OVS_BRANCH: "a4b04276ab5934d087669ff2d191a23931335c87" - OVN_BUILD_FROM_SOURCE: true - OVN_L3_CREATE_PUBLIC_NETWORK: true - VAR_RUN_PATH: /usr/local/var/run - devstack_services: - neutron-tag-ports-during-bulk-creation: true - neutron: true - q-qos: true - q-trunk: true - zuul_copy_output: - '{{ devstack_base_dir }}/data/ovn': 'logs' - '{{ devstack_log_dir }}/ovsdb-server-nb.log': 'logs' - '{{ devstack_log_dir }}/ovsdb-server-sb.log': 'logs' - '/home/zuul/np_sctp_kubetest.log': 'logs' - -- job: - name: kuryr-kubernetes-base-ovs - parent: kuryr-kubernetes-base - description: Base kuryr-kubernetes job with OVS - required-projects: - - openstack/devstack-plugin-container - - openstack/kuryr-kubernetes - - openstack/kuryr-tempest-plugin - - openstack/tempest - - openstack/neutron - timeout: 10800 - post-run: playbooks/copy-k8s-logs.yaml - host-vars: - controller: - devstack_plugins: - neutron: https://opendev.org/openstack/neutron - vars: - network_api_extensions_common: - - tag-ports-during-bulk-creation - devstack_services: -
neutron-tag-ports-during-bulk-creation: true - neutron: true - ovn-controller: false - ovn-northd: false - ovs-vswitchd: false - ovsdb-server: false - q-agt: true - q-dhcp: true - q-l3: true - q-meta: true - q-ovn-metadata-agent: false - q-svc: true - q-trunk: true - devstack_localrc: - KURYR_ENFORCE_SG_RULES: true - ML2_L3_PLUGIN: router - Q_AGENT: openvswitch - Q_ML2_PLUGIN_MECHANISM_DRIVERS: openvswitch - Q_ML2_TENANT_NETWORK_TYPE: vxlan - zuul_copy_output: - '{{ devstack_log_dir }}/ovsdb-server-nb.log': 'logs' - '{{ devstack_log_dir }}/ovsdb-server-sb.log': 'logs' - -- job: - name: kuryr-kubernetes-octavia-base - parent: kuryr-kubernetes-base-ovn - description: | - Kuryr-Kubernetes tempest job using OVN and ovn-octavia driver for Kuryr - required-projects: - - openstack/octavia - - openstack/python-octaviaclient - - openstack/ovn-octavia-provider - - openstack/octavia-tempest-plugin - pre-run: playbooks/get_amphora_tarball.yaml - host-vars: - controller: - devstack_plugins: - octavia: https://opendev.org/openstack/octavia - ovn-octavia-provider: https://opendev.org/openstack/ovn-octavia-provider - octavia-tempest-plugin: https://opendev.org/openstack/octavia-tempest-plugin - vars: - tempest_plugins: - - kuryr-tempest-plugin - - octavia-tempest-plugin - devstack_localrc: - KURYR_EP_DRIVER_OCTAVIA_PROVIDER: ovn - KURYR_ENFORCE_SG_RULES: false - KURYR_K8S_OCTAVIA_MEMBER_MODE: L2 - KURYR_LB_ALGORITHM: SOURCE_IP_PORT - OCTAVIA_AMP_IMAGE_FILE: /tmp/test-only-amphora-x64-haproxy-ubuntu-focal.qcow2 - OCTAVIA_AMP_IMAGE_NAME: test-only-amphora-x64-haproxy-ubuntu-focal - OCTAVIA_AMP_IMAGE_SIZE: 3 - devstack_local_conf: - post-config: - $OCTAVIA_CONF: - controller_worker: - amp_active_retries: 9999 - api_settings: - enabled_provider_drivers: amphora:'Octavia Amphora driver',ovn:'Octavia OVN driver' - health_manager: - failover_threads: 2 - health_update_threads: 2 - stats_update_threads: 2 - devstack_services: - octavia: true - o-api: true - o-cw: true - o-da: true - o-hk: true - o-hm: true - -- job: - name: kuryr-kubernetes-octavia-base-ovs - parent: kuryr-kubernetes-base-ovs - nodeset: kuryr-nested-virt-ubuntu-jammy - description: | - Kuryr-Kubernetes tempest job using OVS and amphora driver for Octavia - required-projects: - - openstack/octavia - - openstack/python-octaviaclient - - openstack/octavia-tempest-plugin - pre-run: playbooks/get_amphora_tarball.yaml - host-vars: - controller: - devstack_plugins: - octavia: https://opendev.org/openstack/octavia - octavia-tempest-plugin: https://opendev.org/openstack/octavia-tempest-plugin - vars: - tempest_plugins: - - kuryr-tempest-plugin - - octavia-tempest-plugin - devstack_localrc: - OCTAVIA_AMP_IMAGE_FILE: /tmp/test-only-amphora-x64-haproxy-ubuntu-focal.qcow2 - OCTAVIA_AMP_IMAGE_NAME: test-only-amphora-x64-haproxy-ubuntu-focal - OCTAVIA_AMP_IMAGE_SIZE: 3 - LIBVIRT_TYPE: kvm - LIBVIRT_CPU_MODE: host-passthrough - devstack_local_conf: - post-config: - $OCTAVIA_CONF: - controller_worker: - amp_active_retries: 9999 - health_manager: - failover_threads: 2 - health_update_threads: 2 - stats_update_threads: 2 - devstack_services: - octavia: true - o-api: true - o-cw: true - o-hk: true - o-hm: true diff --git a/.zuul.d/k8s-np-e2e.yaml b/.zuul.d/k8s-np-e2e.yaml deleted file mode 100644 index 63fc3bfd4..000000000 --- a/.zuul.d/k8s-np-e2e.yaml +++ /dev/null @@ -1,144 +0,0 @@ -# Copyright 2021 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -- job: - name: kuryr-kubernetes-e2e-np - parent: devstack - description: | - Kuryr-Kubernetes job with OVN and Octavia provider OVN running k8s network policy e2e tests - required-projects: - - openstack/devstack-plugin-container - - openstack/kuryr-kubernetes - - openstack/neutron - - openstack/octavia - - openstack/ovn-octavia-provider - - openstack/python-octaviaclient - pre-run: playbooks/get_amphora_tarball.yaml - post-run: - - playbooks/run_k8s_e2e_tests.yaml - - playbooks/copy-k8s-logs.yaml - - playbooks/copy-crio-logs.yaml - post-timeout: 7200 - host-vars: - controller: - devstack_plugins: - devstack-plugin-container: https://opendev.org/openstack/devstack-plugin-container - kuryr-kubernetes: https://opendev.org/openstack/kuryr-kubernetes - neutron: https://opendev.org/openstack/neutron - octavia: https://opendev.org/openstack/octavia - ovn-octavia-provider: https://opendev.org/openstack/ovn-octavia-provider - vars: - network_api_extensions_common: - - tag-ports-during-bulk-creation - devstack_localrc: - CONTAINER_ENGINE: crio - CRIO_VERSION: "1.28" - ETCD_USE_RAMDISK: true - KURYR_ENABLED_HANDLERS: vif,endpoints,service,namespace,pod_label,policy,kuryrnetworkpolicy,kuryrnetwork,kuryrport,kuryrloadbalancer - KURYR_ENFORCE_SG_RULES: false - KURYR_EP_DRIVER_OCTAVIA_PROVIDER: ovn - KURYR_K8S_API_PORT: 6443 - KURYR_K8S_CLOUD_PROVIDER: false - KURYR_K8S_OCTAVIA_MEMBER_MODE: L2 - KURYR_LB_ALGORITHM: SOURCE_IP_PORT - KURYR_NEUTRON_DEFAULT_ROUTER: kuryr-router - KURYR_SG_DRIVER: policy - KURYR_SUBNET_DRIVER: namespace - ML2_L3_PLUGIN: ovn-router,trunk,qos - OCTAVIA_AMP_IMAGE_FILE: "/tmp/test-only-amphora-x64-haproxy-ubuntu-focal.qcow2" - OCTAVIA_AMP_IMAGE_NAME: "test-only-amphora-x64-haproxy-ubuntu-focal" - OCTAVIA_AMP_IMAGE_SIZE: 3 - OVN_BRANCH: v21.06.0 - OVS_BRANCH: "a4b04276ab5934d087669ff2d191a23931335c87" - OVN_BUILD_FROM_SOURCE: true - OVN_L3_CREATE_PUBLIC_NETWORK: true - PHYSICAL_NETWORK: public - Q_AGENT: ovn - Q_BUILD_OVS_FROM_GIT: true - Q_ML2_PLUGIN_MECHANISM_DRIVERS: ovn,logger - Q_ML2_PLUGIN_TYPE_DRIVERS: local,flat,vlan,geneve - Q_ML2_TENANT_NETWORK_TYPE: geneve - Q_USE_PROVIDERNET_FOR_PUBLIC: true - VAR_RUN_PATH: /usr/local/var/run - devstack_services: - # TODO(dmellado):Temporary workaround until proper fix - base: false - c-api: false - c-bak: false - c-sch: false - c-vol: false - cinder: false - coredns: false - # Need to disable dstat due to bug https://github.com/dstat-real/dstat/pull/162 - dstat: false - etcd3: true - g-api: true - g-reg: true - key: true - kubernetes-master: true - kuryr-daemon: true - kuryr-kubernetes: true - mysql: true - n-api-meta: true - n-api: true - n-cond: true - n-cpu: true - n-sch: true - neutron-tag-ports-during-bulk-creation: true - neutron: true - o-api: true - o-cw: true - o-da: true - o-hk: true - o-hm: true - octavia: true - ovn-controller: true - ovn-northd: true - placement-api: true - placement-client: true - q-agt: false - q-dhcp: false - q-l3: false - q-meta: false - q-ovn-metadata-agent: true - q-qos: true - q-svc: true - q-trunk: true - rabbit: true - s-account: false - s-container: false - s-object: false - s-proxy: 
false - devstack_local_conf: - post-config: - $OCTAVIA_CONF: - controller_worker: - amp_active_retries: 9999 - api_settings: - enabled_provider_drivers: amphora:'Octavia Amphora driver',ovn:'Octavia OVN driver' - kubetest_version: v1.22.5 - np_parallel_number: 2 - gopkg: go1.16.12.linux-amd64.tar.gz - np_sleep: 30 - zuul_copy_output: - '/home/zuul/np_kubetest.log': 'logs' - '/home/zuul/np_sctp_kubetest.log': 'logs' - '{{ devstack_log_dir }}/kubernetes': 'logs' - '{{ devstack_log_dir }}/crio': 'logs' - irrelevant-files: - - ^.*\.rst$ - - ^doc/.*$ - - ^releasenotes/.*$ - - ^contrib/.*$ - voting: false diff --git a/.zuul.d/nodesets.yaml b/.zuul.d/nodesets.yaml deleted file mode 100644 index 10381df7a..000000000 --- a/.zuul.d/nodesets.yaml +++ /dev/null @@ -1,63 +0,0 @@ -# Copyright 2018 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -- nodeset: - name: openstack-centos-7-single-node - nodes: - - name: controller - label: centos-7 - groups: - - name: tempest - nodes: - - controller - -- nodeset: - name: kuryr-nested-virt-ubuntu-jammy - nodes: - - name: controller - label: nested-virt-ubuntu-jammy - groups: - - name: tempest - nodes: - - controller - -- nodeset: - name: kuryr-nested-virt-two-node-jammy - nodes: - - name: controller - label: nested-virt-ubuntu-jammy - - name: compute1 - label: nested-virt-ubuntu-jammy - groups: - # Node where tests are executed and test results collected - - name: tempest - nodes: - - controller - # Nodes running the compute service - - name: compute - nodes: - - controller - - compute1 - # Nodes that are not the controller - - name: subnode - nodes: - - compute1 - # Switch node for multinode networking setup - - name: switch - nodes: - - controller - # Peer nodes for multinode networking setup - - name: peers - nodes: - - compute1 diff --git a/.zuul.d/project.yaml b/.zuul.d/project.yaml deleted file mode 100644 index 3d1baa370..000000000 --- a/.zuul.d/project.yaml +++ /dev/null @@ -1,45 +0,0 @@ -# Copyright 2018 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -- project-template: - name: kuryr-kubernetes-tempest-jobs - check: - jobs: - - kuryr-kubernetes-tempest - - kuryr-kubernetes-tempest-defaults - - kuryr-kubernetes-tempest-systemd - - kuryr-kubernetes-tempest-multinode - - kuryr-kubernetes-tempest-multinode-ovs - - kuryr-kubernetes-tempest-ipv6 - - kuryr-kubernetes-tempest-ipv6-ovs - - kuryr-kubernetes-tempest-amphora - - kuryr-kubernetes-tempest-amphora-ovs - - kuryr-kubernetes-tempest-annotation-project-driver - gate: - jobs: - - kuryr-kubernetes-tempest - - kuryr-kubernetes-tempest-systemd - experimental: - jobs: - - kuryr-kubernetes-tempest-pools-namespace - - kuryr-kubernetes-tempest-multinode-ha - - kuryr-kubernetes-tempest-dual-stack - -- project: - templates: - - openstack-python3-jobs - - publish-openstack-docs-pti - - release-notes-jobs-python3 - - check-requirements - - kuryr-kubernetes-tempest-jobs diff --git a/.zuul.d/tempest-jobs.yaml b/.zuul.d/tempest-jobs.yaml deleted file mode 100644 index 8fff68a74..000000000 --- a/.zuul.d/tempest-jobs.yaml +++ /dev/null @@ -1,239 +0,0 @@ -# Copyright 2018 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -- job: - name: kuryr-kubernetes-tempest - parent: kuryr-kubernetes-octavia-base - description: | - Kuryr-Kubernetes tempest job running kuryr containerized - -- job: - name: kuryr-kubernetes-tempest-ovn-provider-ovn - parent: kuryr-kubernetes-octavia-base - description: | - Alias for the kuryr-kubernetes tempest job. Because of the change we - introduced in switching over to Neutron OVN and the Octavia OVN - provider, this can be removed after updating the ovn-octavia-provider - Zuul project. - -- job: - name: kuryr-kubernetes-tempest-systemd - parent: kuryr-kubernetes-octavia-base - description: | - Kuryr-Kubernetes tempest job using Octavia and running kuryr as systemd - services - vars: - devstack_localrc: - KURYR_K8S_CONTAINERIZED_DEPLOYMENT: false - -- job: - name: kuryr-kubernetes-tempest-centos-7 - parent: kuryr-kubernetes-tempest-systemd - nodeset: openstack-centos-7-single-node - voting: false - -- job: - name: kuryr-kubernetes-tempest-defaults - parent: kuryr-kubernetes-octavia-base - nodeset: kuryr-nested-virt-ubuntu-jammy - description: | - Kuryr-Kubernetes tempest job running kuryr containerized with OVN, - Octavia's amphora, the default set of handlers, the default SG driver - and the default subnet driver.
- host-vars: - controller: - devstack_plugins: - octavia: https://opendev.org/openstack/octavia - octavia-tempest-plugin: https://opendev.org/openstack/octavia-tempest-plugin - vars: - devstack_localrc: - KURYR_ENABLED_HANDLERS: '' - KURYR_ENFORCE_SG_RULES: true - KURYR_EP_DRIVER_OCTAVIA_PROVIDER: default - KURYR_K8S_OCTAVIA_MEMBER_MODE: L3 - KURYR_LB_ALGORITHM: ROUND_ROBIN - KURYR_SG_DRIVER: default - KURYR_SUBNET_DRIVER: default - LIBVIRT_TYPE: kvm - LIBVIRT_CPU_MODE: host-passthrough - devstack_local_conf: - post-config: - $OCTAVIA_CONF: - controller_worker: - amp_active_retries: 9999 - api_settings: - enabled_provider_drivers: amphora:'Octavia Amphora driver' - health_manager: - failover_threads: 2 - health_update_threads: 2 - stats_update_threads: 2 - devstack_services: - q-trunk: true - o-da: false - voting: false - -- job: - name: kuryr-kubernetes-tempest-ipv6 - nodeset: kuryr-nested-virt-ubuntu-jammy - parent: kuryr-kubernetes-octavia-base - description: | - Kuryr-Kubernetes tempest job running kuryr containerized with IPv6 pod - and service networks using OVN and Octavia Amphora - # TODO(gryf): investigate why NP does not work with IPv6 - host-vars: - controller: - devstack_plugins: - octavia: https://opendev.org/openstack/octavia - octavia-tempest-plugin: https://opendev.org/openstack/octavia-tempest-plugin - vars: - devstack_localrc: - KURYR_ENABLED_HANDLERS: '' - KURYR_ENFORCE_SG_RULES: true - KURYR_EP_DRIVER_OCTAVIA_PROVIDER: default - KURYR_IPV6: true - KURYR_K8S_OCTAVIA_MEMBER_MODE: L3 - KURYR_LB_ALGORITHM: ROUND_ROBIN - KURYR_SG_DRIVER: default - KURYR_SUBNET_DRIVER: default - LIBVIRT_TYPE: kvm - LIBVIRT_CPU_MODE: host-passthrough - devstack_local_conf: - post-config: - $OCTAVIA_CONF: - controller_worker: - amp_active_retries: 9999 - api_settings: - enabled_provider_drivers: amphora:'Octavia Amphora driver' - health_manager: - failover_threads: 2 - health_update_threads: 2 - stats_update_threads: 2 - devstack_services: - q-trunk: true - o-da: false - voting: false - -- job: - name: kuryr-kubernetes-tempest-ipv6-ovs - parent: kuryr-kubernetes-octavia-base-ovs - description: | - Kuryr-Kubernetes tempest job running kuryr containerized with IPv6 pod - and service networks based on OVS - # TODO(gryf): investigate why NP does not work with IPv6 - vars: - devstack_localrc: - KURYR_ENABLED_HANDLERS: '' - KURYR_IPV6: true - KURYR_SG_DRIVER: default - KURYR_SUBNET_DRIVER: default - devstack_services: - q-trunk: false - voting: false - -- job: - name: kuryr-kubernetes-tempest-dual-stack - parent: kuryr-kubernetes-octavia-base - description: | - Kuryr-Kubernetes tempest job running kuryr containerized with dual stack - pod and service networks - vars: - devstack_localrc: - KURYR_DUAL_STACK: true - voting: false - -- job: - name: kuryr-kubernetes-tempest-pools-namespace - parent: kuryr-kubernetes-octavia-base - description: | - Tempest with containers, port pools and namespace subnet driver - vars: - devstack_localrc: - KURYR_SUBNET_DRIVER: namespace - KURYR_ENABLED_HANDLERS: vif,endpoints,service,namespace,pod_label,policy,kuryrnetworkpolicy,kuryrnetwork,kuryrport,kuryrloadbalancer - KURYR_SG_DRIVER: policy - KURYR_USE_PORT_POOLS: true - KURYR_POD_VIF_DRIVER: neutron-vif - KURYR_VIF_POOL_DRIVER: neutron - KURYR_CONFIGMAP_MODIFIABLE: false - -- job: - name: kuryr-kubernetes-tempest-annotation-project-driver - parent: kuryr-kubernetes-octavia-base - description: | - Run kuryr-Kubernetes tempest job with annotation project driver - vars: - devstack_localrc: - KURYR_PROJECT_DRIVER: 
annotation - voting: true - -- job: - name: kuryr-kubernetes-tempest-amphora - parent: kuryr-kubernetes-base-ovn - nodeset: kuryr-nested-virt-ubuntu-jammy - required-projects: - - openstack/octavia - - openstack/python-octaviaclient - - openstack/octavia-tempest-plugin - pre-run: playbooks/get_amphora_tarball.yaml - host-vars: - controller: - devstack_plugins: - octavia: https://opendev.org/openstack/octavia - octavia-tempest-plugin: https://opendev.org/openstack/octavia-tempest-plugin - vars: - tempest_plugins: - - kuryr-tempest-plugin - - octavia-tempest-plugin - devstack_localrc: - KURYR_ENFORCE_SG_RULES: true - OCTAVIA_AMP_IMAGE_FILE: /tmp/test-only-amphora-x64-haproxy-ubuntu-focal.qcow2 - OCTAVIA_AMP_IMAGE_NAME: test-only-amphora-x64-haproxy-ubuntu-focal - OCTAVIA_AMP_IMAGE_SIZE: 3 - LIBVIRT_TYPE: kvm - LIBVIRT_CPU_MODE: host-passthrough - devstack_local_conf: - post-config: - $OCTAVIA_CONF: - controller_worker: - amp_active_retries: 9999 - health_manager: - failover_threads: 2 - health_update_threads: 2 - stats_update_threads: 2 - devstack_services: - octavia: true - o-api: true - o-cw: true - o-hk: true - o-hm: true - voting: false - -- job: - name: kuryr-kubernetes-tempest-amphora-ovs - parent: kuryr-kubernetes-octavia-base-ovs - vars: - devstack_localrc: - KURYR_EP_DRIVER_OCTAVIA_PROVIDER: amphora - devstack_local_conf: - post-config: - $OCTAVIA_CONF: - controller_worker: - amp_active_retries: 9999 - health_manager: - failover_threads: 2 - health_update_threads: 2 - stats_update_threads: 2 - api_settings: - enabled_provider_drivers: amphora:'Octavia Amphora driver' - voting: false diff --git a/.zuul.d/tempest-multinode-jobs.yaml b/.zuul.d/tempest-multinode-jobs.yaml deleted file mode 100644 index 189774848..000000000 --- a/.zuul.d/tempest-multinode-jobs.yaml +++ /dev/null @@ -1,168 +0,0 @@ -# Copyright 2018 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -- job: - name: kuryr-kubernetes-tempest-multinode - parent: kuryr-kubernetes-octavia-base - description: | - Kuryr-Kubernetes tempest multinode job with OVN - nodeset: kuryr-nested-virt-two-node-jammy - host-vars: - controller: - devstack_plugins: - octavia: https://opendev.org/openstack/octavia - octavia-tempest-plugin: https://opendev.org/openstack/octavia-tempest-plugin - group-vars: - subnode: - devstack_plugins: - devstack-plugin-container: https://opendev.org/openstack/devstack-plugin-container - kuryr-kubernetes: https://opendev.org/openstack/kuryr-kubernetes - devstack_services: - c-bak: false - c-vol: false - dstat: false - kubernetes-master: false - kubernetes-worker: true - kuryr-daemon: true - kuryr-kubernetes: false - neutron: true - ovn-northd: false - ovn-octavia-provider: true - placement-client: true - q-svc: false - devstack_local_conf: - post-config: - $OCTAVIA_CONF: - controller_worker: - amp_active_retries: 9999 - api_settings: - enabled_provider_drivers: amphora:'Octavia Amphora driver',ovn:'Octavia OVN driver' - health_manager: - failover_threads: 2 - health_update_threads: 2 - stats_update_threads: 2 - devstack_localrc: - CONTAINER_ENGINE: crio - CRIO_VERSION: "1.28" - KURYR_ENABLED_HANDLERS: vif,endpoints,service,namespace,pod_label,policy,kuryrnetworkpolicy,kuryrnetwork,kuryrport,kuryrloadbalancer - KURYR_ENFORCE_SG_RULES: false - KURYR_EP_DRIVER_OCTAVIA_PROVIDER: ovn - KURYR_K8S_OCTAVIA_MEMBER_MODE: L2 - KURYR_LB_ALGORITHM: SOURCE_IP_PORT - KURYR_NEUTRON_DEFAULT_ROUTER: kuryr-router - KURYR_SG_DRIVER: policy - KURYR_SUBNET_DRIVER: namespace - OVN_BRANCH: v21.06.0 - OVS_BRANCH: "a4b04276ab5934d087669ff2d191a23931335c87" - OVN_BUILD_FROM_SOURCE: true - OVN_L3_CREATE_PUBLIC_NETWORK: true - VAR_RUN_PATH: /usr/local/var/run - vars: - tempest_test_regex: '^(kuryr_tempest_plugin.tests.scenario.test_cross_ping_multi_worker.TestCrossPingScenarioMultiWorker)' - devstack_localrc: - KURYR_K8S_MULTI_WORKER_TESTS: true - devstack_local_conf: - post-config: - $OCTAVIA_CONF: - controller_worker: - amp_active_retries: 9999 - api_settings: - enabled_provider_drivers: amphora:'Octavia Amphora driver',ovn:'Octavia OVN driver' - health_manager: - failover_threads: 2 - health_update_threads: 2 - stats_update_threads: 2 - devstack_services: - kubernetes-master: true - kubernetes-worker: false - kuryr-daemon: true - kuryr-kubernetes: true - zuul_copy_output: - '{{ devstack_base_dir }}/data/ovn': 'logs' - '{{ devstack_log_dir }}/ovsdb-server-nb.log': 'logs' - '{{ devstack_log_dir }}/ovsdb-server-sb.log': 'logs' - voting: false - -- job: - name: kuryr-kubernetes-tempest-multinode-ovs - parent: kuryr-kubernetes-octavia-base-ovs - description: | - Kuryr-Kubernetes tempest multinode job with OVS - nodeset: kuryr-nested-virt-two-node-jammy - group-vars: - subnode: - devstack_plugins: - devstack-plugin-container: https://opendev.org/openstack/devstack-plugin-container - kuryr-kubernetes: https://opendev.org/openstack/kuryr-kubernetes - devstack_services: - c-bak: false - c-vol: false - dstat: false - kubernetes-master: false - kubernetes-worker: true - kuryr-daemon: true - kuryr-kubernetes: false - neutron: true - ovn-controller: false - ovs-vswitchd: false - ovsdb-server: false - placement-client: true - q-agt: true - q-dhcp: true - q-l3: true - q-meta: true - q-ovn-metadata-agent: false - q-svc: false - devstack_localrc: - CONTAINER_ENGINE: crio - CRIO_VERSION: "1.26" - KURYR_ENABLED_HANDLERS: 
vif,endpoints,service,namespace,pod_label,policy,kuryrnetworkpolicy,kuryrnetwork,kuryrport,kuryrloadbalancer - KURYR_ENFORCE_SG_RULES: true - KURYR_SG_DRIVER: policy - KURYR_SUBNET_DRIVER: namespace - ML2_L3_PLUGIN: router - Q_AGENT: openvswitch - Q_ML2_PLUGIN_MECHANISM_DRIVERS: openvswitch - Q_ML2_TENANT_NETWORK_TYPE: vxlan - vars: - tempest_test_regex: '^(kuryr_tempest_plugin.tests.scenario.test_cross_ping_multi_worker.TestCrossPingScenarioMultiWorker)' - devstack_services: - dstat: false - kubernetes-master: true - kubernetes-worker: false - kuryr-daemon: true - kuryr-kubernetes: true - neutron: true - devstack_localrc: - KURYR_K8S_MULTI_WORKER_TESTS: true - voting: false - -- job: - name: kuryr-kubernetes-tempest-multinode-ha - parent: kuryr-kubernetes-tempest-multinode - description: | - Kuryr-Kubernetes tempest multinode job running containerized in HA - timeout: 7800 - vars: - devstack_localrc: - KURYR_CONTROLLER_REPLICAS: 2 - KURYR_K8S_SERIAL_TESTS: true - tempest_concurrency: 1 - group-vars: - subnode: - devstack_plugins: - devstack-plugin-container: https://opendev.org/openstack/devstack-plugin-container - kuryr-kubernetes: https://opendev.org/openstack/kuryr-kubernetes - devstack_services: - kubernetes-worker: true diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst deleted file mode 100644 index ffce2e746..000000000 --- a/CONTRIBUTING.rst +++ /dev/null @@ -1,19 +0,0 @@ -The source repository for this project can be found at: - - https://opendev.org/openstack/kuryr-kubernetes - -Pull requests submitted through GitHub are not monitored. - -To start contributing to OpenStack, follow the steps in the contribution guide -to set up and use Gerrit: - - https://docs.openstack.org/contributors/code-and-documentation/quick-start.html - -Bugs should be filed on Launchpad: - - https://bugs.launchpad.net/kuryr-kubernetes - -For more specific information about contributing to this repository, see the -kuryr-kubernetes contributor guide: - - https://docs.openstack.org/kuryr-kubernetes/latest/contributor/contributing.html diff --git a/HACKING.rst b/HACKING.rst deleted file mode 100644 index fa6c33ecf..000000000 --- a/HACKING.rst +++ /dev/null @@ -1,5 +0,0 @@ -=================================== -kuryr-kubernetes Style Commandments -=================================== - -Read the OpenStack Style Commandments https://docs.openstack.org/hacking/latest diff --git a/LICENSE b/LICENSE deleted file mode 100644 index 68c771a09..000000000 --- a/LICENSE +++ /dev/null @@ -1,176 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. 
- - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. 
Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - diff --git a/README.rst b/README.rst index f493bec4e..4ee2c5f13 100644 --- a/README.rst +++ b/README.rst @@ -1,35 +1,10 @@ -======================== -Team and repository tags -======================== +This project is no longer maintained. -.. image:: https://governance.openstack.org/tc/badges/kuryr-kubernetes.svg - :target: https://governance.openstack.org/tc/reference/tags/index.html +The contents of this repository are still available in the Git +source code management system. To see the contents of this +repository before it reached its end of life, please check out the +previous commit with "git checkout HEAD^1". -.. Change things from this point on - - -Project description -=================== - -Kubernetes integration with OpenStack networking - -The OpenStack Kuryr project enables native Neutron-based networking in -Kubernetes. With Kuryr-Kubernetes it's now possible to choose to run both -OpenStack VMs and Kubernetes Pods on the same Neutron network if your workloads -require it or to use different segments and, for example, route between them. - -* Free software: Apache license -* Documentation: https://docs.openstack.org/kuryr-kubernetes/latest -* Source: https://opendev.org/openstack/kuryr-kubernetes -* Bugs: https://bugs.launchpad.net/kuryr-kubernetes -* Overview and demo: https://superuser.openstack.org/articles/networking-kubernetes-kuryr -* Release notes: https://docs.openstack.org/releasenotes/kuryr-kubernetes/ - - -Contribution guidelines ------------------------ - -For the process of new feature addition, refer to the `Kuryr Policy`_. 
- - -.. _Kuryr Policy: https://wiki.openstack.org/wiki/Kuryr#Kuryr_Policies +For any further questions, please email +openstack-discuss@lists.openstack.org or join #openstack-dev on +OFTC. diff --git a/babel.cfg b/babel.cfg deleted file mode 100644 index 15cd6cb76..000000000 --- a/babel.cfg +++ /dev/null @@ -1,2 +0,0 @@ -[python: **.py] - diff --git a/cni.Dockerfile b/cni.Dockerfile deleted file mode 100644 index 40c44da55..000000000 --- a/cni.Dockerfile +++ /dev/null @@ -1,42 +0,0 @@ -FROM quay.io/kuryr/golang:1.16 as builder - -WORKDIR /go/src/opendev.com/kuryr-kubernetes -COPY . . - -RUN GO111MODULE=auto go build -o /go/bin/kuryr-cni ./kuryr_cni/pkg/* - -FROM quay.io/centos/centos:stream9 -LABEL authors="Antoni Segura Puimedon, Michał Dulko" - -ARG UPPER_CONSTRAINTS_FILE="https://releases.openstack.org/constraints/upper/master" -ARG OSLO_LOCK_PATH=/var/kuryr-lock -ARG RDO_REPO=https://www.rdoproject.org/repos/rdo-release.el9.rpm - -RUN dnf upgrade -y && dnf install -y epel-release $RDO_REPO \ - && dnf install -y --setopt=tsflags=nodocs python3-pip openvswitch sudo iproute pciutils kmod-libs \ - && dnf install -y --setopt=tsflags=nodocs gcc gcc-c++ python3-devel git - -COPY . /opt/kuryr-kubernetes - -ARG VIRTUAL_ENV=/opt/venv -RUN python3 -m venv $VIRTUAL_ENV -# This is enough to activate a venv -ENV PATH="$VIRTUAL_ENV/bin:$PATH" - -RUN pip3 --no-cache-dir install -U pip \ - && python3 -m pip --no-cache-dir install -c $UPPER_CONSTRAINTS_FILE /opt/kuryr-kubernetes \ - && cp /opt/kuryr-kubernetes/cni_ds_init /usr/bin/cni_ds_init \ - && mkdir -p /etc/kuryr-cni \ - && cp /opt/kuryr-kubernetes/etc/cni/net.d/* /etc/kuryr-cni \ - && dnf -y history undo last \ - && dnf clean all \ - && rm -rf /opt/kuryr-kubernetes \ - && mkdir ${OSLO_LOCK_PATH} - -COPY --from=builder /go/bin/kuryr-cni /kuryr-cni - -ARG CNI_DAEMON=True -ENV CNI_DAEMON ${CNI_DAEMON} -ENV OSLO_LOCK_PATH=${OSLO_LOCK_PATH} - -ENTRYPOINT [ "cni_ds_init" ] diff --git a/cni_ds_init b/cni_ds_init deleted file mode 100755 index a7269b0ab..000000000 --- a/cni_ds_init +++ /dev/null @@ -1,22 +0,0 @@ -#!/bin/bash -ex - -function cleanup() { - rm -f "/etc/cni/net.d/10-kuryr.conflist" - rm -f "/opt/cni/bin/kuryr-cni" -} - -function deploy() { - # Copy the binary into the designated location - cp /kuryr-cni "/opt/cni/bin/kuryr-cni" - chmod +x /opt/cni/bin/kuryr-cni - if [ -f /etc/cni/net.d/kuryr.conflist.template ]; then - cp /etc/cni/net.d/kuryr.conflist.template /etc/cni/net.d/10-kuryr.conflist - else - cp /etc/kuryr-cni/kuryr.conflist.template /etc/cni/net.d/10-kuryr.conflist - fi -} - -cleanup -deploy - -exec kuryr-daemon --config-file /etc/kuryr/kuryr.conf diff --git a/contrib/devstack-heat/.gitignore b/contrib/devstack-heat/.gitignore deleted file mode 100644 index d0625d0ca..000000000 --- a/contrib/devstack-heat/.gitignore +++ /dev/null @@ -1,4 +0,0 @@ -.idea -*.pem -__pycache__ -*.pyc diff --git a/contrib/devstack-heat/README.rst b/contrib/devstack-heat/README.rst deleted file mode 100644 index 56ae96906..000000000 --- a/contrib/devstack-heat/README.rst +++ /dev/null @@ -1,88 +0,0 @@ -Kuryr Heat Templates -==================== - -This set of scripts and Heat templates is useful for deploying DevStack -scenarios. It handles the creation of an all-in-one DevStack Nova instance and -its networking needs.
- -Prerequisites -~~~~~~~~~~~~~ - -Packages to install on the host you run devstack-heat (not on the cloud -server): - -* python-openstackclient - -After creating the instance, devstack-heat will immediately start creating a -devstack `stack` user and using devstack to stack kuryr-kubernetes. When it is -finished, there'll be a file named `/opt/stack/ready`. - -How to run -~~~~~~~~~~ - -In order to run it, make sure you reviewed values in `hot/parameters.yml` -(especially the `image`, `flavor` and `public_net` properties, the last one -telling in which network to create the floating IPs). The cloud credentials -should be in `~/.config/openstack/clouds.yaml`. Then the most basic run -requires executing:: - - ./devstack_heat.py -c <cloud> stack -e hot/parameters.yml <name> - -This will deploy the latest master on <cloud> in a stack -<name>. You can also specify other sources than master:: - - --gerrit GERRIT ID of Kuryr Gerrit change - --commit COMMIT Kuryr commit ID - --branch BRANCH Kuryr branch - --devstack-branch DEVSTACK_BRANCH DevStack branch to use - -Note that some of these options are mutually exclusive. - -Besides that you can customize deployments using those options:: - - -p KEY=VALUE, --parameter KEY=VALUE Heat stack parameters - --local-conf LOCAL_CONF URL to DevStack local.conf file - --bashrc BASHRC URL to bashrc file to put on VM - --additional-key ADDITIONAL_KEY URL to additional SSH key to add for - stack user - -`stack` will save you a private key for the deployment in a `<name>.pem` -file in the current directory. - -Getting inside the deployment ----------------------------- - -You can then ssh into the deployment in two ways:: - - ./devstack_heat.py show <name> - -Write down the FIP it tells you and then (might be skipped, key should be -there):: - - ./devstack_heat.py key <name> > ./<name>.pem - -Finally to get in (use the default username for the distro of your chosen -glance image, in the example below ubuntu):: - - ssh -i ./<name>.pem ubuntu@<FIP> - -Alternatively, if you wait a bit, devstack-heat will have set up the devstack -stack user and you can just do:: - - ./devstack_heat.py ssh <name> - -If you want to observe the progress of the installation you can use `join` to -make it stream `stack.sh` logs:: - - ./devstack_heat.py join <name> - -Note that you can make `stack` join automatically using its `--join` option. - -To delete the deployment:: - - ./devstack_heat.py unstack <name> - -Supported images ---------------- - -Scripts were tested with the latest Ubuntu 20.04 cloud images. diff --git a/contrib/devstack-heat/devstack_heat.py b/contrib/devstack-heat/devstack_heat.py deleted file mode 100755 index b3a16732d..000000000 --- a/contrib/devstack-heat/devstack_heat.py +++ /dev/null @@ -1,218 +0,0 @@ -#!/usr/bin/env python3 - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License.
- -import argparse -import os -import subprocess -import sys -import time - -import openstack -from openstack import exceptions as o_exc - - -class ParseDict(argparse.Action): - def __call__(self, parser, namespace, values, option_string=None): - d = getattr(namespace, self.dest, {}) - if not d: - d = {} - - if values: - split_items = values.split("=", 1) - key = split_items[0].strip() - value = split_items[1] - - d[key] = value - - setattr(namespace, self.dest, d) - - -class DevStackHeat(object): - HOT_FILE = 'hot/devstack_heat_template.yml' - - def __init__(self): - parser = self._get_arg_parser() - args = parser.parse_args() - if hasattr(args, 'func'): - self._setup_openstack(args.cloud) - args.func(args) - return - - parser.print_help() - parser.exit() - - def _get_arg_parser(self): - parser = argparse.ArgumentParser( - description="Deploy a DevStack VM with Kuryr-Kubernetes") - parser.add_argument('-c', '--cloud', help='name in clouds.yaml to use') - - subparsers = parser.add_subparsers(help='supported commands') - - stack = subparsers.add_parser('stack', help='run the VM') - stack.add_argument('name', help='name of the stack') - stack.add_argument('-e', '--environment', help='Heat stack env file', - default='hot/parameters.yml') - stack.add_argument('-p', '--parameter', help='Heat stack parameters', - metavar='KEY=VALUE', - action=ParseDict) - stack.add_argument('-j', '--join', help='SSH the stack and watch log', - action='store_true') - stack.add_argument('--local-conf', - help='URL to DevStack local.conf file') - stack.add_argument('--bashrc', - help='URL to bashrc file to put on VM') - source = stack.add_mutually_exclusive_group() - source.add_argument('--gerrit', help='ID of Kuryr Gerrit change') - source.add_argument('--commit', help='Kuryr commit ID') - source.add_argument('--branch', help='Kuryr branch') - stack.add_argument('--devstack-branch', help='DevStack branch to use', - default='master') - stack.add_argument('--additional-key', help='Additional SSH key to ' - 'add for stack user') - stack.set_defaults(func=self.stack) - - unstack = subparsers.add_parser('unstack', help='delete the VM') - unstack.add_argument('name', help='name of the stack') - unstack.set_defaults(func=self.unstack) - - key = subparsers.add_parser('key', help='get SSH key') - key.add_argument('name', help='name of the stack') - key.set_defaults(func=self.key) - - show = subparsers.add_parser('show', help='show basic stack info') - show.add_argument('name', help='name of the stack') - show.set_defaults(func=self.show) - - ssh = subparsers.add_parser('ssh', help='SSH to the stack') - ssh.add_argument('name', help='name of the stack') - ssh.set_defaults(func=self.ssh) - - join = subparsers.add_parser('join', help='join watching logs of' - 'DevStack installation') - join.add_argument('name', help='name of the stack') - join.set_defaults(func=self.join) - - return parser - - def _setup_openstack(self, cloud_name): - self.heat = openstack.connection.from_config( - cloud=cloud_name).orchestration - - def _find_output(self, stack, name): - for output in stack.outputs: - if output['output_key'] == name: - return output['output_value'] - return None - - def _get_private_key(self, name): - stack = self.heat.find_stack(name) - if stack: - return self._find_output(stack, 'master_key_priv') - return None - - def stack(self, args): - stack_attrs = self.heat.read_env_and_templates( - template_file=self.HOT_FILE, environment_files=[args.environment]) - - stack_attrs['name'] = args.name - stack_attrs['parameters'] = 
args.parameter or {} - if args.local_conf: - stack_attrs['parameters']['local_conf'] = args.local_conf - if args.bashrc: - stack_attrs['parameters']['bashrc'] = args.bashrc - if args.additional_key: - stack_attrs['parameters']['ssh_key'] = args.additional_key - if args.gerrit: - stack_attrs['parameters']['gerrit_change'] = args.gerrit - if args.commit: - stack_attrs['parameters']['git_hash'] = args.commit - if args.branch: - stack_attrs['parameters']['branch'] = args.branch - if args.devstack_branch: - stack_attrs['parameters']['devstack_branch'] = args.devstack_branch - - print(f'Creating stack {args.name}') - stack = self.heat.create_stack(**stack_attrs) - print(f'Waiting for stack {args.name} to create') - self.heat.wait_for_status(stack, status='CREATE_COMPLETE', - failures=['CREATE_FAILED'], wait=600) - print(f'Stack {args.name} created') - - print(f'Saving SSH key to {args.name}.pem') - key = self._get_private_key(args.name) - if not key: - print(f'Private key or stack {args.name} not found') - return - with open(f'{args.name}.pem', "w") as pemfile: - print(key, file=pemfile) - - os.chmod(f'{args.name}.pem', 0o600) - - if args.join: - time.sleep(120) # FIXME(dulek): This isn't pretty. - self.join(args) - - def unstack(self, args): - stack = self.heat.find_stack(args.name) - if stack: - self.heat.delete_stack(stack) - try: - self.heat.wait_for_status(stack, status='DELETE_COMPLETE', - failures=['DELETE_FAILED']) - except o_exc.ResourceNotFound: - print(f'Stack {args.name} deleted') - print(f'Deleting SSH key {args.name}.pem') - os.unlink(f'{args.name}.pem') - else: - print(f'Stack {args.name} not found') - - def key(self, args): - key = self._get_private_key(args.name) - if not key: - print(f'Private key or stack {args.name} not found') - return - print(key) - - def show(self, args): - stack = self.heat.find_stack(args.name) - if not stack: - print(f'Stack {args.name} not found') - return - ips = self._find_output(stack, 'node_fips') - print(f'IPs: {", ".join(ips)}') - - def _ssh(self, keyname, ip, command=None): - if not command: - command = [] - subprocess.run(['ssh', '-i', keyname, f'stack@{ip}'] + command, - stdin=sys.stdin, stdout=sys.stdout) - - def ssh(self, args, command=None): - stack = self.heat.find_stack(args.name) - if not stack: - print(f'Stack {args.name} not found') - return - ips = self._find_output(stack, 'node_fips') - if not ips: - print(f'Stack {args.name} has no IPs') - return - self._ssh(f'{args.name}.pem', ips[0], command) - - def join(self, args): - stack = self.heat.find_stack(args.name) - if not stack: - print(f'Stack {args.name} not found') - return - ips = self._find_output(stack, 'node_fips') - if not ips: - print(f'Stack {args.name} has no IPs') - return - self.ssh(args, ['tail', '-f', '/opt/stack/devstack.log']) - - -if __name__ == '__main__': - DevStackHeat() diff --git a/contrib/devstack-heat/hot/devstack_heat_template.yml b/contrib/devstack-heat/hot/devstack_heat_template.yml deleted file mode 100644 index 234391bcc..000000000 --- a/contrib/devstack-heat/hot/devstack_heat_template.yml +++ /dev/null @@ -1,121 +0,0 @@ -heat_template_version: 2015-10-15 - -description: Simple template to deploy kuryr resources - -parameters: - image: - type: string - label: Image name or ID - description: Image to be used for the kuryr nodes - default: Ubuntu20.04 - flavor: - type: string - label: Flavor - description: Flavor to be used for the VM - default: m1.xlarge - public_net: - type: string - description: public network for the instances - default: public - vm_net_cidr: - type: string - description: vm_net network address
(CIDR notation) - default: 10.11.0.0/24 - vm_net_gateway: - type: string - description: vm_net network gateway address - default: 10.11.0.1 - node_num: - type: number - description: Number of VMs - default: 1 - local_conf: - type: string - label: local.conf file to use - description: URL of local.conf file to use when deploying DevStack - default: "" - gerrit_change: - type: string - label: Gerrit change to deploy Kuryr from - description: Gerrit change number to clone Kuryr from - default: "" - git_hash: - type: string - label: Commit from which to deploy Kuryr - description: Commit hash from which Kuryr should be deployed - default: "" - bashrc: - type: string - label: bashrc file URL - description: URL of bashrc file that will be appended for stack user - default: "" - branch: - type: string - label: Branch which should be deployed - description: E.g. master or stable/queens - default: "" - devstack_branch: - type: string - label: Branch which should be deployed - description: E.g. master or stable/queens - default: "" - ssh_key: - type: string - label: Additional SSH key - description: To be added for stack user. - default: "" - -resources: - network: - type: OS::Kuryr::DevstackNetworking - properties: - public_net: { get_param: public_net } - vm_net_cidr: { get_param: vm_net_cidr } - vm_net_gateway: { get_param: vm_net_gateway } - - master_key: - type: OS::Nova::KeyPair - properties: - name: { get_param: 'OS::stack_name' } - save_private_key: true - - nodes: - type: OS::Heat::ResourceGroup - properties: - count: { get_param: node_num } - resource_def: - type: OS::Kuryr::DevstackNode - properties: - public_net: { get_param: public_net } - image: { get_param: image } - flavor: { get_param: flavor } - key: { get_resource: master_key } - local_conf: { get_param: local_conf } - gerrit_change: { get_param: gerrit_change } - branch: { get_param: branch } - devstack_branch: { get_param: devstack_branch } - ssh_key: { get_param: ssh_key } - git_hash: { get_param: git_hash } - bashrc: { get_param: bashrc } - private_key: { get_attr: [master_key, private_key] } - public_key: { get_attr: [master_key, public_key] } - vm_net: { get_attr: [network, vm_net_id] } - vm_subnet: { get_attr: [network, vm_subnet_id] } - vm_sg: { get_attr: [network, vm_sg_id] } - name: - str_replace: - template: "__stack__/vm-%index%" - params: - __stack__: { get_param: 'OS::stack_name' } - -outputs: - node_fips: - value: { get_attr: [nodes, node_fip] } - vm_subnet: - value: { get_attr: [network, vm_subnet_id] } - vm_sg: - value: { get_attr: [network, vm_sg_id] } - master_key_pub: - value: { get_attr: [master_key, public_key] } - master_key_priv: - value: { get_attr: [master_key, private_key] } diff --git a/contrib/devstack-heat/hot/distro_deps.sh b/contrib/devstack-heat/hot/distro_deps.sh deleted file mode 100644 index 972d5dd1e..000000000 --- a/contrib/devstack-heat/hot/distro_deps.sh +++ /dev/null @@ -1,19 +0,0 @@ -distro=$(awk -F'=' '/^ID=/ {print $2}' /etc/os-release) -distro="${distro%\"}" -distro="${distro#\"}" - -if [[ "$distro" =~ centos|fedora ]]; then - yum install -y git python-devel - yum group install -y Development Tools - if [[ "$distro" == "centos" ]]; then - yum install -y epel-release - sed -i -e '/Defaults requiretty/{ s/.*/# Defaults requiretty/ }' /etc/sudoers - fi - yum install -y jq - yum install -y python-pip - pip install -U setuptools -elif [[ "$distro" =~ ubuntu|debian ]]; then - apt update -y - apt upgrade -y - apt-get install -y build-essential git python-dev jq -fi diff --git 
a/contrib/devstack-heat/hot/networking_deployment.yaml b/contrib/devstack-heat/hot/networking_deployment.yaml deleted file mode 100644 index cf5f8ab3d..000000000 --- a/contrib/devstack-heat/hot/networking_deployment.yaml +++ /dev/null @@ -1,80 +0,0 @@ -heat_template_version: 2014-10-16 - -description: Simple template to deploy kuryr resources - -parameters: - public_net: - type: string - label: public net ID - description: Public network for the node FIPs - vm_net_cidr: - type: string - description: vm_net network address (CIDR notation) - vm_net_gateway: - type: string - description: vm_net network gateway address - -resources: - vm_net: - type: OS::Neutron::Net - properties: - name: - str_replace: - template: __stack__/vm_net - params: - __stack__: { get_param: 'OS::stack_name' } - - vm_subnet: - type: OS::Neutron::Subnet - properties: - network_id: { get_resource: vm_net } - cidr: { get_param: vm_net_cidr } - gateway_ip: { get_param: vm_net_gateway } - name: - str_replace: - template: __stack__/vm_subnet - params: - __stack__: { get_param: 'OS::stack_name' } - - kuryr_router: - type: OS::Neutron::Router - properties: - external_gateway_info: - network: { get_param: public_net } - name: - str_replace: - template: __stack__/router - params: - __stack__: { get_param: 'OS::stack_name' } - - kr_vm_iface: - type: OS::Neutron::RouterInterface - properties: - router_id: { get_resource: kuryr_router } - subnet_id: { get_resource: vm_subnet } - - vm_sg: - type: OS::Neutron::SecurityGroup - properties: - name: vm_sg - description: Ping and SSH - rules: - - protocol: icmp - - ethertype: IPv4 - remote_mode: remote_group_id - - ethertype: IPv6 - remote_mode: remote_group_id - - protocol: tcp - port_range_min: 22 - port_range_max: 22 - - protocol: tcp - port_range_min: 8080 - port_range_max: 8080 - -outputs: - vm_net_id: - value: { get_resource: vm_net } - vm_subnet_id: - value: { get_resource: vm_subnet } - vm_sg_id: - value: { get_resource: vm_sg } diff --git a/contrib/devstack-heat/hot/node.yaml b/contrib/devstack-heat/hot/node.yaml deleted file mode 100644 index e56183320..000000000 --- a/contrib/devstack-heat/hot/node.yaml +++ /dev/null @@ -1,206 +0,0 @@ -heat_template_version: 2015-10-15 - -description: template to deploy devstack nodes - -parameters: - public_net: - type: string - label: public net ID - description: Public network for the node FIPs - image: - type: string - label: Image name or ID - description: Image to be used for the kuryr nodes - flavor: - type: string - label: Flavor - description: Flavor to be used for the image - default: m1.small - key: - type: string - label: key name - description: Keypair to be used for the instance - public_key: - type: string - label: key content for stack user authorized_keys - description: private key to configure all nodes - private_key: - type: string - label: key content to access other nodes - description: private key to configure all nodes - vm_net: - type: string - label: VM Network - description: Neutron network for VMs - vm_subnet: - type: string - label: VM Subnet - description: Neutron subnet for VMs - vm_sg: - type: string - label: kubernetes API sg - description: Security Group for Kubernetes API - name: - type: string - label: Instance name - description: devstack node instance name - local_conf: - type: string - label: local.conf file to use - description: URL of local.conf file to use when deploying DevStack - gerrit_change: - type: string - label: Gerrit change to deploy Kuryr from - description: Gerrit change number to clone 
Kuryr from - git_hash: - type: string - label: Commit from which to deploy Kuryr - description: Commit hash from which Kuryr should be deployed - bashrc: - type: string - label: bashrc file URL - description: URL of bashrc file that will be injected for stack user - default: "" - branch: - type: string - label: Branch which should be deployed - description: E.g. master or stable/queens - default: "" - devstack_branch: - type: string - label: Branch which should be deployed - description: E.g. master or stable/queens - default: "" - ssh_key: - type: string - label: Additional SSH key - description: To be added for stack user. - default: "" - -resources: - instance_port: - type: OS::Neutron::Port - properties: - network: { get_param: vm_net } - security_groups: - - default - - { get_param: vm_sg } - fixed_ips: - - subnet: { get_param: vm_subnet } - - instance_fip: - type: OS::Neutron::FloatingIP - properties: - floating_network: { get_param: public_net } - port_id: { get_resource: instance_port } - - instance: - type: OS::Nova::Server - properties: - name: { get_param: name } - image: { get_param: image } - flavor: { get_param: flavor } - key_name: { get_param: key } - networks: - - port: { get_resource: instance_port } - user_data_format: RAW - user_data: - str_replace: - params: - __distro_deps__: { get_file: distro_deps.sh } - __gerrit_change__: { get_param: gerrit_change } - __git_hash__: { get_param: git_hash } - __local_conf__: { get_param: local_conf } - __bashrc__: { get_param: bashrc } - __pubkey__: { get_param: public_key } - __branch__: { get_param: branch } - __devstack_branch__: { get_param: devstack_branch } - __ssh_key__: { get_param: ssh_key } - template: | - #!/bin/bash - set -ex - - # Wait a bit for connectivity - sleep 30 - - # Stack user config - groupadd stack - useradd -s /bin/bash -d /opt/stack -m stack -g stack - mkdir /opt/stack/.ssh - cat > /opt/stack/.ssh/authorized_keys << EOF - __pubkey__ - EOF - if [[ ! -z "__ssh_key__" ]]; then - curl "__ssh_key__" >> /opt/stack/.ssh/authorized_keys - fi - echo "stack ALL=(ALL) NOPASSWD: ALL" | tee /etc/sudoers.d/stack - curl "__bashrc__" >> /opt/stack/.bashrc - chown -R stack:stack /opt/stack - chmod 755 /opt/stack - - # Deps for devstack - __distro_deps__ - - # Stacking - sudo -i -u stack /bin/bash - <<"EOF" - function get_from_gerrit() { - local gerrit_change - local ref - - gerrit_change="__gerrit_change__" - echo "Finding latest ref for change ${gerrit_change}" - ref=$(curl -s "https://review.opendev.org/changes/${gerrit_change}?o=CURRENT_REVISION" | tail -n +2 | jq -r '.revisions[].ref') - echo "Fetching ref ${ref}" - git fetch https://opendev.org/openstack/kuryr-kubernetes "${ref}" && git checkout FETCH_HEAD - } - - function get_from_sha() { - local commit_sha - - commit_sha="__git_hash__" - echo "Sha to fetch: ${commit_sha}" - git checkout "$commit_sha" - } - - cd /opt/stack - git clone https://opendev.org/openstack-dev/devstack - if [[ ! -z "__devstack_branch__" ]]; then - pushd devstack - git checkout "__devstack_branch__" - popd - fi - git clone https://github.com/openstack/kuryr-kubernetes - pushd kuryr-kubernetes - - if [[ ! -z "__git_hash__" ]]; then - get_from_sha - elif [[ ! -z "__gerrit_change__" ]]; then - get_from_gerrit - elif [[ ! 
-z "__branch__" ]]; then - git checkout "__branch__" - else - "Deploying from master" - fi - popd - pushd devstack - - if [[ -z "__local_conf__" ]]; then - # The change is already downloaded, do not reclone - sed -e 's/# RECLONE=/RECLONE=/' /opt/stack/kuryr-kubernetes/devstack/local.conf.sample > /opt/stack/devstack/local.conf - else - curl "__local_conf__" > /opt/stack/devstack/local.conf - fi - popd - - touch stacking - - pushd devstack - ./stack.sh >> /opt/stack/devstack.log 2>&1 - popd - - touch ready - EOF -outputs: - node_fip: - description: FIP address of the node - value: { get_attr: [instance_fip, floating_ip_address] } diff --git a/contrib/devstack-heat/hot/parameters.yml b/contrib/devstack-heat/hot/parameters.yml deleted file mode 100644 index ed15861e2..000000000 --- a/contrib/devstack-heat/hot/parameters.yml +++ /dev/null @@ -1,10 +0,0 @@ -parameter_defaults: - vm_net_cidr: 10.11.0.0/24 - vm_net_gateway: 10.11.0.1 - public_net: 316eeb47-1498-46b4-b39e-00ddf73bd2a5 - image: Ubuntu20.04 - flavor: m1.xlarge - -resource_registry: - OS::Kuryr::DevstackNetworking: networking_deployment.yaml - OS::Kuryr::DevstackNode: node.yaml diff --git a/contrib/kubectl_plugins/README.rst b/contrib/kubectl_plugins/README.rst deleted file mode 100644 index 4e3ba8848..000000000 --- a/contrib/kubectl_plugins/README.rst +++ /dev/null @@ -1,28 +0,0 @@ -==================== -Kuryr kubectl plugin -==================== - -This plugin aims to bring kuryr introspection an interaction to the kubectl and -oc command line tools. - - -Installation ------------- - -Place the kuryr directory in your ~/.kube/plugins - - -Usage ------ - -The way to use it is via the kubectl/oc plugin facility:: - - kubectl plugin kuryr get vif -o wide -l deploymentconfig=demo - - -Media ------ - -You can see an example of its operation: - -.. 
image:: kubectl_kuryr_plugin_1080.gif diff --git a/contrib/kubectl_plugins/kubectl_kuryr_plugin_1080.gif b/contrib/kubectl_plugins/kubectl_kuryr_plugin_1080.gif deleted file mode 100644 index eb2d23c41..000000000 Binary files a/contrib/kubectl_plugins/kubectl_kuryr_plugin_1080.gif and /dev/null differ diff --git a/contrib/kubectl_plugins/kuryr/kuryr b/contrib/kubectl_plugins/kuryr/kuryr deleted file mode 100755 index d3771bd0a..000000000 --- a/contrib/kubectl_plugins/kuryr/kuryr +++ /dev/null @@ -1,186 +0,0 @@ -#!/usr/bin/env python3 - -import argparse -import base64 -import json -import os -from os.path import expanduser -import sys -import tempfile -import urllib.parse - -import yaml -import requests -from pprint import pprint - - -def _get_session_from_kubeconfig(): - kubeconfig = expanduser('~/.kube/config') - with open(kubeconfig, 'r') as f: - conf = yaml.safe_load(f.read()) - - for context in conf['contexts']: - if context['name'] == conf['current-context']: - current_context = context - break - - cluster_name = current_context['context']['cluster'] - for cluster in conf['clusters']: - if cluster['name'] == cluster_name: - current_cluster = cluster - break - server = current_cluster['cluster']['server'] - - if server.startswith('https'): - ca_cert_data = current_cluster['cluster']['certificate-authority-data'] - - for user in conf['users']: - if user['name'] == current_context['context']['user']: - current_user = user - break - client_cert_data = current_user['user']['client-certificate-data'] - client_key_data = current_user['user']['client-key-data'] - - client_cert_file = tempfile.NamedTemporaryFile(delete=False) - client_key_file = tempfile.NamedTemporaryFile(delete=False) - ca_cert_file = tempfile.NamedTemporaryFile(delete=False) - - client_cert_file.write(base64.decodebytes(client_cert_data.encode())) - client_cert_file.close() - - client_key_file.write(base64.decodebytes(client_key_data.encode())) - client_key_file.close() - - ca_cert_file.write(base64.decodebytes(ca_cert_data.encode())) - ca_cert_file.close() - - session = requests.Session() - session.cert = (client_cert_file.name, client_key_file.name) - session.verify = ca_cert_file.name - else: - session = requests.Session() - - return session, server - - -def get(args): - session, server = _get_session_from_kubeconfig() - namespace = os.getenv('KUBECTL_PLUGINS_CURRENT_NAMESPACE') - if args.resource in ('vif', 'vifs'): - vifs(session, server, namespace, args) - - -def _vif_formatted_output(vif_data, wide=False): - max_len = 12 - padding = 4 - vif_data.insert(0, - {'pod_name': 'POD NAME', - 'vif_name': 'VIF NAME', - 'host_ip': 'HOST IP', - 'plugin': 'BINDING', - 'active': 'ACTIVE', - 'address': 'IP ADDRESS', - 'port_id': 'PORT ID', - 'mac_address': 'MAC ADDRESS', - 'vlan_id': 'VLAN'}) - short_format = ('{pod_name:{tab_len:d}s} {vif_name:{tab_len:d}s} ' - '{plugin:10s} {address:{tab_len:d}s} {vlan_id:4}') - - long_format = ('{pod_name:{tab_len:d}s} {vif_name:{tab_len:d}s} ' - '{plugin:10s} {address:{tab_len:d}s} {vlan_id:4} ' - '{active:6} {host_ip:{tab_len:d}s} ' - '{mac_address:{tab_len:d}s} {port_id:{tab_len:d}s}') - for vif in vif_data: - active = vif['active'] - if isinstance(active, bool): - vif['active'] = 'yes' if active else 'no' - if 'vlan_id' not in vif: - vif['vlan_id'] = '' - - if wide: - print(long_format.format(tab_len=max_len+padding, **vif)) - else: - print(short_format.format(tab_len=max_len+padding, **vif)) - - -def vifs(session, server, namespace, args): - url = '%s/api/v1/namespaces/%s/pods' % (server, namespace) -
selector = os.getenv('KUBECTL_PLUGINS_LOCAL_FLAG_SELECTOR') - if selector: - url += '?labelSelector=' + urllib.parse.quote(selector) - - output = os.getenv('KUBECTL_PLUGINS_LOCAL_FLAG_OUTPUT') - - response = session.get(url) - - if response.ok: - pods = response.json() - else: - sys.stderr.write('Failed to retrieve pod data\n') - sys.exit(1) - - vif_data = [] - for pod in pods['items']: - data = {'pod_name': pod['metadata']['name']} - - if 'hostIP' in pod['status']: - data['host_ip'] = pod['status']['hostIP'] - - vif = pod['metadata']['annotations'].get('openstack.org/kuryr-vif') - if vif is None: - continue  # not kuryr annotated - else: - vif = json.loads(vif) - - if vif['versioned_object.name'] == 'PodState': - # This is new format, fetch only default_vif from there. - vif = vif['versioned_object.data']['default_vif'] - - network = (vif['versioned_object.data']['network'] - ['versioned_object.data']) - first_subnet = (network['subnets']['versioned_object.data'] - ['objects'][0]['versioned_object.data']) - first_subnet_ip = (first_subnet['ips']['versioned_object.data'] - ['objects'][0]['versioned_object.data']['address']) - first_subnet_prefix = first_subnet['cidr'].split('/')[1] - - data['vif_name'] = vif['versioned_object.data']['vif_name'] - data['plugin'] = vif['versioned_object.data']['plugin'] - data['active'] = vif['versioned_object.data']['active'] - data['address'] = '%s/%s' % (first_subnet_ip, first_subnet_prefix) - data['port_id'] = vif['versioned_object.data']['id'] - data['mac_address'] = vif['versioned_object.data']['address'] - - vlan_id = vif['versioned_object.data'].get('vlan_id') - if vlan_id is not None: - data['vlan_id'] = vlan_id - - vif_data.append(data) - - if output == 'json': - pprint(vif_data) - elif output == 'tabular': - _vif_formatted_output(vif_data) - elif output == 'wide': - _vif_formatted_output(vif_data, wide=True) - else: - sys.stderr.write('Unrecognized output format\n') - sys.exit(1) - -if __name__ == '__main__': - parser = argparse.ArgumentParser(usage='kuryr [command] [options]') - subparsers = parser.add_subparsers(title='Available commands', metavar='') - - get_parser = subparsers.add_parser( - 'get', - usage='kuryr get [resource] [options]', - help='Gets Kuryr managed resource information.') - get_parser.add_argument( - 'resource', - action='store', - choices=('vif',), - help='Resource to return info for.') - get_parser.set_defaults(func=get) - args = parser.parse_args() - - args.func(args) diff --git a/contrib/kubectl_plugins/kuryr/plugin.yaml b/contrib/kubectl_plugins/kuryr/plugin.yaml deleted file mode 100644 index 42bf53409..000000000 --- a/contrib/kubectl_plugins/kuryr/plugin.yaml +++ /dev/null @@ -1,15 +0,0 @@ -name: kuryr -shortDesc: "OpenStack kuryr tools" -tree: - - name: get - shortDesc: "Retrieves Kuryr managed resources" - command: "./kuryr get" - flags: - - name: selector - shorthand: l - desc: "Selects which pods to find kuryr vif info for" - defValue: "" - - name: output - shorthand: o - desc: How to format the output - defValue: tabular diff --git a/contrib/pools-management/README.rst b/contrib/pools-management/README.rst deleted file mode 100644 index bf91218f3..000000000 --- a/contrib/pools-management/README.rst +++ /dev/null @@ -1,321 +0,0 @@ -============================= -Subport pools management tool -============================= - -This tool makes it easier to deal with subport pools. It allows you to populate -a given number of subports at the specified pools (i.e., at the VM trunks), as -well as to free the unused ones.
- -The first step to perform is to enable the pool manager by adding this to -``/etc/kuryr/kuryr.conf``:: - - [kubernetes] - enable_manager = True - - -If the environment has been deployed with devstack, the socket file directory -will have been created automatically. However, if that is not the case, you -need to create the directory for the socket file with the right permissions. -If no other path is specified, the default location for the socket file is: -``/run/kuryr/kuryr_manage.sock`` - -Hence, you need to create that directory and give it read/write access to the -user who is running the kuryr-kubernetes.service, for instance:: - - $ sudo mkdir -p /run/kuryr - $ sudo chown stack:stack /run/kuryr - - -Finally, restart kuryr-k8s-controller:: - - $ sudo systemctl restart devstack@kuryr-kubernetes.service - - -Populate subport pools for nested environment --------------------------------------------- - -Once the nested environment is up and running, and the pool manager has been -started, we can populate the pools, i.e., the trunk ports in use by the -overcloud VMs, with subports. From the *undercloud* we just need to make use -of the subports.py tool. - -To obtain information about the tool options:: - - $ python contrib/pools-management/subports.py -h - usage: subports.py [-h] {create,free} ... - - Tool to create/free subports from the subport pools - - positional arguments: - {create,free} commands - create Populate the pool(s) with subports - free Remove unused subports from the pools - - optional arguments: - -h, --help show this help message and exit - - -And to obtain information about the create subcommand:: - - $ python contrib/pools-management/subports.py create -h - usage: subports.py create [-h] --trunks SUBPORTS [SUBPORTS ...] [-n NUM] [-t TIMEOUT] - - optional arguments: - -h, --help show this help message and exit - --trunks SUBPORTS [SUBPORTS ...] - list of trunk IPs where subports will be added - -n NUM, --num-ports NUM - number of subports to be created per pool - -t TIMEOUT, --timeout TIMEOUT - set timeout for operation. Default is 180 sec - - -Then, we can check the existing (overcloud) VMs to use their (trunk) IPs to -later populate their respective pool:: - - $ openstack server list -f value -c Networks - net0=10.0.4.5 - net0=10.0.4.6, 172.24.4.5 - - -As can be seen, the second VM also has a floating IP associated, but we -only need to use the one belonging to `net0`. If we want to create and attach -a subport to the 10.0.4.5 trunk, and the respective pool, we just need to do:: - - $ python contrib/pools-management/subports.py create --trunks 10.0.4.5 - - -As the number of ports to create is not specified, it only creates 1 subport -as this is the default value.
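-The helper script is only a thin client for the pool manager's REST API, which
-is served on the UNIX socket mentioned above. As a rough sketch (assuming the
-default socket path and the ``/populatePool`` endpoint that also appears in the
-curl examples at the end of this document), the same call can be scripted
-directly from Python::
-
-    import http.client
-    import json
-    import socket
-
-
-    class UnixHTTPConnection(http.client.HTTPConnection):
-        """HTTPConnection that talks to a UNIX socket instead of a host."""
-
-        def __init__(self, path, timeout=180):
-            super().__init__('localhost', timeout=timeout)
-            self._socket_path = path
-
-        def connect(self):
-            sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
-            sock.settimeout(self.timeout)
-            sock.connect(self._socket_path)
-            self.sock = sock
-
-
-    # Equivalent of `subports.py create --trunks 10.0.4.5`.
-    conn = UnixHTTPConnection('/run/kuryr/kuryr_manage.sock')
-    conn.request('POST', '/populatePool',
-                 body=json.dumps({'trunks': ['10.0.4.5'], 'num_ports': 1}),
-                 headers={'Content-Type': 'application/json',
-                          'Connection': 'close'})
-    print(conn.getresponse().read())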
We can check the result of this command with:: - - # Checking the subport named `available-port` has been created - $ openstack port list | grep available-port - | 1de77073-7127-4c39-a47b-cef15f98849c | available-port| fa:16:3e:64:7d:90 | ip_address='10.0.0.70', subnet_id='c3a8feb0-62b5-4b53-9235-af1ca93c2571' | ACTIVE | - - # Checking the subport is attached to the VM trunk - $ openstack network trunk show trunk1 - +-----------------+--------------------------------------------------------------------------------------------------+ - | Field | Value | - +-----------------+--------------------------------------------------------------------------------------------------+ - | admin_state_up | UP | - | created_at | 2017-08-28T15:06:54Z | - | description | | - | id | 9048c109-c1aa-4a41-9508-71b2ba98f3b0 | - | name | trunk1 | - | port_id | 4180a2e5-e184-424a-93d4-54b48490f50d | - | project_id | a05f6ec0abd04cba80cd160f8baaac99 | - | revision_number | 43 | - | status | ACTIVE | - | sub_ports | port_id='1de77073-7127-4c39-a47b-cef15f98849c', segmentation_id='3934', segmentation_type='vlan' | - | tags | [] | - | tenant_id | a05f6ec0abd04cba80cd160f8baaac99 | - | updated_at | 2017-08-29T06:12:39Z | - +-----------------+--------------------------------------------------------------------------------------------------+ - - -It can be seen that the port with id `1de77073-7127-4c39-a47b-cef15f98849c` -has been attached to `trunk1`. - -Similarly, we can add subport to different pools by including several IPs at -the `--trunks` option, and we can also modify the amount of subports created -per pool with the `--num` option:: - - $ python contrib/pools-management/subports.py create --trunks 10.0.4.6 10.0.4.5 --num 3 - - -This command will create 6 subports in total, 3 at trunk 10.0.4.5 and another -3 at trunk 10.0.4.6. 
So, to check the result of this command, as before:: - - $ openstack port list | grep available-port - | 1de77073-7127-4c39-a47b-cef15f98849c | available-port | fa:16:3e:64:7d:90 | ip_address='10.0.0.70', subnet_id='c3a8feb0-62b5-4b53-9235-af1ca93c2571' | ACTIVE | - | 52e52281-4692-45e9-935e-db77de44049a | available-port | fa:16:3e:0b:45:f6 | ip_address='10.0.0.73', subnet_id='c3a8feb0-62b5-4b53-9235-af1ca93c2571' | ACTIVE | - | 71245983-e15e-4ae8-9425-af255b54921b | available-port | fa:16:3e:e5:2f:90 | ip_address='10.0.0.68', subnet_id='c3a8feb0-62b5-4b53-9235-af1ca93c2571' | ACTIVE | - | b6a8aa34-feef-42d7-b7ce-f9c33ac499ca | available-port | fa:16:3e:0c:8c:b0 | ip_address='10.0.0.65', subnet_id='c3a8feb0-62b5-4b53-9235-af1ca93c2571' | ACTIVE | - | bee0cb3e-8d83-4942-8cdd-fc091b6e6058 | available-port | fa:16:3e:c2:0a:c6 | ip_address='10.0.0.74', subnet_id='c3a8feb0-62b5-4b53-9235-af1ca93c2571' | ACTIVE | - | c2d7b5c9-606d-4499-9981-0f94ec94f7e1 | available-port | fa:16:3e:73:89:d2 | ip_address='10.0.0.67', subnet_id='c3a8feb0-62b5-4b53-9235-af1ca93c2571' | ACTIVE | - | cb42940f-40c0-4e01-aa40-f3e9c5f6743f | available-port | fa:16:3e:49:73:ca | ip_address='10.0.0.66', subnet_id='c3a8feb0-62b5-4b53-9235-af1ca93c2571' | ACTIVE | - - $ openstack network trunk show trunk0 - +-----------------+--------------------------------------------------------------------------------------------------+ - | Field | Value | - +-----------------+--------------------------------------------------------------------------------------------------+ - | admin_state_up | UP | - | created_at | 2017-08-25T07:28:11Z | - | description | | - | id | c730ff56-69c2-4540-b3d4-d2978007236d | - | name | trunk0 | - | port_id | ad1b8e91-0698-473d-a2f2-d123e8a0af45 | - | project_id | a05f6ec0abd04cba80cd160f8baaac99 | - | revision_number | 381 | - | status | ACTIVE | - | sub_port | port_id='bee0cb3e-8d83-4942-8cdd-fc091b6e6058', segmentation_id='875', segmentation_type='vlan' | - | | port_id='71245983-e15e-4ae8-9425-af255b54921b', segmentation_id='1446', segmentation_type='vlan' | - | | port_id='b6a8aa34-feef-42d7-b7ce-f9c33ac499ca', segmentation_id='1652', segmentation_type='vlan' | - | tags | [] | - | tenant_id | a05f6ec0abd04cba80cd160f8baaac99 | - | updated_at | 2017-08-29T06:19:24Z | - +-----------------+--------------------------------------------------------------------------------------------------+ - - $ openstack network trunk show trunk1 - +-----------------+--------------------------------------------------------------------------------------------------+ - | Field | Value | - +-----------------+--------------------------------------------------------------------------------------------------+ - | admin_state_up | UP | - | created_at | 2017-08-28T15:06:54Z | - | description | | - | id | 9048c109-c1aa-4a41-9508-71b2ba98f3b0 | - | name | trunk1 | - | port_id | 4180a2e5-e184-424a-93d4-54b48490f50d | - | project_id | a05f6ec0abd04cba80cd160f8baaac99 | - | revision_number | 46 | - | status | ACTIVE | - | sub_ports | port_id='c2d7b5c9-606d-4499-9981-0f94ec94f7e1', segmentation_id='289', segmentation_type='vlan' | - | | port_id='cb42940f-40c0-4e01-aa40-f3e9c5f6743f', segmentation_id='1924', segmentation_type='vlan' | - | | port_id='52e52281-4692-45e9-935e-db77de44049a', segmentation_id='3866', segmentation_type='vlan' | - | | port_id='1de77073-7127-4c39-a47b-cef15f98849c', segmentation_id='3934', segmentation_type='vlan' | - | tags | [] | - | tenant_id | a05f6ec0abd04cba80cd160f8baaac99 | - | updated_at | 
2017-08-29T06:19:28Z | - +-----------------+--------------------------------------------------------------------------------------------------+ - - -We can see that now we have 7 subports, 3 of them attached to `trunk0` and 4 -(1 + 3) attached to `trunk1`. - -After that, if we create a new pod, we can see that the pre-created subports -are being used:: - - $ kubectl create deployment demo --image=quay.io/kuryr/demo - $ kubectl scale deploy/demo --replicas=2 - $ kubectl get pods - NAME READY STATUS RESTARTS AGE - demo-2293951457-0l35q 1/1 Running 0 8s - demo-2293951457-nlghf 1/1 Running 0 17s - - $ openstack port list | grep demo - | 71245983-e15e-4ae8-9425-af255b54921b | demo-2293951457-0l35q | fa:16:3e:e5:2f:90 | ip_address='10.0.0.68', subnet_id='c3a8feb0-62b5-4b53-9235-af1ca93c2571' | ACTIVE | - | b6a8aa34-feef-42d7-b7ce-f9c33ac499ca | demo-2293951457-nlghf | fa:16:3e:0c:8c:b0 | ip_address='10.0.0.65', subnet_id='c3a8feb0-62b5-4b53-9235-af1ca93c2571' | ACTIVE | - - -Free pools for nested environment --------------------------------- - -In addition to the create subcommand, there is a `free` command available that -allows you to either remove the available ports at a given pool (i.e., VM trunk), -or at all of them:: - - $ python contrib/pools-management/subports.py free -h - usage: subports.py free [-h] [--trunks SUBPORTS [SUBPORTS ...]] [-t TIMEOUT] - - optional arguments: - -h, --help show this help message and exit - --trunks SUBPORTS [SUBPORTS ...] - list of trunk IPs where subports will be freed - -t TIMEOUT, --timeout TIMEOUT - set timeout for operation. Default is 180 sec - - -Following from the previous example, we can remove the available-ports -attached to a given pool, e.g.:: - - $ python contrib/pools-management/subports.py free --trunks 10.0.4.5 - $ openstack network trunk show trunk1 - +-----------------+--------------------------------------+ - | Field | Value | - +-----------------+--------------------------------------+ - | admin_state_up | UP | - | created_at | 2017-08-28T15:06:54Z | - | description | | - | id | 9048c109-c1aa-4a41-9508-71b2ba98f3b0 | - | name | trunk1 | - | port_id | 4180a2e5-e184-424a-93d4-54b48490f50d | - | project_id | a05f6ec0abd04cba80cd160f8baaac99 | - | revision_number | 94 | - | status | ACTIVE | - | sub_ports | | - | tags | [] | - | tenant_id | a05f6ec0abd04cba80cd160f8baaac99 | - | updated_at | 2017-08-29T06:40:18Z | - +-----------------+--------------------------------------+ - - -Or from all the pools at once:: - - $ python contrib/pools-management/subports.py free - $ openstack port list | grep available-port - $ # returns nothing - - -List pools for nested environment --------------------------------- - -There is a `list` command available to show information about the existing -pools, i.e., it prints out the pool keys (trunk_ip, project_id, -[security_groups]) and the number of available ports in each one of them:: - - $ python contrib/pools-management/subports.py list -h - usage: subports.py list [-h] [-t TIMEOUT] - - optional arguments: - -h, --help show this help message and exit - -t TIMEOUT, --timeout TIMEOUT - set timeout for operation.
Default is 180 sec - - -As an example:: - - $ python contrib/pools-management/subports.py list - Content-length: 150 - - Pools: - ["10.0.0.6", "9d2b45c4efaa478481c30340b49fd4d2", ["00efc78c-f11c-414a-bfcd-a82e16dc07d1", "fd6b13dc-7230-4cbe-9237-36b4614bc6b5"]] has 4 ports - - -Show pool for nested environment -------------------------------- - -There is a `show` command available to print out information about a given -pool. It prints the IDs of the ports associated with that pool:: - - $ python contrib/pools-management/subports.py show -h - usage: subports.py show [-h] --trunk TRUNK_IP -p PROJECT_ID --sg SG [SG ...] - [-t TIMEOUT] - - optional arguments: - -h, --help show this help message and exit - --trunk TRUNK_IP Trunk IP of the desired pool - -p PROJECT_ID, --project-id PROJECT_ID - project id of the pool - --sg SG [SG ...] Security group ids of the pool - -t TIMEOUT, --timeout TIMEOUT - set timeout for operation. Default is 180 sec - -As an example:: - - $ python contrib/pools-management/subports.py show --trunk 10.0.0.6 -p 9d2b45c4efaa478481c30340b49fd4d2 --sg 00efc78c-f11c-414a-bfcd-a82e16dc07d1 fd6b13dc-7230-4cbe-9237-36b4614bc6b5 - Content-length: 299 - - Pool (u'10.0.0.6', u'9d2b45c4efaa478481c30340b49fd4d2', (u'00efc78c-f11c-414a-bfcd-a82e16dc07d1', u'fd6b13dc-7230-4cbe-9237-36b4614bc6b5')) ports are: - 4913fbde-5939-4aef-80c0-7fcca0348871 - 864c8237-6ab4-4713-bec8-3d8bb6aa2144 - 8138134b-44df-489c-a693-3defeb2adb58 - f5e107c6-f998-4416-8f17-a055269f2829 - - -Without the script ------------------- - -Note the same can be done without using this script, by directly calling the -REST API with curl:: - - # To populate the pool - $ curl --unix-socket /run/kuryr/kuryr_manage.sock http://localhost/populatePool -H "Content-Type: application/json" -X POST -d '{"trunks": ["10.0.4.6"], "num_ports": 3}' - - # To free the pool - $ curl --unix-socket /run/kuryr/kuryr_manage.sock http://localhost/freePool -H "Content-Type: application/json" -X POST -d '{"trunks": ["10.0.4.6"]}' - - # To list the existing pools - $ curl --unix-socket /run/kuryr/kuryr_manage.sock http://localhost/listPools -H "Content-Type: application/json" -X GET -d '{}' - - # To show a specific pool - $ curl --unix-socket /run/kuryr/kuryr_manage.sock http://localhost/showPool -H "Content-Type: application/json" -X GET -d '{"pool_key": ["10.0.0.6", "9d2b45c4efaa478481c30340b49fd4d2", ["00efc78c-f11c-414a-bfcd-a82e16dc07d1", "fd6b13dc-7230-4cbe-9237-36b4614bc6b5"]]}' diff --git a/contrib/pools-management/subports.py b/contrib/pools-management/subports.py deleted file mode 100644 index 86eca7d10..000000000 --- a/contrib/pools-management/subports.py +++ /dev/null @@ -1,187 +0,0 @@ -# Copyright 2017 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License.
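-
-# NOTE: This script is a thin CLI wrapper over the pool manager's REST API on
-# its UNIX socket (constants.MANAGER_SOCKET_FILE). The create/free/list/show
-# subcommands defined below map onto the /populatePool, /freePool, /listPools
-# and /showPool endpoints demonstrated with curl in the README above.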
- -import argparse -from http import client as httplib -import socket - -from oslo_serialization import jsonutils - -from kuryr_kubernetes import constants - - -class UnixDomainHttpConnection(httplib.HTTPConnection): - - def __init__(self, path, timeout): - httplib.HTTPConnection.__init__( - self, "localhost", timeout=timeout) - self.__unix_socket_path = path - self.timeout = timeout - - def connect(self): - sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) - sock.settimeout(self.timeout) - sock.connect(self.__unix_socket_path) - self.sock = sock - - -def create_subports(num_ports, trunk_ips, timeout=180): - method = 'POST' - body = jsonutils.dumps({"trunks": trunk_ips, "num_ports": num_ports}) - headers = {'Content-Type': 'application/json', 'Connection': 'close'} - headers['Content-Length'] = len(body) - path = 'http://localhost{0}'.format(constants.VIF_POOL_POPULATE) - socket_path = constants.MANAGER_SOCKET_FILE - conn = UnixDomainHttpConnection(socket_path, timeout) - conn.request(method, path, body=body, headers=headers) - resp = conn.getresponse() - print(resp.read()) - - -def delete_subports(trunk_ips, timeout=180): - method = 'POST' - body = jsonutils.dumps({"trunks": trunk_ips}) - headers = {'Content-Type': 'application/json', 'Connection': 'close'} - headers['Content-Length'] = len(body) - path = 'http://localhost{0}'.format(constants.VIF_POOL_FREE) - socket_path = constants.MANAGER_SOCKET_FILE - conn = UnixDomainHttpConnection(socket_path, timeout) - conn.request(method, path, body=body, headers=headers) - resp = conn.getresponse() - print(resp.read()) - - -def list_pools(timeout=180): - method = 'GET' - body = jsonutils.dumps({}) - headers = {'Content-Type': 'application/json', 'Connection': 'close'} - headers['Content-Length'] = len(body) - path = 'http://localhost{0}'.format(constants.VIF_POOL_LIST) - socket_path = constants.MANAGER_SOCKET_FILE - conn = UnixDomainHttpConnection(socket_path, timeout) - conn.request(method, path, body=body, headers=headers) - resp = conn.getresponse() - print(resp.read()) - - -def show_pool(trunk_ip, project_id, sg, timeout=180): - method = 'GET' - body = jsonutils.dumps({"pool_key": [trunk_ip, project_id, sg]}) - headers = {'Content-Type': 'application/json', 'Connection': 'close'} - headers['Content-Length'] = len(body) - path = 'http://localhost{0}'.format(constants.VIF_POOL_SHOW) - socket_path = constants.MANAGER_SOCKET_FILE - conn = UnixDomainHttpConnection(socket_path, timeout) - conn.request(method, path, body=body, headers=headers) - resp = conn.getresponse() - print(resp.read()) - - -def _get_parser(): - parser = argparse.ArgumentParser( - description='Tool to create/free subports from the subport pools') - subparser = parser.add_subparsers(help='commands', dest='command') - - create_ports_parser = subparser.add_parser( - 'create', - help='Populate the pool(s) with subports') - create_ports_parser.add_argument( - '--trunks', - help='list of trunk IPs where subports will be added', - nargs='+', - dest='subports', - required=True) - create_ports_parser.add_argument( - '-n', '--num-ports', - help='number of subports to be created per pool.', - dest='num', - default=1, - type=int) - create_ports_parser.add_argument( - '-t', '--timeout', - help='set timeout for operation.
Default is 180 sec', - dest='timeout', - default=180, - type=int) - - delete_ports_parser = subparser.add_parser( - 'free', - help='Remove unused subports from the pools') - delete_ports_parser.add_argument( - '--trunks', - help='list of trunk IPs where subports will be freed', - nargs='+', - dest='subports') - delete_ports_parser.add_argument( - '-t', '--timeout', - help='set timeout for operation. Default is 180 sec', - dest='timeout', - default=180, - type=int) - - list_pools_parser = subparser.add_parser( - 'list', - help='List available pools and the number of ports they have') - list_pools_parser.add_argument( - '-t', '--timeout', - help='set timeout for operation. Default is 180 sec', - dest='timeout', - default=180, - type=int) - - show_pool_parser = subparser.add_parser( - 'show', - help='Show the ports associated to a given pool') - show_pool_parser.add_argument( - '--trunk', - help='Trunk IP of the desired pool', - dest='trunk_ip', - required=True) - show_pool_parser.add_argument( - '-p', '--project-id', - help='project id of the pool', - dest='project_id', - required=True) - show_pool_parser.add_argument( - '--sg', - help='Security group ids of the pool', - dest='sg', - nargs='+', - required=True) - show_pool_parser.add_argument( - '-t', '--timeout', - help='set timeout for operation. Default is 180 sec', - dest='timeout', - default=180, - type=int) - - return parser - - -def main(): - """Parse options and call the appropriate class/method.""" - parser = _get_parser() - args = parser.parse_args() - if args.command == 'create': - create_subports(args.num, args.subports, args.timeout) - elif args.command == 'free': - delete_subports(args.subports, args.timeout) - elif args.command == 'list': - list_pools(args.timeout) - elif args.command == 'show': - show_pool(args.trunk_ip, args.project_id, args.sg, args.timeout) - - -if __name__ == '__main__': - main() diff --git a/contrib/regenerate_controller_pod.sh b/contrib/regenerate_controller_pod.sh deleted file mode 100755 index 31dba8347..000000000 --- a/contrib/regenerate_controller_pod.sh +++ /dev/null @@ -1,17 +0,0 @@ -#!/bin/bash - -set -o errexit - -KURYR_DIR=${KURYR_DIR:-/opt/stack/kuryr-kubernetes} -KURYR_CONTROLLER_NAME=${KURYR_CONTROLLER_NAME:-kuryr-controller} - -function build_tagged_container { - docker build -t kuryr/controller -f $KURYR_DIR/controller.Dockerfile $KURYR_DIR -} - -function recreate_controller { - kubectl delete pods -n kube-system -l name=$KURYR_CONTROLLER_NAME -} - -build_tagged_container -recreate_controller diff --git a/contrib/regenerate_pod_resources_api.sh b/contrib/regenerate_pod_resources_api.sh deleted file mode 100755 index ea36a2978..000000000 --- a/contrib/regenerate_pod_resources_api.sh +++ /dev/null @@ -1,88 +0,0 @@ -#!/bin/bash - -set -o errexit - -# Exit early if python3 is not available. -python3 --version > /dev/null - -KURYR_DIR=${KURYR_DIR:-./} -KURYR_API_PROTO="kuryr_kubernetes/pod_resources/api.proto" - -# If API_VERSION is not specified assuming v1alpha1. -VERSION=${API_VERSION:-v1alpha1} - -ACTIVATED="no" -ENV_DIR=$(mktemp -d -t kuryr-tmp-env-XXXXXXXXXX) - -function cleanup() { - if [ "${ACTIVATED}" = "yes" ]; then deactivate; fi - rm -rf "${ENV_DIR}" -} -trap cleanup EXIT INT - -if [ -z "${KUBERNETES_API_PROTO}" ]; then - - echo "KUBERNETES_API_PROTO is not specified." \ - "Trying to download api.proto from the k8s github." 
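-    # No local copy was provided, so fetch api.proto for the requested API
-    # version from the upstream Kubernetes repository; the 'package ...;' line
-    # of the downloaded file is parsed further down to confirm the version.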
- - pushd "${ENV_DIR}" - - BASE_URL="https://raw.githubusercontent.com/kubernetes/kubernetes/master" - PROTO_FILE="pkg/kubelet/apis/podresources/${VERSION}/api.proto" - - wget "${BASE_URL}/${PROTO_FILE}" -O api.proto - - KUBERNETES_API_PROTO="$PWD/api.proto" - popd -fi - -if [ ! -f "${KUBERNETES_API_PROTO}" ]; then - echo "Can't find ${KUBERNETES_API_PROTO}" - exit 1 -fi - -KUBERNETES_API_PROTO=$(readlink -e "${KUBERNETES_API_PROTO}") - -pushd "${KURYR_DIR}" - -# Obtaining api version from the proto file. -VERSION=$(grep package "${KUBERNETES_API_PROTO}" \ - | sed 's/^package *\(.*\)\;$/\1/') -echo "\ -// Generated from kubernetes/pkg/kubelet/apis/podresources/${VERSION}/api.proto -// To regenerate api.proto, api_pb2.py and api_pb2_grpc.py follow instructions -// from doc/source/devref/updating_pod_resources_api.rst. -" > ${KURYR_API_PROTO} - -# Stripping unwanted dependencies. -sed '/gogoproto/d;/api.pb.go/d' "${KUBERNETES_API_PROTO}" >> ${KURYR_API_PROTO} -echo '' >> ${KURYR_API_PROTO} -# Stripping redundant empty lines. -sed -i '/^$/N;/^\n$/D' ${KURYR_API_PROTO} - -# Creating new virtual environment. -python3 -m venv "${ENV_DIR}" -source "${ENV_DIR}/bin/activate" -ACTIVATED="yes" - -pip install grpcio-tools==1.19 - -# Checking protobuf version. -protobuf_version=$(grep protobuf lower-constraints.txt \ - | sed 's/^protobuf==\([0-9\.]*\)\.[0-9]*$/\1/') -protoc_version=$(python -m grpc_tools.protoc --version \ - | sed 's/^libprotoc \([0-9\.]*\)\.[0-9]*$/\1/') -if [ "${protobuf_version}" != "${protoc_version}" ]; then - echo "protobuf version in lower-constraints.txt (${protobuf_version})" \ - "!= installed protoc compiler version (${protoc_version})." - echo "Please, update requirements.txt and lower-constraints.txt or" \ - "change version of grpcio-tools used in this script." - # Clearing api.proto to highlight the issue. - echo '' > ${KURYR_API_PROTO} - exit 1 -fi - -# Generating python bindings. -python -m grpc_tools.protoc -I./ \ - --python_out=. --grpc_python_out=. ${KURYR_API_PROTO} -popd diff --git a/contrib/sctp_client.py b/contrib/sctp_client.py deleted file mode 100644 index b4a5248bf..000000000 --- a/contrib/sctp_client.py +++ /dev/null @@ -1,31 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
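-
-# Minimal SCTP test client. It needs the third-party 'sctp' module (pysctp)
-# and is invoked as: python sctp_client.py <server_ip> <port>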
- -import sctp -import socket -import sys - -sk = sctp.sctpsocket_tcp(socket.AF_INET) - - -def connect_plus_message(OUT_IP, OUT_PORT): - sk.connect((OUT_IP, OUT_PORT)) - print("Sending Message") - sk.sctp_send(msg='HELLO, I AM ALIVE!!!') - msgFromServer = sk.recvfrom(1024) - print(msgFromServer[0].decode('utf-8')) - sk.shutdown(0) - sk.close() - - -if __name__ == '__main__': - connect_plus_message(sys.argv[1], int(sys.argv[2])) diff --git a/contrib/testing/container/Dockerfile b/contrib/testing/container/Dockerfile deleted file mode 100644 index 13f0ff698..000000000 --- a/contrib/testing/container/Dockerfile +++ /dev/null @@ -1,3 +0,0 @@ -FROM scratch -ADD kuryr_testing_rootfs.tar.gz / -CMD ["/usr/bin/kuryr_hostname"] diff --git a/contrib/testing/container/build.sh b/contrib/testing/container/build.sh deleted file mode 100755 index 804c51fa3..000000000 --- a/contrib/testing/container/build.sh +++ /dev/null @@ -1,43 +0,0 @@ -#!/bin/bash - -set -o errexit - - -function install_busybox { - if [[ -x $(command -v apt-get 2> /dev/null) ]]; then - sudo apt-get update - sudo apt-get install -y busybox-static gcc - elif [[ -x $(command -v dnf 2> /dev/null) ]]; then - sudo dnf install -y busybox gcc - elif [[ -x $(command -v yum 2> /dev/null) ]]; then - sudo yum install -y busybox gcc - elif [[ -x $(command -v pacman 2> /dev/null) ]]; then - sudo pacman -S --noconfirm busybox gcc - else - echo "unknown distro" 1>&2 - exit 1 - fi - return 0 -} - -function make_root { - local root_dir - local binary - - root_dir=$(mktemp -d) - mkdir -p "${root_dir}/bin" "${root_dir}/usr/bin" - binary=$(command -v busybox) - cp "$binary" "${root_dir}/bin/busybox" - "${root_dir}/bin/busybox" --install "${root_dir}/bin" - gcc --static hostname.c -o "${root_dir}/usr/bin/kuryr_hostname" - tar -C "$root_dir" -czvf kuryr_testing_rootfs.tar.gz bin usr - return 0 -} - -function build_container { - docker build -t kuryr/test_container .
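-    # The resulting image is built FROM scratch and contains just the busybox
-    # userland assembled by make_root plus the statically linked
-    # /usr/bin/kuryr_hostname server, which replies to any HTTP request with
-    # the container's hostname.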
-} - -install_busybox -make_root -build_container diff --git a/contrib/testing/container/hostname.c b/contrib/testing/container/hostname.c deleted file mode 100644 index e31778261..000000000 --- a/contrib/testing/container/hostname.c +++ /dev/null @@ -1,129 +0,0 @@ -#define _GNU_SOURCE -#include <stdio.h> -#include <stdlib.h> -#include <string.h> -#include <unistd.h> -#include <signal.h> -#include <err.h> -#include <errno.h> -#include <sys/types.h> -#include <sys/socket.h> -#include <netinet/in.h> -#include <arpa/inet.h> - -#define MAX_LEN 1024 -#define BACKLOG 10 -#define LISTENING_PORT 8000 - -volatile sig_atomic_t running = 1; -volatile sig_atomic_t sig_number; - -static void handler(int signo) { - sig_number = signo; - running = 0; -} - - -int main() { - struct sigaction sa = { - .sa_handler = handler, - .sa_flags = 0}; - sigemptyset(&sa.sa_mask); - - if (sigaction(SIGINT, &sa, NULL) == -1) { - err(1, "Failed to set SIGINT handler"); - } - if (sigaction(SIGTERM, &sa, NULL) == -1) { - err(1, "Failed to set SIGTERM handler"); - } - - int enable = 1; - int result = 1; - char hostname[MAX_LEN]; - int res = gethostname(hostname, MAX_LEN); - if (res < 0) { - err(1, "Failed to retrieve hostname"); - } - - char *response; - ssize_t responselen; - responselen = asprintf(&response, "HTTP/1.1 200 OK\r\n" - "Content-Type: text/html; charset=UTF-8\r\n\r\n" - "%s\r\n", hostname); - if (responselen == -1) { - err(1, "Failed to form response"); - } - - int sock = socket(AF_INET, SOCK_STREAM, 0); - if (sock < 0) { - perror("Failed to open socket"); - goto nosocket; - } - - res = setsockopt(sock, SOL_SOCKET, SO_REUSEADDR, &enable, - sizeof(int)); - if (res < 0) { - perror("Failed to set socket options"); - goto cleanup; - } - - struct sockaddr_in srv = { - .sin_family = AF_INET, - .sin_port = htons(LISTENING_PORT), - .sin_addr = { .s_addr = INADDR_ANY}}; - socklen_t addrlen = sizeof(srv); - - res = bind(sock, (struct sockaddr *) &srv, (socklen_t) sizeof(srv)); - if (res < 0) { - res = close(sock); - if (res == -1) { - perror("Failed to close socket"); - goto cleanup; - } - perror("Failed to bind socket"); - goto cleanup; - } - - res = listen(sock, BACKLOG); - if (res < 0) { - perror("Failed to set socket to listen"); - goto cleanup; - } - - while (running) { - struct sockaddr_in cli; - int client_fd = accept(sock, (struct sockaddr *) &cli, - &addrlen); - if (client_fd == -1) { - if (running) { - perror("failed to accept connection"); - continue; - } else { - char *signame = strsignal(sig_number); - printf("Received %s. Quitting\n", signame); - break; - } - } - fprintf(stderr, "Accepted client connection\n"); - - /* Assume we write it all at once */ - write(client_fd, response, responselen); - res = shutdown(client_fd, SHUT_RDWR); - if (res == -1) { - perror("Failed to shutdown client connection"); - goto cleanup; - } - res = close(client_fd); - if (res == -1) { - perror("Failed to close client connection"); - goto cleanup; - } - } - - result = 0; -cleanup: - close(sock); -nosocket: - free(response); - return result; -} diff --git a/contrib/vagrant/README.rst b/contrib/vagrant/README.rst deleted file mode 100644 index 4c780eee6..000000000 --- a/contrib/vagrant/README.rst +++ /dev/null @@ -1,99 +0,0 @@ -==================================================== -Vagrant based Kuryr-Kubernetes devstack installation -==================================================== - -Deploy kuryr-kubernetes on devstack in a VM using `Vagrant`_. Vagrant simplifies -the life cycle of the local virtual machine and provides automation for repetitive -tasks. - -Requirements ------------ - -For comfortable work, here are minimal host requirements: - -#.
``vagrant`` installed -#. 4 CPU cores -#. At least 8GB of RAM -#. Around 20GB of free disk space - -Vagrant will create a VM with 2 cores, 6GB of RAM and a dynamically expanded disk -image. - - -Getting started --------------- - -You'll need Vagrant itself and a provider, e.g.: - -.. code:: console - - $ apt install vagrant virtualbox - -As an option, you can install libvirt instead of VirtualBox, although -VirtualBox is the easiest drop-in. - -Next, clone the kuryr-kubernetes repository: - -.. code:: console - - $ git clone https://opendev.org/openstack/kuryr-kubernetes - -Then run the provided Vagrantfile, by executing: - -.. code:: console - - $ cd kuryr-kubernetes/contrib/vagrant - $ vagrant up - -This can take some time, depending on your host performance, and may take -20 minutes and up. - -After deployment is complete, you can access the VM over SSH: - -.. code:: console - - $ vagrant ssh - -At this point you should have experimental kubernetes (etcdv3, k8s-apiserver, -k8s-controller-manager, k8s-scheduler, kubelet and kuryr-controller), docker, -OpenStack services (neutron, keystone, placement, nova, octavia), kuryr-cni and -kuryr-controller all up, running and pointing to each other. Pods and services -orchestrated by kubernetes will be backed by kuryr+neutron and Octavia. The -architecture of the setup `can be seen here`_. - - -Vagrant Options available ------------------------- - -You can set the following environment variables before running `vagrant up` to -modify the definition of the Virtual Machine spawned: - -* ``VAGRANT_KURYR_VM_BOX`` - to change the Vagrant Box used. Should be - available in `atlas `_. For example, for an - rpm-based option: - - .. code:: console - - $ export VAGRANT_KURYR_VM_BOX=centos/8 - -* ``VAGRANT_KURYR_VM_MEMORY`` - to modify the RAM of the VM. Defaulted to: - **6144**. If you plan to create multiple Kubernetes services on the setup and - the Octavia driver used is Amphora, you should increase this setting. -* ``VAGRANT_KURYR_VM_CPUS`` - to modify the number of CPU cores for the VM. Defaulted - to: **2**. -* ``VAGRANT_KURYR_RUN_DEVSTACK`` - whether ``vagrant up`` should run devstack - to have an environment ready to use. Set it to 'false' if you want to edit - ``local.conf`` before stacking devstack in the VM. Defaulted to: **true**. - See below for additional options for editing local.conf. - - -Additional devstack configuration --------------------------------- - -To add additional configuration to local.conf before the VM is provisioned, you -can create a file called ``user_local.conf`` in the contrib/vagrant directory -of kuryr-kubernetes. This file will be appended to the "local.conf" created -during the Vagrant provisioning. - -.. _Vagrant: https://www.vagrantup.com/ -..
_can be seen here: https://docs.openstack.org/developer/kuryr-kubernetes/devref/kuryr_kubernetes_design.html diff --git a/contrib/vagrant/Vagrantfile b/contrib/vagrant/Vagrantfile deleted file mode 100644 index d7be10bda..000000000 --- a/contrib/vagrant/Vagrantfile +++ /dev/null @@ -1,50 +0,0 @@ -VAGRANTFILE_API_VERSION = "2" - -Vagrant.configure(VAGRANTFILE_API_VERSION) do |config| - - VM_MEMORY = ENV.fetch('VAGRANT_KURYR_VM_MEMORY', 6144).to_i - VM_CPUS = ENV.fetch('VAGRANT_KURYR_VM_CPUS', 2).to_i - RUN_DEVSTACK = ENV.fetch('VAGRANT_KURYR_RUN_DEVSTACK', 'true') - - config.vm.hostname = 'devstack' - - config.vm.provider 'virtualbox' do |v, override| - override.vm.box = ENV.fetch('VAGRANT_KURYR_VM_BOX', 'generic/ubuntu2004') - v.memory = VM_MEMORY - v.cpus = VM_CPUS - v.customize "post-boot", ['controlvm', :id, 'setlinkstate1', 'on'] - end - - config.vm.provider 'parallels' do |v, override| - override.vm.box = ENV.fetch('VAGRANT_KURYR_VM_BOX', 'generic/ubuntu2004') - v.memory = VM_MEMORY - v.cpus = VM_CPUS - v.customize ['set', :id, '--nested-virt', 'on'] - end - - config.vm.provider 'libvirt' do |v, override| - override.vm.box = ENV.fetch('VAGRANT_KURYR_VM_BOX', 'generic/ubuntu2004') - v.memory = VM_MEMORY - v.cpus = VM_CPUS - v.nested = true - v.graphics_type = 'spice' - v.video_type = 'qxl' - end - - config.vm.synced_folder '../../devstack/', '/devstack', type: 'rsync' - # For CentOS machines it needs to be specified - config.vm.synced_folder '.', '/vagrant', type: 'rsync' - - - config.vm.provision :shell do |s| - s.path = 'vagrant.sh' - s.args = RUN_DEVSTACK - end - - - if Vagrant.has_plugin?('vagrant-cachier') - config.cache.scope = :box - end - - config.vm.network :forwarded_port, guest: 80, host_ip: "127.0.0.1", host: 8080 -end diff --git a/contrib/vagrant/config/kuryr_rc b/contrib/vagrant/config/kuryr_rc deleted file mode 100644 index 06682b54b..000000000 --- a/contrib/vagrant/config/kuryr_rc +++ /dev/null @@ -1,4 +0,0 @@ -export OS_USERNAME=admin -export OS_PASSWORD=pass -export OS_PROJECT_NAME=admin -export OS_AUTH_URL=http://127.0.0.1/identity diff --git a/contrib/vagrant/devstack.sh b/contrib/vagrant/devstack.sh deleted file mode 100755 index ac5e2ddaf..000000000 --- a/contrib/vagrant/devstack.sh +++ /dev/null @@ -1,62 +0,0 @@ -#!/bin/bash - -set -ex - -echo $(whoami) - -BASHPATH=$(dirname "$0") -RUN_DEVSTACK="$1" -echo "Run script from $BASHPATH" - -# Copied shamelessly from Devstack -function GetOSVersion { - if [[ -x $(which lsb_release 2>/dev/null) ]]; then - os_FAMILY='Debian' - elif [[ -r /etc/redhat-release ]]; then - os_FAMILY='RedHat' - else - echo "Unsupported distribution!" - exit 1; - fi -} - -GetOSVersion - -if [[ "$os_FAMILY" == "Debian" ]]; then - export DEBIAN_FRONTEND=noninteractive - sudo apt-get update - sudo apt-get install -qqy git -elif [[ "$os_FAMILY" == "RedHat" ]]; then - sudo yum install -y -d 0 -e 0 git -fi - -# determine checkout folder -PWD=$(getent passwd $OS_USER | cut -d: -f6) -DEVSTACK=$PWD/devstack - -# check if devstack is already there -if [[ !
-d "$DEVSTACK" ]] -then - echo "Download devstack into $DEVSTACK" - - # clone devstack - su "$OS_USER" -c "cd && git clone -b master https://github.com/openstack-dev/devstack.git $DEVSTACK" - - echo "Copy configuration" - - # copy local.conf.sample settings (source: kuryr/devstack/local.conf.sample) - cp /devstack/local.conf.sample $DEVSTACK/local.conf - # If local settings are present, append them - if [ -f "/vagrant/user_local.conf" ]; then - cat /vagrant/user_local.conf >> $DEVSTACK/local.conf - fi - chown "$OS_USER":"$OS_USER" "$DEVSTACK"/local.conf - -fi - -if $RUN_DEVSTACK; then - echo "Start Devstack" - su "$OS_USER" -c "cd $DEVSTACK && ./stack.sh" -else - echo "Virtual Machine ready. You can run devstack by executing '/home/$OS_USER/devstack/stack.sh'" -fi diff --git a/contrib/vagrant/vagrant.sh b/contrib/vagrant/vagrant.sh deleted file mode 100755 index 18af30af4..000000000 --- a/contrib/vagrant/vagrant.sh +++ /dev/null @@ -1,25 +0,0 @@ -#!/bin/sh - -getent passwd vagrant > /dev/null -if [ $? -eq 0 ]; then - export OS_USER=vagrant -else - getent passwd ubuntu > /dev/null - if [ $? -eq 0 ]; then - export OS_USER=ubuntu - fi -fi - -set -ex - -export HOST_IP=127.0.0.1 - -# Enable IPv6 -sudo sysctl -w net.ipv6.conf.default.disable_ipv6=0 -sudo sysctl -w net.ipv6.conf.all.disable_ipv6=0 - -# run script -bash /vagrant/devstack.sh "$1" - -#set environment variables for kuryr -su "$OS_USER" -c "echo 'source /vagrant/config/kuryr_rc' >> ~/.bash_profile" diff --git a/controller.Dockerfile b/controller.Dockerfile deleted file mode 100644 index 125f7a629..000000000 --- a/controller.Dockerfile +++ /dev/null @@ -1,32 +0,0 @@ -FROM quay.io/centos/centos:stream9 -LABEL authors="Antoni Segura Puimedon, Michał Dulko" - -ARG UPPER_CONSTRAINTS_FILE="https://releases.openstack.org/constraints/upper/master" - -RUN dnf upgrade -y \ - && dnf install -y epel-release \ - && dnf install -y --setopt=tsflags=nodocs python3-pip libstdc++ \ - && dnf install -y --setopt=tsflags=nodocs gcc gcc-c++ python3-devel git - -COPY . /opt/kuryr-kubernetes - -ARG VIRTUAL_ENV=/opt/venv -RUN python3 -m venv $VIRTUAL_ENV -# This is enough to activate a venv -ENV PATH="$VIRTUAL_ENV/bin:$PATH" - -RUN pip3 --no-cache-dir install -U pip \ - && python3 -m pip install -c $UPPER_CONSTRAINTS_FILE --no-cache-dir /opt/kuryr-kubernetes \ - && dnf -y history undo last \ - && dnf clean all \ - && rm -rf /opt/kuryr-kubernetes \ - && groupadd -r kuryr -g 1000 \ - && useradd -u 1000 -g kuryr \ - -d /opt/kuryr-kubernetes \ - -s /sbin/nologin \ - -c "Kuryr controller user" \ - kuryr - -USER kuryr -CMD ["--config-dir", "/etc/kuryr"] -ENTRYPOINT [ "kuryr-k8s-controller" ] diff --git a/devstack/files/debs/kuryr-kubernetes b/devstack/files/debs/kuryr-kubernetes deleted file mode 100644 index f5267c266..000000000 --- a/devstack/files/debs/kuryr-kubernetes +++ /dev/null @@ -1 +0,0 @@ -golang diff --git a/devstack/files/rpms/kuryr-kubernetes b/devstack/files/rpms/kuryr-kubernetes deleted file mode 100644 index f5267c266..000000000 --- a/devstack/files/rpms/kuryr-kubernetes +++ /dev/null @@ -1 +0,0 @@ -golang diff --git a/devstack/lib/kubernetes b/devstack/lib/kubernetes deleted file mode 100644 index 403479c8f..000000000 --- a/devstack/lib/kubernetes +++ /dev/null @@ -1,245 +0,0 @@ -#!/bin/bash - -KURYR_KUBEADMIN_IMAGE_REPOSITORY="registry.k8s.io" -function get_k8s_log_level { - if [[ ${ENABLE_DEBUG_LOG_LEVEL} == "True" ]]; then - echo "4" - else - echo "2" - fi -} - -function kubeadm_install { - if ! is_ubuntu && ! 
is_fedora; then - (>&2 echo "WARNING: kubeadm installation is not supported in this \ -distribution.") - return - fi - - if is_ubuntu; then - apt_get install apt-transport-https gpg - sudo mkdir -p -m 755 /etc/apt/keyrings - curl -fsSL https://pkgs.k8s.io/core:/stable:/v${KURYR_KUBERNETES_VERSION%.*}/deb/Release.key | \ - sudo gpg --dearmor -o /etc/apt/keyrings/kubernetes-apt-keyring.gpg - echo 'deb [signed-by=/etc/apt/keyrings/kubernetes-apt-keyring.gpg] https://pkgs.k8s.io/core:/stable:/v'${KURYR_KUBERNETES_VERSION%.*}'/deb/ /' | \ - sudo tee /etc/apt/sources.list.d/kubernetes.list - - REPOS_UPDATED=False apt_get_update - - # NOTE(gryf): kubectl will be installed alongside with the kubeadm as - # a dependency, although let's pin it to the k8s version as well. - kube_pkg_version=$(sudo apt-cache show kubeadm | grep "Version: $KURYR_KUBERNETES_VERSION-" | awk '{ print $2 }') - apt_get install \ - kubelet="${kube_pkg_version}" \ - kubeadm="${kube_pkg_version}" \ - kubectl="${kube_pkg_version}" - sudo apt-mark hold kubelet kubeadm kubectl - # NOTE(hongbin): This work-around an issue that kubelet pick a wrong - # IP address if the node has multiple network interfaces. - # See https://github.com/kubernetes/kubeadm/issues/203 - echo "KUBELET_EXTRA_ARGS=--node-ip=$HOST_IP" | sudo tee -a \ - /etc/default/kubelet - sudo systemctl daemon-reload && sudo systemctl restart kubelet - fi - - if is_fedora; then - source /etc/os-release - os_VENDOR=$(echo $NAME | tr -d '[:space:]') - if [[ $os_VENDOR =~ "CentOS" ]]; then - cat <> ${output_dir}/kubeadm-init.yaml << EOF -apiVersion: kubeadm.k8s.io/v1beta3 -kind: ClusterConfiguration -imageRepository: "${KURYR_KUBEADMIN_IMAGE_REPOSITORY}" -etcd: - external: - endpoints: - - "http://${SERVICE_HOST}:${ETCD_PORT}" -networking: - serviceSubnet: "$(IFS=, ; echo "${cluster_ip_ranges[*]}")" -apiServer: - extraArgs: - endpoint-reconciler-type: "none" - min-request-timeout: "300" - allow-privileged: "true" - v: "$(get_k8s_log_level)" -controllerManager: - extraArgs: - master: "$KURYR_K8S_API_URL" - min-resync-period: "3m" - v: "$(get_k8s_log_level)" - leader-elect: "false" -scheduler: - extraArgs: - master: "${KURYR_K8S_API_URL}" - v: "$(get_k8s_log_level)" - leader-elect: "false" ---- -apiVersion: kubeadm.k8s.io/v1beta3 -kind: InitConfiguration -bootstrapTokens: -- token: "${KURYR_K8S_TOKEN}" - ttl: 0s -localAPIEndpoint: - advertiseAddress: "${K8S_API_SERVER_IP}" - bindPort: ${K8S_API_SERVER_PORT} -nodeRegistration: - criSocket: "$cri_socket" - kubeletExtraArgs: - enable-server: "true" - taints: - [] ---- -apiVersion: kubelet.config.k8s.io/v1beta1 -kind: KubeletConfiguration -failSwapOn: false -address: "0.0.0.0" -enableServer: true -cgroupDriver: $cgroup_driver -EOF - sudo kubeadm config images pull --image-repository=${KURYR_KUBEADMIN_IMAGE_REPOSITORY} - args="--config ${output_dir}/kubeadm-init.yaml" - # NOTE(gryf): skip installing kube proxy, kuryr will handle services. - args+=" --skip-phases=addon/kube-proxy" - args+=" --ignore-preflight-errors Swap" - - if ! is_service_enabled coredns; then - # FIXME(gryf): Do we need specific configuration for coredns? 
- args+=" --skip-phases=addon/coredns" - fi - sudo kubeadm init $args - - local kube_config_file=$HOME/.kube/config - mkdir -p $(dirname ${kube_config_file}) - sudo cp /etc/kubernetes/admin.conf $kube_config_file - safe_chown $STACK_USER:$STACK_USER $kube_config_file -} - -function kubeadm_join { - local output_dir="${DATA_DIR}/kuryr-kubernetes" - local cgroup_driver - local cri_socket - - mkdir -p "${output_dir}" - - if [[ ${CONTAINER_ENGINE} == 'crio' ]]; then - local crio_conf="/etc/crio/crio.conf" - cgroup_driver=$(iniget ${crio_conf} crio.runtime cgroup_manager) - cri_socket="unix:///var/run/crio/crio.sock" - else - # docker is used - cgroup_driver=$(docker info -f '{{.CgroupDriver}}') - cri_socket="/var/run/dockershim.sock" - fi - cluster_ip_ranges=() - for service_subnet_id in ${KURYR_SERVICE_SUBNETS_IDS[@]}; do - service_cidr=$(openstack --os-cloud devstack-admin \ - --os-region "$REGION_NAME" \ - subnet show "$service_subnet_id" \ - -c cidr -f value) - cluster_ip_ranges+=($(split_subnet "$service_cidr" | cut -f1)) - done - - # TODO(gryf): take care of cri-o case aswell - rm -f ${output_dir}/kubeadm-join.yaml - cat >> ${output_dir}/kubeadm-join.yaml << EOF -apiVersion: kubeadm.k8s.io/v1beta3 -discovery: - bootstrapToken: - apiServerEndpoint: ${SERVICE_HOST}:${KURYR_K8S_API_PORT} - token: "${KURYR_K8S_TOKEN}" - unsafeSkipCAVerification: true - tlsBootstrapToken: "${KURYR_K8S_TOKEN}" -kind: JoinConfiguration -nodeRegistration: - criSocket: "$cri_socket" - kubeletExtraArgs: - enable-server: "true" - taints: - [] ---- -apiVersion: kubelet.config.k8s.io/v1beta1 -kind: KubeletConfiguration -failSwapOn: false -address: "0.0.0.0" -enableServer: true -cgroupDriver: $cgroup_driver -EOF - sudo -E kubeadm join --ignore-preflight-errors Swap \ - --config ${output_dir}/kubeadm-join.yaml -} - -function get_k8s_apiserver { - # assumption is, there is no other cluster, so there is only one API - # server. - echo "$(kubectl config view -o jsonpath='{.clusters[].cluster.server}')" -} - -function get_k8s_token { - local secret - secret=$(kubectl get secrets -o jsonpath='{.items[0].metadata.name}') - echo $(kubectl get secret $secret -o jsonpath='{.items[0].data.token}' | \ - base64 -d) -} - -function kubeadm_reset { - sudo kubeadm reset -f - sudo iptables -F - sudo iptables -t nat -F - sudo iptables -t mangle -F - sudo iptables -X - sudo ipvsadm -C -} - -function kubeadm_uninstall { - sudo systemctl stop kubelet - apt_get purge --allow-change-held-packages. kubelet kubeadm kubeadm \ - kubernetes-cni apt-transport-https - sudo add-apt-repository -r -y \ - "deb https://apt.kubernetes.io/ kubernetes-xenial main" - REPOS_UPDATED=False apt_get_update - sudo rm -fr /etc/default/kubelet /etc/kubernetes -} diff --git a/devstack/lib/kuryr_kubernetes b/devstack/lib/kuryr_kubernetes deleted file mode 100644 index 759257f94..000000000 --- a/devstack/lib/kuryr_kubernetes +++ /dev/null @@ -1,1641 +0,0 @@ -#!/bin/bash -# -# lib/kuryr -# Utilities for kuryr-kubernetes devstack -# bind_for_kubelet -# Description: Creates an OVS internal port so that baremetal kubelet will be -# able to make both liveness and readiness http/tcp probes. 
-#   Params:
-#      project - Id or name of the project used for kuryr devstack
-#      port    - Port to open for K8s API, relevant only for OpenStack infra
-
-# Dependencies:
-# (none)
-
-KURYR_CONF_NEUTRON=$(trueorfalse True KURYR_CONFIGURE_NEUTRON_DEFAULTS)
-KURYR_IPV6=$(trueorfalse False KURYR_IPV6)
-KURYR_DUAL_STACK=$(trueorfalse False KURYR_DUAL_STACK)
-KURYR_USE_LC=$(trueorfalse False KURYR_CONTAINERS_USE_LOWER_CONSTRAINTS)
-
-
-function container_runtime {
-    # Ignore errors when killing/removing a container that isn't running,
-    # so that unstack isn't terminated.
-    # TODO: Support for CRI-O if it's required.
-    local regex_cmds_ignore="(kill|rm)\s+"
-
-    if [[ ${CONTAINER_ENGINE} == 'crio' ]]; then
-        sudo podman "$@" || die $LINENO "Error when running podman command"
-    else
-        if [[ $@ =~ $regex_cmds_ignore ]]; then
-            docker "$@"
-        else
-            docker "$@" || die $LINENO "Error when running docker command"
-        fi
-    fi
-}
-
-function ovs_bind_for_kubelet {
-    local port_id
-    local port_mac
-    local fixed_ips
-    local port_ips
-    local port_subnets
-    local prefix
-    local project_id
-    local port_number
-    local security_group
-    local ifname
-    local service_subnet_cidr
-    local pod_subnet_gw
-    local cidrs
-    local _sp_id=${KURYR_NEUTRON_DEFAULT_SUBNETPOOL_ID}
-
-    project_id="$1"
-    port_number="$2"
-    security_group=$(openstack security group list \
-        --project "$project_id" -c ID -c Name -f value | \
-        awk '{if ($2=="default") print $1}')
-    port_id=$(openstack port create \
-        --device-owner compute:kuryr \
-        --project "$project_id" \
-        --security-group "$security_group" \
-        --security-group service_pod_access \
-        --host "${HOSTNAME}" \
-        --network "${KURYR_NEUTRON_DEFAULT_POD_NET}" \
-        -f value -c id \
-        kubelet-"${HOSTNAME}")
-
-    ifname="kubelet${port_id}"
-    ifname="${ifname:0:14}"
-    port_mac=$(openstack port show "$port_id" -c mac_address -f value)
-    fixed_ips=$(openstack port show "$port_id" -f value -c fixed_ips)
-    port_ips=($(python3 -c "print(' '.join([x['ip_address'] for x in ${fixed_ips}]))"))
-    port_subnets=($(python3 -c "print(' '.join([x['subnet_id'] for x in ${fixed_ips}]))"))
-
-    sudo ovs-vsctl -- --may-exist add-port $OVS_BRIDGE "$ifname" \
-        -- set Interface "$ifname" type=internal \
-        -- set Interface "$ifname" external-ids:iface-status=active \
-        -- set Interface "$ifname" external-ids:attached-mac="$port_mac" \
-        -- set Interface "$ifname" external-ids:iface-id="$port_id"
-
-    sudo ip link set dev "$ifname" address "$port_mac"
-    sudo ip link set dev "$ifname" up
-    for i in "${!port_ips[@]}"; do
-        prefix=$(openstack subnet show "${port_subnets[$i]}" \
-            -c cidr -f value | \
-            cut -f2 -d/)
-        sudo ip addr add "${port_ips[$i]}/${prefix}" dev "$ifname"
-    done
-
-    # TODO(dulek): This hack is for compatibility with the multinode job; we
-    #              might want to do it better one day and actually support
-    #              dual stack and NP here.
-    if [[ -z ${KURYR_SERVICE_SUBNETS_IDS} ]]; then
-        KURYR_SERVICE_SUBNETS_IDS=(${KURYR_NEUTRON_DEFAULT_SERVICE_SUBNET}-IPv4)
-        KURYR_POD_SUBNETS_IDS=(${KURYR_NEUTRON_DEFAULT_POD_SUBNET}-IPv4)
-    fi
-
-    if [[ -z ${KURYR_SUBNETPOOLS_IDS} ]]; then
-        # NOTE(gryf): If the KURYR_SUBNETPOOLS_IDS variable is not populated,
-        # it probably means that the kuryr-kubernetes service is not enabled.
-        # But if the kuryr-daemon service is enabled (which is the case for
-        # multi-node setups, where worker nodes should have it enabled), we
-        # need to have it filled.
- export KURYR_SUBNETPOOLS_IDS=() - export KURYR_ETHERTYPES=() - if [[ "$KURYR_IPV6" == "False" ]]; then - export KURYR_ETHERTYPE=IPv4 - KURYR_ETHERTYPES+=("IPv4") - KURYR_SUBNETPOOLS_IDS+=(${_sp_id:-${SUBNETPOOL_V4_ID}}) - else - KURYR_ETHERTYPES+=("IPv6") - KURYR_SUBNETPOOLS_IDS+=($(openstack \ - --os-cloud devstack-admin \ - --os-region "${REGION_NAME}" \ - subnet pool show ${SUBNETPOOL_KURYR_NAME_V6} -c id -f value)) - fi - fi - - for i in "${!KURYR_SERVICE_SUBNETS_IDS[@]}"; do - pod_subnet_gw=$(openstack subnet show "${KURYR_POD_SUBNETS_IDS[$i]}" \ - -c gateway_ip -f value) - if is_service_enabled kuryr-kubernetes && [[ "$KURYR_SUBNET_DRIVER" == "namespace" ]]; then - cidrs=$(openstack subnet pool show "${KURYR_SUBNETPOOLS_IDS[$i]}" -c prefixes -f value) - subnetpool_cidr=$(python3 -c "print(${cidrs}[0])") - sudo ip route add "$subnetpool_cidr" via "$pod_subnet_gw" dev "$ifname" - else - service_subnet_cidr=$(openstack --os-cloud devstack-admin \ - --os-region "$REGION_NAME" \ - subnet show "${KURYR_SERVICE_SUBNETS_IDS[$i]}" \ - -c cidr -f value) - sudo ip route add "$service_subnet_cidr" via "$pod_subnet_gw" dev "$ifname" - fi - done - - if [ -n "$port_number" ]; then - # if openstack-INPUT chain doesn't exist we create it in INPUT (for - # local development envs since openstack-INPUT is usually only in gates) - if [[ "$KURYR_IPV6" == "False" || "$KURYR_DUAL_STACK" == "True" ]]; then - sudo iptables -I openstack-INPUT 1 \ - -p tcp -s 0.0.0.0/0 -d 0.0.0.0/0 --dport $port_number -j ACCEPT || \ - sudo iptables -I INPUT 1 \ - -p tcp -m conntrack --ctstate NEW \ - -m tcp --dport "$port_number" \ - -m comment --comment "kuryr-devstack: Access to OpenShift API" -j ACCEPT - fi - if [[ "$KURYR_IPV6" == "True" || "$KURYR_DUAL_STACK" == "True" ]]; then - sudo ip6tables -I openstack-INPUT 1 \ - -p tcp -s ::/0 -d ::/0 --dport $port_number -j ACCEPT || \ - sudo ip6tables -I INPUT 1 \ - -p tcp -m conntrack --ctstate NEW \ - -m tcp --dport "$port_number" \ - -m comment --comment "kuryr-devstack: Access to OpenShift API" -j ACCEPT - fi - fi -} - -# _allocation_range -# Description: Writes out tab separated usable ip range for a CIDR -# Params: -# cidr - The cidr to get the range for -# gateway_position - Whether to reserve at 'beginning' or at 'end' -function _allocation_range { - python3 - <> "${output_dir}/config_map.yml" << EOF -apiVersion: v1 -kind: ConfigMap -metadata: - name: kuryr-config - namespace: kube-system -data: - kuryr.conf: | -EOF - - indent < "${conf_path}" >> "${output_dir}/config_map.yml" -} - -function generate_kuryr_certificates_secret { - local output_dir - local certs_bundle_path - output_dir=$1 - certs_bundle_path=${2:-""} - - mkdir -p "$output_dir" - rm -f "${output_dir}/certificates_secret.yml" - - CA_CERT=\"\" # It's a "" string that will be inserted into yaml file. - - if [ "$certs_bundle_path" -a -f "$certs_bundle_path" ]; then - CA_CERT=$(base64 -w0 < "$certs_bundle_path") - fi - - cat >> "${output_dir}/certificates_secret.yml" << EOF -apiVersion: v1 -kind: Secret -metadata: - name: kuryr-certificates - namespace: kube-system -type: Opaque -data: - kuryr-ca-bundle.crt: $CA_CERT -EOF -} - -# Generates kuryr-controller service account and kuryr-cni service account. 
-function generate_kuryr_service_account { - output_dir=$1 - mkdir -p "$output_dir" - rm -f "${output_dir}/controller_service_account.yml" - rm -f "${output_dir}/cni_service_account.yml" - cat >> "${output_dir}/controller_service_account.yml" << EOF ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: kuryr-controller - namespace: kube-system ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: kuryr-controller -rules: -- apiGroups: - - "" - verbs: ["*"] - resources: - - endpoints - - pods - - services - - services/status - - namespaces -- apiGroups: - - "" - verbs: ["get", "list", "watch"] - resources: - - nodes -- apiGroups: - - openstack.org - verbs: ["*"] - resources: - - kuryrnetworks - - kuryrnetworkpolicies - - kuryrloadbalancers - - kuryrports -- apiGroups: ["networking.k8s.io"] - resources: - - networkpolicies - verbs: - - get - - list - - watch - - update - - patch -- apiGroups: ["k8s.cni.cncf.io"] - resources: - - network-attachment-definitions - verbs: - - get -- apiGroups: ["", "events.k8s.io"] - resources: - - events - verbs: - - create ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: kuryr-controller-global -subjects: -- kind: ServiceAccount - name: kuryr-controller - namespace: kube-system -roleRef: - kind: ClusterRole - name: kuryr-controller - apiGroup: rbac.authorization.k8s.io -EOF - - cat >> "${output_dir}/cni_service_account.yml" << EOF ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: kuryr-cni - namespace: kube-system ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: kuryr-cni -rules: -- apiGroups: - - "" - verbs: ["*"] - resources: - - pods -- apiGroups: - - openstack.org - verbs: ["*"] - resources: - - kuryrports -- apiGroups: ["", "events.k8s.io"] - resources: - - events - verbs: - - create ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: kuryr-cni-global -subjects: -- kind: ServiceAccount - name: kuryr-cni - namespace: kube-system -roleRef: - kind: ClusterRole - name: kuryr-cni - apiGroup: rbac.authorization.k8s.io -EOF -} - -function generate_controller_deployment { - output_dir=$1 - health_server_port=$2 - controller_ha=$3 - mkdir -p "$output_dir" - rm -f "${output_dir}/controller_deployment.yml" - cat >> "${output_dir}/controller_deployment.yml" << EOF -apiVersion: apps/v1 -kind: Deployment -metadata: - labels: - name: kuryr-controller - name: kuryr-controller - namespace: kube-system -spec: - replicas: ${KURYR_CONTROLLER_REPLICAS:-1} - selector: - matchLabels: - name: kuryr-controller -EOF - - # When running without HA we should make sure that we won't have more than - # one kuryr-controller pod in the deployment. 
- if [ "$controller_ha" == "False" ]; then - cat >> "${output_dir}/controller_deployment.yml" << EOF - strategy: - type: RollingUpdate - rollingUpdate: - maxSurge: 0 - maxUnavailable: 1 -EOF - fi - - cat >> "${output_dir}/controller_deployment.yml" << EOF - template: - metadata: - labels: - name: kuryr-controller - name: kuryr-controller - spec: - serviceAccountName: kuryr-controller - automountServiceAccountToken: true - hostNetwork: true - containers: -EOF - - if [ "$controller_ha" == "True" ]; then - cat >> "${output_dir}/controller_deployment.yml" << EOF - - image: gcr.io/google_containers/leader-elector:0.5 - name: leader-elector - args: - - "--election=kuryr-controller" - - "--http=0.0.0.0:${KURYR_CONTROLLER_HA_PORT:-16401}" - - "--election-namespace=kube-system" - - "--ttl=5s" - ports: - - containerPort: ${KURYR_CONTROLLER_HA_PORT:-16401} - protocol: TCP -EOF - fi - - cat >> "${output_dir}/controller_deployment.yml" << EOF - - image: kuryr/controller:latest - imagePullPolicy: Never - name: controller - terminationMessagePath: "/dev/termination-log" - volumeMounts: - - name: config-volume - mountPath: "/etc/kuryr" - - name: certificates-volume - mountPath: "/etc/ssl/certs" - readOnly: true - readinessProbe: - httpGet: - path: /ready - port: ${health_server_port} - scheme: HTTP - timeoutSeconds: 5 - livenessProbe: - httpGet: - path: /alive - port: ${health_server_port} - initialDelaySeconds: 15 -EOF - - cat >> "${output_dir}/controller_deployment.yml" << EOF - volumes: - - name: config-volume - configMap: - name: kuryr-config - - name: certificates-volume - secret: - secretName: kuryr-certificates - restartPolicy: Always - tolerations: - - key: "node-role.kubernetes.io/master" - operator: "Exists" - effect: "NoSchedule" - - key: "node.kubernetes.io/not-ready" - operator: "Exists" - effect: "NoSchedule" -EOF -} - -function generate_cni_daemon_set { - output_dir=$1 - cni_health_server_port=$2 - local var_run=${VAR_RUN_PATH:-/var/run} - mkdir -p "$output_dir" - rm -f "${output_dir}/cni_ds.yml" - cat >> "${output_dir}/cni_ds.yml" << EOF -apiVersion: apps/v1 -kind: DaemonSet -metadata: - name: kuryr-cni-ds - namespace: kube-system - labels: - tier: node - app: kuryr-cni -spec: - selector: - matchLabels: - app: kuryr-cni - template: - metadata: - labels: - tier: node - app: kuryr-cni - spec: - hostNetwork: true - tolerations: - - key: node-role.kubernetes.io/master - operator: Exists - effect: NoSchedule - - key: "node.kubernetes.io/not-ready" - operator: "Exists" - effect: "NoSchedule" - serviceAccountName: kuryr-cni - containers: - - name: kuryr-cni - image: kuryr/cni:latest - imagePullPolicy: Never - command: [ "cni_ds_init" ] - env: - - name: KUBERNETES_NODE_NAME - valueFrom: - fieldRef: - fieldPath: spec.nodeName - - name: KURYR_CNI_POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - securityContext: - privileged: true - volumeMounts: - - name: bin - mountPath: /opt/cni/bin - - name: net-conf - mountPath: /etc/cni/net.d - - name: config-volume - mountPath: /etc/kuryr -EOF - if [ "$CONTAINER_ENGINE" != "crio" ]; then - cat >> "${output_dir}/cni_ds.yml" << EOF - - name: proc - mountPath: /host_proc -EOF - - fi - cat >> "${output_dir}/cni_ds.yml" << EOF - - name: var-pci - mountPath: /var/pci_address - - name: var-run - mountPath: /var/run - mountPropagation: HostToContainer -EOF - # NOTE(gryf): assuming the --namespaces-dir parameter would not be used, - # otherwise /var/run/$crio_netns_path is all wrong - if [ "$CONTAINER_ENGINE" = "crio" ] && \ - [ "${VAR_RUN_PATH}" != 
"/var/run" ]; then - cat >> "${output_dir}/cni_ds.yml" << EOF - - name: netns - mountPath: /var/run/netns - mountPropagation: HostToContainer -EOF - fi - cat >> "${output_dir}/cni_ds.yml" << EOF - readinessProbe: - httpGet: - path: /ready - port: ${cni_health_server_port} - scheme: HTTP - initialDelaySeconds: 60 - timeoutSeconds: 10 - livenessProbe: - httpGet: - path: /alive - port: ${cni_health_server_port} - initialDelaySeconds: 60 - volumes: - - name: bin - hostPath: - path: ${CNI_PLUGIN_DIR} - - name: net-conf - hostPath: - path: ${CNI_CONF_DIR} - - name: config-volume - configMap: - name: kuryr-config - - name: var-run - hostPath: - path: ${var_run} -EOF - if [[ "$CONTAINER_ENGINE" != "crio" ]]; then - cat >> "${output_dir}/cni_ds.yml" << EOF - - name: proc - hostPath: - path: /proc -EOF - fi - cat >> "${output_dir}/cni_ds.yml" << EOF - - name: var-pci - hostPath: - path: /var/pci_address -EOF - if [ "${CONTAINER_ENGINE}" = "crio" ] && \ - [ "${VAR_RUN_PATH}" != "/var/run" ]; then - cat >> "${output_dir}/cni_ds.yml" << EOF - - name: netns - hostPath: - path: /var/run/netns -EOF - fi -} - -# lb_state -# Description: Returns the state of the load balancer -# Params: -# id - Id or name of the loadbalancer the state of which needs to be -# retrieved. -function lb_state { - local lb_id - - lb_id="$1" - openstack loadbalancer show "$lb_id" | \ - awk '/provisioning_status/ {print $4}' -} - -function _wait_for_lb { - local lb_name - local curr_time - local time_diff - local start_time - - lb_name="$1" - timeout=${2:-$KURYR_WAIT_TIMEOUT} - - echo -n "Waiting for LB:$lb_name" - start_time=$(date +%s) - - while [[ "$(lb_state "$lb_name")" != "ACTIVE" ]]; do - echo -n "Waiting till LB=$lb_name is ACTIVE." - curr_time=$(date +%s) - time_diff=$((curr_time - start_time)) - [[ $time_diff -le $timeout ]] || die "Timed out waiting for $lb_name" - sleep 5 - done -} - -# create_load_balancer -# Description: Creates an OpenStack Load Balancer with either neutron LBaaS -# or Octavia -# Params: -# lb_name: Name to give to the load balancer. -# lb_vip_subnet: Id or name of the subnet where lb_vip should be -# allocated. -# project_id: Id of the project where the load balancer should be -# allocated. -# lb_vip: Virtual IP to give to the load balancer - optional. -function create_load_balancer { - local lb_name - local lb_vip_subnet - local lb_params - local project_id - - lb_name="$1" - lb_vip_subnet="$2" - project_id="$3" - - lb_params=" --name $lb_name " - if [ -z "$4" ]; then - echo -n "create_load_balancer LB=$lb_name, lb_vip not provided." - else - lb_params+=" --vip-address $4" - fi - - lb_params+=" --project ${project_id} --vip-subnet-id $lb_vip_subnet" - openstack loadbalancer create $lb_params -} - -# create_load_balancer_listener -# Description: Creates an OpenStack Load Balancer Listener for the specified -# Load Balancer with either neutron LBaaS or Octavia -# Params: -# name: Name to give to the load balancer listener. -# protocol: Whether it is HTTP, HTTPS, TCP, etc. -# port: The TCP port number to listen to. -# data_timeouts: Octavia's timeouts for client and server inactivity. -# lb: Id or name of the Load Balancer we want to add the Listener to. -# project_id: Id of the project where this listener belongs to. 
-function create_load_balancer_listener {
-    local name
-    local protocol
-    local port
-    local lb
-    local data_timeouts
-    local max_timeout
-    local project_id
-
-    name="$1"
-    protocol="$2"
-    port="$3"
-    lb="$4"
-    project_id="$5"
-    data_timeouts="$6"
-
-    max_timeout=1200
-    # Octavia needs the LB to be active for the listener
-    _wait_for_lb "$lb" "$max_timeout"
-
-    openstack loadbalancer listener create --name "$name" \
-        --protocol "$protocol" \
-        --protocol-port "$port" \
-        --timeout-client-data "$data_timeouts" \
-        --timeout-member-data "$data_timeouts" \
-        "$lb"
-}
-
-# create_load_balancer_pool
-# Description: Creates an OpenStack Load Balancer Pool for the specified
-#              Load Balancer listener with either neutron LBaaS or Octavia
-# Params:
-#      name: Name to give to the load balancer pool.
-#      protocol: Whether it is HTTP, HTTPS, TCP, etc.
-#      algorithm: Load Balancing algorithm to use.
-#      listener: Id or name of the Load Balancer Listener we want to add the
-#                pool to.
-#      project_id: Id of the project this pool belongs to.
-#      lb: Id or name of the Load Balancer we want to add the pool to
-#          (optional).
-function create_load_balancer_pool {
-    local name
-    local protocol
-    local algorithm
-    local listener
-    local lb
-    local project_id
-
-    name="$1"
-    protocol="$2"
-    algorithm="$3"
-    listener="$4"
-    project_id="$5"
-    lb="$6"
-
-    # We must wait for the LB to be active before we can put a Pool for it
-    _wait_for_lb "$lb"
-
-    openstack loadbalancer pool create --name "$name" \
-        --listener "$listener" \
-        --protocol "$protocol" \
-        --lb-algorithm "$algorithm"
-}
-
-# create_load_balancer_member
-# Description: Creates an OpenStack load balancer pool member
-# Params:
-#      name: Name to give to the load balancer pool member.
-#      address: IP address the member listens on.
-#      port: Port number the pool member is listening on.
-#      pool: Id or name of the Load Balancer pool this member belongs to.
-#      subnet: Id or name of the subnet the member address belongs to.
-#      lb: Id or name of the load balancer the member belongs to.
-#      project_id: Id of the project this member belongs to.
-function create_load_balancer_member {
-    local name
-    local address
-    local port
-    local pool
-    local lb
-    local project_id
-
-    name="$1"
-    address="$2"
-    port="$3"
-    pool="$4"
-    lb="$5"
-    project_id="$6"
-
-    # We must wait for the pool creation update before we can add members
-    _wait_for_lb "$lb"
-
-    openstack loadbalancer member create --name "$name" \
-        --address "$address" \
-        --protocol-port "$port" \
-        "$pool"
-}
-
-# split_subnet
-# Description: Splits a subnet in two subnets that constitute its halves
-# Params:
-#      cidr: Subnet CIDR to split
-# Returns: tab separated CIDRs of the two halves.
-function split_subnet {
-    # precondition: The passed cidr must be of a prefix <= 30
-    # NOTE: reconstructed; minimal implementation of the contract
-    # documented above.
-    python3 - "$1" <<EOF
-import sys
-from ipaddress import ip_network
-
-halves = ip_network(sys.argv[1]).subnets(prefixlen_diff=1)
-print("\t".join(str(half) for half in halves))
-EOF
-}
[drawio SVG reduced to its text labels: "Network Policy driver", "Network
Policy Pod SG driver", "VIF Pool driver", "Network Policy svc SG driver",
"LBaaS driver", "Policy handler", "update_vif_sgs", "update_lbaas_sg",
"ensure_network_policy", "get_pod_sgs", "get_affected_pods", "get_svc_sgs"]
\ No newline at end of file
diff --git a/doc/images/external_traffic_to_l7_router.svg b/doc/images/external_traffic_to_l7_router.svg
deleted file mode 100644
index caf90638e..000000000
--- a/doc/images/external_traffic_to_l7_router.svg
+++ /dev/null
@@ -1,4 +0,0 @@
[SVG markup stripped]
diff --git a/doc/images/fuxi_k8s_components.png b/doc/images/fuxi_k8s_components.png
deleted file mode 100644
index a30a5e23f..000000000
Binary files a/doc/images/fuxi_k8s_components.png and /dev/null differ
diff --git a/doc/images/kuryr_k8s_components.png b/doc/images/kuryr_k8s_components.png
deleted file mode 100644
index f797d7ccb..000000000
Binary files a/doc/images/kuryr_k8s_components.png and /dev/null differ
diff --git a/doc/images/kuryr_k8s_components.svg b/doc/images/kuryr_k8s_components.svg
deleted file mode 100644
index 32d517897..000000000
--- a/doc/images/kuryr_k8s_components.svg
+++ /dev/null
@@ -1,367 +0,0 @@
[SVG reduced to its text labels: "K8s APIserver", "Patch", "Watch",
"Controller", "Kubelet", "CNI Driver", "CNI 0.3.1", "Neutron", "v2"]
diff --git a/doc/images/kuryr_k8s_ingress_ctrl_flow_diagram.svg b/doc/images/kuryr_k8s_ingress_ctrl_flow_diagram.svg
deleted file mode 100644
index e85bd3010..000000000
--- a/doc/images/kuryr_k8s_ingress_ctrl_flow_diagram.svg
+++ /dev/null
@@ -1,4 +0,0 @@
[SVG markup stripped]
diff --git a/doc/images/kuryr_k8s_ingress_sw_components.svg b/doc/images/kuryr_k8s_ingress_sw_components.svg
deleted file mode 100644
index 839508c9a..000000000
--- a/doc/images/kuryr_k8s_ingress_sw_components.svg
+++ /dev/null
@@ -1,4 +0,0 @@
[SVG markup stripped]
diff --git a/doc/images/kuryr_k8s_ocp_route_ctrl_sw.svg b/doc/images/kuryr_k8s_ocp_route_ctrl_sw.svg
deleted file mode 100644
index 41d84bb9d..000000000
--- a/doc/images/kuryr_k8s_ocp_route_ctrl_sw.svg
+++ /dev/null
@@ -1,4 +0,0 @@
[SVG markup stripped]
diff --git a/doc/images/l7_routing_and_user_lb_neutron_entities.svg b/doc/images/l7_routing_and_user_lb_neutron_entities.svg
deleted file mode 100644
index 391792081..000000000
--- a/doc/images/l7_routing_and_user_lb_neutron_entities.svg
+++ /dev/null
@@ -1,4 +0,0 @@
[SVG markup stripped]
diff --git a/doc/images/lbaas_translation.svg b/doc/images/lbaas_translation.svg
deleted file mode 100644
index 4a45332d1..000000000
--- a/doc/images/lbaas_translation.svg
+++ /dev/null
@@ -1,2569 +0,0 @@
[SVG reduced to its text labels: "ref. impl", "Worker A"/"Worker B" nodes
with Pods "10.8.0.3/16", "10.8.0.4/16", "10.8.0.5/16", "10.8.1.5/16",
"iptables kubeproxy", "Service endpoints", "Cluster IP" "192.168.2.8/24",
ports "80"/"443", "Neutron", "lbaasv2", "lb", "listener", "pool",
"resources"]
\ No newline at end of file
diff --git a/doc/images/net-policy.svg b/doc/images/net-policy.svg
deleted file mode 100644
index adb6a0293..000000000
--- a/doc/images/net-policy.svg
+++ /dev/null
@@ -1,687 +0,0 @@
[SVG reduced to its text labels: "Api server", "Pod watch", "Policy
selectedPods watch", "Pod created", "Pod created with default SG", "Pod
annotated with net-policy SG", "Pod update", "Attach policy", "Attach policy
sg to port"]
\ No newline at end of file
diff --git a/doc/images/pod_creation_flow_daemon.png b/doc/images/pod_creation_flow_daemon.png
deleted file mode 100644
index cbe9a060b..000000000
Binary files a/doc/images/pod_creation_flow_daemon.png and /dev/null differ
diff --git a/doc/images/pod_creation_flow_daemon.svg b/doc/images/pod_creation_flow_daemon.svg
deleted file mode 100644
index e887c5e90..000000000
--- a/doc/images/pod_creation_flow_daemon.svg
+++ /dev/null
@@ -1,1496 +0,0 @@
[SVG reduced to its text labels, a sequence diagram between "K8s APIserver",
"KuryrController", "Neutron", "Kubelet", "KuryrCNI", "Daemon(Watcher)" and
"Daemon(Server)": "create pod()", "ADD ev(pod)", "create kp CRD",
"ADD ev(kp)", "create port()", "update kp status", "update kp (vif)",
"MODIFIED ev(kp) with vif info", "POST addNetwork", "create vif()",
"vif plug()", "configure vif()", "wait vif()", "Get VIF (Manager)",
"show port(port.id)" ("while port.status ≠ active"), "ovs-agent port
active", "wait active", "Get active VIF", "MODIFIED ev(kp) with vif active
info", "201 Accepted", "ADD to network(kp)", "run pod containers()",
"watch for pod running()", "MODIFIED ev(pod) running", "pod running()"]
diff --git a/doc/images/service_creation_diagram.png b/doc/images/service_creation_diagram.png
deleted file mode 100644
index e0fa22118..000000000
Binary files a/doc/images/service_creation_diagram.png and /dev/null differ
diff --git a/doc/images/service_creation_diagram.svg b/doc/images/service_creation_diagram.svg
deleted file mode 100644
index 3cfbdc926..000000000
--- a/doc/images/service_creation_diagram.svg
+++ /dev/null
@@ -1 +0,0 @@
[SVG markup stripped]
\ No newline at end of file
diff --git a/doc/images/update_network_policy_on_pod_creation.svg b/doc/images/update_network_policy_on_pod_creation.svg
deleted file mode 100644
index 93050adf1..000000000
--- a/doc/images/update_network_policy_on_pod_creation.svg
+++ /dev/null
@@ -1,2 +0,0 @@
[drawio SVG reduced to its text labels: "Network Policy Pod SG driver",
"Network Policy SVC SG driver", "VIF handler", "create_sg_rules", "...",
"matched_selectors", "get_svc_sgs", "LBaaS driver", "update_lbaas_sgs"]
\ No newline at end of file diff --git a/doc/images/vif_handler_drivers_design.png b/doc/images/vif_handler_drivers_design.png deleted file mode 100644 index 64e318091..000000000 Binary files a/doc/images/vif_handler_drivers_design.png and /dev/null differ diff --git a/doc/requirements.txt b/doc/requirements.txt deleted file mode 100644 index 56f8df5ad..000000000 --- a/doc/requirements.txt +++ /dev/null @@ -1,6 +0,0 @@ -# The order of packages is significant, because pip processes them in the order -# of appearance. Changing the order has an impact on the overall integration -# process, which may cause wedges in the gate later. -sphinx>=2.0.0,!=2.1.0 # BSD -openstackdocstheme>=2.2.1 # Apache-2.0 -reno>=3.1.0 # Apache-2.0 diff --git a/doc/source/conf.py b/doc/source/conf.py deleted file mode 100755 index 6c80ce1dc..000000000 --- a/doc/source/conf.py +++ /dev/null @@ -1,82 +0,0 @@ -# -*- coding: utf-8 -*- -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import os -import sys - -sys.path.insert(0, os.path.abspath('../..')) -# -- General configuration ---------------------------------------------------- - -# Add any Sphinx extension module names here, as strings. They can be -# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones. -extensions = [ - 'sphinx.ext.autodoc', - 'sphinx.ext.todo', - 'openstackdocstheme', - 'reno.sphinxext' -] - -# autodoc generation is a bit aggressive and a nuisance when doing heavy -# text edit cycles. -# execute "export SPHINX_DEBUG=1" in your terminal to disable - -# The suffix of source filenames. -source_suffix = '.rst' - -# The master toctree document. -master_doc = 'index' - -# General information about the project. -project = 'kuryr-kubernetes' -copyright = '2013, OpenStack Foundation' - -# openstackdocstheme options -openstackdocs_repo_name = 'openstack/kuryr-kubernetes' -openstackdocs_auto_name = False -openstackdocs_bug_project = 'kuryr-kubernetes' -openstackdocs_bug_tag = '' - -# If true, '()' will be appended to :func: etc. cross-reference text. -add_function_parentheses = True - -# If true, the current module name will be prepended to all description -# unit titles (such as .. function::). -add_module_names = True - -# The name of the Pygments (syntax highlighting) style to use. -pygments_style = 'native' - -# -- Options for HTML output -------------------------------------------------- - -# The theme to use for HTML and HTML Help pages. Major themes that come with -# Sphinx are currently 'default' and 'sphinxdoc'. -# html_theme_path = ["."] -html_theme = 'openstackdocs' -# html_static_path = ['static'] - -# Output file base name for HTML help builder. -htmlhelp_basename = '%sdoc' % project - -# Grouping the document tree into LaTeX files. List of tuples -# (source start file, target name, title, author, documentclass -# [howto/manual]). 
-latex_documents = [
-    ('index',
-     '%s.tex' % project,
-     '%s Documentation' % project,
-     'OpenStack Foundation', 'manual'),
-]
-
-# Example configuration for intersphinx: refer to the Python standard library.
-#intersphinx_mapping = {'http://docs.python.org/': None}
diff --git a/doc/source/contributor/contributing.rst b/doc/source/contributor/contributing.rst
deleted file mode 100644
index ebb97911f..000000000
--- a/doc/source/contributor/contributing.rst
+++ /dev/null
@@ -1,82 +0,0 @@
-============================
-So You Want to Contribute...
-============================
-
-For general information on contributing to OpenStack, please check out the
-`contributor guide `_ to get started. It covers all the basics that are
-common to all OpenStack projects: the accounts you need, the basics of
-interacting with our Gerrit review system, how we communicate as a
-community, etc.
-
-The sections below cover the more project-specific information you need to
-get started with kuryr-kubernetes.
-
-
-Communication
--------------
-
-The primary communication channel of the kuryr-kubernetes team is the
-`#openstack-kuryr channel on IRC `_. For more formal inquiries you can use
-the [kuryr] tag on the `openstack-discuss mailing list `_. The
-kuryr-kubernetes team does not hold weekly meetings, but we have office
-hours every Monday at 15:00 UTC on our IRC channel.
-
-
-Contacting the Core Team
-------------------------
-
-Outside of office hours, the kuryr-kubernetes team is available mostly
-during CET working hours (7:00-17:00 UTC), as most of the team is located in
-Europe. Feel free to try pinging dulek, ltomasbo, maysams or gryf on IRC; we
-have bouncers set up, so we'll answer once online.
-
-
-New Feature Planning
---------------------
-
-We don't follow a very detailed feature planning process. If you want to
-implement a feature, come talk to us on IRC, create a `blueprint on
-Launchpad `_ and start coding! kuryr-kubernetes follows the OpenStack
-release schedule pretty loosely, as we're more bound to the Kubernetes
-release schedule. This means that we do not observe deadlines as hard as
-other projects do.
-
-
-Task Tracking
--------------
-
-We track our `tasks in Launchpad `_.
-
-If you're looking for a smaller, easier work item to pick up and get started
-on, search for the 'low-hanging-fruit' tag in either blueprints or bugs.
-
-
-Reporting a Bug
----------------
-
-You found an issue and want to make sure we are aware of it? You can do so
-on `Launchpad `_. It won't hurt to ping us about it on IRC too.
-
-
-Getting Your Patch Merged
--------------------------
-
-We follow the normal procedures, requiring two +2's before approving the
-patch. Due to the limited number of contributors, we do not require that
-those +2's come from reviewers working for separate businesses.
-
-If your patch is stuck in review, please ping us on IRC as listed in the
-sections above.
-
-
-Project Team Lead Duties
-------------------------
-
-All common PTL duties are enumerated in the `PTL guide `_.
-
-An additional PTL duty is to maintain the `kuryr images on Docker Hub `_.
diff --git a/doc/source/contributor/index.rst b/doc/source/contributor/index.rst
deleted file mode 100644
index c5a86cac1..000000000
--- a/doc/source/contributor/index.rst
+++ /dev/null
@@ -1,8 +0,0 @@
-===========================
- Contributor Documentation
-===========================
-
-.. toctree::
-   :maxdepth: 2
-
-   contributing
diff --git a/doc/source/devref/annotation_project_driver.rst b/doc/source/devref/annotation_project_driver.rst
deleted file mode 100644
index 9991ebcaa..000000000
--- a/doc/source/devref/annotation_project_driver.rst
+++ /dev/null
@@ -1,98 +0,0 @@
-..
-      This work is licensed under a Creative Commons Attribution 3.0 Unported
-      License.
-
-      http://creativecommons.org/licenses/by/3.0/legalcode
-
-      Convention for heading levels in Neutron devref:
-      =======  Heading 0 (reserved for the title in a document)
-      -------  Heading 1
-      ~~~~~~~  Heading 2
-      +++++++  Heading 3
-      '''''''  Heading 4
-      (Avoid deeper levels because they do not render well.)
-
-
-======================================
-Kuryr Support Multiple Projects Design
-======================================
-
-
-Purpose
--------
-
-Currently, ``kuryr-kubernetes`` implements only a default project driver:
-the project id of the OpenStack resources backing k8s resources is specified
-by the configuration option ``neutron_defaults.project``. This means all of
-these OpenStack resources have the same project id, which results in some
-puzzling issues in multi-tenant environments. For example, the metering and
-billing system cannot classify these resources, and the resources may exceed
-the tenant's quota. In order to resolve these issues, we need to ensure
-these resources can have different project ids (for the sake of simplicity,
-we can treat a project as a tenant).
-
-
-Overview
---------
-
-Implement an annotation project driver for ``namespace``, ``pod``,
-``service`` and ``network policy``. The driver reads the project id from the
-annotations of the resource's namespace.
-
-
-Proposed Solution
------------------
-
-Currently, the OpenStack resources created by ``kuryr-kubernetes`` only
-involve ``neutron`` and ``octavia``. ``Neutron`` and ``octavia`` use the
-OpenStack project id to isolate their resources, so we can treat an
-OpenStack project as a metering or billing tenant. Generally,
-``kuryr-kubernetes`` uses the ``kuryr`` user to create/delete/update/read
-``neutron`` or ``octavia`` resources. The ``kuryr`` user has the admin role,
-so ``kuryr-kubernetes`` can manage any project's resources.
-
-So, I propose that we introduce an annotation
-``openstack.org/kuryr-project`` that should be set when a k8s namespace is
-created. The annotation's value is an OpenStack project's id. One k8s
-namespace can only specify one OpenStack project, but one OpenStack project
-can be associated with one or multiple k8s namespaces.
-
-.. note::
-
-   ``kuryr-kubernetes`` can not verify the project id specified by
-   ``openstack.org/kuryr-project``, so the validity of the project id should
-   be ensured by a third-party process. In addition, we suggest granting the
-   privilege of creating and updating k8s namespaces only to users with the
-   admin role (to avoid common users creating k8s namespaces arbitrarily).
-
-When a user creates a ``pod``, ``service`` or ``network policy``, the new
-project driver will retrieve the resource's namespace, get the namespace's
-information, and then try to get the project id from the
-``openstack.org/kuryr-project`` annotation. If the driver succeeds in
-getting the project id, the project id is returned to the resource's
-handler, and the handler creates the related OpenStack resources with that
-project id.
-
-.. note::
-
-   This only solves the resource ownership issues. No isolation in terms of
-   networking will be achieved this way.
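
For instance, a namespace tied to an existing OpenStack project could look
like the following sketch; only the annotation key comes from this spec,
while the namespace name and the project UUID are illustrative placeholders:

.. code-block:: yaml

   apiVersion: v1
   kind: Namespace
   metadata:
     name: billing-team
     annotations:
       openstack.org/kuryr-project: "1e2c51c6a3f34ea59b0bd6da4d71dcfa"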
-
-For namespaces, the namespace handler can get the namespace information from
-the ``on_present`` function's parameter, so the namespace annotation project
-driver can try to get the project id from that information directly.
-
-If a user doesn't add the ``openstack.org/kuryr-project`` annotation to a
-namespace, the default project needs to be selected; the default project is
-specified by the configuration option ``neutron_defaults.project``. If the
-default project is not specified either, the driver will raise a
-``cfg.RequiredOptError`` error.
-
-
-Testing
--------
-
-Need to add a new CI gate with these drivers
-
-Tempest Tests
-~~~~~~~~~~~~~
-
-Need to add tempest tests
diff --git a/doc/source/devref/health_manager.rst b/doc/source/devref/health_manager.rst
deleted file mode 100644
index eb82185ae..000000000
--- a/doc/source/devref/health_manager.rst
+++ /dev/null
@@ -1,84 +0,0 @@
-..
-      This work is licensed under a Creative Commons Attribution 3.0 Unported
-      License.
-
-      http://creativecommons.org/licenses/by/3.0/legalcode
-
-      Convention for heading levels in Neutron devref:
-      =======  Heading 0 (reserved for the title in a document)
-      -------  Heading 1
-      ~~~~~~~  Heading 2
-      +++++++  Heading 3
-      '''''''  Heading 4
-      (Avoid deeper levels because they do not render well.)
-
-======================================
-Kuryr Kubernetes Health Manager Design
-======================================
-
-Purpose
--------
-
-The purpose of this document is to present the design decisions behind the
-Kuryr Kubernetes Health Managers.
-
-The main purpose of the Health Managers is to perform health verifications
-that assure the readiness and liveness of the Kuryr Controller and CNI pods,
-and so improve how Kubernetes manages Kuryr-Kubernetes pods.
-
-
-Overview
---------
-
-The Kuryr Controller might get into a broken state due to problems like
-being unable to connect to the services it depends on, or those services
-being unhealthy.
-
-It is important to check the health of these services so that Kubernetes
-and its users know when the Kuryr Controller is ready to perform its
-networking tasks. Also, it is necessary to check the health state of the
-Kuryr components in order to assure the Kuryr Controller service is alive.
-To provide these functionalities, the Controller's Health Manager verifies
-and serves the health state of these services and components to the probes.
-
-Besides these problems on the Controller, the Kuryr CNI daemon might also
-get into a broken state as a result of its components being unhealthy or
-necessary configuration not being present. It is essential that the CNI
-components' health and configuration are properly verified to assure the
-CNI daemon is in good shape. In this way, the CNI Health Manager checks and
-serves the health state to the Kubernetes readiness and liveness probes.
-
-
-Proposed Solution
------------------
-
-One of the endpoints provided by the Controller Health Manager checks
-whether it is able to watch the Kubernetes API, authenticate with Keystone
-and talk to Neutron, since these are services needed by the Kuryr
-Controller. These checks assure the Controller's readiness. The other
-endpoint verifies the health state of the Kuryr components and guarantees
-the Controller's liveness.
-
-The CNI Health Manager also provides two endpoints to the Kubernetes
-probes. The endpoint that provides the readiness state to the probe checks
-the connection to the Kubernetes API and the presence of NET_ADMIN
-capabilities.
-The other endpoint, which provides liveness, validates whether the IPDB is
-in working order, whether the maximum number of CNI ADD failures has been
-reached, the health of the CNI components, and whether a memory leak
-exists.
-
-.. note::
-
-   The CNI Health Manager is started with the memory leak check disabled.
-   In order to enable it, set the following option in kuryr.conf to a
-   memory limit value in MiB (the default of ``-1`` keeps the check
-   disabled).
-
-   .. code-block:: ini
-
-      [cni_health_server]
-      max_memory_usage = -1
-
-The CNI Health Manager is added as a process to the CNI daemon and
-communicates with the other two processes, i.e. Watcher and Server, through
-a shared boolean object, which indicates the current health state of each
-component.
-
-The idea behind these two Managers is to combine all the necessary checks
-in servers running inside the Kuryr Controller and CNI pods, and to provide
-the results of these checks to the probes.
diff --git a/doc/source/devref/high_availability.rst b/doc/source/devref/high_availability.rst
deleted file mode 100644
index 2af75d718..000000000
--- a/doc/source/devref/high_availability.rst
+++ /dev/null
@@ -1,143 +0,0 @@
-..
-      This work is licensed under a Creative Commons Attribution 3.0 Unported
-      License.
-
-      http://creativecommons.org/licenses/by/3.0/legalcode
-
-      Convention for heading levels in Neutron devref:
-      =======  Heading 0 (reserved for the title in a document)
-      -------  Heading 1
-      ~~~~~~~  Heading 2
-      +++++++  Heading 3
-      '''''''  Heading 4
-      (Avoid deeper levels because they do not render well.)
-
-================================
-Active/Passive High Availability
-================================
-
-Overview
---------
-
-Initially it was assumed that there would only be a single kuryr-controller
-instance in a Kuryr-Kubernetes deployment. While this simplified a lot of
-the controller code, it is obviously not a perfect situation. Having
-redundant controllers can help with achieving higher availability and
-scalability of the deployment.
-
-Now, with the introduction of the possibility to run Kuryr in Pods on a
-Kubernetes cluster, HA is much easier to implement. The purpose of this
-document is to explain how it will work in practice.
-
-
-Proposed Solution
------------------
-
-There are two types of HA - Active/Passive and Active/Active. In this
-document we'll focus on the former. A/P basically works by having one of
-the instances be the leader (doing all the exclusive tasks) while the other
-instances wait in *standby* mode, ready to take over the leader role in
-case the leader *dies*. As you can see, a *leader election* mechanism is
-required to make this work.
-
-
-Leader election
-~~~~~~~~~~~~~~~
-
-The idea here is to use a leader election mechanism based on Kubernetes
-endpoints. The idea is neatly `explained on Kubernetes blog`_. Election is
-based on Endpoint resources that hold an annotation about the current
-leader and its leadership lease time. If the leader dies, other instances
-of the service are free to take over the record. Kubernetes API mechanisms
-provide update exclusion to prevent race conditions.
-
-This can be implemented by adding another *leader-elector* container to
-each of the kuryr-controller pods:
-
-.. code-block:: yaml
-
-   - image: gcr.io/google_containers/leader-elector:0.5
-     name: leader-elector
-     args:
-     - "--election=kuryr-controller"
-     - "--http=0.0.0.0:${KURYR_CONTROLLER_HA_PORT:-16401}"
-     - "--election-namespace=kube-system"
-     - "--ttl=5s"
-     ports:
-     - containerPort: ${KURYR_CONTROLLER_HA_PORT:-16401}
-       protocol: TCP
-
-This adds a new container to the pod.
-This container performs the leader election and exposes a simple JSON API
-on port 16401 by default. This API is available to the kuryr-controller
-container.
-
-
-Kuryr Controller Implementation
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-The main issue with having multiple controllers is task division. All of
-the controllers are watching the same endpoints and getting the same
-notifications, but those notifications cannot be processed by multiple
-controllers at once, because we would end up with a huge race condition,
-where each controller creates Neutron resources but only one succeeds in
-putting the annotation on the Kubernetes resource it is processing.
-
-This is obviously unacceptable, so as a first step we're implementing A/P
-HA, where only the leader works on the resources and the other instances
-wait in standby. This is implemented by periodically calling the
-leader-elector API to check the current leader. On leader change:
-
-* The pod losing the leadership will stop its Watcher. Please note that it
-  will be stopped gracefully, so all ongoing operations will be completed.
-* The pod gaining the leadership will start its Watcher. Please note that
-  it will get notified about all the previously created Kubernetes
-  resources, but will ignore them as they already have the annotations.
-* Pods not affected by the leadership change will remain in standby mode
-  with their Watchers stopped.
-
-Please note that this means that in HA mode the Watcher will not get
-started on controller startup, but only when the periodic task notices that
-this instance is the leader.
-
-
-Issues
-~~~~~~
-
-There are certain issues related to orphaned OpenStack resources that we
-may hit. Those can happen in two cases:
-
-* A controller instance dies instantly during request processing. Some
-  OpenStack resources were already created, but information about them was
-  not yet annotated onto the Kubernetes resource. Therefore the information
-  is lost and we end up with orphaned OpenStack resources. The new leader
-  will process the Kubernetes resource by creating the resources again.
-* During the leader transition (the short period after a leader died, but
-  before its lease expired and the periodic task on the other controllers
-  noticed that; this shouldn't exceed 10s), some K8s resources are deleted.
-  The new leader will not get the notification about the deletion and those
-  deletions will go unnoticed.
-
-Both of these issues can be tackled by a garbage-collector mechanism that
-periodically looks over the Kubernetes resources and deletes OpenStack
-resources that have no representation in annotations.
-
-The latter of the issues can also be tackled by saving the last seen
-``resourceVersion`` of the watched resources list when stopping the Watcher
-and restarting watching from that point.
-
-
-Future enhancements
-~~~~~~~~~~~~~~~~~~~
-
-It would be useful to implement the garbage collector and the
-``resourceVersion``-based protection mechanism described in the section
-above.
-
-Besides that, to further improve scalability, we should work on an
-Active/Active HA model, where work is divided evenly between all of the
-kuryr-controller instances. This can be achieved e.g. by using a consistent
-hash ring to decide which instance will process which resource.
-
-Potentially this can be extended with support for non-containerized
-deployments by using Tooz and some other tool providing leader election,
-like Consul or Zookeeper.
-
-.. _explained on Kubernetes blog: https://kubernetes.io/blog/2016/01/simple-leader-election-with-kubernetes/
diff --git a/doc/source/devref/index.rst b/doc/source/devref/index.rst
deleted file mode 100644
index 86897b8f8..000000000
--- a/doc/source/devref/index.rst
+++ /dev/null
@@ -1,56 +0,0 @@
-..
-      Licensed under the Apache License, Version 2.0 (the "License"); you may
-      not use this file except in compliance with the License. You may obtain
-      a copy of the License at
-
-          http://www.apache.org/licenses/LICENSE-2.0
-
-      Unless required by applicable law or agreed to in writing, software
-      distributed under the License is distributed on an "AS IS" BASIS,
-      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-      implied. See the License for the specific language governing
-      permissions and limitations under the License.
-
-      Convention for heading levels in Neutron devref:
-      =======  Heading 0 (reserved for the title in a document)
-      -------  Heading 1
-      ~~~~~~~  Heading 2
-      +++++++  Heading 3
-      '''''''  Heading 4
-      (Avoid deeper levels because they do not render well.)
-
-
-===========================
-Design and Developer Guides
-===========================
-
-In the Design and Developer Guides, you will find information on
-kuryr-kubernetes integration plans and design decisions. There are sections
-that contain descriptions and detailed designs of specific integration
-components. Finally, the developer guide includes information about the
-kuryr-kubernetes testing infrastructure.
-
-
-Design documents
-----------------
-.. toctree::
-   :maxdepth: 3
-
-   kuryr_kubernetes_design
-   service_support
-   port_manager
-   vif_handler_drivers_design
-   health_manager
-   high_availability
-   kuryr_kubernetes_versions
-   network_policy
-   updating_pod_resources_api
-   annotation_project_driver
-
-
-Indices and tables
-------------------
-
-* :ref:`genindex`
-* :ref:`modindex`
-* :ref:`search`
diff --git a/doc/source/devref/kuryr_kubernetes_design.rst b/doc/source/devref/kuryr_kubernetes_design.rst
deleted file mode 100644
index 4e6327665..000000000
--- a/doc/source/devref/kuryr_kubernetes_design.rst
+++ /dev/null
@@ -1,328 +0,0 @@
-..
-      This work is licensed under a Creative Commons Attribution 3.0 Unported
-      License.
-
-      http://creativecommons.org/licenses/by/3.0/legalcode
-
-      Convention for heading levels in Neutron devref:
-      =======  Heading 0 (reserved for the title in a document)
-      -------  Heading 1
-      ~~~~~~~  Heading 2
-      +++++++  Heading 3
-      '''''''  Heading 4
-      (Avoid deeper levels because they do not render well.)
-
-===================================
-Kuryr Kubernetes Integration Design
-===================================
-
-Purpose
--------
-
-The purpose of this document is to present the main Kuryr-K8s integration
-components and capture the design decisions currently taken by the Kuryr
-team for each component.
-
-
-Goal Statement
---------------
-
-Enable OpenStack Neutron realization of the Kubernetes networking. Start by
-supporting network connectivity and expand to support advanced features,
-such as Network Policies. In the future, it may be extended to some other
-OpenStack services.
-
-
-Overview
---------
-
-In order to integrate Neutron into Kubernetes networking, 2 components are
-introduced: Controller and CNI Driver. The Controller is a supervisor
-component responsible for maintaining the translation of the
-networking-relevant Kubernetes model into the OpenStack (i.e. Neutron)
-model. This can be considered a centralized service (supporting HA mode in
-the future).
-The CNI driver is responsible for binding Kubernetes pods on worker nodes
-to Neutron ports, ensuring the requested level of isolation. Please see
-below the component view of the integrated system:
-
-.. image:: ../../images/kuryr_k8s_components.png
-   :alt: integration components
-   :align: center
-   :width: 100%
-
-
-Design Principles
------------------
-
-#. Loose coupling between integration components.
-#. Flexible deployment options to support different project, subnet and
-   security groups assignment profiles.
-#. The communication of the pod binding data between the Controller and the
-   CNI driver should rely on existing communication channels, currently
-   through the KuryrPort CRDs.
-#. The CNI Driver should not depend on Neutron. It gets all required
-   details from the Kubernetes API server (currently through Kubernetes
-   CRDs), therefore depending on the Controller to perform its translation
-   tasks.
-#. Allow different Neutron backends to bind Kubernetes pods without code
-   modification. This means that both the Controller and the CNI binding
-   mechanism should allow loading of the vif management and binding
-   components, manifested via configuration. If some vendor requires some
-   extra code, it should be handled in one of the stevedore drivers.
-
-
-Kuryr Controller Design
------------------------
-
-The Controller is responsible for watching Kubernetes API endpoints to make
-sure that the corresponding model is maintained in Neutron. The Controller
-updates K8s resources endpoints' annotations and/or CRDs to keep the
-Neutron details required by the CNI driver, as well as for the model
-mapping persistency.
-
-The Controller is composed of the following components:
-
-
-Watcher
-~~~~~~~
-
-Watcher is a common software component used by both the Controller and the
-CNI driver. Watcher connects to the Kubernetes API. Watcher's
-responsibility is to observe the registered (either on startup or
-dynamically during its runtime) endpoints and invoke the registered
-callback handler (pipeline) to pass all events from the registered
-endpoints. As an example, if a Service is created at the Kubernetes end,
-the ServiceHandler, which is watching the Service objects, uses the Watcher
-to detect the changes on them and calls the right driver for the
-reconciliation of Kubernetes and the needed OpenStack resources.
-
-
-Event Handler
-~~~~~~~~~~~~~
-
-EventHandler is an interface class for Kubernetes event handling. There are
-several 'wrapper' event handlers that can be composed to implement the
-Controller handling pipeline.
-
-**Retry** Event Handler is used for handling specified failures during
-event processing. It can be used to 'wrap' another EventHandler, and in
-case of a specified error it will retry the wrapped event handler
-invocation within a specified timeout. In case of a persistent failure,
-Retry raises the wrapped EventHandler's exception.
-
-**Async** Event Handler is used to execute event handling asynchronously.
-Events are grouped based on the specified 'thread_groups'. Events of the
-same group are processed in order of arrival. A thread group maps to a
-unique K8s resource (each Pod, Service, etc.). Async can be used to 'wrap'
-another EventHandler. Queues per thread group are added dynamically once
-relevant events arrive and removed once the queue is empty.
-
-**LogExceptions** Event Handler suppresses exceptions and sends them to the
-log facility.
-
-**Dispatcher** is an Event Handler that distributes events to registered
-handlers based on the event content and a handler predicate provided during
-event handler registration.
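
As a minimal sketch of this 'wrapper' idea (the class name, signature and
defaults below are illustrative assumptions, not Kuryr's actual
implementation), a Retry-style handler delegates to an inner handler and
keeps retrying on a chosen exception until a deadline passes:

.. code-block:: python

   import time


   class ResourceNotReady(Exception):
       """Raised when a dependent resource is not yet available."""


   class Retry:
       """Wraps another event handler; retries it on selected exceptions
       within a timeout, then re-raises the wrapped handler's exception."""

       def __init__(self, handler, exceptions=(ResourceNotReady,),
                    timeout=60, interval=3):
           self._handler = handler
           self._exceptions = exceptions
           self._timeout = timeout
           self._interval = interval

       def __call__(self, event):
           deadline = time.time() + self._timeout
           while True:
               try:
                   return self._handler(event)
               except self._exceptions:
                   if time.time() >= deadline:
                       raise  # persistent failure: propagate to the caller
                   time.sleep(self._interval)

Composing wrappers then becomes simple nesting, e.g.
``Retry(Async(LogExceptions(handler)))``, mirroring the pipeline
composition described in this section.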
-
-
-ControllerPipeline
-~~~~~~~~~~~~~~~~~~
-
-ControllerPipeline serves as an event dispatcher of the Watcher for the
-Kuryr-K8s controller service. The currently watched endpoints are 'pods',
-'services' and 'endpoints'. Kubernetes resource event handlers (Event
-Consumers) are registered into the Controller Pipeline. There is a special
-EventConsumer, ResourceEventHandler, that provides an API for Kubernetes
-event handling. When a watched event arrives, it is processed by all
-Resource Event Handlers registered for the specific Kubernetes object kind.
-The Pipeline retries the resource event handler invocation in case of a
-ResourceNotReady exception until it succeeds or the (time-based) number of
-retries is reached. Any unrecovered failure is logged without affecting
-other Handlers (of the current and other events). Events of the same group
-(same Kubernetes object) are handled sequentially in the order of arrival.
-Events of different Kubernetes objects are handled concurrently.
-
-.. image:: ../../images/controller_pipeline.png
-    :alt: controller pipeline
-    :align: center
-    :width: 100%
-
-
-ResourceEventHandler
-~~~~~~~~~~~~~~~~~~~~
-
-ResourceEventHandler is a convenience base class for Kubernetes event
-processing. The specific Handler associates itself with a specific
-Kubernetes object kind (through setting OBJECT_KIND) and is expected to
-implement at least one of the methods of the base class to handle at least
-one of the ADDED/MODIFIED/DELETED events of the Kubernetes object. For
-details, see `k8s-api`_. Since both ADDED and MODIFIED event types trigger a
-very similar sequence of actions, the Handler has an 'on_present' method
-that is invoked for both event types. The specific Handler implementation
-should strive to put all the common ADDED and MODIFIED event handling logic
-in this method to avoid code duplication.
-
-
-Pluggable Handlers
-~~~~~~~~~~~~~~~~~~
-
-Starting with the Rocky release, Kuryr-Kubernetes includes a pluggable
-interface for the Kuryr controller handlers.
-
-The pluggable handlers framework allows:
-
-- Using externally provided handlers.
-- Controlling which handlers should be active.
-
-To control which Kuryr Controller handlers should be active, the selected
-handlers need to be included in kuryr.conf at the 'kubernetes' section.
-If not specified, the Kuryr Controller will run the default handlers, which
-currently include the following:
-
-====================== =========================
- Handler                Kubernetes resource
-====================== =========================
-vif                    Pod
-kuryrport              KuryrPort CRD
-endpoints              Endpoints
-service                Service
-kuryrloadbalancer      KuryrLoadBalancer CRD
-kuryrnetwork           KuryrNetwork CRD
-namespace              Namespaces
-kuryrnetworkpolicy     KuryrNetworkPolicy CRD
-podlabel               Pod
-policy                 NetworkPolicy
-machine                Machine
-kuryrnetworkpopulation KuryrNetwork CRD
-====================== =========================
-
-For example, to enable only the 'vif' controller handler we should set the
-following in kuryr.conf:
-
-.. code-block:: ini
-
-    [kubernetes]
-    enabled_handlers=vif,kuryrport
-
-Note that we have to specify vif and kuryrport together, since currently
-those two handlers work together.
-
-
-Providers
-~~~~~~~~~
-
-Providers (Drivers) are used by ResourceEventHandlers to manage specific
-aspects of the Kubernetes resource in the OpenStack domain. For example,
-creating a Kubernetes Pod will require a Neutron port to be created on a
-specific network with the proper security groups applied to it. 
There will be
-dedicated Drivers for Project, Subnet, Port and Security Group settings in
-Neutron. For instance, the Handler that processes pod events will use
-PodVIFDriver, PodProjectDriver, PodSubnetsDriver and
-PodSecurityGroupsDriver. The Drivers model is introduced in order to allow
-flexibility in mapping the Kubernetes model to OpenStack. There can be
-different drivers that do Neutron resource management, e.g. creating
-resources on demand or grabbing them from a precreated pool. There can be
-different drivers for Project management, e.g. a single tenant or multiple
-ones. The same goes for the other drivers. There are drivers that handle the
-Pod based on the project, subnet and security groups specified via
-configuration settings during the cluster deployment phase.
-
-
-NeutronPodVifDriver
-~~~~~~~~~~~~~~~~~~~
-
-A PodVifDriver subclass should implement the request_vif, release_vif and
-activate_vif methods. In case request_vif returns a VIF object in the down
-state, the Controller will invoke activate_vif. The VIF 'active' state is
-required by the CNI driver to complete pod handling.
-The NeutronPodVifDriver is the default driver: it creates a Neutron port
-upon Pod addition and deletes the port upon Pod removal.
-
-
-CNI Driver
-----------
-
-The CNI driver is just a thin client that passes CNI ADD and DEL requests to
-a kuryr-daemon instance via its HTTP API. It is a simple executable that is
-supposed to be called by kubelet's CNI machinery. Since the Train release
-the CNI driver has an alternative golang implementation (see the kuryr_cni
-directory) to make injecting it onto the Kubernetes node from the kuryr-cni
-pod easier. This enables Kuryr to work on K8s deployments that do not have
-Python or curl on the Kubernetes nodes. Compatibility between the Python and
-golang CNI drivers is supposed to be maintained.
-
-.. _cni-daemon:
-
-
-CNI Daemon
-----------
-
-The CNI Daemon is a service that should run on every Kubernetes node.
-Starting from the Rocky release it should be seen as the default supported
-deployment option, and running without it is impossible starting from the
-Stein release. It is responsible for watching pod events on the node it is
-running on, answering calls from the CNI Driver and attaching VIFs when they
-are ready. In the future it will also keep information about pooled ports in
-memory. This helps to limit the number of processes spawned when creating
-multiple Pods, as a single Watcher is enough for each node and the CNI
-Driver will only wait on a local network socket for the response from the
-Daemon.
-
-Currently the CNI Daemon consists of two processes, i.e. Watcher and Server.
-The processes communicate with each other using Python's
-``multiprocessing.Manager`` and a shared dictionary object. The Watcher is
-responsible for extracting VIF information from KuryrPort CRD events and
-putting it into the shared dictionary. The Server is a regular WSGI server
-that answers CNI Driver calls. When a CNI request comes in, the Server waits
-for the VIF object to appear in the shared dictionary. As CRD data is read
-from the Kubernetes API and added to the registry by the Watcher thread, the
-Server will eventually get the VIF it needs to connect for a given pod. It
-then waits for the VIF to become active before returning to the CNI Driver.
-
-
-Communication
-~~~~~~~~~~~~~
-
-The CNI Daemon Server starts an HTTP server on a local network socket
-(``127.0.0.1:5036`` by default). Currently the server listens for two API
-calls. Both calls load the ``CNIParameters`` from the body of the call (it
-is expected to be JSON).
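-
-For illustration, a client-side call to one of these endpoints (documented
-below) could look roughly like the following sketch. The payload shown is a
-simplified approximation of the ``CNIParameters`` structure, not its exact
-schema:
-
-.. code-block:: python
-
-    import json
-    import urllib.request
-
-    # Rough approximation of CNIParameters: the real object carries the
-    # CNI_* environment variables and the network configuration passed
-    # to the CNI driver by kubelet.
-    params = {
-        'CNI_COMMAND': 'ADD',
-        'CNI_CONTAINERID': 'abcdef0123456789',
-        'CNI_NETNS': '/proc/4242/ns/net',
-        'CNI_IFNAME': 'eth0',
-        'CNI_ARGS': 'K8S_POD_NAMESPACE=default;K8S_POD_NAME=mypod',
-    }
-
-    req = urllib.request.Request(
-        'http://127.0.0.1:5036/addNetwork',
-        data=json.dumps(params).encode(),
-        headers={'Content-Type': 'application/json'})
-    with urllib.request.urlopen(req) as resp:
-        # /addNetwork returns a serialized os_vif VIF object that can be
-        # deserialized with o.vo's obj_from_primitive().
-        vif_primitive = json.loads(resp.read())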
-
-For reference, see the updated pod creation flow diagram:
-
-.. image:: ../../images/pod_creation_flow_daemon.png
-    :alt: Controller-CNI-daemon interaction
-    :align: center
-    :width: 100%
-
-
-/addNetwork
-+++++++++++
-
-**Function**: The equivalent of running ``K8sCNIPlugin.add``.
-
-**Return code:** 202 Accepted
-
-**Return body:** Returns VIF data in JSON form. This is a serialized
-oslo.versionedobjects object from the ``os_vif`` library. On the other side
-it can be deserialized using o.vo's ``obj_from_primitive()`` method.
-
-
-/delNetwork
-+++++++++++
-
-**Function**: The equivalent of running ``K8sCNIPlugin.delete``.
-
-**Return code:** 204 No Content
-
-**Return body:** None.
-
-When running in daemonized mode, the CNI Driver will call the CNI Daemon
-over those APIs to perform its tasks and wait on the socket for the result.
-
-
-Kubernetes Documentation
-------------------------
-
-The `Kubernetes reference documentation`_ is a great source for finding more
-details about the Kubernetes API, CLIs, and tools.
-
-
-.. _k8s-api: https://github.com/kubernetes/kubernetes/blob/release-1.4/docs/devel/api-conventions.md#types-kinds
-.. _Kubernetes reference documentation: https://kubernetes.io/docs/reference/
diff --git a/doc/source/devref/kuryr_kubernetes_versions.rst b/doc/source/devref/kuryr_kubernetes_versions.rst
deleted file mode 100644
index 2f9726fb7..000000000
--- a/doc/source/devref/kuryr_kubernetes_versions.rst
+++ /dev/null
@@ -1,43 +0,0 @@
-===============================================
-Kubernetes and OpenShift version support matrix
-===============================================
-
-This document maintains updated documentation about which Kubernetes and
-OpenShift versions are supported at each Kuryr-Kubernetes release.
-
-
-.. note::
-
-   In general Kuryr should work fine with older versions of Kubernetes and
-   OpenShift as well, as it only depends on APIs that are quite stable in
-   Kubernetes itself. However, we try to limit the number of supported
-   versions, as the Kubernetes policy is to only support the last 3 minor
-   releases.
-
-.. note::
-
-   Kuryr-Kubernetes follows the *cycle-with-intermediary* release model and
-   that's why there are multiple minor releases per single OpenStack
-   release. Going forward it is possible that Kuryr-Kubernetes will switch
-   to the *independent* release model, which would completely untie it from
-   OpenStack releases. This is because it seems to be easier to follow
-   Kubernetes releases than OpenStack releases.
-
-.. warning::
-
-   In most cases only the latest supported version is tested in the CI/CD
-   system.
- -======================== ====================================== ======================== -Kuryr-Kubernetes version Kubernetes version OpenShift Origin version -======================== ====================================== ======================== -master (Victoria) v1.16.x, v1.17.x, v1.18.x 4.3, 4.4, 4.5 -2.0.0 (Ussuri) v1.14.x, v1.15.x, v1.16.x 3.11, 4.2 -1.1.0 (Train) v1.13.x, v1.14.x, v1.15.x 3.9, 3.10, 3.11, 4.2 -0.6.x, 1.0.0 (Stein) v1.11.x, v1.12.x, v1.13.x 3.9, 3.10, 3.11, 4.2 -0.5.2-3 (Rocky) v1.9.x, v1.10.x, v1.11.x, v1.12.x 3.9, 3.10 -0.5.0-1 (Rocky) v1.9.x, v1.10.x 3.9, 3.10 -0.4.x (Queens) v1.8.x 3.7 -0.3.0 (Queens) v1.6.x, v1.8.x No support -0.2.x (Pike) v1.4.x, v1.6.x No support -0.1.0 (Pike) v1.3.x, v1.4.x No support -======================== ====================================== ======================== diff --git a/doc/source/devref/network_policy.rst b/doc/source/devref/network_policy.rst deleted file mode 100644 index ce01c3c5c..000000000 --- a/doc/source/devref/network_policy.rst +++ /dev/null @@ -1,521 +0,0 @@ -.. - This work is licensed under a Creative Commons Attribution 3.0 Unported - License. - - http://creativecommons.org/licenses/by/3.0/legalcode - - Convention for heading levels in Neutron devref: - ======= Heading 0 (reserved for the title in a document) - ------- Heading 1 - ~~~~~~~ Heading 2 - +++++++ Heading 3 - ''''''' Heading 4 - (Avoid deeper levels because they do not render well.) - -============== -Network Policy -============== - -Purpose --------- - -The purpose of this document is to present how Network Policy is supported by -Kuryr-Kubernetes. - - -Overview --------- - -Kubernetes supports a Network Policy object to express ingress and egress rules -for pods. Network Policy reacts on labels to qualify multiple pods, and defines -rules based on different labeling and/or CIDRs. When combined with a networking -plugin, those policy objects are enforced and respected. - - -Proposed Solution ------------------ - -Kuryr-Kubernetes relies on Neutron security groups and security group rules to -enforce a Network Policy object, more specifically one security group per policy -with possibly multiple rules. Each object has a namespace scoped Network Policy -CRD that stores all OpenStack related resources on the Kubernetes side, avoiding -many calls to Neutron and helping to differentiate between the current Kubernetes -status of the Network Policy and the last one Kuryr-Kubernetes enforced. - -The network policy CRD has the following format: - -.. code-block:: yaml - - apiVersion: openstack.org/v1 - kind: KuryrNetworkPolicy - metadata: - ... - spec: - egressSgRules: - - sgRule: - ... - ingressSgRules: - - sgRule: - ... - podSelector: - ... - status: - securityGroupId: ... - podSelector: ... - securityGroupRules: ... - -A new handler has been added to react to Network Policy events, and the existing -ones, for instance service/pod handlers, have been modified to account for the -side effects/actions of when a Network Policy is being enforced. - -.. 
note::
-
-   Kuryr supports a network policy that contains:
-
-   * Ingress and Egress rules
-   * namespace selector and pod selector, defined with match labels or match
-     expressions, a mix of namespace and pod selector, ip block
-   * named port
-
-
-New handlers and drivers
-~~~~~~~~~~~~~~~~~~~~~~~~
-
-The Network Policy handler
-++++++++++++++++++++++++++
-
-This handler is responsible for triggering the Network Policy Spec
-processing, and the creation or removal of a security group with the
-appropriate security group rules. It also applies the security group to the
-pods and services affected by the policy.
-
-
-The Pod Label handler
-+++++++++++++++++++++
-
-This new handler is responsible for triggering the update of a security
-group rule upon pod label changes, and its enforcement on the pod port and
-service.
-
-
-The Network Policy driver
-++++++++++++++++++++++++++
-
-This is the main driver. It ensures a Network Policy by processing the Spec
-and creating or updating the security group with the appropriate security
-group rules.
-
-
-The Network Policy Security Group driver
-++++++++++++++++++++++++++++++++++++++++
-
-It is responsible for creating, deleting, or updating security group rules
-for pods, namespaces or services based on the different Network Policies.
-
-
-Modified handlers and drivers
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-The KuryrPort handler
-+++++++++++++++++++++
-
-As network policy rules can be defined based on pod labels, this handler has
-been enhanced to trigger a security group rule creation or deletion,
-depending on the type of pod event, if the pod is affected by the network
-policy and if a new security group rule is needed. It also triggers the
-translation of the pod rules to the affected service. Note that KuryrPort
-takes over most of the VIF handler functionality, although it has to be
-enabled together with the VIF handler.
-
-
-The Namespace handler
-+++++++++++++++++++++
-
-Just as pod labels, namespace labels can also define a rule in a Network
-Policy. To account for this, the namespace handler has been extended to
-trigger the creation, deletion or update of a security group rule, in case
-the namespace affects a Network Policy rule, and the translation of the rule
-to the affected service.
-
-
-The Namespace Subnet driver
-+++++++++++++++++++++++++++
-
-In case of a namespace event and a Network Policy enforcement based on the
-namespace, this driver creates a subnet for this namespace, and restricts
-the number of security group rules for the Network Policy to just one with
-the subnet CIDR, instead of one for each pod in the namespace.
-
-
-The LBaaS driver
-++++++++++++++++
-
-To restrict the incoming traffic to the backend pods, the LBaaS driver has
-been enhanced to translate pod rules to the listener port, and to react to
-Service port updates. E.g., when the target port is not allowed by the
-policy enforced on the pod, the rule should not be added.
-
-
-The VIF Pool driver
-+++++++++++++++++++
-
-The VIF Pool driver is responsible for updating the security group applied
-to the pods' ports. It has been modified to accommodate the fact that with
-Network Policies the pods' ports change their security group while being
-used. This means the original pool does not fit them anymore, which would
-result in useless pools and in ports reapplying the original security group.
-To avoid this, the security group id is removed from the pool definition,
-merging all pools with the same network, project and host id. 
Thus, if there
-are no ports in the pool with the needed security group id(s), one of the
-existing ports in the pool is updated to match the requested security group
-id.
-
-
-Use cases examples
-~~~~~~~~~~~~~~~~~~
-
-This section describes some scenarios with a Network Policy being enforced,
-which Kuryr components get triggered and what resources are created.
-
-
-Deny all incoming traffic
-+++++++++++++++++++++++++
-
-By default, Kubernetes clusters do not restrict traffic. Only once a network
-policy is enforced in a namespace does all traffic not explicitly allowed by
-the policy become denied, as specified in the following policy:
-
-.. code-block:: yaml
-
-    apiVersion: networking.k8s.io/v1
-    kind: NetworkPolicy
-    metadata:
-      name: default-deny
-    spec:
-      podSelector: {}
-      policyTypes:
-      - Ingress
-
-The following CRD is the translation of the policy rules to security group
-rules. No ingress rule was created, which means ingress traffic is blocked,
-and since there is no restriction for egress traffic, it is allowed to
-everywhere. Note that the same happens when no ``policyType`` is defined,
-since all policies are assumed to affect Ingress.
-
-.. code-block:: yaml
-
-    apiVersion: openstack.org/v1
-    kind: KuryrNetworkPolicy
-    metadata:
-      name: default-deny
-      namespace: default
-      ...
-    spec:
-      egressSgRules:
-      - sgRule:
-          description: Kuryr-Kubernetes NetPolicy SG rule
-          direction: egress
-          ethertype: IPv4
-          security_group_id: 20d9b623-f1e0-449d-95c1-01624cb3e315
-      ingressSgRules: []
-      podSelector:
-        ...
-    status:
-      securityGroupId: 20d9b623-f1e0-449d-95c1-01624cb3e315
-      securityGroupRules: ...
-      podSelector: ...
-
-
-Allow traffic from pod
-++++++++++++++++++++++
-
-The following Network Policy specification has a single rule allowing
-traffic on a single port from the group of pods that have the label
-``role=monitoring``.
-
-.. code-block:: yaml
-
-    apiVersion: networking.k8s.io/v1
-    kind: NetworkPolicy
-    metadata:
-      name: allow-monitoring-via-pod-selector
-    spec:
-      podSelector:
-        matchLabels:
-          app: server
-      policyTypes:
-      - Ingress
-      ingress:
-      - from:
-        - podSelector:
-            matchLabels:
-              role: monitoring
-        ports:
-        - protocol: TCP
-          port: 8080
-
-Create the following pod with the label ``role=monitoring``:
-
-.. code-block:: console
-
-    $ kubectl run monitor --image=busybox --restart=Never --labels=role=monitoring
-
-The generated CRD contains an ingress rule allowing traffic on port 8080
-from the created pod, and an egress rule allowing traffic to everywhere,
-since no restriction was enforced.
-
-.. code-block:: yaml
-
-    apiVersion: openstack.org/v1
-    kind: KuryrNetworkPolicy
-    metadata:
-      name: allow-monitoring-via-pod-selector
-      namespace: default
-      ...
-    spec:
-      egressSgRules:
-      - sgRule:
-          description: Kuryr-Kubernetes NetPolicy SG rule
-          direction: egress
-          ethertype: IPv4
-      ingressSgRules:
-      - namespace: default
-        sgRule:
-          description: Kuryr-Kubernetes NetPolicy SG rule
-          direction: ingress
-          ethertype: IPv4
-          port_range_max: 8080
-          port_range_min: 8080
-          protocol: tcp
-          remote_ip_prefix: 10.0.1.143
-      podSelector:
-        ...
-    status:
-      securityGroupId: 7f0ef8c2-4846-4d8c-952f-94a9098fff17
-      securityGroupRules: ...
-      podSelector: ...
-
-
-Allow traffic from namespace
-++++++++++++++++++++++++++++
-
-The following network policy only allows ingress traffic from namespaces
-with the label ``purpose=test``:
-
-.. 
code-block:: yaml
-
-    apiVersion: networking.k8s.io/v1
-    kind: NetworkPolicy
-    metadata:
-      name: allow-test-via-ns-selector
-    spec:
-      podSelector:
-        matchLabels:
-          app: server
-      policyTypes:
-      - Ingress
-      ingress:
-      - from:
-        - namespaceSelector:
-            matchLabels:
-              purpose: test
-        ports:
-        - protocol: TCP
-          port: 8080
-
-Create a namespace and label it with ``purpose=test``:
-
-.. code-block:: console
-
-    $ kubectl create namespace dev
-    $ kubectl label namespace dev purpose=test
-
-The resulting CRD has an ingress rule allowing traffic from the namespace
-CIDR on the specified port, and an egress rule allowing traffic to
-everywhere.
-
-.. code-block:: yaml
-
-    apiVersion: openstack.org/v1
-    kind: KuryrNetworkPolicy
-    name: allow-test-via-ns-selector
-    namespace: default
-    ...
-    spec:
-      egressSgRules:
-      - sgRule:
-          description: Kuryr-Kubernetes NetPolicy SG rule
-          direction: egress
-          ethertype: IPv4
-      ingressSgRules:
-      - namespace: dev
-        sgRule:
-          description: Kuryr-Kubernetes NetPolicy SG rule
-          direction: ingress
-          ethertype: IPv4
-          port_range_max: 8080
-          port_range_min: 8080
-          protocol: tcp
-          remote_ip_prefix: 10.0.1.192/26
-      podSelector:
-        ...
-    status:
-      securityGroupId: c480327c-2db4-4eb6-af1e-eeb0ce9b46c9
-      securityGroupRules: ...
-      podSelector: ...
-
-.. note::
-
-   Only when using the Amphora Octavia provider and Services with selectors
-   do the Load Balancer security groups need to be rechecked when a network
-   policy that affects ingress traffic is created, and also every time a pod
-   or namespace is created. Network Policy is not enforced on Services
-   without selectors.
-
-Allow traffic to a Pod in a Namespace
-+++++++++++++++++++++++++++++++++++++
-
-The following network policy only allows egress traffic from Pods with the
-label ``app=client`` to Pods with the label ``app=demo`` in Namespaces with
-the label ``app=demo``:
-
-.. code-block:: yaml
-
-    apiVersion: networking.k8s.io/v1
-    kind: NetworkPolicy
-    metadata:
-      name: block-egress
-      namespace: client
-    spec:
-      podSelector:
-        matchLabels:
-          app: client
-      policyTypes:
-      - Egress
-      egress:
-      - to:
-        - namespaceSelector:
-            matchLabels:
-              app: demo
-          podSelector:
-            matchLabels:
-              app: demo
-
-The resulting CRD has ingress rules allowing traffic from everywhere and
-egress rules only to the selected Pod and the Service that points to it.
-
-.. code-block:: yaml
-
-    apiVersion: openstack.org/v1
-    kind: KuryrNetworkPolicy
-    metadata: ...
-    spec:
-      egressSgRules:
-      - namespace: demo
-        sgRule:
-          description: Kuryr-Kubernetes NetPolicy SG rule
-          direction: egress
-          ethertype: IPv4
-          port_range_max: 65535
-          port_range_min: 1
-          protocol: tcp
-          remote_ip_prefix: 10.0.2.120
-      - sgRule:
-          description: Kuryr-Kubernetes NetPolicy SG rule
-          direction: egress
-          ethertype: IPv4
-          port_range_max: 65535
-          port_range_min: 1
-          protocol: tcp
-          remote_ip_prefix: 10.0.0.144
-      ingressSgRules:
-      - sgRule:
-          description: Kuryr-Kubernetes NetPolicy SG rule
-          direction: ingress
-          ethertype: IPv4
-      - sgRule:
-          description: Kuryr-Kubernetes NetPolicy SG rule
-          direction: ingress
-          ethertype: IPv6
-      podSelector:
-        matchLabels:
-          app: client
-      policyTypes:
-      - Egress
-    status:
-      podSelector:
-        matchLabels:
-          app: client
-      securityGroupId: 322a347b-0684-4aea-945a-5f204361a64e
-      securityGroupRules: ...
-
-.. note::
-
-   A Network Policy egress rule creates a Security Group rule corresponding
-   to a Service (with or without selectors) that points to the selected Pod.
-
-
-Create network policy flow
-++++++++++++++++++++++++++
-
-.. 
image:: ../../images/create_network_policy_flow.svg - :alt: Network Policy creation flow - :align: center - :width: 100% - - -Create pod flow -+++++++++++++++ - -The following diagram only covers the implementation part that affects -network policy. - -.. image:: ../../images/update_network_policy_on_pod_creation.svg - :alt: Pod creation flow - :align: center - :width: 100% - - -Network policy rule definition -++++++++++++++++++++++++++++++ - -======================== ======================= ============================================== -NamespaceSelector podSelector Expected result -======================== ======================= ============================================== -namespaceSelector: ns1 podSelector: pod1 Allow traffic from pod1 at ns1 -namespaceSelector: ns1 podSelector: {} Allow traffic from all pods at ns1 -namespaceSelector: ns1 none Allow traffic from all pods at ns1 -namespaceSelector: {} podSelector: pod1 Allow traffic from pod1 from all namespaces -namespaceSelector: {} podSelector: {} Allow traffic from all namespaces -namespaceSelector: {} none Allow traffic from all namespaces -none podSelector: pod1 Allow traffic from pod1 from NP namespace -none podSelector: {} Allow traffic from all pods from NP namespace -======================== ======================= ============================================== - -======================== ================================================ -Rules definition Expected result -======================== ================================================ -No FROM (or from: []) Allow traffic from all pods from all namespaces -Ingress: {} Allow traffic from all namespaces -ingress: [] Deny all traffic -No ingress Blocks all traffic -======================== ================================================ - - -Policy types definition -+++++++++++++++++++++++ - -=============== ===================== ======================= ====================== -PolicyType Spec Ingress/Egress Ingress generated rules Egress generated rules -=============== ===================== ======================= ====================== -none none BLOCK ALLOW -none ingress Specific rules ALLOW -none egress Block Specific rules -none ingress, egress Specific rules Specific rules -ingress none Block ALLOW -ingress ingress Specific rules ALLOW -egress none ALLOW BLOCK -egress egress ALLOW Specific rules -Ingress, egress none BLOCK BLOCK -Ingress, egress ingress Specific rules BLOCK -Ingress, egress egress BLOCK Specific rules -Ingress, egress ingress,egress Specific rules Specific rules -=============== ===================== ======================= ====================== diff --git a/doc/source/devref/port_manager.rst b/doc/source/devref/port_manager.rst deleted file mode 100644 index a504c7db9..000000000 --- a/doc/source/devref/port_manager.rst +++ /dev/null @@ -1,195 +0,0 @@ -.. - This work is licensed under a Creative Commons Attribution 3.0 Unported - License. - - http://creativecommons.org/licenses/by/3.0/legalcode - - Convention for heading levels in Neutron devref: - ======= Heading 0 (reserved for the title in a document) - ------- Heading 1 - ~~~~~~~ Heading 2 - +++++++ Heading 3 - ''''''' Heading 4 - (Avoid deeper levels because they do not render well.) - -==================================== -Kuryr Kubernetes Port Manager Design -==================================== - -Purpose -------- - -The purpose of this document is to present Kuryr Kubernetes Port Manager, -capturing the design decision currently taken by the kuryr team. 
-
-The main purpose of the Port Manager is to handle Neutron resources, i.e.,
-port creation and deletion. The main idea behind it is to minimize the
-number of calls to Neutron by ensuring port reuse as well as by performing
-bulk actions, e.g., creating/deleting several ports within the same Neutron
-call.
-
-
-Overview
---------
-
-Interactions between Kuryr and Neutron may take more time than desired from
-the container management perspective.
-
-Some of these interactions between Kuryr and Neutron can be optimized, for
-instance by maintaining pre-created pools of Neutron port resources instead
-of asking for their creation during the pod lifecycle pipeline.
-
-As an example, every time a container is created or deleted, there is a call
-from Kuryr to Neutron to create/remove the port used by the container. To
-optimize this interaction and speed up both container creation and deletion,
-the Kuryr-Kubernetes Port Manager will take care of both Neutron port
-creation beforehand and Neutron port deletion afterwards. This will
-consequently remove the waiting time for:
-
-- Creating ports and waiting for them to become active when booting
-  containers
-- Deleting ports when removing containers
-
-
-Proposed Solution
------------------
-
-The Port Manager will be in charge of handling Neutron ports. The main
-difference from the current implementation resides in when and how these
-ports are managed. The idea is to minimize the number of calls to the
-Neutron server by reusing already created ports as well as by
-creating/deleting them in bulk requests.
-
-This design focuses on Neutron port management, but a similar optimization
-can be done for other Neutron resources, and consequently new resource
-managers can be added.
-
-Ports Manager
-~~~~~~~~~~~~~
-
-The Port Manager will handle different pools of Neutron ports:
-
-- Available pools: There will be a pool of ports for each tenant, host (or
-  trunk port for the nested case) and security group, ready to be used by
-  the pods. Note that at the beginning there are no pools. Once a pod is
-  created at a given host/VM by a tenant, with a specific security group, a
-  corresponding pool gets created and populated with the desired minimum
-  amount of ports. Note that the Neutron port quota needs to be considered
-  when configuring the parameters of the pool, i.e., the minimum and/or
-  maximum size of the pools as well as the size of the bulk creation
-  requests.
-- Recyclable pool: Instead of deleting the port during pod removal, it will
-  just be included into this pool. The ports in this pool will later be
-  recycled by the Port Manager and put back into the corresponding available
-  pool.
-
-The Port Manager will handle the available pools, ensuring that at least X
-ports are ready to be used at each existing pool, i.e., for each security
-group and tenant which already has a pod on it. Port creation at each
-available_pool will be handled in batches, i.e., instead of creating one
-port at a time, a configurable amount of them will be created altogether.
-On each pod creation the Port Manager will check that the remaining number
-of ports in the specific pool is above X; otherwise it creates Y extra
-ports for that pool (with the specific tenant and security group). Note
-that both X and Y are configurable.
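-
-The replenishment logic can be summarized with the following sketch. The
-option and function names here are illustrative placeholders (``X`` mapping
-to a minimum pool size and ``Y`` to a bulk batch size), not the actual Kuryr
-configuration options or code:
-
-.. code-block:: python
-
-    POOL_MIN_SIZE = 5     # "X": minimum number of ready ports per pool
-    POOL_BATCH_SIZE = 10  # "Y": ports created per bulk Neutron request
-
-
-    def bulk_create_ports(pool_key, count):
-        """Placeholder for a single bulk port-create call to Neutron."""
-        raise NotImplementedError
-
-
-    def request_port(pools, pool_key):
-        """Take a port from a pool, replenishing it in bulk when low.
-
-        pool_key identifies the pool, e.g. a (tenant, host or trunk port,
-        security groups) tuple.
-        """
-        pool = pools.setdefault(pool_key, [])
-        if len(pool) < POOL_MIN_SIZE:
-            # One bulk call to Neutron instead of one call per pod.
-            pool.extend(bulk_create_ports(pool_key, POOL_BATCH_SIZE))
-        return pool.pop()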
-
-Thanks to having the available ports pool, during the container creation
-process, instead of calling Neutron port_create and then waiting for the
-port to become active, a port will be taken from the right available_pool
-(hence, no need to call Neutron) and then the port info will be updated with
-the proper container name (i.e., a call to Neutron port_update). Thus,
-thanks to the Port Manager, at least two calls to Neutron are skipped (port
-create and polling while waiting for the port to become ACTIVE), while doing
-an extra one (update), which is faster than the other ones. Similarly, for
-the port deletion we save the call to remove the port, as it is just
-included in the recyclable pool.
-
-The port cleanup actions return ports to the corresponding available_pool
-after re-applying security groups and changing the device name to
-'available-port'. A maximum limit for the pool can be specified to ensure
-that once the corresponding available_pool reaches a certain size, the ports
-get deleted instead of recycled. Note that this upper limit can be disabled
-by setting it to 0. In addition, a Time-To-Live (TTL) could be set on the
-ports in the pool, so that if they are not used during a certain period of
-time, they are removed -- if and only if the available_pool size is still
-larger than the target minimum.
-
-
-Recovery of pool ports upon Kuryr-Controller restart
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-If the Kuryr-Controller is restarted, the pre-created ports will still exist
-on the Neutron side but the Kuryr-Controller will be unaware of them, thus
-pre-creating more upon pod allocation requests. To avoid having these
-existing but unused ports, a mechanism is needed to either delete them after
-the controller's reboot, or to obtain their information and re-include them
-into their corresponding pool.
-
-For the baremetal (NeutronVIF) case, as the ports are not attached to any
-hosts (at least not until CNI support is included), there is not enough
-information to decide which pool should be selected for adding the port. For
-simplicity, and as a temporary solution until CNI support is developed, the
-implemented mechanism will find the previously created ports by looking at
-the existing Neutron ports and filtering them by device_owner and name,
-which should be compute:kuryr and available-port, respectively. Once these
-ports are obtained, they are deleted to release unused Neutron resources and
-avoid problems related to port quota limits.
-
-By contrast, it is possible to obtain all the needed information for the
-subports previously created for the nested (VLAN+Trunk) case, as they are
-still attached to their respective trunk ports. Therefore these ports,
-instead of being deleted, will be re-added to their corresponding pools.
-To do this, the Neutron ports are filtered by device_owner (trunk:subport in
-this case) and name (available-port), and then we iterate over the subports
-attached to each existing trunk port to find where the filtered ports are
-attached and obtain all the needed information to re-add them into the
-corresponding pools.
-
-
-Kuryr Controller Impact
-~~~~~~~~~~~~~~~~~~~~~~~
-
-A new VIF Pool driver is created to manage the port pools upon pod creation
-and deletion events. It will ensure that a pool with at least X ports is
-available for each tenant, host or trunk port, and security group, when the
-first request to create a pod with these attributes happens. 
Similarly, it
-will ensure that ports are recycled from the recyclable pool after pod
-deletion and are put back in the corresponding available_pool to be reused.
-Thanks to this, Neutron calls are skipped and the ports of the pools are
-used instead. If the corresponding pool is empty, a ResourceNotReady
-exception will be triggered and the pool will be repopulated.
-
-In addition to the handler modification and the new pool drivers, there are
-changes related to the VIF drivers. The VIF drivers (neutron-vif and nested)
-will be extended to support bulk creation of Neutron ports, and similarly
-for the VIF object requests.
-
-
-Future enhancement
-++++++++++++++++++
-
-The VIFHandler needs to be aware of the new Pool driver, which will load the
-respective VIF driver to be used. In a sense, the Pool driver will be a
-proxy to the VIF driver, while also managing the pools. When a mechanism to
-load and set the VIFHandler drivers is in place, this will be reverted so
-that the VIFHandlers become unaware of the pool drivers.
-
-
-Kuryr CNI Impact
-~~~~~~~~~~~~~~~~
-
-For the nested VLAN case, the subports in the different pools are already
-attached to the VMs' trunk ports, therefore they are already in ACTIVE
-status. However, for the generic case the ports are not really bound to
-anything (yet), therefore their status will be DOWN. In order to keep the
-ports returned to the pool in ACTIVE status, we will implement another pool
-on the CNI side for the generic case. This solution could be different for
-different SDN controllers. The main idea is that they should keep the ports
-in ACTIVE state without allowing network traffic through them. For instance,
-for the Neutron reference implementation, this pool will maintain a pool of
-veth devices at each host by connecting them to a recyclable namespace, so
-that the OVS agent sees them as 'still connected' and maintains their ACTIVE
-status. This modification must ensure that the OVS (br-int) ports where
-these veth devices are connected are not deleted after container deletion by
-the CNI.
-
-
-Future enhancement
-++++++++++++++++++
-
-The CNI modifications will be implemented in a second phase.
diff --git a/doc/source/devref/service_support.rst b/doc/source/devref/service_support.rst
deleted file mode 100644
index 278f37dae..000000000
--- a/doc/source/devref/service_support.rst
+++ /dev/null
@@ -1,157 +0,0 @@
-..
-    This work is licensed under a Creative Commons Attribution 3.0 Unported
-    License.
-
-    http://creativecommons.org/licenses/by/3.0/legalcode
-
-    Convention for heading levels in Neutron devref:
-    =======  Heading 0 (reserved for the title in a document)
-    -------  Heading 1
-    ~~~~~~~  Heading 2
-    +++++++  Heading 3
-    '''''''  Heading 4
-    (Avoid deeper levels because they do not render well.)
-
-============================================
-Kuryr Kubernetes Services Integration Design
-============================================
-
-Purpose
--------
-
-The purpose of this document is to present how Kubernetes Service is
-supported by the Kuryr integration and to capture the design decisions
-currently taken by the Kuryr team.
-
-
-Overview
---------
-
-A Kubernetes Service is an abstraction which defines a logical set of Pods
-and a policy by which to access them. Service is a Kubernetes managed API
-object. For Kubernetes-native applications, Kubernetes offers an Endpoints
-API that is updated whenever the set of Pods in a Service changes. For
-detailed information please refer to `Kubernetes service`_. 
Kubernetes supports services with
-the kube-proxy component that runs on each node; see `Kube-Proxy`_.
-
-
-Proposed Solution
------------------
-
-A Kubernetes service in its essence is a load balancer across the Pods that
-fit the service selection. Kuryr's choice is to support Kubernetes services
-by using the Neutron LBaaS service. The initial implementation is based on
-the OpenStack LBaaSv2 API, so it is compatible with any LBaaSv2 API
-provider.
-
-In order to be compatible with Kubernetes networking, Kuryr-Kubernetes makes
-sure that service Load Balancers have access to the Pods' Neutron ports.
-This may be affected once Kubernetes Network Policies are supported. Oslo
-versioned objects are used to keep the translation details in the Kubernetes
-entities' annotations. This will allow future changes to be backward
-compatible.
-
-
-Data Model Translation
-~~~~~~~~~~~~~~~~~~~~~~
-
-A Kubernetes service is mapped to an LBaaSv2 Load Balancer with associated
-Listeners and Pools. Service endpoints are mapped to Load Balancer Pool
-members.
-
-
-Kuryr Controller Impact
-~~~~~~~~~~~~~~~~~~~~~~~
-
-Three Kubernetes Event Handlers are added to the Controller pipeline.
-
-- ServiceHandler manages Kubernetes Service events. Based on the service
-  spec and metadata details, it creates or updates the KuryrLoadBalancer
-  CRD, more specifically the spec part of the CRD, with details to be used
-  for translation to the LBaaSv2 model, such as tenant-id, subnet-id, IP
-  address and security groups.
-- EndpointsHandler is responsible for adding endpoints subsets to the
-  KuryrLoadBalancer CRD. If the Endpoints object is created before the
-  Service, this handler creates the CRD with the endpoints subsets,
-  otherwise the existing CRD is updated.
-- KuryrLoadBalancerHandler manages KuryrLoadBalancer CRD events once the CRD
-  is successfully created and filled with spec data. This handler is
-  responsible for creating the needed Octavia resources according to the
-  CRD spec and for updating the status field with information about the
-  generated resources, such as LoadBalancer, LoadBalancerListener,
-  LoadBalancerPool and LoadBalancerMembers.
-
-These Handlers use the Project, Subnet and SecurityGroup service drivers to
-get the details for the service mapping.
-
-In order to prevent Kubernetes objects from being deleted before the
-OpenStack resources are cleaned up, finalizers are used. Finalizers block
-deletion of the Service, Endpoints and KuryrLoadBalancer objects until Kuryr
-deletes the associated OpenStack load balancers. After that, the finalizers
-are removed, allowing the Kubernetes API to delete the objects.
-
-The LBaaS Driver is added to manage the service translation to the
-LBaaSv2-like API. It abstracts all the details of the service translation to
-the Load Balancer. LBaaSv2Driver supports this interface by mapping to
-Neutron LBaaSv2 constructs.
-
-
-Service Creation Process
-~~~~~~~~~~~~~~~~~~~~~~~~
-
-What happens when a service gets created by Kubernetes?
-+++++++++++++++++++++++++++++++++++++++++++++++++++++++
-
-When a Kubernetes Service and Endpoints are created, the ServiceHandler and
-EndpointsHandler (at controller/handlers/lbaas.py) are called. When the
-ServiceHandler first starts handling the on_present event, it creates the
-KuryrLoadBalancer CRD with the Service spec and an empty status. While the
-Endpoints information is not yet added to the spec by the EndpointsHandler,
-the event reaching the KuryrLoadBalancerHandler is skipped. If the
-EndpointsHandler starts handling the on_present event first, the
-KuryrLoadBalancer CRD is created with the endpoints subsets. 
Otherwise, it
-will update the existing CRD created by the ServiceHandler with the
-endpoints subsets.
-
-The KuryrLoadBalancerHandler (at controller/handlers/loadbalancer.py), upon
-noticing a KuryrLoadBalancer CRD with the full specification, calls the
-appropriate Drivers to handle the OpenStack resources, such as the Load
-Balancer, Load Balancer Listeners, Load Balancer Pools, and Load Balancer
-Members. It uses the _sync_lbaas_members function to check if the OpenStack
-load balancers are in sync with their Kubernetes counterparts.
-
-
-.. figure:: ../../images/service_creation_diagram.svg
-    :alt: Service creation Diagram
-    :align: center
-    :width: 100%
-
-    Service creation flow diagram
-
-Service Deletion Process
-~~~~~~~~~~~~~~~~~~~~~~~~
-
-What happens when a service gets deleted by Kubernetes?
-+++++++++++++++++++++++++++++++++++++++++++++++++++++++
-
-When a Kubernetes Service and Endpoints are deleted, the finalizers, which
-are added to the Service object (and the KLB CRD object too) and defined
-during the KuryrLoadBalancer CRD creation, block the removal of the
-Kubernetes objects until the associated OpenStack resources are removed,
-which also avoids leftovers. When they are removed, Kubernetes is able to
-remove the CRD, the service and the endpoints, hence completing the service
-removal action.
-
-What happens if the KuryrLoadBalancer CRD status changes?
-+++++++++++++++++++++++++++++++++++++++++++++++++++++++++
-
-If the members field on the status of the CRD is manually removed, or the
-status is completely set to an empty object, the KuryrLoadBalancerHandler
-that is watching these CRD objects detects this change, confirms that there
-is no information about the OpenStack resources on the status, and so needs
-to rediscover or recreate them. It checks if there are provisioned OpenStack
-resources (in this case load balancers, listeners, pools, and members) for
-the service/endpoints defined in the KuryrLoadBalancer CRD spec. If that is
-the case, it retrieves their information and puts it back on the CRD status
-field. If that is not the case (due to the resources being deleted on the
-OpenStack side), it will recreate the resources and write the new
-information about them on the CRD status field.
-
-
-.. _Kubernetes service: http://kubernetes.io/docs/user-guide/services/
-.. _Kube-Proxy: http://kubernetes.io/docs/admin/kube-proxy/
diff --git a/doc/source/devref/updating_pod_resources_api.rst b/doc/source/devref/updating_pod_resources_api.rst
deleted file mode 100644
index 323eca9f6..000000000
--- a/doc/source/devref/updating_pod_resources_api.rst
+++ /dev/null
@@ -1,137 +0,0 @@
-..
-    This work is licensed under a Creative Commons Attribution 3.0 Unported
-    License.
-
-    http://creativecommons.org/licenses/by/3.0/legalcode
-
-    Convention for heading levels in Neutron devref:
-    =======  Heading 0 (reserved for the title in a document)
-    -------  Heading 1
-    ~~~~~~~  Heading 2
-    +++++++  Heading 3
-    '''''''  Heading 4
-    (Avoid deeper levels because they do not render well.)
-
-==================================
-HowTo Update PodResources gRPC API
-==================================
-
-Purpose
--------
-
-The purpose of this document is to describe how to update the gRPC API files
-in the kuryr-kubernetes repository in case of upgrading to a new version of
-the Kubernetes PodResources API. These files are ``api_pb2_grpc.py``,
-``api_pb2.py`` and ``api.proto`` from the
-``kuryr_kubernetes/pod_resources/`` directory.
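-
-To make the role of these files concrete, a rough usage sketch is shown
-below. It queries kubelet's PodResources service with the generated
-bindings; the socket path and the stub/message names follow the
-``v1alpha1`` API discussed in this document and are assumptions rather than
-a tested recipe:
-
-.. code-block:: python
-
-    import grpc
-
-    from kuryr_kubernetes.pod_resources import api_pb2
-    from kuryr_kubernetes.pod_resources import api_pb2_grpc
-
-    # kubelet exposes the PodResources service on a local UNIX socket.
-    channel = grpc.insecure_channel(
-        'unix:/var/lib/kubelet/pod-resources/kubelet.sock')
-    stub = api_pb2_grpc.PodResourcesListerStub(channel)
-
-    # List() returns the resources (e.g. devices) allocated to each pod
-    # running on this node.
-    response = stub.List(api_pb2.ListPodResourcesRequest())
-    for pod in response.pod_resources:
-        print(pod.name, pod.namespace)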
-
-``api.proto`` is a gRPC API definition file generated from the
-``kubernetes/pkg/kubelet/apis/podresources/<version>/api.proto`` of the
-Kubernetes source tree.
-
-``api_pb2_grpc.py`` and ``api_pb2.py`` are the Python bindings for the gRPC
-API.
-
-.. note::
-
-   There are only 2 reasons to update:
-
-   #. Kubernetes released a new version of the PodResources API and the old
-      one is no longer supported. In this case, without an update, we'll not
-      be able to use the PodResources service.
-   #. The ``protobuf`` version in ``lower-constraints.txt`` changed to a
-      lower version (this is highly unlikely). In this case ``protobuf``
-      could fail to use our Python bindings.
-
-
-Automated update
-----------------
-
-The ``contrib/regenerate_pod_resources_api.sh`` script can be used to
-re-generate the PodResources gRPC API files. By default, this script will
-download the ``v1alpha1`` version of the ``api.proto`` file from the
-Kubernetes GitHub repo and create the required kuryr-kubernetes files from
-it:
-
-.. code-block:: console
-
-   [kuryr-kubernetes]$ ./contrib/regenerate_pod_resources_api.sh
-
-Alternatively, the path to the ``api.proto`` file can be specified in the
-``KUBERNETES_API_PROTO`` environment variable:
-
-.. code-block:: console
-
-   $ export KUBERNETES_API_PROTO=/path/to/api.proto
-
-Define the ``API_VERSION`` environment variable to use a specific version of
-``api.proto`` from the Kubernetes GitHub:
-
-.. code-block:: console
-
-   $ export API_VERSION=v1alpha1
-
-
-Manual update steps
--------------------
-
-Preparing the new api.proto
-~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-Copy the ``api.proto`` from the K8s sources to
-``kuryr_kubernetes/pod_resources/`` and remove all the lines that contain
-``gogoproto``, since this is an unwanted dependency that is not needed for
-the Python bindings:
-
-.. code-block:: console
-
-   $ sed '/gogoproto/d' \
-       ../kubernetes/pkg/kubelet/apis/podresources/<version>/api.proto \
-       > kuryr_kubernetes/pod_resources/api.proto
-
-Don't forget to update the file header, which should point to the original
-``api.proto`` and to this reference document::
-
-   // Generated from kubernetes/pkg/kubelet/apis/podresources/<version>/api.proto
-   // To regenerate api.proto, api_pb2.py and api_pb2_grpc.py follow instructions
-   // from doc/source/devref/updating_pod_resources_api.rst.
-
-
-Generating the python bindings
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-* (Optional) Create a Python virtual environment:
-
-.. code-block:: console
-
-   [kuryr-kubernetes]$ python3 -m venv venv
-   [kuryr-kubernetes]$ . ./venv/bin/activate
-
-* To generate the Python bindings we need a ``protoc`` compiler and the
-  ``gRPC plugin`` for it. The simplest way to get them is to install
-  ``grpcio-tools``:
-
-  .. code-block:: console
-
-     (venv) [kuryr-kubernetes]$ pip install grpcio-tools==1.19
-
-  .. note::
-
-     We're installing a specific version of ``grpcio-tools`` to get a
-     specific version of the ``protoc`` compiler. The version of the
-     ``protoc`` compiler should be equal to the ``protobuf`` package version
-     in ``lower-constraints.txt``. This is because an older ``protobuf``
-     might not be able to use files generated by a newer compiler. In case
-     you need to use a more recent compiler, you need to update
-     ``requirements.txt`` and ``lower-constraints.txt`` accordingly.
-
-     To check the version of the compiler installed with ``grpcio-tools``
-     use:
-
-     .. code-block:: console
-
-        (venv) [kuryr-kubernetes]$ python -m grpc_tools.protoc --version
-        libprotoc 3.6.1
-
-* The following command will generate ``api_pb2_grpc.py`` and
-  ``api_pb2.py``:
-
-  .. 
code-block:: console
-
-     (venv) [kuryr-kubernetes]$ python -m grpc_tools.protoc -I./ \
-         --python_out=. --grpc_python_out=. \
-         kuryr_kubernetes/pod_resources/api.proto
diff --git a/doc/source/devref/vif_handler_drivers_design.rst b/doc/source/devref/vif_handler_drivers_design.rst
deleted file mode 100644
index 15ae21cc9..000000000
--- a/doc/source/devref/vif_handler_drivers_design.rst
+++ /dev/null
@@ -1,157 +0,0 @@
-..
-    This work is licensed under a Creative Commons Attribution 3.0 Unported
-    License.
-
-    http://creativecommons.org/licenses/by/3.0/legalcode
-
-    Convention for heading levels in Neutron devref:
-    =======  Heading 0 (reserved for the title in a document)
-    -------  Heading 1
-    ~~~~~~~  Heading 2
-    +++++++  Heading 3
-    '''''''  Heading 4
-    (Avoid deeper levels because they do not render well.)
-
-==================================
-VIF-Handler And Vif Drivers Design
-==================================
-
-Purpose
--------
-
-The purpose of this document is to present an approach for implementing the
-design of the interaction between the VIF-handler and the drivers it uses in
-the Kuryr-Kubernetes Controller.
-
-
-VIF-Handler
------------
-
-The VIF-handler is intended to handle VIFs. Currently it is responsible for
-reacting to Pod object events and for creating/deleting the corresponding
-KuryrPort CRD objects.
-
-
-KuryrPort-handler
------------------
-
-KuryrPort is responsible for taking care of the associated Pod VIFs. The
-main aim of this handler is to get the KuryrPort CRD object created by the
-VIF-handler and send it to:
-
-- the VIF-driver for the default network,
-- the enabled Multi-VIF drivers for the additional networks,
-
-and get VIF objects from both. After that the KuryrPort-handler is able to
-activate, release or update VIFs. The KuryrPort-handler should stay clean,
-whereas parsing of specific pod information should be done by the Multi-VIF
-drivers.
-
-
-Multi-VIF driver
-~~~~~~~~~~~~~~~~
-
-This is a new type of driver which is used to call other VIF-drivers to
-attach additional interfaces to Pods. The main aim of this kind of driver is
-to get the additional interfaces from the Pod's definition, then invoke real
-VIF-drivers like neutron-vif or nested-macvlan to retrieve the VIF objects
-accordingly.
-
-All Multi-VIF drivers should be derived from the class *MultiVIFDriver*, and
-all should implement the *request_additional_vifs* method, which returns a
-list of VIF objects. Those VIF objects are created by each of the
-VIF-drivers invoked by the Multi-VIF driver. Each of the Multi-VIF drivers
-should support a syntax for the definition of additional interfaces in the
-Pod. If the pod object doesn't define additional interfaces, the Multi-VIF
-driver can just return.
-
-A diagram describing the VifHandler - Drivers flow is given below:
-
-.. image:: ../../images/vif_handler_drivers_design.png
-    :alt: vif handler drivers design
-    :align: center
-    :width: 100%
-
-
-Config Options
-~~~~~~~~~~~~~~
-
-A new config option "multi_vif_drivers" (list) is added to the config file
-to specify which Multi-VIF drivers should be used for the additional VIF
-objects. It is allowed to have one or more multi_vif_drivers enabled, which
-means that multi_vif_drivers can either work separately or together. By
-default, a noop driver which basically does nothing will be used if this
-field is not explicitly specified.
-
-The option in the config file might look like this:
-
-.. code-block:: ini
-
-    [kubernetes]
-    multi_vif_drivers = additional_subnets
-
-Or like this:
-
-.. 
code-block:: ini
-
-    [kubernetes]
-    multi_vif_drivers = npwg_multiple_interfaces
-
-
-Additional Subnets Driver
-~~~~~~~~~~~~~~~~~~~~~~~~~
-
-Since it is possible to request additional subnets for the pod through the
-pod annotations, it is necessary to have a new driver. According to the
-information (the requested subnets) parsed by the Multi-VIF driver, it has
-to return a dictionary containing the mapping 'subnet_id' -> 'network' for
-all requested subnets, in the unified format specified in the
-PodSubnetsDriver class. Here's how a Pod Spec with additional subnet
-requests might look:
-
-.. code-block:: yaml
-
-    spec:
-      replicas: 1
-      template:
-        metadata:
-          name: some-name
-          labels:
-            app: some-name
-          annotations:
-            openstack.org/kuryr-additional-subnets: '[
-                "id_of_neutron_subnet_created_previously"
-            ]'
-
-
-Specific ports support
-----------------------
-
-Specific ports support is enabled by default and will be a part of the
-drivers that implement it. It is possible to have manually precreated
-specific ports in Neutron and to specify them in the pod annotations as the
-preferred ports. This means that drivers will use the specific ports if they
-are specified in the pod annotations, and otherwise will create new ports by
-default. It is important that specific ports can have a vnic_type of both
-direct and normal, so it is necessary to provide processing support for
-specific ports in both the SRIOV and the generic driver. A pod annotation
-with requested specific ports might look like this:
-
-.. code-block:: yaml
-
-    spec:
-      replicas: 1
-      template:
-        metadata:
-          name: some-name
-          labels:
-            app: some-name
-          annotations:
-            spec-ports: '[
-                "id_of_direct_precreated_port",
-                "id_of_normal_precreated_port"
-            ]'
-
-The pod spec above should be interpreted the following way: the Multi-VIF
-driver parses the pod annotations and gets the ids of the specific ports. If
-vnic_type is "normal" and such ports exist, it calls the generic driver to
-create VIF objects for these ports. Else, if vnic_type is "direct" and such
-ports exist, it calls the SRIOV driver to create VIF objects for these
-ports. If no specific ports are requested in the annotations, then the
-driver doesn't return additional VIFs to the Multi-VIF driver.
diff --git a/doc/source/index.rst b/doc/source/index.rst
deleted file mode 100644
index 20e95887b..000000000
--- a/doc/source/index.rst
+++ /dev/null
@@ -1,48 +0,0 @@
-.. kuryr-kubernetes documentation master file, created by
-   sphinx-quickstart on Tue Jul 9 22:26:36 2013.
-   You can adapt this file completely to your liking, but it should at least
-   contain the root `toctree` directive.
-
-Welcome to kuryr-kubernetes's documentation!
-============================================
-
-Contents
---------
-
-.. toctree::
-   :maxdepth: 3
-
-   readme
-   nested_vlan_mode
-   installation/index
-   usage
-   contributor/index
-
-
-Developer Docs
---------------
-
-.. toctree::
-   :maxdepth: 3
-
-   devref/index
-
-
-Design Specs
-------------
-
-.. toctree::
-   :maxdepth: 1
-
-   specs/pike/contrail_support
-   specs/pike/fuxi_kubernetes
-   specs/queens/network_policy
-   specs/rocky/npwg_spec_support
-   specs/stein/vhostuser
-
-
-Indices and tables
-------------------
-
-* :ref:`genindex`
-* :ref:`search`
diff --git a/doc/source/installation/containerized.rst b/doc/source/installation/containerized.rst
deleted file mode 100644
index 5391595a8..000000000
--- a/doc/source/installation/containerized.rst
+++ /dev/null
@@ -1,187 +0,0 @@
-.. 
_containerized:
-
-================================================
-Kuryr installation as a Kubernetes network addon
-================================================
-
-Building images
-~~~~~~~~~~~~~~~
-
-First you should build the kuryr-controller and kuryr-cni docker images and
-place them in a cluster-wide accessible registry.
-
-To create the controller image on the local machine:
-
-.. code-block:: console
-
-   $ docker build -t kuryr/controller -f controller.Dockerfile .
-
-To create the CNI daemonset image on the local machine:
-
-.. code-block:: console
-
-   $ docker build -t kuryr/cni -f cni.Dockerfile .
-
-Kuryr-kubernetes also includes a tool that automatically builds the
-controller image and deletes the existing container to apply the newly
-built image. The tool is available at:
-
-.. code-block:: console
-
-   $ contrib/regenerate_controller_pod.sh
-
-If you want to run the kuryr CNI without the daemon, build the image with:
-
-.. code-block:: console
-
-   $ docker build -t kuryr/cni -f cni.Dockerfile --build-arg CNI_DAEMON=False .
-
-Alternatively, you can remove ``imagePullPolicy: Never`` from the
-kuryr-controller Deployment and kuryr-cni DaemonSet definitions to use the
-pre-built `controller`_ and `cni`_ images from the Docker Hub. Those
-definitions will be generated in the next step.
-
-.. _containerized-generate:
-
-Generating Kuryr resource definitions for Kubernetes
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-kuryr-kubernetes includes a tool that lets you generate resource definitions
-that can be used to deploy Kuryr on Kubernetes. The script is placed in
-``tools/generate_k8s_resource_definitions.sh`` and takes an output directory
-and up to 3 optional arguments:
-
-.. code-block:: console
-
-   $ ./tools/generate_k8s_resource_definitions.sh <output_dir> [<controller_conf_path>] [<cni_conf_path>] [<ca_certificate_path>]
-
-* ``output_dir`` - directory where to put the yaml files with the
-  definitions.
-* ``controller_conf_path`` - path to a custom kuryr-controller
-  configuration file.
-* ``cni_conf_path`` - path to a custom kuryr-cni configuration file
-  (defaults to ``controller_conf_path``).
-* ``ca_certificate_path`` - path to a custom CA certificate for the
-  OpenStack API. It will be added into Kubernetes as a ``Secret`` and
-  mounted into the kuryr-controller container. Defaults to no certificate.
-
-.. note::
-
-   Providing no or an incorrect ``ca_certificate_path`` will still create
-   the file with the ``Secret`` definition with an empty CA certificate
-   file. This file will still be mounted in the kuryr-controller
-   ``Deployment`` definition.
-
-If no path to config files is provided, the script automatically generates a
-minimal configuration. However, some of the options should be filled in by
-the user. You can do that either by editing the file after the ConfigMap
-definition is generated, or by providing your options as environment
-variables before running the script. 
Below is the list
-of available variables:
-
-* ``$KURYR_K8S_API_ROOT`` - ``[kubernetes]api_root`` (default:
-  https://127.0.0.1:6443)
-* ``$KURYR_K8S_AUTH_URL`` - ``[neutron]auth_url`` (default:
-  http://127.0.0.1/identity)
-* ``$KURYR_K8S_USERNAME`` - ``[neutron]username`` (default: admin)
-* ``$KURYR_K8S_PASSWORD`` - ``[neutron]password`` (default: password)
-* ``$KURYR_K8S_USER_DOMAIN_NAME`` - ``[neutron]user_domain_name`` (default:
-  Default)
-* ``$KURYR_K8S_KURYR_PROJECT_ID`` - ``[neutron]kuryr_project_id``
-* ``$KURYR_K8S_PROJECT_DOMAIN_NAME`` - ``[neutron]project_domain_name``
-  (default: Default)
-* ``$KURYR_K8S_PROJECT_ID`` - ``[neutron]k8s_project_id``
-* ``$KURYR_K8S_POD_SUBNET_ID`` - ``[neutron_defaults]pod_subnet_id``
-* ``$KURYR_K8S_POD_SG`` - ``[neutron_defaults]pod_sg``
-* ``$KURYR_K8S_SERVICE_SUBNET_ID`` - ``[neutron_defaults]service_subnet_id``
-* ``$KURYR_K8S_WORKER_NODES_SUBNETS`` - ``[pod_vif_nested]worker_nodes_subnets``
-* ``$KURYR_K8S_BINDING_DRIVER`` - ``[binding]driver`` (default:
-  ``kuryr.lib.binding.drivers.vlan``)
-* ``$KURYR_K8S_BINDING_IFACE`` - ``[binding]link_iface`` (default: eth0)
-
-.. note::
-
-   The kuryr-daemon will be started in the CNI container. It uses ``os-vif``
-   and ``oslo.privsep`` to do the pod wiring tasks. By default it'll call
-   ``sudo`` to raise privileges, even if the container is privileged by
-   itself or ``sudo`` is missing from the container OS (e.g. the default
-   CentOS 8). To prevent that, make sure to set the following options in the
-   kuryr.conf used for the kuryr-daemon:
-
-   .. code-block:: ini
-
-      [vif_plug_ovs_privileged]
-      helper_command=privsep-helper
-      [vif_plug_linux_bridge_privileged]
-      helper_command=privsep-helper
-
-   Those options will prevent oslo.privsep from doing that. If you rely on
-   the aforementioned script to generate the config files, those options
-   will be added automatically.
-
-In case of using the ports pool functionality, we may want to mark the
-kuryr-controller as not ready until the pools are populated with the
-existing ports. To achieve this, a readiness probe must be added to the
-kuryr-controller deployment. To add the readiness probe, in addition to the
-above environment variables or the kuryr-controller configuration file, an
-extra environment variable must be set:
-
-* ``$KURYR_USE_PORTS_POOLS`` - ``True`` (default: False)
-
-Example run:
-
-.. code-block:: console
-
-   $ KURYR_K8S_API_ROOT="192.168.0.1:6443" ./tools/generate_k8s_resource_definitions.sh /tmp
-
-This should generate 6 files in your ``<output_dir>``:
-
-* config_map.yml
-* certificates_secret.yml
-* controller_service_account.yml
-* cni_service_account.yml
-* controller_deployment.yml
-* cni_ds.yml
-
-.. note::
-
-   The kuryr-cni daemonset mounts /var/run due to the necessity of accessing
-   several subdirectories, like openvswitch and the auxiliary directory for
-   the vhostuser configuration and socket files. Also, when
-   neutron-openvswitch-agent works with the datapath_type = netdev
-   configuration option, kuryr-kubernetes has to move the vhostuser socket
-   to the auxiliary directory, and that auxiliary directory should be on the
-   same mount point, otherwise the connection of this socket will be
-   refused. In case Open vSwitch keeps the vhostuser socket files somewhere
-   other than /var/run/openvswitch, the openvswitch mount point in
-   cni_ds.yaml and the [vhostuser] section in config_map.yml should be
-   changed properly.
-
-
-Deploying Kuryr resources on Kubernetes
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-To deploy the files on your Kubernetes cluster run:
-
-.. 
-
-Example run:
-
-.. code-block:: console
-
-   $ KURYR_K8S_API_ROOT="192.168.0.1:6443" ./tools/generate_k8s_resource_definitions.sh /tmp
-
-This should generate 6 files in your ``<output_dir>``:
-
-* config_map.yml
-* certificates_secret.yml
-* controller_service_account.yml
-* cni_service_account.yml
-* controller_deployment.yml
-* cni_ds.yml
-
-.. note::
-
-   The kuryr-cni daemonset mounts /var/run, because it needs access to
-   several of its subdirectories, such as openvswitch and the auxiliary
-   directory for vhostuser configuration and socket files. Moreover, when
-   neutron-openvswitch-agent works with the datapath_type = netdev
-   configuration option, kuryr-kubernetes has to move the vhostuser socket to
-   the auxiliary directory, and that auxiliary directory should be on the
-   same mount point, otherwise connections to this socket will be refused. In
-   case Open vSwitch keeps its vhostuser socket files somewhere other than
-   /var/run/openvswitch, the openvswitch mount point in cni_ds.yaml and the
-   [vhostuser] section in config_map.yml should be changed accordingly.
-
-
-Deploying Kuryr resources on Kubernetes
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-To deploy the files on your Kubernetes cluster run:
-
-.. code-block:: console
-
-   $ kubectl apply -f config_map.yml -n kube-system
-   $ kubectl apply -f certificates_secret.yml -n kube-system
-   $ kubectl apply -f controller_service_account.yml -n kube-system
-   $ kubectl apply -f cni_service_account.yml -n kube-system
-   $ kubectl apply -f controller_deployment.yml -n kube-system
-   $ kubectl apply -f cni_ds.yml -n kube-system
-
-After successful completion:
-
-* a kuryr-controller Deployment object, with a single replica, will get
-  created in the kube-system namespace.
-* kuryr-cni gets installed as a DaemonSet object on all the nodes in the
-  kube-system namespace.
-
-To see kuryr-controller logs:
-
-.. code-block:: console
-
-   $ kubectl logs <pod_name>
-
-NOTE: kuryr-cni pods produce no logs of their own; to debug failures you need
-to check the kubelet logs.
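-
-On hosts where kubelet runs under systemd, those can usually be inspected
-with ``journalctl``; a sketch (the ``kubelet`` unit name is an assumption
-that depends on your deployment):
-
-.. code-block:: console
-
-   $ journalctl -u kubelet | grep -i kuryr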
-
-
-.. _controller: https://hub.docker.com/r/kuryr/controller/
-.. _cni: https://hub.docker.com/r/kuryr/cni/
diff --git a/doc/source/installation/default_configuration.rst b/doc/source/installation/default_configuration.rst
deleted file mode 100644
index 8a32c270e..000000000
--- a/doc/source/installation/default_configuration.rst
+++ /dev/null
@@ -1,91 +0,0 @@
-=============================
-Inspect default Configuration
-=============================
-
-By default, DevStack creates networks called ``private`` and ``public``:
-
-.. code-block:: console
-
-   $ openstack network list --project demo
-   +--------------------------------------+---------+----------------------------------------------------------------------------+
-   | ID                                   | Name    | Subnets                                                                    |
-   +--------------------------------------+---------+----------------------------------------------------------------------------+
-   | 12bc346b-35ed-4cfa-855b-389305c05740 | private | 1ee73076-e01e-4cec-a3a4-cbb275f94d0f, 8376a091-dcea-4ed5-b738-c16446e861da |
-   +--------------------------------------+---------+----------------------------------------------------------------------------+
-
-   $ openstack network list --project admin
-   +--------------------------------------+--------+----------------------------------------------------------------------------+
-   | ID                                   | Name   | Subnets                                                                    |
-   +--------------------------------------+--------+----------------------------------------------------------------------------+
-   | 646baf54-6178-4a26-a52b-68ad0ba1e057 | public | 00e0b1e4-4bee-4204-bd02-610291c56334, b1be34f2-7c3d-41ca-b2f5-6dcbd3c1715b |
-   +--------------------------------------+--------+----------------------------------------------------------------------------+
-
-And kuryr-kubernetes creates two extra ones for the kubernetes services and
-pods under the project k8s:
-
-.. code-block:: console
-
-   $ openstack network list --project k8s
-   +--------------------------------------+-----------------+--------------------------------------+
-   | ID                                   | Name            | Subnets                              |
-   +--------------------------------------+-----------------+--------------------------------------+
-   | 1bff74a6-e4e2-42fb-a81b-33c9c144987c | k8s-pod-net     | 3c3e18f9-d1d0-4674-b3be-9fc8561980d3 |
-   | d4be7efc-b84d-480e-a1db-34205877e6c4 | k8s-service-net | 55405e9d-4e25-4a55-bac2-e25ee88584e1 |
-   +--------------------------------------+-----------------+--------------------------------------+
-
-And similarly for the subnets:
-
-.. code-block:: console
-
-   $ openstack subnet list --project k8s
-   +--------------------------------------+--------------------+--------------------------------------+---------------+
-   | ID                                   | Name               | Network                              | Subnet        |
-   +--------------------------------------+--------------------+--------------------------------------+---------------+
-   | 3c3e18f9-d1d0-4674-b3be-9fc8561980d3 | k8s-pod-subnet     | 1bff74a6-e4e2-42fb-a81b-33c9c144987c | 10.0.0.64/26  |
-   | 55405e9d-4e25-4a55-bac2-e25ee88584e1 | k8s-service-subnet | d4be7efc-b84d-480e-a1db-34205877e6c4 | 10.0.0.128/26 |
-   +--------------------------------------+--------------------+--------------------------------------+---------------+
-
-In addition to that, security groups for both pods and services are created
-too:
-
-.. code-block:: console
-
-   $ openstack security group list --project k8s
-   +--------------------------------------+--------------------+------------------------+----------------------------------+
-   | ID                                   | Name               | Description            | Project                          |
-   +--------------------------------------+--------------------+------------------------+----------------------------------+
-   | 00fd78f9-484d-4ea7-b677-82f73c54064a | service_pod_access | service_pod_access     | 49e2683370f245e38ac2d6a8c16697b3 |
-   | fe7cee41-6021-4d7b-ab03-1ce1e391a1ca | default            | Default security group | 49e2683370f245e38ac2d6a8c16697b3 |
-   +--------------------------------------+--------------------+------------------------+----------------------------------+
-
-And finally, the loadbalancer for the kubernetes API service is also created,
-with the subsequent listener, pool and members:
-
-.. code-block:: console
-
-   $ openstack loadbalancer list
-   +--------------------------------------+--------------------+----------------------------------+-------------+---------------------+----------+
-   | id                                   | name               | tenant_id                        | vip_address | provisioning_status | provider |
-   +--------------------------------------+--------------------+----------------------------------+-------------+---------------------+----------+
-   | 7d0cf5b5-b164-4b32-87d3-ae6c82513927 | default/kubernetes | 47c28e562795468ea52e92226e3bc7b1 | 10.0.0.129  | ACTIVE              | haproxy  |
-   +--------------------------------------+--------------------+----------------------------------+-------------+---------------------+----------+
-
-   $ openstack loadbalancer listener list
-   +--------------------------------------+--------------------------------------+------------------------+----------------------------------+----------+---------------+----------------+
-   | id                                   | default_pool_id                      | name                   | tenant_id                        | protocol | protocol_port | admin_state_up |
-   +--------------------------------------+--------------------------------------+------------------------+----------------------------------+----------+---------------+----------------+
-   | abfbafd8-7609-4b7d-9def-4edddf2b887b | 70bed821-9a9f-4e1d-8c7e-7df89a923982 | default/kubernetes:443 | 47c28e562795468ea52e92226e3bc7b1 | HTTPS    | 443           | True           |
-   +--------------------------------------+--------------------------------------+------------------------+----------------------------------+----------+---------------+----------------+
-
-   $ openstack loadbalancer pool list
-   +--------------------------------------+------------------------+----------------------------------+--------------+----------+----------------+
-   | id                                   | name                   | tenant_id                        | lb_algorithm | protocol | admin_state_up |
-   +--------------------------------------+------------------------+----------------------------------+--------------+----------+----------------+
-   | 70bed821-9a9f-4e1d-8c7e-7df89a923982 | default/kubernetes:443 | 47c28e562795468ea52e92226e3bc7b1 | ROUND_ROBIN  | HTTPS    | True           |
-   +--------------------------------------+------------------------+----------------------------------+--------------+----------+----------------+
-
-   $ openstack loadbalancer member list default/kubernetes:443
-   +--------------------------------------+------+----------------------------------+--------------+---------------+--------+--------------------------------------+----------------+
-   | id                                   | name | tenant_id                        | address      | protocol_port | weight | subnet_id                            | admin_state_up |
-   +--------------------------------------+------+----------------------------------+--------------+---------------+--------+--------------------------------------+----------------+
-   | 5ddceaff-180b-47fa-b787-8921f4591cb0 |      | 47c28e562795468ea52e92226e3bc7b1 | 192.168.5.10 | 6443          | 1      | b1be34f2-7c3d-41ca-b2f5-6dcbd3c1715b | True           |
-   +--------------------------------------+------+----------------------------------+--------------+---------------+--------+--------------------------------------+----------------+
diff --git a/doc/source/installation/devstack/basic.rst b/doc/source/installation/devstack/basic.rst
deleted file mode 100644
index 0538e13ad..000000000
--- a/doc/source/installation/devstack/basic.rst
+++ /dev/null
@@ -1,186 +0,0 @@
-===========================
-Basic DevStack installation
-===========================
-
-The most basic DevStack installation of kuryr-kubernetes is pretty simple.
-This document aims to be a tutorial through the installation steps.
-
-The document assumes Ubuntu 20.04 LTS (a server or cloud installation is
-recommended, but desktop will also work); the same steps should apply to
-other operating systems. It is also assumed that ``git`` and ``curl`` are
-already installed on the system. DevStack will make sure to install and
-configure OpenStack, Kubernetes and the dependencies of both systems.
-
-Please note that the DevStack installation should be done inside an isolated
-environment, such as a virtual machine, since it will make substantial
-changes to the host.
-
-
-Cloning required repositories
------------------------------
-
-First of all, you'll need a user account that can execute passwordless
-``sudo`` commands. Consult the `DevStack Documentation`_ for details on how
-to create one, or simply add the line:
-
-.. code-block:: ini
-
-   "USERNAME ALL=(ALL) NOPASSWD:ALL"
-
-to ``/etc/sudoers`` using the ``visudo`` command. Remember to change
-``USERNAME`` to the real name of the user account.
-
-Clone DevStack:
-
-.. code-block:: console
-
-   $ git clone https://opendev.org/openstack-dev/devstack
-
-Download the sample ``local.conf`` (the DevStack configuration file) into the
-devstack directory:
-
-.. code-block:: console
-
-   $ curl https://opendev.org/openstack/kuryr-kubernetes/raw/branch/master/devstack/local.conf.sample \
-       -o devstack/local.conf
-
-.. note::
-
-   The ``local.conf.sample`` file configures Neutron and Kuryr with OVN ML2
-   networking. In the ``kuryr-kubernetes/devstack`` directory there are other
-   sample configuration files that enable Open vSwitch instead of OVN
-   networking. See other pages in this documentation section to learn more.
-
-Now edit ``devstack/local.conf`` to set up some initial options:
-
-* If you have multiple network interfaces, you need to set the ``HOST_IP``
-  variable to the IP on the interface you want to use as DevStack's primary
-  one. DevStack sometimes complains about a missing ``HOST_IP`` even if
-  there is a single network interface.
-* If you already have Docker installed on the machine, you can comment out
-  the line starting with ``enable_plugin devstack-plugin-container``.
-* If you can't pull images from k8s.gcr.io, you can add the variable
-  ``KURYR_KUBEADMIN_IMAGE_REPOSITORY`` to ``devstack/local.conf`` and set its
-  value to a repository that you can access.
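-
-For illustration, a hypothetical fragment of ``devstack/local.conf`` applying
-these options (both values below are made-up examples):
-
-.. code-block:: ini
-
-   [[local|localrc]]
-   # Assumed address of the interface DevStack should use.
-   HOST_IP=192.168.122.10
-   # Hypothetical mirror used instead of k8s.gcr.io.
-   KURYR_KUBEADMIN_IMAGE_REPOSITORY=registry.example.org/k8s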
-
-Once ``local.conf`` is configured, you can start the installation:
-
-.. code-block:: console
-
-   $ devstack/stack.sh
-
-The installation takes from 20 to 40 minutes. Once it's done you should see
-output similar to this:
-
-.. code-block:: console
-
-   =========================
-   DevStack Component Timing
-    (times are in seconds)
-   =========================
-   wait_for_service       8
-   pip_install          137
-   apt-get              295
-   run_process           14
-   dbsync                22
-   git_timed            168
-   apt-get-update         4
-   test_with_retry        3
-   async_wait            71
-   osc                  200
-   -------------------------
-   Unaccounted time     505
-   =========================
-   Total runtime       1427
-
-   =================
-    Async summary
-   =================
-    Time spent in the background minus waits: 140 sec
-    Elapsed time: 1427 sec
-    Time if we did everything serially: 1567 sec
-    Speedup: 1.09811
-
-
-
-   This is your host IP address: 10.0.2.15
-   This is your host IPv6 address: ::1
-   Keystone is serving at http://10.0.2.15/identity/
-   The default users are: admin and demo
-   The password: pass
-
-   Services are running under systemd unit files.
-   For more information see:
-   https://docs.openstack.org/devstack/latest/systemd.html
-
-   DevStack Version: xena
-   Change:
-   OS Version: Ubuntu 20.04 focal
-
-
-You can test DevStack by sourcing credentials and trying some commands:
-
-.. code-block:: console
-
-   $ source devstack/openrc admin admin
-   $ openstack service list
-   +----------------------------------+------------------+------------------+
-   | ID                               | Name             | Type             |
-   +----------------------------------+------------------+------------------+
-   | 07e985b425fc4f8a9da20970a26f754a | octavia          | load-balancer    |
-   | 1dc08cb4401243848a562c0042d3f40a | neutron          | network          |
-   | 35627730938d4a4295f3add6fc826261 | nova             | compute          |
-   | 636b43b739e548e0bb369bc41fe1df08 | glance           | image            |
-   | 90ef7129985e4e10874d5e4ddb36ea01 | keystone         | identity         |
-   | ce177a3f05dc454fb3d43f705ae24dde | kuryr-kubernetes | kuryr-kubernetes |
-   | d3d6a461a78e4601a14a5e484ec6cdd1 | nova_legacy      | compute_legacy   |
-   | d97e5c31b1054a308c5409ee813c0310 | placement        | placement        |
-   +----------------------------------+------------------+------------------+
-
-To verify that Kubernetes is running properly, list its nodes and check the
-status of the only node you should have. The correct value is "Ready":
-
-.. code-block:: console
-
-   $ kubectl get nodes
-   NAME        STATUS    AGE       VERSION
-   localhost   Ready     2m        v1.6.2
-
-To test kuryr-kubernetes itself, try creating a Kubernetes pod:
-
-.. code-block:: console
-
-   $ kubectl create deployment --image busybox test -- sleep 3600
-   $ kubectl get pods -o wide
-   NAME                    READY     STATUS              RESTARTS   AGE       IP        NODE
-   test-3202410914-1dp7g   0/1       ContainerCreating   0          7s        <none>    localhost
-
-After a moment (even up to a few minutes, as the Docker image needs to be
-downloaded) you should see that the pod got an IP from the OpenStack network:
-
-.. code-block:: console
-
-   $ kubectl get pods -o wide
-   NAME                    READY     STATUS    RESTARTS   AGE       IP          NODE
-   test-3202410914-1dp7g   1/1       Running   0          35s       10.0.0.73   localhost
-
-You can verify that this IP is really assigned to a Neutron port:
-
-.. code-block:: console
-
-   [stack@localhost kuryr-kubernetes]$ openstack port list | grep 10.0.0.73
-   | 3ce7fd13-ad0a-4e92-9b6f-0d38d50b1699 |      | fa:16:3e:8e:f4:30 | ip_address='10.0.0.73', subnet_id='ddfbc8e9-68da-48f9-8a05-238ea0607e0d' | ACTIVE |
-
-If those steps were successful, then it looks like your DevStack with
-kuryr-kubernetes is working correctly. In case of errors, copy the last ~50
-lines of the logs, paste them into `paste.openstack.org`_ and ask other
-developers for help on `Kuryr's IRC channel`_. More info on how to use
-DevStack can be found in the `DevStack Documentation`_, especially in the
-section `Using Systemd in DevStack`_, which explains how to use ``systemctl``
-to control services and ``journalctl`` to read their logs.
-
-
-.. _paste.openstack.org: http://paste.openstack.org
-.. _Kuryr's IRC channel: ircs://irc.oftc.net:6697/openstack-kuryr
-.. _DevStack Documentation: https://docs.openstack.org/devstack/latest/
-.. _Using Systemd in DevStack: https://docs.openstack.org/devstack/latest/systemd.html
diff --git a/doc/source/installation/devstack/containerized.rst b/doc/source/installation/devstack/containerized.rst
deleted file mode 100644
index a4866d181..000000000
--- a/doc/source/installation/devstack/containerized.rst
+++ /dev/null
@@ -1,83 +0,0 @@
-==========================
-Containerized installation
-==========================
-
-It is possible to configure DevStack to install kuryr-controller and
-kuryr-cni on Kubernetes as pods. Details can be found on the
-:doc:`../containerized` page; this page explains the DevStack aspects of
-running containerized.
-
-
-Installation
-------------
-
-To configure DevStack to install Kuryr services as containerized Kubernetes
-resources, you need to switch ``KURYR_K8S_CONTAINERIZED_DEPLOYMENT`` on. Add
-this line to your ``local.conf``:
-
-.. code-block:: ini
-
-   KURYR_K8S_CONTAINERIZED_DEPLOYMENT=True
-
-This will trigger building the kuryr-controller and kuryr-cni containers
-during installation, as well as deploying them on the Kubernetes cluster it
-installs.
-
-
-Rebuilding container images
----------------------------
-
-Instructions on how to manually rebuild both the kuryr-controller and
-kuryr-cni container images are presented on the :doc:`../containerized` page.
-In case you want to test any code changes, you need to rebuild the images
-first.
-
-
-Changing configuration
-----------------------
-
-To change the kuryr.conf files that are put into the containers, you need to
-edit the associated ConfigMap. On a DevStack deployment this can be done
-using:
-
-.. code-block:: console
-
-   $ kubectl -n kube-system edit cm kuryr-config
-
-An editor will then open, letting you edit the ConfigMap. Make sure to keep
-the indentation correct when making changes.
-
-
-Restarting services
--------------------
-
-Once any changes are made to the docker images or the configuration, it is
-crucial to restart the pod you've modified.
-
-
-kuryr-controller
-~~~~~~~~~~~~~~~~
-
-To restart kuryr-controller and let it load the new image and configuration,
-simply kill the existing pod:
-
-.. code-block:: console
-
-   $ kubectl -n kube-system get pods
-
-   $ kubectl -n kube-system delete pod <pod_name>
-
-The Deployment controller will make sure to restart the pod with the new
-configuration.
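-
-With a reasonably recent ``kubectl`` (1.15+) the same effect can be achieved
-with a rollout restart; a sketch, assuming the Deployment is named
-``kuryr-controller``:
-
-.. code-block:: console
-
-   $ kubectl -n kube-system rollout restart deployment/kuryr-controller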
-
-
-kuryr-cni
-~~~~~~~~~
-
-It's important to understand that kuryr-cni is only a storage pod, i.e., it
-actually idles with ``sleep infinity`` once all the files are copied into the
-correct locations on the Kubernetes host.
-
-You can force it to redeploy new files by killing it. The DaemonSet
-controller will make sure to restart it with the new image and configuration
-files.
-
-.. code-block:: console
-
-   $ kubectl -n kube-system get pods
-
-   $ kubectl -n kube-system delete pod <...>
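-
-To confirm that the files actually landed on the host, you can peek at the
-usual CNI locations (the same paths this guide uses for the manual
-installation; a quick sanity check, not an authoritative test):
-
-.. code-block:: console
-
-   $ ls /opt/cni/bin/ /etc/cni/net.d/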
diff --git a/doc/source/installation/devstack/index.rst b/doc/source/installation/devstack/index.rst
deleted file mode 100644
index 1db445ff9..000000000
--- a/doc/source/installation/devstack/index.rst
+++ /dev/null
@@ -1,42 +0,0 @@
-..
-      Licensed under the Apache License, Version 2.0 (the "License"); you may
-      not use this file except in compliance with the License. You may obtain
-      a copy of the License at
-
-          http://www.apache.org/licenses/LICENSE-2.0
-
-      Unless required by applicable law or agreed to in writing, software
-      distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-      WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-      License for the specific language governing permissions and limitations
-      under the License.
-
-      Convention for heading levels in Neutron devref:
-      =======  Heading 0 (reserved for the title in a document)
-      -------  Heading 1
-      ~~~~~~~  Heading 2
-      +++++++  Heading 3
-      '''''''  Heading 4
-      (Avoid deeper levels because they do not render well.)
-
-
-===========================
-DevStack based Installation
-===========================
-
-This section describes how you can install and configure kuryr-kubernetes
-with DevStack for testing different functionality, such as nested setups or
-different ML2 drivers.
-
-
-.. toctree::
-   :maxdepth: 1
-
-   basic
-   nested-vlan
-   nested-macvlan
-   nested-dpdk
-   ovn_support
-   ovn-octavia
-   containerized
-   ports-pool
diff --git a/doc/source/installation/devstack/nested-dpdk.rst b/doc/source/installation/devstack/nested-dpdk.rst
deleted file mode 100644
index 53ae2bf34..000000000
--- a/doc/source/installation/devstack/nested-dpdk.rst
+++ /dev/null
@@ -1,222 +0,0 @@
-=========================================
-How to try out nested-pods locally (DPDK)
-=========================================
-
-Following are the instructions for an all-in-one setup, using the nested DPDK
-driver. We assume that the 'undercloud' is already configured with at least
-one VM as a Nova instance, which is also a Kubernetes minion. We also assume
-that the VM has access to the Internet to install the necessary packages.
-
-Configure the VM:
-
-#. Install a kernel version supporting the uio_pci_generic module:
-
-   .. code-block:: bash
-
-      sudo apt install linux-image-`uname -r` linux-headers-`uname -r`
-      sudo update-grub
-      sudo reboot
-
-#. Install DPDK. On Ubuntu:
-
-   .. code-block:: bash
-
-      sudo apt update
-      sudo apt install dpdk
-
-#. Enable hugepages:
-
-   .. code-block:: bash
-
-      sudo sysctl -w vm.nr_hugepages=768
-
-#. Load the DPDK userspace driver:
-
-   .. code-block:: bash
-
-      sudo modprobe uio_pci_generic
-
-#. Clone the devstack repository:
-
-   .. code-block:: bash
-
-      cd ~
-      git clone https://git.openstack.org/openstack-dev/devstack
-
-#. Edit local.conf:
-
-   .. code-block:: ini
-
-      [[local|localrc]]
-
-      RECLONE="no"
-
-      enable_plugin kuryr-kubernetes \
-      https://git.openstack.org/openstack/kuryr-kubernetes
-
-      OFFLINE="no"
-      LOGFILE=devstack.log
-      LOG_COLOR=False
-      ADMIN_PASSWORD=
-      DATABASE_PASSWORD=
-      RABBIT_PASSWORD=
-      SERVICE_PASSWORD=
-      SERVICE_TOKEN=
-      IDENTITY_API_VERSION=3
-      ENABLED_SERVICES=""
-
-      HOST_IP=
-
-      SERVICE_HOST=
-      MULTI_HOST=1
-      KEYSTONE_SERVICE_HOST=$SERVICE_HOST
-      MYSQL_HOST=$SERVICE_HOST
-      RABBIT_HOST=$SERVICE_HOST
-
-      KURYR_CONFIGURE_NEUTRON_DEFAULTS=False
-      KURYR_CONFIGURE_BAREMETAL_KUBELET_IFACE=False
-
-      enable_service docker
-      enable_service etcd3
-      enable_service kubernetes-api
-      enable_service kubernetes-controller-manager
-      enable_service kubernetes-scheduler
-      enable_service kubelet
-      enable_service kuryr-kubernetes
-      enable_service kuryr-daemon
-
-      [[post-config|$KURYR_CONF]]
-      [nested_dpdk]
-      dpdk_driver = uio_pci_generic
-
-#. Stack:
-
-   .. code-block:: bash
-
-      cd ~/devstack
-      ./stack.sh
-
-#. Install the CNI plugins:
-
-   .. code-block:: bash
-
-      wget https://github.com/containernetworking/plugins/releases/download/v0.6.0/cni-plugins-amd64-v0.6.0.tgz
-      tar xf cni-plugins-amd64-v0.6.0.tgz -C ~/cni/bin/
-
-#. Install Multus CNI using this guide: https://github.com/intel/multus-cni#build
-
-   *Note: Kuryr natively supports multiple VIFs now. A solution without
-   Multus is described in step 13.*
-
-#. Create the Multus CNI configuration file ~/cni/conf/multus-cni.conf:
-
-   .. code-block:: json
-
-      {
-        "name":"multus-demo-network",
-        "type":"multus",
-        "delegates":[
-          {
-            "type":"kuryr-cni",
-            "kuryr_conf":"/etc/kuryr/kuryr.conf",
-            "debug":true
-          },
-          {
-            "type":"macvlan",
-            "master":"ens3",
-            "masterplugin":true,
-            "ipam":{
-              "type":"host-local",
-              "subnet":"10.0.0.0/24"
-            }
-          }
-        ]
-      }
-
-#. Create a directory to store the PCI devices used by a container:
-
-   .. code-block:: bash
-
-      mkdir /var/pci_address
-
-#. If you do not use Multus CNI as the tool providing multiple interfaces in
-   a container, but use some multi-VIF driver instead, then change the Kuryr
-   configuration file /etc/kuryr/kuryr.conf:
-
-   .. code-block:: ini
-
-      [kubernetes]
-      pod_vif_driver = nested-vlan
-      multi_vif_drivers = npwg_multiple_interfaces
-      [vif_pool]
-      vif_pool_mapping = nested-vlan:nested,nested-dpdk:noop
-
-#. Also prepare and apply a network attachment definition, for example:
-
-   .. code-block:: yaml
-
-      apiVersion: "k8s.cni.cncf.io/v1"
-      kind: NetworkAttachmentDefinition
-      metadata:
-        name: "net-nested-dpdk"
-        annotations:
-          openstack.org/kuryr-config: '{
-            "subnetId": "",
-            "driverType": "nested-dpdk"
-            }'
-
-#. Reload the systemd services:
-
-   .. code-block:: bash
-
-      sudo systemctl daemon-reload
-
-#. Restart the systemd services:
-
-   .. code-block:: bash
-
-      sudo systemctl restart devstack@kubelet.service devstack@kuryr-kubernetes.service devstack@kuryr-daemon.service
-
-#. Create a pod specifying the additional interface in the annotations:
-
-   .. code-block:: yaml
-
-      apiVersion: extensions/v1beta1
-      kind: Deployment
-      metadata:
-        name: nginx-nested-dpdk
-      spec:
-        replicas: 1
-        template:
-          metadata:
-            name: nginx-nested-dpdk
-            labels:
-              app: nginx-nested-dpdk
-            annotations:
-              k8s.v1.cni.cncf.io/networks: net-nested-dpdk
-          spec:
-            containers:
-            - name: nginx-nested-dpdk
-              image: nginx
-              resources:
-                requests:
-                  cpu: "1"
-                  memory: "512Mi"
-                limits:
-                  cpu: "1"
-                  memory: "512Mi"
-              volumeMounts:
-              - name: dev
-                mountPath: /dev
-              - name: pci_address
-                mountPath: /var/pci_address
-            volumes:
-            - name: dev
-              hostPath:
-                path: /dev
-                type: Directory
-            - name: pci_address
-              hostPath:
-                path: /var/pci_address
-                type: Directory
-
diff --git a/doc/source/installation/devstack/nested-macvlan.rst b/doc/source/installation/devstack/nested-macvlan.rst
deleted file mode 100644
index dcbe043ae..000000000
--- a/doc/source/installation/devstack/nested-macvlan.rst
+++ /dev/null
@@ -1,53 +0,0 @@
-============================================
-How to try out nested-pods locally (MACVLAN)
-============================================
-
-Following are the instructions for an all-in-one setup, using the
-nested MACVLAN driver rather than VLAN and trunk ports.
-
-#. To install OpenStack services run devstack with
-   ``devstack/local.conf.pod-in-vm.undercloud.sample``.
-#. Launch a Nova VM with MACVLAN support.
-
-   .. todo::
-
-      Add a list of neutron commands required to launch such a VM
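-
-   In the meantime, a rough sketch of one possible approach (all names are
-   made up; disabling port security so the pods' MACVLAN addresses are
-   accepted is an assumption, not a documented requirement):
-
-   .. code-block:: console
-
-      $ openstack port create --network demo-net --disable-port-security vm-port
-      $ openstack server create --flavor m1.large --image ubuntu-20.04 \
-          --port vm-port overcloud-vm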
-
-#. Log into the VM and set up Kubernetes along with Kuryr using devstack:
-
-   - Since the undercloud Neutron will be used by pods, Neutron services
-     should be disabled in localrc.
-   - Run devstack with ``devstack/local.conf.pod-in-vm.overcloud.sample``.
-     Fill in the needed information, such as the subnet pool id to use or the
-     router.
-
-#. Once devstack is done and all services are up inside the VM, the next step
-   is to configure the missing information in ``/etc/kuryr/kuryr.conf``:
-
-   - Configure the worker VMs subnet:
-
-     .. code-block:: ini
-
-        [pod_vif_nested]
-        worker_nodes_subnets =
-
-   - Configure "pod_vif_driver" as "nested-macvlan":
-
-     .. code-block:: ini
-
-        [kubernetes]
-        pod_vif_driver = nested-macvlan
-
-   - Configure the binding section:
-
-     .. code-block:: ini
-
-        [binding]
-        link_iface =
-
-   - Restart kuryr-k8s-controller:
-
-     .. code-block:: console
-
-        $ sudo systemctl restart devstack@kuryr-kubernetes.service
-
-Now launch pods using kubectl; the undercloud Neutron will serve the
-networking.
diff --git a/doc/source/installation/devstack/nested-vlan.rst b/doc/source/installation/devstack/nested-vlan.rst
deleted file mode 100644
index 7c749e5b5..000000000
--- a/doc/source/installation/devstack/nested-vlan.rst
+++ /dev/null
@@ -1,94 +0,0 @@
-=================================================
-How to try out nested-pods locally (VLAN + trunk)
-=================================================
-
-Following are the instructions for an all-in-one setup where Kubernetes will
-also be running inside the same Nova VM in which Kuryr-controller and
-Kuryr-cni will be running. 4 GB of memory and 2 vCPUs is the minimum resource
-requirement for the VM:
-
-#. To install OpenStack services run devstack with
-   ``devstack/local.conf.pod-in-vm.undercloud.sample``. Ensure that the
-   "trunk" service plugin is enabled in ``/etc/neutron/neutron.conf``:
-
-   .. code-block:: ini
-
-      [DEFAULT]
-      service_plugins = neutron.services.l3_router.l3_router_plugin.L3RouterPlugin,neutron.services.trunk.plugin.TrunkPlugin
-
-#. Launch a VM with a `Neutron trunk port`_. The steps to do so are described
-   in `Boot VM with a Trunk Port`_.
-
-#. Inside the VM, install and set up Kubernetes along with Kuryr using
-   devstack:
-
-   - Since the undercloud Neutron will be used by pods, Neutron services
-     should be disabled in localrc.
-   - Run devstack with ``devstack/local.conf.pod-in-vm.overcloud.sample``,
-     but first fill in the needed information:
-
-     - Point to the undercloud deployment by setting:
-
-       .. code-block:: bash
-
-          SERVICE_HOST=UNDERCLOUD_CONTROLLER_IP
-
-     - Fill in the subnetpool id of the undercloud deployment, as well as the
-       router where the new pod and service networks need to be connected:
-
-       .. code-block:: bash
-
-          KURYR_NEUTRON_DEFAULT_SUBNETPOOL_ID=UNDERCLOUD_SUBNETPOOL_V4_ID
-          KURYR_NEUTRON_DEFAULT_ROUTER=router1
-
-     - Ensure the nested-vlan driver is going to be used by setting:
-
-       .. code-block:: bash
-
-          KURYR_POD_VIF_DRIVER=nested-vlan
-
-     - Optionally, the ports pool functionality can be enabled by following:
-       `How to enable ports pool with devstack`_.
-
-     - [OPTIONAL] If you want to enable the subport pools driver and the VIF
-       Pool Manager you need to include:
-
-       .. code-block:: bash
-
-          KURYR_VIF_POOL_MANAGER=True
-
-#. Once devstack is done and all services are up inside the VM, the next step
-   is to configure the missing information in ``/etc/kuryr/kuryr.conf``:
-
-   - Configure the worker VMs subnet:
-
-     .. code-block:: ini
-
-        [pod_vif_nested]
-        worker_nodes_subnets =
-
-   - Configure the binding section:
-
-     .. code-block:: ini
-
-        [binding]
-        driver = kuryr.lib.binding.drivers.vlan
-        link_iface =
-
-   - Restart kuryr-k8s-controller:
-
-     .. code-block:: console
-
-        $ sudo systemctl restart devstack@kuryr-kubernetes.service
-
-   - Restart kuryr-daemon:
-
-     .. code-block:: console
-
-        $ sudo systemctl restart devstack@kuryr-daemon.service
-
-Now launch pods using kubectl; the undercloud Neutron will serve the
-networking.
-
-
-.. _Neutron trunk port: https://wiki.openstack.org/wiki/Neutron/TrunkPort
-.. _Boot VM with a Trunk Port: https://docs.openstack.org/kuryr-kubernetes/latest/installation/trunk_ports.html
-.. _How to enable ports pool with devstack: https://docs.openstack.org/kuryr-kubernetes/latest/installation/devstack/ports-pool.html
diff --git a/doc/source/installation/devstack/ovn-octavia.rst b/doc/source/installation/devstack/ovn-octavia.rst
deleted file mode 100644
index 6982e5d03..000000000
--- a/doc/source/installation/devstack/ovn-octavia.rst
+++ /dev/null
@@ -1,105 +0,0 @@
-=======================================================
-How to enable OVN Octavia provider driver with devstack
-=======================================================
-
-To enable the utilization of OVN as the provider driver for Octavia through
-devstack:
-
-#. You can start with the sample DevStack configuration file for OVN that
-   kuryr-kubernetes comes with:
-
-   .. code-block:: console
-
-      $ curl https://opendev.org/openstack/kuryr-kubernetes/raw/branch/master/devstack/local.conf.sample \
-          -o devstack/local.conf
-
-#. In case you want more Kuryr-specific features than the default handlers
-   provide, more handlers can be enabled. For example, the following enables
-   NetworkPolicies in addition to the default features:
-
-   .. code-block:: bash
-
-      KURYR_ENABLED_HANDLERS=vif,kuryrport,service,endpoints,kuryrloadbalancer,
-      namespace,pod_label,policy,kuryrnetworkpolicy,kuryrnetwork
-
-   Then, the proper subnet drivers need to be set:
-
-   .. code-block:: bash
-
-      KURYR_SG_DRIVER=policy
-      KURYR_SUBNET_DRIVER=namespace
-
-#. Run DevStack:
-
-   .. code-block:: console
-
-      $ ./stack.sh
-
-
-Enabling Kuryr support for OVN Octavia driver via ConfigMap
------------------------------------------------------------
-
-Alternatively, you can enable Kuryr support for the OVN Octavia driver via
-the Kuryr ConfigMap, in case the options are not set in the local.conf file.
-On a DevStack deployment, the Kuryr ConfigMap can be edited using:
-
-.. code-block:: console
-
-   $ kubectl -n kube-system edit cm kuryr-config
-
-The following options need to be set in the ConfigMap:
-
-.. code-block:: bash
-
-   [kubernetes]
-   endpoints_driver_octavia_provider = ovn
-
-   [octavia_defaults]
-   lb_algorithm = SOURCE_IP_PORT
-   enforce_sg_rules = False
-   member_mode = L2
-
-Make sure to keep the indentation correct when making changes. To enforce the
-new settings, you need to restart kuryr-controller by simply killing the
-existing pod. The Deployment controller will make sure to restart the pod
-with the new configuration.
-
-Kuryr automatically handles the recreation of already created services/load
-balancers, so that all of them have the same Octavia provider.
-
-
-Testing ovn-octavia driver support
-----------------------------------
-
-Once the environment is ready, you can test that network connectivity works
-and verify that Kuryr creates the load balancer for the service with the OVN
-provider specified in the ConfigMap. To do that, check out
-:doc:`../testing_connectivity`.
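-
-You can also quickly check which provider a Kuryr-created load balancer ended
-up with; a sketch, assuming the Kubernetes API load balancer is named
-``default/kubernetes`` as in the listings earlier in this guide:
-
-.. code-block:: console
-
-   $ openstack loadbalancer show default/kubernetes -c provider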
-
-You can also manually create a load balancer in OpenStack:
-
-.. code-block:: console
-
-   $ openstack loadbalancer create --vip-network-id public --provider ovn
-   +---------------------+--------------------------------------+
-   | Field               | Value                                |
-   +---------------------+--------------------------------------+
-   | admin_state_up      | True                                 |
-   | availability_zone   | None                                 |
-   | created_at          | 2020-12-09T14:45:08                  |
-   | description         |                                      |
-   | flavor_id           | None                                 |
-   | id                  | 94e7c431-912b-496c-a247-d52875d44ac7 |
-   | listeners           |                                      |
-   | name                |                                      |
-   | operating_status    | OFFLINE                              |
-   | pools               |                                      |
-   | project_id          | af820b57868c4864957d523fb32ccfba     |
-   | provider            | ovn                                  |
-   | provisioning_status | PENDING_CREATE                       |
-   | updated_at          | None                                 |
-   | vip_address         | 172.24.4.9                           |
-   | vip_network_id      | ee97665d-69d0-4995-a275-27855359956a |
-   | vip_port_id         | c98e52d0-5965-4b22-8a17-a374f4399193 |
-   | vip_qos_policy_id   | None                                 |
-   | vip_subnet_id       | 3eed0c05-6527-400e-bb80-df6e59d248f1 |
-   +---------------------+--------------------------------------+
diff --git a/doc/source/installation/devstack/ovn_support.rst b/doc/source/installation/devstack/ovn_support.rst
deleted file mode 100644
index c66e5dd9b..000000000
--- a/doc/source/installation/devstack/ovn_support.rst
+++ /dev/null
@@ -1,190 +0,0 @@
-================================
-Kuryr Kubernetes OVN Integration
-================================
-
-OVN provides virtual networking for Open vSwitch and is a component of the
-Open vSwitch project.
-
-OpenStack can use OVN as its network management provider through the Modular
-Layer 2 (ML2) north-bound plug-in.
-
-Integrating OVN allows Kuryr to bridge (both baremetal and nested) container
-and VM networking in an OVN-based OpenStack deployment.
-
-
-Testing with DevStack
----------------------
-
-The next points describe how to test OpenStack with OVN using DevStack.
-We will start by describing how to test the baremetal case on a single host,
-and then cover a nested environment where containers are created inside VMs.
-
-
-Single Node Test Environment
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-#. Create a test system.
-
-   It's best to use a throwaway dev system for running DevStack. Your best
-   bet is to use the latest Ubuntu LTS (20.04, Focal).
-
-#. Optionally create the ``stack`` user. You'll need a user account with
-   passwordless ``sudo``:
-
-   .. code-block:: console
-
-      $ git clone https://opendev.org/openstack-dev/devstack.git
-      $ sudo ./devstack/tools/create-stack-user.sh
-      $ sudo su - stack
-
-#. Clone DevStack:
-
-   .. code-block:: console
-
-      $ sudo su - stack
-      $ git clone https://opendev.org/openstack-dev/devstack.git
-      $ git clone https://opendev.org/openstack/kuryr-kubernetes.git
-
-#. Configure DevStack to use OVN.
-
-   kuryr-kubernetes comes with a sample DevStack configuration file for OVN
-   you can start with. For example, you may want to set some values for the
-   various PASSWORD variables in that file, or change the LBaaS service
-   provider to use. Feel free to edit it if you'd like, but it should work
-   as-is:
-
-   .. code-block:: console
-
-      $ curl https://opendev.org/openstack/kuryr-kubernetes/raw/branch/master/devstack/local.conf.sample \
-          -o devstack/local.conf
-
-   Note that because OVN compiles OVS from source, with its runtime directory
-   at /usr/local/var/run/openvswitch, we need to state in the local.conf that
-   the path is different from the default one (i.e., /var/run/openvswitch).
-
-   Optionally, the ports pool functionality can be enabled by following:
-   :doc:`./ports-pool`
-
-   .. note::
-
-      Kuryr-kubernetes uses OVN by default.
-
-#. Run DevStack.
-
-   This is going to take a while. It installs a bunch of packages, clones a
-   bunch of git repos, and installs everything from these git repos.
-
-   .. code-block:: console
-
-      $ devstack/stack.sh
-
-   Once DevStack completes successfully, you should see output that looks
-   something like this:
-
-   .. code-block:: console
-
-      This is your host IP address: 192.168.5.10
-      This is your host IPv6 address: ::1
-      Keystone is serving at http://192.168.5.10/identity/
-      The default users are: admin and demo
-      The password: pass
-
-#. Extra configurations.
-
-   DevStack does not wire up the public network by default, so we must do
-   some extra steps for floating IP usage as well as external connectivity:
-
-   .. code-block:: console
-
-      $ sudo ip link set br-ex up
-      $ sudo ip route add 172.24.4.0/24 dev br-ex
-      $ sudo ip addr add 172.24.4.1/24 dev br-ex
-
-   Then you can create forwarding and NAT rules that will cause "external"
-   traffic from your instances to get rewritten to your network controller's
-   ip address and sent out on the network:
-
-   .. code-block:: console
-
-      $ sudo iptables -A FORWARD -d 172.24.4.0/24 -j ACCEPT
-      $ sudo iptables -A FORWARD -s 172.24.4.0/24 -j ACCEPT
-      $ sudo iptables -t nat -I POSTROUTING 1 -s 172.24.4.1/24 -j MASQUERADE
-
-
-Inspect default Configuration
-+++++++++++++++++++++++++++++
-
-In order to check the default configuration, in terms of networks, subnets,
-security groups and load balancers created upon a successful DevStack
-stacking, you can check :doc:`../default_configuration`.
-
-Testing Network Connectivity
-++++++++++++++++++++++++++++
-
-Once the environment is ready, we can test that network connectivity works
-among pods. To do that, check out :doc:`../testing_connectivity`.
-
-
-Nested Containers Test Environment (VLAN)
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-Another deployment option is the nested-vlan one, where containers are
-created inside OpenStack VMs by using the Trunk ports support.
-Thus, first we need to deploy an undercloud DevStack environment with the
-needed components to create VMs (e.g., Glance, Nova, Neutron, Keystone, ...),
-as well as the needed OVN configuration, such as enabling the trunk support
-that will be needed for the VM, and then install the overcloud deployment
-inside the VM with the Kuryr components.
-
-
-Undercloud deployment
-+++++++++++++++++++++
-
-The steps to deploy the undercloud environment are the same as described
-above for the `Single Node Test Environment`, the difference being the sample
-local.conf to use (step 4), in this case:
-
-.. code-block:: console
-
-   $ curl https://opendev.org/openstack/kuryr-kubernetes/raw/branch/master/devstack/local.conf.pod-in-vm.undercloud.ovn.sample \
-       -o devstack/local.conf
-
-The main differences from the default OVN local.conf sample are:
-
-- There is no need to enable the kuryr-kubernetes plugin, as this will be
-  installed inside the VM (overcloud).
-- There is no need to enable the Kuryr-related services, as they will also be
-  installed inside the VM: kuryr-kubernetes, kubelet, kubernetes-api,
-  kubernetes-controller-manager and kubernetes-scheduler.
-- The Nova and Glance components need to be enabled, to be able to create the
-  VM where we will install the overcloud.
-- The OVN Trunk service plugin needs to be enabled, to ensure trunk ports
-  support.
-
-Once the undercloud deployment has finished, the next step is to create the
-overcloud VM by using a parent port of a trunk, so that containers can be
-created inside with their own networks. To do that, follow the steps detailed
-at :doc:`../trunk_ports`.
-
-
-Overcloud deployment
-++++++++++++++++++++
-
-Once the VM is up and running, we can start with the overcloud configuration.
-The steps to perform are the same as without OVN integration, i.e., the same
-steps as for ML2/OVS:
-
-#. Log into the VM:
-
-   .. code-block:: console
-
-      $ ssh -i id_rsa_demo ubuntu@FLOATING_IP
-
-#. Deploy devstack following steps 3 and 4 detailed at :doc:`./nested-vlan`.
-
-
-Testing Nested Network Connectivity
-+++++++++++++++++++++++++++++++++++
-
-Similarly to the baremetal testing, we can create a demo deployment in the
-overcloud VM, scale it to any number of pods and expose the service to check
-if the deployment was successful. To do that, check out
-:doc:`../testing_nested_connectivity`.
diff --git a/doc/source/installation/devstack/ports-pool.rst b/doc/source/installation/devstack/ports-pool.rst
deleted file mode 100644
index f2cd53a15..000000000
--- a/doc/source/installation/devstack/ports-pool.rst
+++ /dev/null
@@ -1,40 +0,0 @@
-======================================
-How to enable ports pool with devstack
-======================================
-
-To enable the utilization of the ports pool feature through devstack, the
-following options need to be set in the local.conf file:
-
-#. First, you need to enable the pools by setting:
-
-   .. code-block:: bash
-
-      KURYR_USE_PORT_POOLS=True
-
-#. Then, the proper pool driver needs to be set. This means that for the
-   baremetal case you need to ensure the pod vif driver and the vif pool
-   driver are set to the right baremetal drivers, for instance:
-
-   .. code-block:: bash
-
-      KURYR_POD_VIF_DRIVER=neutron-vif
-      KURYR_VIF_POOL_DRIVER=neutron
-
-   And if the use case is the nested one, then they should be set to:
-
-   .. code-block:: bash
-
-      KURYR_POD_VIF_DRIVER=nested-vlan
-      KURYR_VIF_POOL_DRIVER=nested
-
-#. Then, in case you want to set a limit on the maximum number of ports,
-   increase or reduce the default minimum number, or modify how the pools are
-   repopulated, both in time and regarding bulk operation sizes, the
-   following options can be included and modified accordingly:
-
-   .. code-block:: bash
-
-      KURYR_PORT_POOL_MIN=5
-      KURYR_PORT_POOL_MAX=0
-      KURYR_PORT_POOL_BATCH=10
-      KURYR_PORT_POOL_UPDATE_FREQ=20
diff --git a/doc/source/installation/https_kubernetes.rst b/doc/source/installation/https_kubernetes.rst
deleted file mode 100644
index 7c8c84e93..000000000
--- a/doc/source/installation/https_kubernetes.rst
+++ /dev/null
@@ -1,29 +0,0 @@
-=========================================
-Watching Kubernetes api-server over HTTPS
-=========================================
-
-Add the absolute paths of the client-side cert file and key file for the
-Kubernetes server in ``kuryr.conf``:
-
-.. code-block:: ini
-
-   [kubernetes]
-   api_root = https://your_server_address:server_ssl_port
-   ssl_client_crt_file =
-   ssl_client_key_file =
-
-If server SSL certificate verification is also to be enabled, add the
-absolute path to the CA cert:
-
-.. code-block:: ini
-
-   [kubernetes]
-   ssl_ca_crt_file =
-   ssl_verify_server_crt = True
-
-If you want to query the HTTPS Kubernetes api-server in ``--insecure`` mode:
-
-.. code-block:: ini
-
-   [kubernetes]
-   ssl_verify_server_crt = False
diff --git a/doc/source/installation/index.rst b/doc/source/installation/index.rst
deleted file mode 100644
index 6cb0cd6ef..000000000
--- a/doc/source/installation/index.rst
+++ /dev/null
@@ -1,49 +0,0 @@
-..
-      Licensed under the Apache License, Version 2.0 (the "License"); you may
-      not use this file except in compliance with the License. You may obtain
-      a copy of the License at
-
-          http://www.apache.org/licenses/LICENSE-2.0
-
-      Unless required by applicable law or agreed to in writing, software
-      distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-      WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-      License for the specific language governing permissions and limitations
-      under the License.
-
-      Convention for heading levels in Neutron devref:
-      =======  Heading 0 (reserved for the title in a document)
-      -------  Heading 1
-      ~~~~~~~  Heading 2
-      +++++++  Heading 3
-      '''''''  Heading 4
-      (Avoid deeper levels because they do not render well.)
-
-
-Installation
-============
-
-This section describes how you can install and configure kuryr-kubernetes.
-
-.. toctree::
-   :maxdepth: 2
-
-   manual
-   https_kubernetes
-   ports-pool
-   services
-   ipv6
-   upgrades
-   devstack/index
-   default_configuration
-   trunk_ports
-   network_namespace
-   network_policy
-   testing_connectivity
-   testing_nested_connectivity
-   containerized
-   multi_vif_with_npwg_spec
-   testing_udp_services
-   testing_sctp_services
-   listener_timeouts
-   multiple_tenants
diff --git a/doc/source/installation/ipv6.rst b/doc/source/installation/ipv6.rst
deleted file mode 100644
index aea52f6ba..000000000
--- a/doc/source/installation/ipv6.rst
+++ /dev/null
@@ -1,265 +0,0 @@
-===============
-IPv6 networking
-===============
-
-Kuryr Kubernetes can be used with IPv6 networking. In this guide we'll show
-how you can create the Neutron resources and configure Kubernetes and
-Kuryr-Kubernetes to achieve an IPv6-only Kubernetes cluster.
-
-
-Setting it up
--------------
-
-#. Create the pods network:
-
-   .. 
code-block:: console - - $ openstack network create pods - +---------------------------+--------------------------------------+ - | Field | Value | - +---------------------------+--------------------------------------+ - | admin_state_up | UP | - | availability_zone_hints | | - | availability_zones | | - | created_at | 2017-08-11T10:51:25Z | - | description | | - | dns_domain | None | - | id | 4593045c-4233-4b4c-8527-35608ab0eaae | - | ipv4_address_scope | None | - | ipv6_address_scope | None | - | is_default | False | - | is_vlan_transparent | None | - | mtu | 1450 | - | name | pods | - | port_security_enabled | True | - | project_id | 90baf12877ba49a786419b2cacc2c954 | - | provider:network_type | vxlan | - | provider:physical_network | None | - | provider:segmentation_id | 21 | - | qos_policy_id | None | - | revision_number | 2 | - | router:external | Internal | - | segments | None | - | shared | False | - | status | ACTIVE | - | subnets | | - | tags | [] | - | updated_at | 2017-08-11T10:51:25Z | - +---------------------------+--------------------------------------+ - -#. Create the pod subnet: - - .. code-block:: console - - $ openstack subnet create --network pods --no-dhcp \ - --subnet-range fd10:0:0:1::/64 \ - --ip-version 6 \ - pod_subnet - +-------------------------+-------------------------------------------+ - | Field | Value | - +-------------------------+-------------------------------------------+ - | allocation_pools | fd10:0:0:1::2-fd10::1:ffff:ffff:ffff:ffff | - | cidr | fd10:0:0:1::/64 | - | created_at | 2017-08-11T17:02:20Z | - | description | | - | dns_nameservers | | - | enable_dhcp | False | - | gateway_ip | fd10:0:0:1::1 | - | host_routes | | - | id | eef12d65-4d02-4344-b255-295f9adfd4e9 | - | ip_version | 6 | - | ipv6_address_mode | None | - | ipv6_ra_mode | None | - | name | pod_subnet | - | network_id | 4593045c-4233-4b4c-8527-35608ab0eaae | - | project_id | 90baf12877ba49a786419b2cacc2c954 | - | revision_number | 0 | - | segment_id | None | - | service_types | | - | subnetpool_id | None | - | tags | [] | - | updated_at | 2017-08-11T17:02:20Z | - | use_default_subnet_pool | None | - +-------------------------+-------------------------------------------+ - - -#. Create services network: - - .. code-block:: console - - $ openstack network create services - +---------------------------+--------------------------------------+ - | Field | Value | - +---------------------------+--------------------------------------+ - | admin_state_up | UP | - | availability_zone_hints | | - | availability_zones | | - | created_at | 2017-08-11T10:53:36Z | - | description | | - | dns_domain | None | - | id | 560df0c2-537c-41c0-b22c-40ef3d752574 | - | ipv4_address_scope | None | - | ipv6_address_scope | None | - | is_default | False | - | is_vlan_transparent | None | - | mtu | 1450 | - | name | services | - | port_security_enabled | True | - | project_id | 90baf12877ba49a786419b2cacc2c954 | - | provider:network_type | vxlan | - | provider:physical_network | None | - | provider:segmentation_id | 94 | - | qos_policy_id | None | - | revision_number | 2 | - | router:external | Internal | - | segments | None | - | shared | False | - | status | ACTIVE | - | subnets | | - | tags | [] | - | updated_at | 2017-08-11T10:53:37Z | - +---------------------------+--------------------------------------+ - -#. Create services subnet. We reserve the first half of the subnet range for the - VIPs and the second half for the loadbalancer vrrp ports. - - .. 
code-block:: console - - $ openstack subnet create --network services --no-dhcp \ - --gateway fd10:0:0:2:0:0:0:fffe \ - --ip-version 6 \ - --allocation-pool start=fd10:0:0:2:0:0:0:8000,end=fd10:0:0:2:0:0:0:fffd \ - --subnet-range fd10:0:0:2::/112 \ - service_subnet - +-------------------------+--------------------------------------+ - | Field | Value | - +-------------------------+--------------------------------------+ - | allocation_pools | fd10:0:0:2::8000-fd10:0:0:2::fffd | - | cidr | fd10:0:0:2::/112 | - | created_at | 2017-08-14T19:08:34Z | - | description | | - | dns_nameservers | | - | enable_dhcp | False | - | gateway_ip | fd10:0:0:2::fffe | - | host_routes | | - | id | 3c53ff94-40e2-4399-bc45-6e210f1e8064 | - | ip_version | 6 | - | ipv6_address_mode | None | - | ipv6_ra_mode | None | - | name | service_subnet | - | network_id | 560df0c2-537c-41c0-b22c-40ef3d752574 | - | project_id | 90baf12877ba49a786419b2cacc2c954 | - | revision_number | 0 | - | segment_id | None | - | service_types | | - | subnetpool_id | None | - | tags | [] | - | updated_at | 2017-08-14T19:08:34Z | - | use_default_subnet_pool | None | - +-------------------------+--------------------------------------+ - -#. Create a router: - - .. code-block:: console - - $ openstack router create k8s-ipv6 - +-------------------------+--------------------------------------+ - | Field | Value | - +-------------------------+--------------------------------------+ - | admin_state_up | UP | - | availability_zone_hints | | - | availability_zones | | - | created_at | 2017-08-11T13:17:10Z | - | description | | - | distributed | False | - | external_gateway_info | None | - | flavor_id | None | - | ha | False | - | id | f802a968-2f83-4006-80cb-5070415f69bf | - | name | k8s-ipv6 | - | project_id | 90baf12877ba49a786419b2cacc2c954 | - | revision_number | None | - | routes | | - | status | ACTIVE | - | tags | [] | - | updated_at | 2017-08-11T13:17:10Z | - +-------------------------+--------------------------------------+ - -#. Add the router to the pod subnet: - - .. code-block:: console - - $ openstack router add subnet k8s-ipv6 pod_subnet - -#. Add the router to the service subnet: - - .. code-block:: console - - $ openstack router add subnet k8s-ipv6 service_subnet - -#. Modify Kubernetes API server command line so that it points to the right - CIDR: - - .. code-block:: console - - --service-cluster-ip-range=fd10:0:0:2::/113 - - Note that it is /113 because the other half of the /112 will be used by the - Octavia LB vrrp ports. - -#. Follow the :ref:`k8s_lb_reachable` guide but using IPv6 addresses instead - for the host Kubernetes API. You should also make sure that the Kubernetes - API server binds on the IPv6 address of the host. - - -Troubleshooting ---------------- - -* **Pods can talk to each other with IPv6 but they can't talk to services.** - - This means that most likely you forgot to create a security group or rule - for the pods to be accessible by the service CIDR. You can find an example - here: - - .. 
code-block:: console - - $ openstack security group create service_pod_access_v6 - +-----------------+-------------------------------------------------------------------------------------------------------------------------------------------------------+ - | Field | Value | - +-----------------+-------------------------------------------------------------------------------------------------------------------------------------------------------+ - | created_at | 2017-08-16T10:01:45Z | - | description | service_pod_access_v6 | - | id | f0b6f0bd-40f7-4ab6-a77b-3cf9f7cc28ac | - | name | service_pod_access_v6 | - | project_id | 90baf12877ba49a786419b2cacc2c954 | - | revision_number | 2 | - | rules | created_at='2017-08-16T10:01:45Z', direction='egress', ethertype='IPv4', id='bd759b4f-c0f5-4cff-a30a-3cd8544d2822', updated_at='2017-08-16T10:01:45Z' | - | | created_at='2017-08-16T10:01:45Z', direction='egress', ethertype='IPv6', id='c89c3f3e-a326-4902-ba26-5315e2d95320', updated_at='2017-08-16T10:01:45Z' | - | updated_at | 2017-08-16T10:01:45Z | - +-----------------+-------------------------------------------------------------------------------------------------------------------------------------------------------+ - - $ openstack security group rule create --remote-ip fd10:0:0:2::/112 \ - --ethertype IPv6 f0b6f0bd-40f7-4ab6-a77b-3cf9f7cc28ac - +-------------------+--------------------------------------+ - | Field | Value | - +-------------------+--------------------------------------+ - | created_at | 2017-08-16T10:04:57Z | - | description | | - | direction | ingress | - | ether_type | IPv6 | - | id | cface77f-666f-4a4c-8a15-a9c6953acf08 | - | name | None | - | port_range_max | None | - | port_range_min | None | - | project_id | 90baf12877ba49a786419b2cacc2c954 | - | protocol | tcp | - | remote_group_id | None | - | remote_ip_prefix | fd10:0:0:2::/112 | - | revision_number | 0 | - | security_group_id | f0b6f0bd-40f7-4ab6-a77b-3cf9f7cc28ac | - | updated_at | 2017-08-16T10:04:57Z | - +-------------------+--------------------------------------+ - - Then remember to add the new security groups to the comma-separated - *pod_security_groups* setting in the section *[neutron_defaults]* of - /etc/kuryr/kuryr.conf diff --git a/doc/source/installation/listener_timeouts.rst b/doc/source/installation/listener_timeouts.rst deleted file mode 100644 index d43bdbb1c..000000000 --- a/doc/source/installation/listener_timeouts.rst +++ /dev/null @@ -1,68 +0,0 @@ -===================================================== -How to configure Listener timeouts for Load Balancers -===================================================== - -By default, Kuryr uses the default Octavia timeout-client-data and -timeout-member-data values when creating or modifying loadbalancers. -To change the timeout values used in creating or modifying a particular -service: - -Set the new timeout values for openstack.org/kuryr-timeout-client-data and -openstack.org/kuryr-timeout-member-data in the service annotation as seen in -the service manifest below. This specification sets the timeout-client-data -and the timeout-member-data to '70000' and '75000' respectively. - -.. code-block:: yaml - - apiVersion: v1 - kind: Service - metadata: - name: kuryr-demo - annotations: - openstack.org/kuryr-timeout-client-data: '70000' - openstack.org/kuryr-timeout-member-data: '75000' - spec: - selector: - app: server - ports: - - protocol: TCP - port: 80 - targetPort: 8080 - -.. 
note::
-
-   The listener timeout values can be reset to the defaults by removing the
-   service annotations for the timeout values.
-
-Setting the timeouts via ConfigMap
-----------------------------------
-
-Alternatively, you can change the value of the timeout-client-data and/or the
-timeout-member-data in the Kuryr ConfigMap. This option is ideal if the new
-timeout values will be used for multiple load balancers. On a DevStack
-deployment, the Kuryr ConfigMap can be edited using:
-
-.. code-block:: console
-
-   $ kubectl -n kube-system edit cm kuryr-config
-
-The listener timeouts then need to be changed in the ConfigMap:
-
-.. code-block:: bash
-
-   [octavia_defaults]
-   timeout_member_data = 0
-   timeout_client_data = 0
-
-Another option is to set the listener timeouts in the local.conf file:
-
-.. code-block:: bash
-
-   #KURYR_TIMEOUT_CLIENT_DATA=0
-   #KURYR_TIMEOUT_MEMBER_DATA=0
-
-.. note::
-
-   The listener timeout values set via the ConfigMap or the local.conf can be
-   overridden by values set in the service annotations for a particular
-   service.
diff --git a/doc/source/installation/manual.rst b/doc/source/installation/manual.rst
deleted file mode 100644
index 2fc659324..000000000
--- a/doc/source/installation/manual.rst
+++ /dev/null
@@ -1,332 +0,0 @@
-====================================
-Installing kuryr-kubernetes manually
-====================================
-
-Configure kuryr-k8s-controller
-------------------------------
-
-Install ``kuryr-k8s-controller`` in a virtualenv:
-
-.. code-block:: console
-
-   $ mkdir kuryr-k8s-controller
-   $ cd kuryr-k8s-controller
-   $ python3 -m venv env
-   $ git clone https://opendev.org/openstack/kuryr-kubernetes
-   $ . env/bin/activate
-   $ pip install -e kuryr-kubernetes
-
-In Neutron or in Horizon, create a subnet for pods, a subnet for services and
-a security group for pods. You may use existing ones if you like. In case you
-decide to create new networks and subnets with the CLI, you can follow the
-services guide, specifically its :ref:`k8s_default_configuration` section.
-
-Create ``/etc/kuryr/kuryr.conf``:
-
-.. code-block:: console
-
-   $ cd kuryr-kubernetes
-   $ ./tools/generate_config_file_samples.sh
-   $ sudo mkdir /etc/kuryr
-   $ sudo cp etc/kuryr.conf.sample /etc/kuryr/kuryr.conf
-
-Edit ``kuryr.conf``:
-
-.. code-block:: ini
-
-   [DEFAULT]
-   use_stderr = true
-   bindir = {path_to_env}/libexec/kuryr
-
-   [kubernetes]
-   api_root = http://{ip_of_kubernetes_apiserver}:8080
-   ssl_client_crt_file = {path-to-kuryr-k8s-user-cert-file}
-   ssl_client_key_file = {path-to-kuryr-k8s-user-key-file}
-   ssl_ca_crt_file = {path-to-k8s-api-ca-cert-file}
-
-   [neutron]
-   auth_url = http://127.0.0.1:35357/v3/
-   username = admin
-   user_domain_name = Default
-   password = ADMIN_PASSWORD
-   project_name = service
-   project_domain_name = Default
-   auth_type = password
-
-   [neutron_defaults]
-   ovs_bridge = br-int
-   pod_security_groups = {id_of_security_group_for_pods}
-   pod_subnet = {id_of_subnet_for_pods}
-   project = {id_of_project}
-   service_subnet = {id_of_subnet_for_k8s_services}
-
-.. note::
-
-   If you want Kuryr to connect to Kubernetes through an unauthenticated
-   endpoint, make sure to set ``[kubernetes]ssl_ca_crt_file`` and
-   ``[kubernetes]token_file`` to ``""``, as they default to the locations
-   where Kubernetes puts those files for pods. Also don't set
-   ``[kubernetes]ssl_client_crt_file`` and ``[kubernetes]ssl_client_key_file``.
-
-   If you use tokens to authenticate, use ``[kubernetes]token_file`` to
-   specify a file containing it.
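-
-   For instance, a sketch pointing at the standard in-pod service account
-   token location (only meaningful when kuryr-controller itself runs as a
-   pod):
-
-   .. code-block:: ini
-
-      [kubernetes]
-      token_file = /var/run/secrets/kubernetes.io/serviceaccount/token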
.. note::

   If your Kubernetes cluster has RBAC enabled, make sure the Kuryr user has
   access to required resources:

   .. code-block:: yaml

      rules:
      - apiGroups:
        - ""
        verbs: ["*"]
        resources:
        - endpoints
        - pods
        - nodes
        - services
        - services/status
        - namespaces
      - apiGroups:
        - openstack.org
        verbs: ["*"]
        resources:
        - kuryrnetworks
        - kuryrnetworkpolicies
        - kuryrloadbalancers
      - apiGroups: ["networking.k8s.io"]
        resources:
        - networkpolicies
        verbs:
        - get
        - list
        - watch
        - update
        - patch
      - apiGroups: ["k8s.cni.cncf.io"]
        resources:
        - network-attachment-definitions
        verbs:
        - get

   You can generate a ``ServiceAccount`` definition with the correct
   ``ClusterRole`` using the instructions on the :ref:`containerized-generate`
   page.

Note that the service_subnet and the pod_subnet *should be routable* and that
the pods should allow service subnet access.

Octavia supports two ways of performing the load balancing between the
Kubernetes load balancers and their members:

* Layer2: Octavia, apart from the VIP port in the services subnet, creates a
  Neutron port in the subnet of each of the members. This way the traffic from
  the Service HAProxy to the members will not go through the router again; it
  will only have gone through the router to reach the service.
* Layer3: Octavia only creates the VIP port. The traffic from the service VIP
  to the members will go back through the router to reach the pod subnet. It
  is important to note that this has some performance impact, depending on the
  SDN.

To support the L3 mode (both for Octavia and for the deprecated
Neutron-LBaaSv2):

* There should be a router between the two subnets.
* The pod_security_groups setting should include a security group with a rule
  granting access to all the CIDR of the service subnet, e.g.:

  .. code-block:: console

     $ openstack security group create --project k8s_cluster_project \
         service_pod_access_sg
     $ openstack security group rule create --project k8s_cluster_project \
         --remote-ip cidr_of_service_subnet --ethertype IPv4 --protocol tcp \
         service_pod_access_sg

* The UUID of this security group should be added to the comma-separated
  *pod_security_groups* list in *[neutron_defaults]*.

Alternatively, to support the Octavia L2 mode:

* The pod_security_groups setting should include a security group with a rule
  granting access to all the CIDR of the pod subnet, e.g.:

  .. code-block:: console

     $ openstack security group create --project k8s_cluster_project \
         octavia_pod_access_sg
     $ openstack security group rule create --project k8s_cluster_project \
         --remote-ip cidr_of_pod_subnet --ethertype IPv4 --protocol tcp \
         octavia_pod_access_sg

* The UUID of this security group should be added to the comma-separated
  *pod_security_groups* list in *[neutron_defaults]*.

Run kuryr-k8s-controller:

.. code-block:: console

   $ kuryr-k8s-controller --config-file /etc/kuryr/kuryr.conf -d

Alternatively you may run it in screen:

.. code-block:: console

   $ screen -dm kuryr-k8s-controller --config-file /etc/kuryr/kuryr.conf -d
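For a more permanent setup you may prefer to run the controller under systemd
instead of screen. A minimal unit file sketch could look like this; the
``ExecStart`` path assumes the virtualenv location used above and is purely
illustrative:

.. code-block:: ini

   # /etc/systemd/system/kuryr-controller.service (illustrative)
   [Unit]
   Description=Kuryr-Kubernetes controller
   After=network-online.target

   [Service]
   # Adjust to wherever kuryr-k8s-controller is installed in your environment.
   ExecStart={path_to_env}/bin/kuryr-k8s-controller --config-file /etc/kuryr/kuryr.conf
   Restart=on-failure

   [Install]
   WantedBy=multi-user.target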
Configure kuryr-cni
-------------------

On every Kubernetes minion node (and on the master if you intend to run
containers there) you need to configure kuryr-cni.

Install ``kuryr-cni`` in a virtualenv:

.. code-block:: console

   $ mkdir kuryr-k8s-cni
   $ cd kuryr-k8s-cni
   $ virtualenv env
   $ . env/bin/activate
   $ git clone https://opendev.org/openstack/kuryr-kubernetes
   $ pip install -e kuryr-kubernetes

Create ``/etc/kuryr/kuryr.conf``:

.. code-block:: console

   $ cd kuryr-kubernetes
   $ ./tools/generate_config_file_samples.sh
   $ cp etc/kuryr.conf.sample /etc/kuryr/kuryr.conf

Edit ``kuryr.conf``:

.. code-block:: ini

   [DEFAULT]
   use_stderr = true
   bindir = {path_to_env}/libexec/kuryr

   [kubernetes]
   api_root = http://{ip_of_kubernetes_apiserver}:8080

Link the CNI binary to the CNI directory, where kubelet will look for it:

.. code-block:: console

   $ mkdir -p /opt/cni/bin
   $ ln -s $(which kuryr-cni) /opt/cni/bin/

Create the CNI config file for kuryr-cni: ``/etc/cni/net.d/10-kuryr.conflist``.
Kubelet will only use the lexicographically first file in that directory, so
make sure that it is kuryr's config file:

.. code-block:: json

   {
     "name": "kuryr",
     "cniVersion": "0.3.1",
     "plugins": [
       {
         "type": "kuryr-cni",
         "kuryr_conf": "/etc/kuryr/kuryr.conf",
         "debug": true
       }
     ]
   }

Install the ``os-vif`` and ``oslo.privsep`` libraries globally. These modules
are used to plug interfaces and are run with raised privileges. ``os-vif``
uses ``sudo`` to raise privileges, so they need to be installed globally to
work correctly:

.. code-block:: console

   $ deactivate
   $ sudo pip install 'oslo.privsep>=1.20.0' 'os-vif>=1.5.0'


Configure Kuryr CNI Daemon
--------------------------

Kuryr CNI Daemon is a service designed to increase the scalability of the
Kuryr operations done on Kubernetes nodes. More information can be found on
the :ref:`cni-daemon` page.

Kuryr CNI Daemon should be installed on every Kubernetes node, so the
following steps need to be repeated on each of them.

.. note::

   You can tweak the configuration of some timeouts to match your environment.
   It's crucial for the scalability of the whole deployment. In general the
   timeout to serve a CNI request from kubelet to Kuryr is 180 seconds. After
   that time kubelet will retry the request. Additionally there are two
   configuration options:

   .. code-block:: ini

      [cni_daemon]
      vif_annotation_timeout=60
      pyroute2_timeout=10

   ``vif_annotation_timeout`` is the time the Kuryr CNI Daemon will wait for
   the Kuryr Controller to create a port in Neutron and add information about
   it to the Pod's metadata. If either Neutron or the Kuryr Controller doesn't
   keep up with a high number of requests, it's advised to increase this
   timeout. Please note that increasing it over 180 seconds will not have any
   effect, as the request will time out anyway and will be retried (which is
   safe).

   ``pyroute2_timeout`` is the internal timeout of the pyroute2 library, which
   is responsible for doing modifications to the Linux kernel networking stack
   (e.g. moving interfaces to the Pod's namespace, adding routes and ports or
   assigning addresses to interfaces). When serving a lot of ADD/DEL CNI
   requests on a regular basis, it's advised to increase that timeout. Please
   note that the value denotes the *maximum* time to wait for the kernel to
   complete the operations. If an operation succeeds earlier, the request
   isn't delayed.

Run kuryr-daemon:

.. code-block:: console

   $ kuryr-daemon --config-file /etc/kuryr/kuryr.conf -d

Alternatively you may run it in screen:

.. code-block:: console

   $ screen -dm kuryr-daemon --config-file /etc/kuryr/kuryr.conf -d
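Once the daemon is up you can quickly check that it responds; this is a sketch
assuming the CNI health server described in the next section is enabled on its
default port (8090) and exposes the liveness and readiness endpoints. Adjust
the port if your ``[cni_health_server]`` configuration differs:

.. code-block:: console

   $ curl -i http://localhost:8090/ready
   $ curl -i http://localhost:8090/alive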
Kuryr CNI Daemon health checks
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

The CNI daemon health checks allow the deployer or the orchestration layer
(like for example Kubernetes or OpenShift) to probe the CNI daemon for
liveness and readiness.

If you want to make use of all of its facilities, you should run the
kuryr-daemon in its own cgroup. It will get its own cgroup if you:

* run it as a systemd service,
* run it containerized,
* create a memory cgroup for it.

In order to make the daemon run in its own cgroup, you can do the following:

.. code-block:: console

   systemd-run --unit=kuryr-daemon --scope --slice=kuryr-cni \
       kuryr-daemon --config-file /etc/kuryr/kuryr.conf -d

After this, with the CNI daemon running inside its own cgroup, we can enable
the CNI daemon memory health check. This health check allows us to limit the
memory consumption of the CNI daemon. The health checks will fail if CNI
starts taking more memory than it is allowed, and the orchestration layer
should then restart it. The setting is:

.. code-block:: ini

   [cni_health_server]
   max_memory_usage = 4096  # Set the memory limit to 4 GiB

diff --git a/doc/source/installation/multi_vif_with_npwg_spec.rst b/doc/source/installation/multi_vif_with_npwg_spec.rst
deleted file mode 100644
index 119df604a..000000000
--- a/doc/source/installation/multi_vif_with_npwg_spec.rst
+++ /dev/null
@@ -1,95 +0,0 @@
========================================
Configure Pod with Additional Interfaces
========================================

To create pods with additional interfaces, which follow the `Kubernetes
Network Custom Resource Definition De-facto Standard Version 1`_, the next
steps can be followed:

#. Create the Neutron net/subnets which you want the additional interfaces to
   attach to.

   .. code-block:: console

      $ openstack network create net-a
      $ openstack subnet create subnet-a --subnet-range 192.0.2.0/24 --network net-a

#. Create the 'NetworkAttachmentDefinition' CRD as defined in the NPWG spec.

   .. code-block:: console

      $ cat << EOF > nad.yaml
      apiVersion: apiextensions.k8s.io/v1beta1
      kind: CustomResourceDefinition
      metadata:
        name: network-attachment-definitions.k8s.cni.cncf.io
      spec:
        group: k8s.cni.cncf.io
        version: v1
        scope: Namespaced
        names:
          plural: network-attachment-definitions
          singular: network-attachment-definition
          kind: NetworkAttachmentDefinition
          shortNames:
          - net-attach-def
        validation:
          openAPIV3Schema:
            properties:
              spec:
                properties:
                  config:
                    type: string
      EOF
      $ kubectl apply -f nad.yaml

#. Create a NetworkAttachmentDefinition object with the UUID of the Neutron
   subnet defined in step 1.

   .. code-block:: console

      $ cat << EOF > net-a.yaml
      apiVersion: "k8s.cni.cncf.io/v1"
      kind: NetworkAttachmentDefinition
      metadata:
        name: "net-a"
        annotations:
          openstack.org/kuryr-config: '{
            "subnetId": "uuid-of-neutron-subnet-a"
            }'
      EOF
      $ kubectl apply -f net-a.yaml

#. Enable the multi-vif driver by setting 'multi_vif_drivers' in kuryr.conf,
   then restart kuryr-controller.

   .. code-block:: ini

      [kubernetes]
      multi_vif_drivers = npwg_multiple_interfaces

#. Add the additional interfaces to the pod definition, e.g.:

   .. code-block:: console

      $ cat << EOF > pod.yaml
      apiVersion: v1
      kind: Pod
      metadata:
        name: nginx4
        annotations:
          k8s.v1.cni.cncf.io/networks: net-a
      spec:
        containers:
        - name: nginx
          image: nginx:1.7.9
          ports:
          - containerPort: 80
      EOF
      $ kubectl apply -f pod.yaml

You may put a comma-separated list of networks in the annotation to attach
Pods to more networks.


.. _Kubernetes Network Custom Resource Definition De-facto Standard Version 1: https://docs.google.com/document/d/1Ny03h6IDVy_e_vmElOqR7UdTPAG_RNydhVE1Kx54kFQ/edit?usp=sharing
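For instance, attaching a pod to two extra networks would look like the sketch
below. It assumes a second NetworkAttachmentDefinition named ``net-b`` has
been created the same way as ``net-a``:

.. code-block:: yaml

   apiVersion: v1
   kind: Pod
   metadata:
     name: nginx-multi
     annotations:
       # Comma-separated list of NetworkAttachmentDefinition names.
       k8s.v1.cni.cncf.io/networks: net-a,net-b
   spec:
     containers:
     - name: nginx
       image: nginx:1.7.9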
diff --git a/doc/source/installation/multiple_tenants.rst b/doc/source/installation/multiple_tenants.rst
deleted file mode 100644
index 5f796a32a..000000000
--- a/doc/source/installation/multiple_tenants.rst
+++ /dev/null
@@ -1,98 +0,0 @@
========================
Multiple tenants support
========================


Annotation project driver
-------------------------

We introduced an annotation project driver. With this driver you can specify
an OpenStack project for a Kubernetes namespace, and Kuryr will use that
project id when it creates OpenStack resources (port, subnet, LB, etc.) for
the namespace and for the resources (pod, service, etc.) of the namespace.

Configure kuryr.conf to enable the driver:

.. code-block:: ini

   [kubernetes]
   pod_project_driver = annotation
   service_project_driver = annotation
   namespace_project_driver = annotation
   network_policy_project_driver = annotation


User workflow
~~~~~~~~~~~~~

#. Retrieve your own OpenStack project's id:

   .. code-block:: console

      $ openstack project show test-user
      +-------------+----------------------------------+
      | Field       | Value                            |
      +-------------+----------------------------------+
      | description |                                  |
      | domain_id   | default                          |
      | enabled     | True                             |
      | id          | b5e0a1ae99a34aa0b6a6dad59c95dea7 |
      | is_domain   | False                            |
      | name        | test-user                        |
      | options     | {}                               |
      | parent_id   | default                          |
      | tags        | []                               |
      +-------------+----------------------------------+

#. Create a Kubernetes namespace with the project id.

   The manifest file of the namespace:

   .. code-block:: yaml

      apiVersion: v1
      kind: Namespace
      metadata:
        name: testns
        annotations:
          openstack.org/kuryr-project: b5e0a1ae99a34aa0b6a6dad59c95dea7

   Set the value of the ``openstack.org/kuryr-project`` annotation to your own
   project id.

#. Create a pod in the created namespace:

   .. code-block:: console

      $ kubectl create deployment -n testns --image quay.io/kuryr/demo demo
      deployment.apps/demo created

      $ kubectl -n testns get pod -o wide
      NAME                    READY   STATUS    RESTARTS   AGE     IP          NODE            NOMINATED NODE   READINESS GATES
      demo-6cb99dfd4d-mkjh2   1/1     Running   0          3m15s   10.0.1.76   yjf-dev-kuryr

#. Retrieve the related OpenStack resources:

   ..
code-block:: console - - $ openstack network list --project b5e0a1ae99a34aa0b6a6dad59c95dea7 - +--------------------------------------+---------------+--------------------------------------+ - | ID | Name | Subnets | - +--------------------------------------+---------------+--------------------------------------+ - | f7e3f025-6d03-40db-b6a8-6671b0874646 | ns/testns-net | d9995087-1363-4671-86da-51b4d17712d8 | - +--------------------------------------+---------------+--------------------------------------+ - - $ openstack subnet list --project b5e0a1ae99a34aa0b6a6dad59c95dea7 - +--------------------------------------+------------------+--------------------------------------+--------------+ - | ID | Name | Network | Subnet | - +--------------------------------------+------------------+--------------------------------------+--------------+ - | d9995087-1363-4671-86da-51b4d17712d8 | ns/testns-subnet | f7e3f025-6d03-40db-b6a8-6671b0874646 | 10.0.1.64/26 | - +--------------------------------------+------------------+--------------------------------------+--------------+ - - $ openstack port list --project b5e0a1ae99a34aa0b6a6dad59c95dea7 - +--------------------------------------+------------------------------+-------------------+--------------------------------------------------------------------------+--------+ - | ID | Name | MAC Address | Fixed IP Addresses | Status | - +--------------------------------------+------------------------------+-------------------+--------------------------------------------------------------------------+--------+ - | 1ce9d0b7-de47-40bb-9bc3-2a8e179681b2 | | fa:16:3e:90:2a:a7 | | DOWN | - | abddd00b-383b-4bf2-9b72-0734739e733d | testns/demo-6cb99dfd4d-mkjh2 | fa:16:3e:a4:c0:f7 | ip_address='10.0.1.76', subnet_id='d9995087-1363-4671-86da-51b4d17712d8' | ACTIVE | - +--------------------------------------+------------------------------+-------------------+--------------------------------------------------------------------------+--------+ diff --git a/doc/source/installation/network_namespace.rst b/doc/source/installation/network_namespace.rst deleted file mode 100644 index 75e50a92f..000000000 --- a/doc/source/installation/network_namespace.rst +++ /dev/null @@ -1,164 +0,0 @@ -============================================================= -Enable network per namespace functionality (handler + driver) -============================================================= - -To enable the subnet driver that creates a new network for each new namespace -the next steps are needed: - -#. Enable the namespace handler to reach to namespace events, in this case, - creation and deletion. To do that you need to add it to the list of the - enabled handlers at kuryr.conf (details on how to edit this for - containerized deployment can be found at :doc:`./devstack/containerized`): - - .. code-block:: ini - - [kubernetes] - enabled_handlers=vif,endpoints,service,kuryrloadbalancer,namespace, - kuryrnetwork,kuryrport - - Note that if you also want to enable prepopulation of ports pools upon - creation of first pod on pods network in a namespace, you need to also - add the kuryrnetwork_population handler (more details on :doc:`./ports-pool`): - - .. code-block:: ini - - [kubernetes] - enabled_handlers=vif,endpoints,service,kuryrloadbalancer,namespace, - kuryrnetwork,kuryrport,kuryrnetwork_population - -#. Enable the namespace subnet driver by modifying the default - pod_subnet_driver option at kuryr.conf: - - .. code-block:: ini - - [kubernetes] - pod_subnets_driver = namespace - -#. 
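If you do not already have a subnet pool to use in the next step, you can
create one. This is only an illustrative sketch; the pool prefix, the default
prefix length and the name are examples to adapt to your addressing plan:

.. code-block:: console

   $ openstack subnet pool create --pool-prefix 10.10.0.0/16 \
       --default-prefix-length 26 pods_subnet_pool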
#. Select (and create if needed) the subnet pool from which the new subnets
   will get their CIDR (e.g., the default on a devstack deployment is
   shared-default-subnetpool-v4):

   .. code-block:: ini

      [namespace_subnet]
      pod_subnet_pool = SUBNET_POOL_ID

#. Select (and create if needed) the router where the new subnet will be
   connected (e.g., the default on devstack deployments is router1):

   .. code-block:: ini

      [namespace_subnet]
      pod_router = ROUTER_ID

   Note that if a new router is created, it must ensure the connectivity
   requirements between pod, service and public subnets, as in the case of the
   default subnet driver.

Note you need to restart the kuryr controller after applying the above
detailed steps. For devstack non-containerized deployments:

.. code-block:: console

   $ sudo systemctl restart devstack@kuryr-kubernetes.service

And for containerized deployments:

.. code-block:: console

   $ kubectl -n kube-system get pod | grep kuryr-controller
   $ kubectl -n kube-system delete pod KURYR_CONTROLLER_POD_NAME

For directly enabling the driver when deploying with devstack, you just need
to add the namespace handler and state the namespace subnet driver with:

.. code-block:: console

   KURYR_SUBNET_DRIVER=namespace
   KURYR_ENABLED_HANDLERS=vif,endpoints,service,kuryrloadbalancer,namespace,kuryrnetwork,kuryrport

.. note::

   If the loadbalancer maintains the source IP (such as the ovn-octavia
   driver), there is no need to enforce sg rules at the load balancer level.
   To disable the enforcement, you need to set the following variable:
   KURYR_ENFORCE_SG_RULES=False


Testing the network per namespace functionality
-----------------------------------------------

#. Create two namespaces:

   .. code-block:: console

      $ kubectl create namespace test1
      $ kubectl create namespace test2

#. Check the resources have been created:

   .. code-block:: console

      $ kubectl get namespaces
      NAME        STATUS        AGE
      test1       Active        14s
      test2       Active        5s
      ...         ...           ...

      $ kubectl get kuryrnetworks -A
      NAME       AGE
      ns-test1   1m
      ns-test2   1m

      $ openstack network list | grep test1
      | 7c7b68c5-d3c4-431c-9f69-fbc777b43ee5 | ns/test1-net | 8640d134-5ea2-437d-9e2a-89236f6c0198 |

      $ openstack subnet list | grep test1
      | 8640d134-5ea2-437d-9e2a-89236f6c0198 | ns/test1-subnet | 7c7b68c5-d3c4-431c-9f69-fbc777b43ee5 | 10.0.1.128/26 |

#. Create a pod in each of the created namespaces:

   .. code-block:: console

      $ kubectl create deployment -n test1 --image quay.io/kuryr/demo demo
      deployment "demo" created

      $ kubectl create deployment -n test2 --image quay.io/kuryr/demo demo
      deployment "demo" created

      $ kubectl -n test1 get pod -o wide
      NAME                    READY   STATUS    RESTARTS   AGE   IP           NODE
      demo-5995548848-lmmjc   1/1     Running   0          7s    10.0.1.136   node1

      $ kubectl -n test2 get pod -o wide
      NAME                    READY   STATUS    RESTARTS   AGE   IP           NODE
      demo-5135352253-dfghd   1/1     Running   0          7s    10.0.1.134   node1

#. Create a service:

   .. code-block:: console

      $ kubectl expose -n test1 deploy/demo --port 80 --target-port 8080
      service "demo" exposed

      $ kubectl -n test1 get svc
      NAME   TYPE        CLUSTER-IP   EXTERNAL-IP   PORT(S)   AGE
      demo   ClusterIP   10.0.0.141                 80/TCP    18s

#. Test service connectivity from both namespaces:

   .. code-block:: console

      $ kubectl exec -n test1 -it demo-5995548848-lmmjc /bin/sh
      test-1-pod$ curl 10.0.0.141
      demo-5995548848-lmmjc: HELLO! I AM ALIVE!!!
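The same check can be repeated from the ``test2`` namespace; with this driver
(and no network policies restricting traffic) the service should be reachable
from both. A sketch, reusing the pod name shown above:

.. code-block:: console

   $ kubectl exec -n test2 -it demo-5135352253-dfghd /bin/sh
   test-2-pod$ curl 10.0.0.141

#.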
And finally, to remove the namespace and all its resources, including - openstack networks, kuryrnetwork CRD, svc, pods, you just need to - do: - - .. code-block:: console - - $ kubectl delete namespace test1 - $ kubectl delete namespace test2 diff --git a/doc/source/installation/network_policy.rst b/doc/source/installation/network_policy.rst deleted file mode 100644 index 702157170..000000000 --- a/doc/source/installation/network_policy.rst +++ /dev/null @@ -1,350 +0,0 @@ -=========================================== -Enable network policy support functionality -=========================================== - -Enable policy, pod_label and namespace handlers to respond to network policy -events. As this is not done by default you'd have to explicitly add that to -the list of enabled handlers at kuryr.conf (further info on how to do this can -be found at :doc:`./devstack/containerized`): - -.. code-block:: ini - - [kubernetes] - enabled_handlers=vif,endpoints,service,kuryrloadbalancer,policy, - pod_label,namespace,kuryrnetwork,kuryrnetworkpolicy, - kuryrport - -Note that if you also want to enable prepopulation of ports pools upon creation -of first pod on pods network in a namespace, you need to also add the -kuryrnetwork_population handler -(more details on :doc:`./ports-pool`): - -.. code-block:: ini - - [kubernetes] - enabled_handlers=vif,endpoints,service,kuryrloadbalancer,policy, - pod_label,namespace,kuryrnetworkpolicy,kuryrnetwork, - kuryrport,kuryrnetwork_population - -After that, enable also the security group drivers for policies: - -.. code-block:: ini - - [kubernetes] - service_security_groups_driver = policy - pod_security_groups_driver = policy - -.. warning:: - - The correct behavior for pods that have no network policy applied is to - allow all ingress and egress traffic. If you want that to be enforced, - please make sure to create an SG allowing all traffic and add it to - ``[neutron_defaults]pod_security_groups`` setting in ``kuryr.conf``: - - .. code-block:: ini - - [neutron_defaults] - pod_security_groups = ALLOW_ALL_SG_ID - -Enable the namespace subnet driver by modifying the default pod_subnet_driver -option: - -.. code-block:: ini - - [kubernetes] - pod_subnets_driver = namespace - -Select the subnet pool from where the new subnets will get their CIDR: - -.. code-block:: ini - - [namespace_subnet] - pod_subnet_pool = SUBNET_POOL_ID - -Lastly, select the router where the new subnet will be connected: - -.. code-block:: ini - - [namespace_subnet] - pod_router = ROUTER_ID - -Note you need to restart the kuryr controller after applying the above step. -For devstack non-containerized deployments: - -.. code-block:: console - - $ sudo systemctl restart devstack@kuryr-kubernetes.service - -Same for containerized deployments: - -.. code-block:: console - - $ kubectl -n kube-system get pod | grep kuryr-controller - $ kubectl -n kube-system delete pod KURYR_CONTROLLER_POD_NAME - -For directly enabling the driver when deploying with devstack, you just need -to add the policy, pod_label and namespace handler and drivers with: - -.. code-block:: bash - - KURYR_ENABLED_HANDLERS=vif,kuryrport,endpoints,service,kuryrloadbalancer,policy,pod_label,kuryrnetworkpolicy,namespace,kuryrnetwork - KURYR_SG_DRIVER=policy - KURYR_SUBNET_DRIVER=namespace - -.. note:: - - If the loadbalancer maintains the source IP (such as ovn-octavia driver), - there is no need to enforce sg rules at the load balancer level. 
To disable - the enforcement, you need to set the following variable in DevStack's - local.conf: - - .. code-block:: bash - - KURYR_ENFORCE_SG_RULES=False - - To set that directly in kuryr.conf, the config to be set is: - - .. code-block:: ini - - [octavia_defaults] - enforce_sg_rules=False - -Testing the network policy support functionality ------------------------------------------------- - -#. Given a yaml file with a network policy, such as: - - .. code-block:: yaml - - apiVersion: networking.k8s.io/v1 - kind: NetworkPolicy - metadata: - name: test-network-policy - namespace: default - spec: - podSelector: - matchLabels: - project: default - policyTypes: - - Ingress - - Egress - ingress: - - from: - - namespaceSelector: - matchLabels: - project: default - ports: - - protocol: TCP - port: 6379 - egress: - - to: - - namespaceSelector: - matchLabels: - project: default - ports: - - protocol: TCP - port: 5978 - -#. Apply the network policy: - - .. code-block:: console - - $ kubectl apply -f network_policy.yml - -#. Check that the resources has been created: - - .. code-block:: console - - $ kubectl get kuryrnetworkpolicies - NAME AGE - test-network-policy 2s - - $ kubectl get networkpolicies - NAME POD-SELECTOR AGE - test-network-policy role=db 2s - - $ openstack security group list | grep sg-test-network-policy - | dabdf308-7eed-43ef-a058-af84d1954acb | sg-test-network-policy - -#. Check that the rules are in place for the security group: - - .. code-block:: console - - $ kubectl get kuryrnetworkpolicy test-network-policy -o yaml - - apiVersion: openstack.org/v1 - kind: KuryrNetworkPolicy - metadata: - annotations: - networkPolicyLink: - clusterName: "" - creationTimestamp: 2018-10-02T11:17:02Z - generation: 0 - name: test-network-policy - namespace: default - resourceVersion: "2117" - uid: afb99326-c634-11e8-b63d-002564fdd760 - spec: - egressSgRules: - - sgRule: - description: Kuryr-Kubernetes NetPolicy SG rule - direction: egress - ethertype: IPv4 - port_range_max: 5978 - port_range_min: 5978 - protocol: tcp - ingressSgRules: - - sgRule: - description: Kuryr-Kubernetes NetPolicy SG rule - direction: ingress - ethertype: IPv4 - port_range_max: 6379 - port_range_min: 6379 - protocol: tcp - status: - securityGroupId: cdee7815-3b49-4a3e-abc8-31e384ab75c5 - securityGroupRules: - … - - $ openstack security group rule list sg-test-network-policy --protocol tcp -c "IP Protocol" -c "Port Range" -c "Direction" --long - +-------------+------------+-----------+ - | IP Protocol | Port Range | Direction | - +-------------+------------+-----------+ - | tcp | 6379:6379 | ingress | - | tcp | 5978:5978 | egress | - +-------------+------------+-----------+ - -#. Create a pod: - - .. code-block:: console - - $ kubectl create deployment --image quay.io/kuryr/demo demo - deployment "demo" created - - $ kubectl get pod -o wide - NAME READY STATUS RESTARTS AGE IP - demo-5558c7865d-fdkdv 1/1 Running 0 44s 10.0.0.68 - -#. Get the pod port and check its security group rules: - - .. 
code-block:: console - - $ openstack port list --fixed-ip ip-address=10.0.0.68 -f value -c ID - 5d29b83c-714c-4579-8987-d0c0558420b3 - - $ openstack port show 5d29b83c-714c-4579-8987-d0c0558420b3 | grep security_group_ids - | security_group_ids | bb2ac605-56ff-4688-b4f1-1d045ad251d0 - - $ openstack security group rule list bb2ac605-56ff-4688-b4f1-1d045ad251d0 - --protocol tcp -c "IP Protocol" -c "Port Range" - +-------------+------------+-----------+ - | IP Protocol | Port Range | Direction | - +-------------+------------+-----------+ - | tcp | 6379:6379 | ingress | - | tcp | 5978:5978 | egress | - +-------------+------------+-----------+ - -#. Try to curl the pod on port 8080 (hint: it won't work!): - - .. code-block:: console - - $ curl 10.0.0.68:8080 - -#. Update network policy to allow ingress 8080 port: - - .. code-block:: console - - $ kubectl patch networkpolicy test-network-policy -p '{"spec":{"ingress":[{"ports":[{"port": 8080,"protocol": "TCP"}]}]}}' - networkpolicy "test-network-policy" patched - - $ kubectl get knp test-network-policy -o yaml - apiVersion: openstack.org/v1 - kind: KuryrNetworkPolicy - metadata: - annotations: - networkPolicyLink: - clusterName: "" - creationTimestamp: 2018-10-02T11:17:02Z - generation: 0 - name: test-network-policy - namespace: default - resourceVersion: "1546" - uid: afb99326-c634-11e8-b63d-002564fdd760 - spec: - egressSgRules: - - sgRule: - description: Kuryr-Kubernetes NetPolicy SG rule - direction: egress - ethertype: IPv4 - port_range_max: 5978 - port_range_min: 5978 - protocol: tcp - ingressSgRules: - - sgRule: - description: Kuryr-Kubernetes NetPolicy SG rule - direction: ingress - ethertype: IPv4 - port_range_max: 8080 - port_range_min: 8080 - protocol: tcp - status: - securityGroupId: cdee7815-3b49-4a3e-abc8-31e384ab75c5 - securityGroupRules: - … - - $ openstack security group rule list sg-test-network-policy -c "IP Protocol" -c "Port Range" -c "Direction" --long - +-------------+------------+-----------+ - | IP Protocol | Port Range | Direction | - +-------------+------------+-----------+ - | tcp | 8080:8080 | ingress | - | tcp | 5978:5978 | egress | - +-------------+------------+-----------+ - -#. Try to curl the pod ip after patching the network policy: - - .. code-block:: console - - $ curl 10.0.0.68:8080 - demo-5558c7865d-fdkdv: HELLO! I AM ALIVE!!! - - Note the curl only works from pods (neutron ports) on a namespace that has - the label `project: default` as stated on the policy namespaceSelector. - -#. We can also create a single pod, without a label and check that there is no - connectivity to it, as it does not match the network policy podSelector: - - .. code-block:: console - - $ cat sample-pod.yml - apiVersion: v1 - kind: Pod - metadata: - name: demo-pod - spec: - containers: - - image: quay.io/kuryr/demo - imagePullPolicy: Always - name: demo-pod - - $ kubectl apply -f sample-pod.yml - $ curl demo-pod-IP:8080 - NO REPLY - -#. If we add to the pod a label that match a network policy podSelector, in - this case 'project: default', the network policy will get applied on the - pod, and the traffic will be allowed: - - .. code-block:: console - - $ kubectl label pod demo-pod project=default - $ curl demo-pod-IP:8080 - demo-pod-XXX: HELLO! I AM ALIVE!!! - -#. Confirm the teardown of the resources once the network policy is removed: - - .. 
code-block:: console - - $ kubectl delete -f network_policy.yml - $ kubectl get kuryrnetworkpolicies - $ kubectl get networkpolicies - $ openstack security group list | grep sg-test-network-policy diff --git a/doc/source/installation/ports-pool.rst b/doc/source/installation/ports-pool.rst deleted file mode 100644 index aa43eaa87..000000000 --- a/doc/source/installation/ports-pool.rst +++ /dev/null @@ -1,177 +0,0 @@ -================================ -How to enable ports pool support -================================ - -To enable the utilization of the ports pool feature, the selected pool driver -needs to be included at the kuryr.conf at the kubernetes section. So, for the -baremetal deployment: - -.. code-block:: ini - - [kubernetes] - vif_pool_driver = neutron - -And for the nested (VLAN+Trunk) case: - -.. code-block:: ini - - [kubernetes] - vif_pool_driver = nested - -On the other hand, there are a few extra (optional) configuration options -regarding the maximum and minimum desired sizes of the pools, where the -maximum size can be disabled by setting it to 0: - -.. code-block:: ini - - [vif_pool] - ports_pool_max = 10 - ports_pool_min = 5 - -In addition the size of the bulk operation, e.g., the number of ports created -in a bulk request upon pool population, can be modified: - -.. code-block:: ini - - [vif_pool] - ports_pool_batch = 5 - -Note this value should be smaller than the ports_pool_max (if the -ports_pool_max is enabled). - -Finally, to define the frequency (in seconds) of ports recycle to allow them -to be reused by future pods, configure the following option: - -.. code-block:: ini - - [vif_pool] - ports_pool_update_frequency = 20 - -After these configurations, the final step is to restart the -kuryr-k8s-controller. At devstack deployment: - -.. code-block:: console - - $ sudo systemctl restart devstack@kuryr-kubernetes.service - -And for RDO packaging based installations: - -.. code-block:: console - - $ sudo systemctl restart kuryr-controller - -Note that for the containerized deployment, you need to edit the associated -ConfigMap to change the kuryr.conf files with: - -.. code-block:: console - - $ kubectl -n kube-system edit cm kuryr-config - -Then modify the kuryr.conf to modify the controller configuration regarding -the pools. After that, to have the new configuration applied you need to -restart the kuryr-controller just by killing the existing pod: - -.. code-block:: console - - $ kubectl -n kube-system get pod | grep kuryr-controller - $ kubectl -n kube-system delete pod KURYR_CONTROLLER_POD_NAME - - -Ports loading into pools ------------------------- - -Pre-created ports for the pools will be loaded and put back into their -respective pools upon controller restart. This allows the pre-creation of -neutron ports (or subports for the nested case) with a script or any other -preferred tool (e.g., heat templates) and load them into their respective -pools just by restarting the kuryr-controller (or even before installing it). -To do that you just need to ensure the ports are created with the right -device_owner: - -- For neutron pod driver: compute:kuryr (of the value at - kuryr.lib.constants.py) -- For nested-vlan pod driver: trunk:subport or compute:kuryr (or the value at - kuryr.lib.constants.py). 
But in this case they also need to be attached to an
  active neutron trunk port, i.e., they need to be subports of an existing
  trunk.


Subports pools management tool
------------------------------

Note there is a developer tool available at `contrib/pools-management` to
create/delete ports in the desired pool(s), as well as to control the amount
of existing ports loaded into each pool. For more details on this, read the
README file in that folder.


Multi pod-vif drivers support with pools
----------------------------------------

There is a multi pool driver that supports hybrid environments where some
nodes are Bare Metal while others are running inside VMs, therefore having
different VIF drivers (e.g., neutron and nested-vlan).

This multi pool driver is the default pool driver used, even if a different
vif_pool_driver is set at the config option. However, if the mapping between
the different pod vif and pool drivers is not provided in the
vif_pool_mapping config option of the vif_pool configuration section, only
one pool driver will be loaded -- using the standard pod_vif_driver and
vif_pool_driver config options, i.e., using the one selected at kuryr.conf
options.

To enable the option of having different pools depending on the node's pod vif
types, you need to state the type of pool that you want for each pod vif
driver, e.g.:

.. code-block:: ini

   [vif_pool]
   vif_pool_mapping=nested-vlan:nested,neutron-vif:neutron

This will use a pool driver nested to handle the pods whose vif driver is
nested-vlan, and a pool driver neutron to handle the pods whose vif driver is
neutron-vif. When the controller is requesting a vif for a pod on node X, it
will first read the node's annotation about the pod_vif driver to use, e.g.,
pod_vif: nested-vlan, and then use the corresponding pool driver -- which has
the right pod-vif driver set.

.. note::

   Previously, the `pools_vif_drivers` configuration option provided similar
   functionality, but it is now deprecated and not recommended. It stored a
   mapping from pool_driver => pod_vif_driver instead, disallowing the use of
   a single pool driver as key for multiple pod_vif_drivers.

   .. code-block:: ini

      [vif_pool]
      pools_vif_drivers=nested:nested-vlan,neutron:neutron-vif

Note that if no annotation is set on a node, the default pod_vif_driver is
used.


Populate pools on subnets creation for namespace subnet driver
--------------------------------------------------------------

When the namespace subnet driver is used (either for namespace isolation or
for network policies) a new subnet is created for each namespace. The ports
associated to each namespace will therefore be on different pools. In order
to prepopulate the pools associated to a newly created namespace (i.e.,
subnet), the next handler needs to be enabled:

.. code-block:: ini

   [kubernetes]
   enabled_handlers=vif,endpoints,service,kuryrloadbalancer,namespace,kuryrnetwork


This can be enabled at devstack deployment time by adding the next line to the
local.conf:

..
code-block:: bash - - KURYR_ENABLED_HANDLERS=vif,endpoints,service,kuryrloadbalancer,namespace, - *kuryrnetwork* diff --git a/doc/source/installation/services.rst b/doc/source/installation/services.rst deleted file mode 100644 index d61b809a8..000000000 --- a/doc/source/installation/services.rst +++ /dev/null @@ -1,787 +0,0 @@ -============================== -Kubernetes services networking -============================== - -Kuryr-Kubernetes default handler for handling Kubernetes `services`_ and -endpoints uses the OpenStack `Octavia API`_ in order to have each service -be implemented in the following way: - -* **Service**: It is translated to a single **LoadBalancer** and as many - **Listeners** and **Pools** as ports the Kubernetes Service spec defines. -* **ClusterIP**: It is translated to a LoadBalancer's VIP. -* **loadBalancerIP**: Translated to public IP associated with the - LoadBalancer's VIP. -* **Endpoints**: The Endpoint object is translated to a LoadBalancer's VIP. - -.. figure:: ../../images/lbaas_translation.svg - :width: 100% - :alt: Graphical depiction of the translation explained above - - In this diagram you can see how the Kubernetes entities in the top left - corner are implemented in plain Kubernetes networking (top-right) and in - Kuryr's default configuration (bottom) - -If you are paying attention and are familiar with the `Octavia API`_ you -probably noticed that we have separate pools for each exposed port in a -service. This is probably not optimal and we would probably benefit from -keeping a single Neutron pool that lists each of the per port listeners. - -Kuryr-Kubernetes uses OpenStack Octavia as the load balancing solution for -OpenStack and to provide connectivity to the Kubernetes Services. - -It is beyond the scope of this document to explain in detail the inner -workings of Openstack Octavia thus, only a brief explanation will be offered. - - -Octavia -------- - -OpenStack Octavia is a project that provides advanced Load Balancing by using -pre-existing OpenStack services. The requirements for running Kuryr with -OpenStack Octavia are the following: - -* Nova -* Neutron -* Glance -* Barbican (if TLS offloading functionality is enabled) -* Keystone -* Rabbit -* MySQL - -You can find a good explanation about the involved steps to install Octavia in -the `Octavia installation docs`_. - -Simplifying a lot, Octavia works by instantiating a compute resource, i.e. a -Nova VM, and running HAProxy inside. These single load balancer Nova VMs are -called *Amphorae*. Each *Amphora* has a separate linux network namespace where -HAProxy runs and that is connected to the Kuryr services network. The VM host -network namespace is used by Octavia to reconfigure and monitor the Load -Balancer, which it talks to via HAProxy's control unix domain socket. - -Running Kuryr with Octavia means that each Kubernetes service that runs in the -cluster will need at least one Load Balancer VM, i.e., an *Amphora*. To avoid -single point of failure at Amphora, Octavia should be configured to support -active/standby loadbalancer topology. In addition, it is important to -configure the right Octavia flavor for your deployment and to size the compute -nodes appropriately so that Octavia can operate well. - -Another important consideration is where do the Amphorae run, i.e., whether the -worker nodes should also be compute nodes so that they run the Amphorae or if -Amphorae should be run separately. 
If your compute nodes are big enough, it -would help avoiding extra hops if the amphorae were scheduled in the worker -nodes, but how much significant that is, depends on your latency and throughput -requirements. - -Octavia uses `Load Balancer drivers`_ to handle all communications with -*Amphorae*. By default, Kuryr-Kubernetes uses the reference Octavia driver -which is the `Amphora driver`_. Kuryr also supports the use of -`OVN Octavia driver`_. - - -OVN Octavia Provider Driver ---------------------------- - -Kuryr supports the creation of a load balancer with OVN provider driver. When -'ovn' provider is enabled as one of the Octavia Load Balancer providers, the -load balancing is executed by the virtual switch data-path engine and there is -no need to create VMs. This means there is no additional overhead of VMs as is -required when using Octavia with the default Amphora driver. - -You can find additional information about the driver, its limitations and how -to create OVN-based load balancers in `OVN as Provider Driver for Octavia`_. - - -.. _k8s_default_configuration: - -Default configuration -~~~~~~~~~~~~~~~~~~~~~ - -Kuryr can use Octavia in two ways: - -* The one that is commonly referred to as **Layer 3**, this means that Kuryr - will tell Octavia not to add a Neutron port to the pod network for each - load balancer. Instead, **it relies on the pod and the service subnets being - routable**. This means that the communication from Pods to Services and back - will go through the router. Depending on the SDN of your choice, this may - have performance implications. -* The **Layer 2** way, where kuryr will tell Octavia to add a Neutron port to - the pod network for each load balancer. Therefore the communication from - Services to its Pods members and back will go directly through L2 layer. The - drawback of this approach is the extra usage of neutron ports in the Pods - subnet, that needs to be accordingly dimensioned. - -The services and pods subnets should be created. - -#. Create pod network: - - .. code-block:: console - - $ openstack network create pod - +---------------------------+--------------------------------------+ - | Field | Value | - +---------------------------+--------------------------------------+ - | admin_state_up | UP | - | availability_zone_hints | | - | availability_zones | | - | created_at | 2017-08-11T10:51:25Z | - | description | | - | dns_domain | None | - | id | 4593045c-4233-4b4c-8527-35608ab0eaae | - | ipv4_address_scope | None | - | ipv6_address_scope | None | - | is_default | False | - | is_vlan_transparent | None | - | mtu | 1450 | - | name | pod | - | port_security_enabled | True | - | project_id | 90baf12877ba49a786419b2cacc2c954 | - | provider:network_type | vxlan | - | provider:physical_network | None | - | provider:segmentation_id | 21 | - | qos_policy_id | None | - | revision_number | 2 | - | router:external | Internal | - | segments | None | - | shared | False | - | status | ACTIVE | - | subnets | | - | tags | [] | - | updated_at | 2017-08-11T10:51:25Z | - +---------------------------+--------------------------------------+ - -#. Create pod subnet: - - .. 
code-block:: console - - $ openstack subnet create --network pod --no-dhcp \ - --gateway 10.1.255.254 \ - --subnet-range 10.1.0.0/16 \ - pod_subnet - +-------------------------+--------------------------------------+ - | Field | Value | - +-------------------------+--------------------------------------+ - | allocation_pools | 10.1.0.1-10.1.255.253 | - | cidr | 10.1.0.0/16 | - | created_at | 2017-08-11T10:55:25Z | - | description | | - | dns_nameservers | | - | enable_dhcp | False | - | gateway_ip | 10.1.255.254 | - | host_routes | | - | id | e0a888ab-9915-4685-a600-bffe240dc58b | - | ip_version | 4 | - | ipv6_address_mode | None | - | ipv6_ra_mode | None | - | name | pod_subnet | - | network_id | 4593045c-4233-4b4c-8527-35608ab0eaae | - | project_id | 90baf12877ba49a786419b2cacc2c954 | - | revision_number | 0 | - | segment_id | None | - | service_types | | - | subnetpool_id | None | - | tags | [] | - | updated_at | 2017-08-11T10:55:25Z | - | use_default_subnet_pool | None | - +-------------------------+--------------------------------------+ - -#. Create services network: - - .. code-block:: console - - $ openstack network create services - +---------------------------+--------------------------------------+ - | Field | Value | - +---------------------------+--------------------------------------+ - | admin_state_up | UP | - | availability_zone_hints | | - | availability_zones | | - | created_at | 2017-08-11T10:53:36Z | - | description | | - | dns_domain | None | - | id | 560df0c2-537c-41c0-b22c-40ef3d752574 | - | ipv4_address_scope | None | - | ipv6_address_scope | None | - | is_default | False | - | is_vlan_transparent | None | - | mtu | 1450 | - | name | services | - | port_security_enabled | True | - | project_id | 90baf12877ba49a786419b2cacc2c954 | - | provider:network_type | vxlan | - | provider:physical_network | None | - | provider:segmentation_id | 94 | - | qos_policy_id | None | - | revision_number | 2 | - | router:external | Internal | - | segments | None | - | shared | False | - | status | ACTIVE | - | subnets | | - | tags | [] | - | updated_at | 2017-08-11T10:53:37Z | - +---------------------------+--------------------------------------+ - -#. Create service subnet. We reserve the first half of the subnet range for the - VIPs and the second half for the loadbalancer vrrp ports: - - .. code-block:: console - - $ openstack subnet create --network services --no-dhcp \ - --gateway 10.2.255.254 \ - --ip-version 4 \ - --allocation-pool start=10.2.128.1,end=10.2.255.253 \ - --subnet-range 10.2.0.0/16 \ - service_subnet - +-------------------------+--------------------------------------+ - | Field | Value | - +-------------------------+--------------------------------------+ - | allocation_pools | 10.2.128.1-10.2.255.253 | - | cidr | 10.2.0.0/16 | - | created_at | 2017-08-11T11:02:24Z | - | description | | - | dns_nameservers | | - | enable_dhcp | False | - | gateway_ip | 10.2.255.254 | - | host_routes | | - | id | d6438a81-22fa-4a88-9b05-c4723662ef36 | - | ip_version | 4 | - | ipv6_address_mode | None | - | ipv6_ra_mode | None | - | name | service_subnet | - | network_id | 560df0c2-537c-41c0-b22c-40ef3d752574 | - | project_id | 90baf12877ba49a786419b2cacc2c954 | - | revision_number | 0 | - | segment_id | None | - | service_types | | - | subnetpool_id | None | - | tags | [] | - | updated_at | 2017-08-11T11:02:24Z | - | use_default_subnet_pool | None | - +-------------------------+--------------------------------------+ - -#. 
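Before moving on, you may want to double-check that both subnets exist with
the expected CIDRs; a quick illustrative check:

.. code-block:: console

   $ openstack subnet list -c ID -c Name -c Subnet

#.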
Create a router to give L3 connectivity between the pod and the service - subnets. If you already have one, you can use it: - - .. code-block:: console - - $ openstack router create kuryr-kubernetes - +-------------------------+--------------------------------------+ - | Field | Value | - +-------------------------+--------------------------------------+ - | admin_state_up | UP | - | availability_zone_hints | | - | availability_zones | | - | created_at | 2017-08-11T11:06:21Z | - | description | | - | distributed | False | - | external_gateway_info | None | - | flavor_id | None | - | ha | False | - | id | d2a06d95-8abd-471b-afbe-9dfe475dd8a4 | - | name | kuryr-kubernetes | - | project_id | 90baf12877ba49a786419b2cacc2c954 | - | revision_number | None | - | routes | | - | status | ACTIVE | - | tags | [] | - | updated_at | 2017-08-11T11:06:21Z | - +-------------------------+--------------------------------------+ - -#. Create router ports in the pod and service subnets: - - .. code-block:: console - - $ openstack port create --network pod --fixed-ip ip-address=10.1.255.254 pod_subnet_router - +-----------------------+---------------------------------------------------------------------------+ - | Field | Value | - +-----------------------+---------------------------------------------------------------------------+ - | admin_state_up | UP | - | allowed_address_pairs | | - | binding_host_id | | - | binding_profile | | - | binding_vif_details | | - | binding_vif_type | unbound | - | binding_vnic_type | normal | - | created_at | 2017-08-11T11:10:47Z | - | data_plane_status | None | - | description | | - | device_id | | - | device_owner | | - | dns_assignment | None | - | dns_name | None | - | extra_dhcp_opts | | - | fixed_ips | ip_address='10.1.255.254', subnet_id='e0a888ab-9915-4685-a600-bffe240dc58b' | - | id | 0a82dfff-bf45-4738-a1d2-36d4ad81a5fd | - | ip_address | None | - | mac_address | fa:16:3e:49:70:b5 | - | name | pod_subnet_router | - | network_id | 4593045c-4233-4b4c-8527-35608ab0eaae | - | option_name | None | - | option_value | None | - | port_security_enabled | True | - | project_id | 90baf12877ba49a786419b2cacc2c954 | - | qos_policy_id | None | - | revision_number | 3 | - | security_group_ids | 2d6e006e-572a-4939-93b8-0f45b40777f7 | - | status | DOWN | - | subnet_id | None | - | tags | [] | - | trunk_details | None | - | updated_at | 2017-08-11T11:10:47Z | - +-----------------------+---------------------------------------------------------------------------+ - - $ openstack port create --network services \ - --fixed-ip ip-address=10.2.255.254 \ - service_subnet_router - +-----------------------+-----------------------------------------------------------------------------+ - | Field | Value | - +-----------------------+-----------------------------------------------------------------------------+ - | admin_state_up | UP | - | allowed_address_pairs | | - | binding_host_id | | - | binding_profile | | - | binding_vif_details | | - | binding_vif_type | unbound | - | binding_vnic_type | normal | - | created_at | 2017-08-11T11:16:56Z | - | data_plane_status | None | - | description | | - | device_id | | - | device_owner | | - | dns_assignment | None | - | dns_name | None | - | extra_dhcp_opts | | - | fixed_ips | ip_address='10.2.255.254', subnet_id='d6438a81-22fa-4a88-9b05-c4723662ef36' | - | id | 572cee3d-c30a-4ee6-a59c-fe9529a6e168 | - | ip_address | None | - | mac_address | fa:16:3e:65:de:e5 | - | name | service_subnet_router | - | network_id | 560df0c2-537c-41c0-b22c-40ef3d752574 | - 
| option_name | None | - | option_value | None | - | port_security_enabled | True | - | project_id | 90baf12877ba49a786419b2cacc2c954 | - | qos_policy_id | None | - | revision_number | 3 | - | security_group_ids | 2d6e006e-572a-4939-93b8-0f45b40777f7 | - | status | DOWN | - | subnet_id | None | - | tags | [] | - | trunk_details | None | - | updated_at | 2017-08-11T11:16:57Z | - +-----------------------+-----------------------------------------------------------------------------+ - -#. Add the router to the service and the pod subnets: - - .. code-block:: console - - $ openstack router add port \ - d2a06d95-8abd-471b-afbe-9dfe475dd8a4 \ - 0a82dfff-bf45-4738-a1d2-36d4ad81a5fd - - $ openstack router add port \ - d2a06d95-8abd-471b-afbe-9dfe475dd8a4 \ - 572cee3d-c30a-4ee6-a59c-fe9529a6e168 - -#. Configure kuryr.conf pod subnet and service subnet to point to their - respective subnets created in step (2) and (4): - - .. code-block:: ini - - [neutron_defaults] - pod_subnet = e0a888ab-9915-4685-a600-bffe240dc58b - service_subnet = d6438a81-22fa-4a88-9b05-c4723662ef36 - -#. Configure Kubernetes API server to use only a subset of the service - addresses, **10.2.0.0/17**. The rest will be used for loadbalancer *vrrp* - ports managed by Octavia. To configure Kubernetes with this CIDR range you - have to add the following parameter to its command line invocation: - - .. code-block:: console - - --service-cluster-ip-range=10.2.0.0/17 - - As a result of this, Kubernetes will allocate the **10.2.0.1** address to - the Kubernetes API service, i.e., the service used for pods to talk to the - Kubernetes API server. It will be able to allocate service addresses up - until **10.2.127.254**. The rest of the addresses, as stated above, will be - for Octavia load balancer *vrrp* ports. **If this subnetting was not done, - Octavia would allocate *vrrp* ports with the Neutron IPAM from the same - range as Kubernetes service IPAM and we'd end up with conflicts**. - -#. Once you have Kubernetes installed and you have the API host reachable from - the pod subnet, follow the `Making the Pods be able to reach the Kubernetes - API`_ section - -#. For the external services (type=LoadBalancer) case, - two methods are supported: - - + Pool - external IPs are allocated from pre-defined pool - + User - user specify the external IP address - - In case 'Pool' method should be supported, execute the next steps: - - #. Create an external/provider network - #. Create subnet/pool range of external CIDR - #. Connect external subnet to kuryr-kubernetes router - #. Configure external network details in Kuryr.conf as follows: - - .. code-block:: ini - - [neutron_defaults] - external_svc_net= - # 'external_svc_subnet' field is optional, set this field in case - # multiple subnets attached to 'external_svc_net' - external_svc_subnet= - - From this point for each K8s service of type=LoadBalancer and in which - 'load-balancer-ip' is not specified, an external IP from - 'external_svc_subnet' will be allocated. - - For the 'User' case, user should first create an external/floating IP: - - .. 
code-block:: console - - $ #openstack floating ip create --subnet - $ openstack floating ip create --subnet 48ddcfec-1b29-411b-be92-8329cc09fc12 3b4eb25e-e103-491f-a640-a6246d588561 - +---------------------------+--------------------------------------+ - | Field | Value | - +---------------------+--------------------------------------+ - | created_at | 2017-10-02T09:22:37Z | - | description | | - | fixed_ip_address | None | - | floating_ip_address | 172.24.4.13 | - | floating_network_id | 3b4eb25e-e103-491f-a640-a6246d588561 | - | id | 1157e2fd-de64-492d-b955-88ea203b4c37 | - | name | 172.24.4.13 | - | port_id | None | - | project_id | 6556471f4f7b40e2bde1fc6e4aba0eef | - | revision_number | 0 | - | router_id | None | - | status | DOWN | - | updated_at | 2017-10-02T09:22:37Z | - +---------------------+--------------------------------------+ - - and then create k8s service with type=LoadBalancer and - load-balancer-ip= (e.g: 172.24.4.13) - - In both 'User' and 'Pool' methods, the external IP address could be found - in k8s service status information (under loadbalancer/ingress/ip) - - -Alternative configuration -~~~~~~~~~~~~~~~~~~~~~~~~~ - -It is actually possible to avoid this routing by performing a deployment change -that was successfully pioneered by the people at EasyStack Inc. which consists -of doing the following: - -#. Create the pod network and subnet so that it has enough addresses for both - the pod ports and the service ports. We are limiting the allocation range - out of the service range so that nor Octavia nor Kuryr-Kubernetes pod - allocation create ports in the part reserved for services. - - Create the network: - - .. code-block:: console - - $ openstack network create k8s - +---------------------------+--------------------------------------+ - | Field | Value | - +---------------------------+--------------------------------------+ - | admin_state_up | UP | - | availability_zone_hints | | - | availability_zones | | - | created_at | 2017-08-10T15:58:19Z | - | description | | - | dns_domain | None | - | id | 9fa35362-0bf7-4b5b-8921-f0c7f60a7dd3 | - | ipv4_address_scope | None | - | ipv6_address_scope | None | - | is_default | False | - | is_vlan_transparent | None | - | mtu | 1450 | - | name | k8s | - | port_security_enabled | True | - | project_id | 90baf12877ba49a786419b2cacc2c954 | - | provider:network_type | vxlan | - | provider:physical_network | None | - | provider:segmentation_id | 69 | - | qos_policy_id | None | - | revision_number | 2 | - | router:external | Internal | - | segments | None | - | shared | False | - | status | ACTIVE | - | subnets | | - | tags | [] | - | updated_at | 2017-08-10T15:58:20Z | - +---------------------------+--------------------------------------+ - - Create the subnet. Note that we disable dhcp as Kuryr-Kubernetes pod subnets - have no need for them for Pod networking. We also put the gateway on the - last IP of the subnet range so that the beginning of the range can be kept - for Kubernetes driven service IPAM: - - .. 
code-block:: console - - $ openstack subnet create --network k8s --no-dhcp \ - --gateway 10.0.255.254 \ - --ip-version 4 \ - --allocation-pool start=10.0.64.0,end=10.0.255.253 \ - --subnet-range 10.0.0.0/16 \ - k8s_subnet - +-------------------------+--------------------------------------+ - | Field | Value | - +-------------------------+--------------------------------------+ - | allocation_pools | 10.0.64.0-10.0.255.253 | - | cidr | 10.0.0.0/16 | - | created_at | 2017-08-10T16:07:11Z | - | description | | - | dns_nameservers | | - | enable_dhcp | False | - | gateway_ip | 10.0.255.254 | - | host_routes | | - | id | 3a1df0d9-f738-4293-8de6-6c624f742980 | - | ip_version | 4 | - | ipv6_address_mode | None | - | ipv6_ra_mode | None | - | name | k8s_subnet | - | network_id | 9fa35362-0bf7-4b5b-8921-f0c7f60a7dd3 | - | project_id | 90baf12877ba49a786419b2cacc2c954 | - | revision_number | 0 | - | segment_id | None | - | service_types | | - | subnetpool_id | None | - | tags | [] | - | updated_at | 2017-08-10T16:07:11Z | - | use_default_subnet_pool | None | - +-------------------------+--------------------------------------+ - -#. Configure kuryr.conf pod subnet and service subnet to point to the same - subnet created in step (1): - - .. code-block:: ini - - [neutron_defaults] - pod_subnet = 3a1df0d9-f738-4293-8de6-6c624f742980 - service_subnet = 3a1df0d9-f738-4293-8de6-6c624f742980 - -#. Configure Kubernetes API server to use only a subset of the addresses for - services, **10.0.0.0/18**. The rest will be used for pods. To configure - Kubernetes with this CIDR range you have to add the following parameter to - its command line invocation: - - .. code-block:: console - - --service-cluster-ip-range=10.0.0.0/18 - - As a result of this, Kubernetes will allocate the **10.0.0.1** address to - the Kubernetes API service, i.e., the service used for pods to talk to the - Kubernetes API server. It will be able to allocate service addresses up - until **10.0.63.255**. The rest of the addresses will be for pods or Octavia - load balancer *vrrp* ports. - -#. Once you have Kubernetes installed and you have the API host reachable from - the pod subnet, follow the `Making the Pods be able to reach the Kubernetes - API`_ section - - -.. _k8s_lb_reachable: - -Making the Pods be able to reach the Kubernetes API ---------------------------------------------------- - -Once you have Kubernetes installed and you have the API host reachable from the -pod subnet (that means you should add 10.0.255.254 to a router that gives -access to it), you should create a load balancer configuration for the -Kubernetes service to be accessible to Pods. - -#. Create the load balancer (Kubernetes always picks the first address of the - range we gave in *--service-cluster-ip-range*): - - .. 
code-block:: console - - $ openstack loadbalancer create --vip-address 10.0.0.1 \ - --vip-subnet-id 3a1df0d9-f738-4293-8de6-6c624f742980 \ - --name default/kubernetes - +---------------------+--------------------------------------+ - | Field | Value | - +---------------------+--------------------------------------+ - | admin_state_up | True | - | created_at | 2017-08-10T16:16:30 | - | description | | - | flavor | | - | id | 84c1c0da-2065-43c5-86c9-f2235566b111 | - | listeners | | - | name | default/kubernetes | - | operating_status | OFFLINE | - | pools | | - | project_id | 90baf12877ba49a786419b2cacc2c954 | - | provider | octavia | - | provisioning_status | PENDING_CREATE | - | updated_at | None | - | vip_Address | 10.0.0.1 | - | vip_network_id | 9fa35362-0bf7-4b5b-8921-f0c7f60a7dd3 | - | vip_port_id | d1182d33-686b-4bcc-9754-8d46e373d647 | - | vip_subnet_id | 3a1df0d9-f738-4293-8de6-6c624f742980 | - +---------------------+--------------------------------------+ - -#. Create the Pool for all the Kubernetes API hosts: - - .. code-block:: console - - $ openstack loadbalancer pool create --name default/kubernetes:HTTPS:443 \ - --protocol HTTPS --lb-algorithm LEAST_CONNECTIONS \ - --loadbalancer 84c1c0da-2065-43c5-86c9-f2235566b111 - +---------------------+--------------------------------------+ - | Field | Value | - +---------------------+--------------------------------------+ - | admin_state_up | True | - | created_at | 2017-08-10T16:21:52 | - | description | | - | healthmonitor_id | | - | id | 22ae71be-1d71-4a6d-9dd8-c6a4f8e87061 | - | lb_algorithm | LEAST_CONNECTIONS | - | listeners | | - | loadbalancers | 84c1c0da-2065-43c5-86c9-f2235566b111 | - | members | | - | name | default/kubernetes:HTTPS:443 | - | operating_status | OFFLINE | - | project_id | 90baf12877ba49a786419b2cacc2c954 | - | protocol | HTTPS | - | provisioning_status | PENDING_CREATE | - | session_persistence | None | - | updated_at | None | - +---------------------+--------------------------------------+ - -#. Add a member for each Kubernetes API server. We recommend setting the name - to be the hostname of the host where the Kubernetes API runs: - - .. code-block:: console - - $ openstack loadbalancer member create \ - --name k8s-master-0 \ - --address 192.168.1.2 \ - --protocol-port 6443 \ - 22ae71be-1d71-4a6d-9dd8-c6a4f8e87061 - +---------------------+--------------------------------------+ - | Field | Value | - +---------------------+--------------------------------------+ - | address | 192.168.1.2 | - | admin_state_up | True | - | created_at | 2017-08-10T16:40:57 | - | id | 9ba24740-3666-49e8-914d-233068de6423 | - | name | k8s-master-0 | - | operating_status | NO_MONITOR | - | project_id | 90baf12877ba49a786419b2cacc2c954 | - | protocol_port | 6443 | - | provisioning_status | PENDING_CREATE | - | subnet_id | None | - | updated_at | None | - | weight | 1 | - | monitor_port | None | - | monitor_address | None | - +---------------------+--------------------------------------+ - -#. Create a listener for the load balancer that defaults to the created pool: - - .. 
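note::

   The load balancer, pool and member above are still in *PENDING_CREATE*,
   and Octavia rejects changes while provisioning is in progress, so you
   may need to wait until they go *ACTIVE* before creating the listener.
   A polling sketch using openstacksdk (assuming a configured clouds.yaml
   with a cloud named ``devstack``; the ID is the load balancer created
   above):

   .. code-block:: python

      import time

      import openstack

      conn = openstack.connect(cloud='devstack')
      lb_id = '84c1c0da-2065-43c5-86c9-f2235566b111'

      # Block until Octavia finishes provisioning the load balancer.
      while True:
          lb = conn.load_balancer.get_load_balancer(lb_id)
          if lb.provisioning_status == 'ACTIVE':
              break
          time.sleep(5)

.. 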
code-block:: console - - $ openstack loadbalancer listener create \ - --name default/kubernetes:HTTPS:443 \ - --protocol HTTPS \ - --default-pool 22ae71be-1d71-4a6d-9dd8-c6a4f8e87061 \ - --protocol-port 443 \ - 84c1c0da-2065-43c5-86c9-f2235566b111 - +---------------------------+--------------------------------------+ - | Field | Value | - +---------------------------+--------------------------------------+ - | admin_state_up | True | - | connection_limit | -1 | - | created_at | 2017-08-10T16:46:55 | - | default_pool_id | 22ae71be-1d71-4a6d-9dd8-c6a4f8e87061 | - | default_tls_container_ref | None | - | description | | - | id | f18b9af6-6336-4a8c-abe5-cb7b89c6b621 | - | insert_headers | None | - | l7policies | | - | loadbalancers | 84c1c0da-2065-43c5-86c9-f2235566b111 | - | name | default/kubernetes:HTTPS:443 | - | operating_status | OFFLINE | - | project_id | 90baf12877ba49a786419b2cacc2c954 | - | protocol | HTTPS | - | protocol_port | 443 | - | provisioning_status | PENDING_CREATE | - | sni_container_refs | [] | - | updated_at | 2017-08-10T16:46:55 | - +---------------------------+--------------------------------------+ - - -.. _services_troubleshooting: - -Troubleshooting ---------------- - -* **Pods can talk to each other with IPv6 but they can't talk to services.** - - This means that most likely you forgot to create a security group or rule - for the pods to be accessible by the service CIDR. You can find an example - here: - - .. code-block:: console - - $ openstack security group create service_pod_access - +-----------------+-------------------------------------------------------------------------------------------------------------------------------------------------------+ - | Field | Value | - +-----------------+-------------------------------------------------------------------------------------------------------------------------------------------------------+ - | created_at | 2017-08-16T10:01:45Z | - | description | service_pod_access | - | id | f0b6f0bd-40f7-4ab6-a77b-3cf9f7cc28ac | - | name | service_pod_access | - | project_id | 90baf12877ba49a786419b2cacc2c954 | - | revision_number | 2 | - | rules | created_at='2017-08-16T10:01:45Z', direction='egress', ethertype='IPv4', id='bd759b4f-c0f5-4cff-a30a-3cd8544d2822', updated_at='2017-08-16T10:01:45Z' | - | | created_at='2017-08-16T10:01:45Z', direction='egress', ethertype='IPv6', id='c89c3f3e-a326-4902-ba26-5315e2d95320', updated_at='2017-08-16T10:01:45Z' | - | updated_at | 2017-08-16T10:01:45Z | - +-----------------+-------------------------------------------------------------------------------------------------------------------------------------------------------+ - - $ openstack security group rule create --remote-ip 10.2.0.0/16 \ - --ethertype IPv4 f0b6f0bd-40f7-4ab6-a77b-3cf9f7cc28ac - +-------------------+--------------------------------------+ - | Field | Value | - +-------------------+--------------------------------------+ - | created_at | 2017-08-16T10:04:57Z | - | description | | - | direction | ingress | - | ether_type | IPv4 | - | id | cface77f-666f-4a4c-8a15-a9c6953acf08 | - | name | None | - | port_range_max | None | - | port_range_min | None | - | project_id | 90baf12877ba49a786419b2cacc2c954 | - | protocol | tcp | - | remote_group_id | None | - | remote_ip_prefix | 10.2.0.0/16 | - | revision_number | 0 | - | security_group_id | f0b6f0bd-40f7-4ab6-a77b-3cf9f7cc28ac | - | updated_at | 2017-08-16T10:04:57Z | - +-------------------+--------------------------------------+ - - Then remember to add the new security 
groups to the comma-separated *pod_security_groups* setting in the
*[neutron_defaults]* section of /etc/kuryr/kuryr.conf. After making the
kuryr.conf edits, you need to restart the Kuryr controller for the changes
to take effect.

If you want your current pods to get this change applied, the most
comfortable way to do that is to delete them and let the Kubernetes
Deployment create them automatically for you.


.. _services: https://kubernetes.io/docs/concepts/services-networking/service/
.. _Octavia API: https://docs.openstack.org/api-ref/load-balancer/v2/
.. _Octavia installation docs: https://docs.openstack.org/octavia/latest/contributor/guides/dev-quick-start.html
.. _Load Balancer drivers: https://docs.openstack.org/octavia/latest/
.. _Amphora driver: https://docs.openstack.org/octavia/latest/
.. _OVN Octavia driver: https://docs.openstack.org/ovn-octavia-provider/latest/
.. _OVN as Provider Driver for Octavia: https://docs.openstack.org/networking-ovn/stein/admin/loadbalancer.html
diff --git a/doc/source/installation/testing_connectivity.rst b/doc/source/installation/testing_connectivity.rst
deleted file mode 100644
index c04ee5b09..000000000
--- a/doc/source/installation/testing_connectivity.rst
+++ /dev/null
@@ -1,240 +0,0 @@
============================
Testing Network Connectivity
============================

Once the environment is ready, we can test that network connectivity works
among pods. First we check the status of the Kubernetes cluster:

.. code-block:: console

   $ kubectl get nodes
   NAME           STATUS    AGE       VERSION
   masterodl-vm   Ready     1h        v1.6.2

   $ kubectl get pods
   No resources found.

   $ kubectl get svc
   NAME         CLUSTER-IP   EXTERNAL-IP   PORT(S)   AGE
   kubernetes   10.0.0.129   <none>        443/TCP   1h

As we can see, this is a one-node cluster with no pods currently running,
and with the Kubernetes API service listening on port 443 at 10.0.0.129
(which matches the IP assigned to the load balancer created for it).

To test proper configuration and connectivity, we first create a sample
deployment with:

.. code-block:: console

   $ kubectl create deployment demo --image=quay.io/kuryr/demo
   deployment "demo" created

After a few seconds, the container is up and running, and a Neutron port
has been created with the same IP that got assigned to the pod:

.. code-block:: console

   $ kubectl get pods
   NAME                    READY     STATUS    RESTARTS   AGE
   demo-7dd477695c-25s99   1/1       Running   0          1m

   $ kubectl describe pod demo-7dd477695c-25s99 | grep IP:
   IP:           10.0.1.122

   $ openstack port list | grep demo
   | 468d3d7e-4dd1-4e42-9200-e3eb97d603e6 | default/demo-7dd477695c-25s99 | fa:16:3e:24:ba:40 | ip_address='10.0.1.122', subnet_id='15cfabf7-c7e0-4964-a3c0-0545e9e4ea2f' | ACTIVE |

We can then scale the deployment to 2 pods and check connectivity between
them. A scripted version of the check is sketched first, followed by the
interactive session.
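The sketch below is not part of the original procedure; it simply fetches
the demo page of both pods from any host that can reach the pod subnet
(for example the DevStack controller), using only the standard library.
The pod IPs are the ones from the port listing above:

.. code-block:: python

   import urllib.request

   # IPs assigned to the two demo pods; adjust to your environment.
   pods = ['10.0.1.122', '10.0.1.116']

   for ip in pods:
       with urllib.request.urlopen(f'http://{ip}:8080', timeout=5) as resp:
           # Each pod answers with its own name, so the two lines differ.
           print(resp.read().decode().strip())

The interactive session, scaling the deployment first:

.. 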
code-block:: console - - $ kubectl scale deploy/demo --replicas=2 - deployment "demo" scaled - - $ kubectl get pods - NAME READY STATUS RESTARTS AGE - demo-7dd477695c-25s99 1/1 Running 0 36m - demo-7dd477695c-fbq4r 1/1 Running 0 30m - - - $ openstack port list | grep demo - | 468d3d7e-4dd1-4e42-9200-e3eb97d603e6 | default/demo-7dd477695c-25s99 | fa:16:3e:24:ba:40 | ip_address='10.0.1.122', subnet_id='15cfabf7-c7e0-4964-a3c0-0545e9e4ea2f' | ACTIVE | - | b54da942-2241-4f07-8e2e-e45a7367fa69 | default/demo-7dd477695c-fbq4r | fa:16:3e:41:57:a4 | ip_address='10.0.1.116', subnet_id='15cfabf7-c7e0-4964-a3c0-0545e9e4ea2f' | ACTIVE | - - $ kubectl exec -it demo-7dd477695c-25s99 -- /bin/sh - - sh-4.2$ curl 10.0.1.122:8080 - demo-7dd477695c-25s99: HELLO, I AM ALIVE!!! - - - sh-4.2$ curl 10.0.1.116:8080 - demo-7dd477695c-fbq4r: HELLO, I AM ALIVE!!! - - - sh-4.2$ ping 10.0.1.116 - PING 10.0.1.116 (10.0.1.116) 56(84) bytes of data. - 64 bytes from 10.0.1.116: icmp_seq=1 ttl=64 time=1.14 ms - 64 bytes from 10.0.1.116: icmp_seq=2 ttl=64 time=0.250 ms - -Next, we expose the service so that a neutron load balancer is created and -the service is exposed and load balanced among the available pods: - -.. code-block:: console - - $ kubectl get svc - NAME CLUSTER-IP EXTERNAL-IP PORT(S) AGE - kubernetes 10.0.0.129 443/TCP 1h - - $ kubectl expose deploy/demo --port=80 --target-port=8080 - service "demo" exposed - - $ kubectl get svc - NAME CLUSTER-IP EXTERNAL-IP PORT(S) AGE - demo 10.0.0.140 80/TCP 6s - kubernetes 10.0.0.129 443/TCP 1h - - $ openstack loadbalancer list - +--------------------------------------+---------------------+----------------------------------+-------------+---------------------+------------------+----------+ - | id | name | project_id | vip_address | provisioning_status | operating_status | provider | - +--------------------------------------+---------------------+----------------------------------+-------------+---------------------+------------------+----------+ - | e4949ba4-7f73-43ad-8091-d123dea12dae | default/kubernetes | 1ea4a08913d74aff8ed3e3bf31851236 | 10.0.0.129 | ACTIVE | ONLINE | amphora | - | 994893a7-d67f-4af2-b2fe-5a03f03102b1 | default/demo | 1ea4a08913d74aff8ed3e3bf31851236 | 10.0.0.140 | ACTIVE | ONLINE | amphora | - +--------------------------------------+---------------------+----------------------------------+-------------+---------------------+------------------+----------+ - - - $ openstack loadbalancer listener list - +--------------------------------------+--------------------------------------+----------------------------+----------------------------------+----------+---------------+----------------+ - | id | default_pool_id | name | project_id | protocol | protocol_port | admin_state_up | - +--------------------------------------+--------------------------------------+----------------------------+----------------------------------+----------+---------------+----------------+ - | 3223bf4a-4cdd-4d0f-9922-a3d3eb6f5e4f | 6212ecc2-c118-434a-8564-b4e763e9fa74 | default/kubernetes:443 | 1ea4a08913d74aff8ed3e3bf31851236 | HTTPS | 443 | True | - | 8aebeb5e-bccc-4519-8b68-07847c1b5b73 | f5a61ce7-3e2f-4a33-bd1f-8f12b8d6a6aa | default/demo:TCP:80 | 1ea4a08913d74aff8ed3e3bf31851236 | TCP | 80 | True | - +--------------------------------------+--------------------------------------+----------------------------+----------------------------------+----------+---------------+----------------+ - - $ openstack loadbalancer pool list - 
+--------------------------------------+----------------------------+----------------------------------+---------------------+----------+--------------+----------------+ - | id | name | project_id | provisioning_status | protocol | lb_algorithm | admin_state_up | - +--------------------------------------+----------------------------+----------------------------------+---------------------+----------+--------------+----------------+ - | 6212ecc2-c118-434a-8564-b4e763e9fa74 | default/kubernetes:443 | 1ea4a08913d74aff8ed3e3bf31851236 | ACTIVE | HTTPS | ROUND_ROBIN | True | - | f5a61ce7-3e2f-4a33-bd1f-8f12b8d6a6aa | default/demo:TCP:80 | 1ea4a08913d74aff8ed3e3bf31851236 | ACTIVE | TCP | ROUND_ROBIN | True | - +--------------------------------------+----------------------------+----------------------------------+---------------------+----------+--------------+----------------+ - - - $ openstack loadbalancer member list default/demo:TCP:80 - +--------------------------------------+------------------------------------+----------------------------------+---------------------+------------+---------------+------------------+--------+ - | id | name | project_id | provisioning_status | address | protocol_port | operating_status | weight | - +--------------------------------------+------------------------------------+----------------------------------+---------------------+------------+---------------+------------------+--------+ - | 8aff18b1-1e5b-45df-ade1-44ed0e75ca5e | default/demo-7dd477695c-fbq4r:8080 | 1ea4a08913d74aff8ed3e3bf31851236 | ACTIVE | 10.0.1.116 | 8080 | NO_MONITOR | 1 | - | 2c2c7a54-ad38-4182-b34f-daec03ee0a9a | default/demo-7dd477695c-25s99:8080 | 1ea4a08913d74aff8ed3e3bf31851236 | ACTIVE | 10.0.1.122 | 8080 | NO_MONITOR | 1 | - +--------------------------------------+------------------------------------+----------------------------------+---------------------+------------+---------------+------------------+--------+ - - $ kubectl get klb demo -o yaml - apiVersion: openstack.org/v1 - kind: KuryrLoadBalancer - metadata: - creationTimestamp: "2020-12-21T15:31:48Z" - finalizers: - - kuryr.openstack.org/kuryrloadbalancer-finalizers - generation: 7 - name: demo - namespace: default - resourceVersion: "714" - selfLink: /apis/openstack.org/v1/namespaces/default/kuryrloadbalancers/demo - uid: 3a97dfad-ad19-45da-8544-72d837ca704a - spec: - endpointSlices: - - endpoints: - - addresses: - - 10.0.1.116 - conditions: - ready: true - targetRef: - kind: Pod - name: demo-7dd477695c-fbq4r - namespace: default - resourceVersion: "592" - uid: 35d2b8ef-1f0b-4859-b6a2-f62e35418d22 - - addresses: - - 10.0.1.122 - conditions: - ready: true - targetRef: - kind: Pod - name: demo-7dd477695c-25s99 - namespace: default - resourceVersion: "524" - uid: 27437c01-488b-43cd-bba3-9a70c1778598 - ports: - - port: 8080 - protocol: TCP - ip: 10.0.0.140 - ports: - - port: 80 - protocol: TCP - targetPort: "8080" - project_id: 1ea4a08913d74aff8ed3e3bf31851236 - provider: amphora - security_groups_ids: - - 30cd7a25-3628-449c-992f-d23bdc4d1086 - - aaffa1a5-4b7e-4257-a444-1d39fb61ea22 - subnet_id: 3e043d77-c1b1-4374-acd5-a87a5f7a8c25 - type: ClusterIP - status: - listeners: - - id: 8aebeb5e-bccc-4519-8b68-07847c1b5b73 - loadbalancer_id: 994893a7-d67f-4af2-b2fe-5a03f03102b1 - name: default/demo:TCP:80 - port: 80 - project_id: 1ea4a08913d74aff8ed3e3bf31851236 - protocol: TCP - loadbalancer: - id: 994893a7-d67f-4af2-b2fe-5a03f03102b1 - ip: 10.0.0.140 - name: default/demo - port_id: 967688f5-55a7-4f84-a021-0fdf64152a8b - 
project_id: 1ea4a08913d74aff8ed3e3bf31851236
      provider: amphora
      security_groups:
      - 30cd7a25-3628-449c-992f-d23bdc4d1086
      - aaffa1a5-4b7e-4257-a444-1d39fb61ea22
      subnet_id: 3e043d77-c1b1-4374-acd5-a87a5f7a8c25
    members:
    - id: 8aff18b1-1e5b-45df-ade1-44ed0e75ca5e
      ip: 10.0.1.116
      name: default/demo-7dd477695c-fbq4r:8080
      pool_id: f5a61ce7-3e2f-4a33-bd1f-8f12b8d6a6aa
      port: 8080
      project_id: 1ea4a08913d74aff8ed3e3bf31851236
      subnet_id: 3e043d77-c1b1-4374-acd5-a87a5f7a8c25
    - id: 2c2c7a54-ad38-4182-b34f-daec03ee0a9a
      ip: 10.0.1.122
      name: default/demo-7dd477695c-25s99:8080
      pool_id: f5a61ce7-3e2f-4a33-bd1f-8f12b8d6a6aa
      port: 8080
      project_id: 1ea4a08913d74aff8ed3e3bf31851236
      subnet_id: 3e043d77-c1b1-4374-acd5-a87a5f7a8c25
    pools:
    - id: f5a61ce7-3e2f-4a33-bd1f-8f12b8d6a6aa
      listener_id: 8aebeb5e-bccc-4519-8b68-07847c1b5b73
      loadbalancer_id: 994893a7-d67f-4af2-b2fe-5a03f03102b1
      name: default/demo:TCP:80
      project_id: 1ea4a08913d74aff8ed3e3bf31851236
      protocol: TCP

We can see that both pods are included as members, that the demo service's
cluster-ip matches the load balancer's vip_address, and that the
KuryrLoadBalancer CRD above reflects the load balancer that was created.
To check load balancing among the pods, we curl the cluster-ip from one of
the pods and observe that the two pods take turns answering:

.. code-block:: console

   $ kubectl exec -it demo-7dd477695c-25s99 -- /bin/sh

   sh-4.2$ curl 10.0.0.140
   demo-7dd477695c-fbq4r: HELLO, I AM ALIVE!!!


   sh-4.2$ curl 10.0.0.140
   demo-7dd477695c-25s99: HELLO, I AM ALIVE!!!
diff --git a/doc/source/installation/testing_nested_connectivity.rst b/doc/source/installation/testing_nested_connectivity.rst
deleted file mode 100644
index 0f3022b3a..000000000
--- a/doc/source/installation/testing_nested_connectivity.rst
+++ /dev/null
@@ -1,57 +0,0 @@
===================================
Testing Nested Network Connectivity
===================================

Similarly to the baremetal testing, we can create a demo deployment, scale
it to any number of pods and expose the service to check if the deployment
was successful:

.. code-block:: console

   $ kubectl create deployment demo --image=quay.io/kuryr/demo
   $ kubectl scale deploy/demo --replicas=2
   $ kubectl expose deploy/demo --port=80 --target-port=8080

After a few seconds you can check that the pods are up and running and
that the Neutron subports have been created (and are in ACTIVE status) at
the undercloud:

.. code-block:: console

   (OVERCLOUD) $ kubectl get pods
   NAME                    READY     STATUS    RESTARTS   AGE
   demo-1575152709-4k19q   1/1       Running   0          2m
   demo-1575152709-vmjwx   1/1       Running   0          12s

   (UNDERCLOUD) $ openstack port list | grep demo
   | 1019bc07-fcdd-4c78-adbd-72a04dffd6ba | demo-1575152709-4k19q | fa:16:3e:b5:de:1f | ip_address='10.0.0.65', subnet_id='b98d40d1-57ac-4909-8db5-0bf0226719d8' | ACTIVE |
   | 33c4d79f-4fde-4817-b672-a5ec026fa833 | demo-1575152709-vmjwx | fa:16:3e:32:58:38 | ip_address='10.0.0.70', subnet_id='b98d40d1-57ac-4909-8db5-0bf0226719d8' | ACTIVE |

Then, we can check that the service has been created, as well as the
respective load balancer at the undercloud. A sketch of the same check
with openstacksdk is shown below, followed by the CLI session.
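The sketch assumes a clouds.yaml cloud named ``undercloud`` pointing at
the undercloud and an Octavia-style load balancer API; Kuryr names service
load balancers after the Kubernetes "<namespace>/<name>" pair:

.. code-block:: python

   import openstack

   conn = openstack.connect(cloud='undercloud')

   # The demo service's load balancer is named "default/demo".
   lb = conn.load_balancer.find_load_balancer('default/demo')
   print(lb.vip_address, lb.provisioning_status)

The CLI session:

.. 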
code-block:: console

   (OVERCLOUD) $ kubectl get svc
   NAME             CLUSTER-IP   EXTERNAL-IP   PORT(S)   AGE
   svc/demo         10.0.0.171   <none>        80/TCP    1m
   svc/kubernetes   10.0.0.129   <none>        443/TCP   45m

   (UNDERCLOUD) $ openstack loadbalancer list
   +--------------------------------------+--------------------+----------------------------------+-------------+---------------------+----------+
   | id                                   | name               | tenant_id                        | vip_address | provisioning_status | provider |
   +--------------------------------------+--------------------+----------------------------------+-------------+---------------------+----------+
   | a3b85089-1fbd-47e1-a697-bbdfd0fa19e3 | default/kubernetes | 672bc45aedfe4ec7b0e90959b1029e30 | 10.0.0.129  | ACTIVE              | haproxy  |
   | e55b3f75-15dc-4bc5-b4f4-bce65fc15aa4 | default/demo       | e4757688696641218fba0bac86ff7117 | 10.0.0.171  | ACTIVE              | haproxy  |
   +--------------------------------------+--------------------+----------------------------------+-------------+---------------------+----------+


Finally, you can log in to one of the containers and curl the service IP
to check that each time a different pod answers the request:

.. code-block:: console

   $ kubectl exec -it demo-1575152709-4k19q -- /bin/sh
   sh-4.2$ curl 10.0.0.171
   demo-1575152709-4k19q: HELLO, I AM ALIVE!!!
   sh-4.2$ curl 10.0.0.171
   demo-1575152709-vmjwx: HELLO, I AM ALIVE!!!
diff --git a/doc/source/installation/testing_sctp_services.rst b/doc/source/installation/testing_sctp_services.rst
deleted file mode 100644
index 8ca072703..000000000
--- a/doc/source/installation/testing_sctp_services.rst
+++ /dev/null
@@ -1,209 +0,0 @@
=====================
Testing SCTP Services
=====================

In this example, we will use the `kuryr-sctp-demo`_ image. This image
implements an SCTP server that listens on port 9090 and responds to the
client when a packet is received.

We first create a deployment named sctp-demo using the deployment manifest
(deploy.yml) below:

.. code-block:: yaml

   apiVersion: apps/v1
   kind: Deployment
   metadata:
     name: sctp-demo
     labels:
       app: server
   spec:
     replicas: 2
     selector:
       matchLabels:
         app: server
     template:
       metadata:
         labels:
           app: server
       spec:
         containers:
         - name: sctp-demo
           image: tabbie/kuryr-sctp-demo:v2.1
           ports:
           - containerPort: 9090

.. code-block:: console

   $ kubectl apply -f deploy.yml
   deployment.apps/sctp-demo created

At this point we should have two pods running the `kuryr-sctp-demo`_
image:

.. code-block:: console

   $ kubectl get pods
   NAME                         READY   STATUS    RESTARTS   AGE
   sctp-demo-65fcf85ddb-8vnrq   1/1     Running   0          40s
   sctp-demo-65fcf85ddb-zg7nq   1/1     Running   0          109s

Next, we expose the deployment as a service, setting the SCTP port to 90:

.. note::

   In order to successfully expose the deployment as a service, ensure
   that the Octavia provider in use by Kuryr has SCTP support.

.. code-block:: console

   $ kubectl get svc
   NAME         TYPE        CLUSTER-IP   EXTERNAL-IP   PORT(S)   AGE
   kubernetes   ClusterIP   10.0.0.129   <none>        443/TCP   36h

   $ kubectl expose deploy/sctp-demo --protocol=SCTP --port=90 --target-port=9090
   service/sctp-demo exposed

   $ kubectl get svc
   NAME         TYPE        CLUSTER-IP   EXTERNAL-IP   PORT(S)   AGE
   kubernetes   ClusterIP   10.0.0.129   <none>        443/TCP   36h
   sctp-demo    ClusterIP   10.0.0.158   <none>        90/SCTP   42s

Now, let's check the OpenStack load balancer created by Kuryr for the
**sctp-demo** service:

.. 
code-block:: console - - $ openstack loadbalancer list - +--------------------------------------+--------------------+----------------------------------+-------------+---------------------+----------+ - | id | name | project_id | vip_address | provisioning_status | provider | - +--------------------------------------+--------------------+----------------------------------+-------------+---------------------+----------+ - | 4d219ac7-2592-4d33-8afa-12994c5d82ec | default/kubernetes | 2e89a9e0a50d42d1be8054a80530b836 | 10.0.0.129 | ACTIVE | amphora | - | 96b38be3-1183-41c5-a0db-d246ef1d07cb | default/sctp-demo | 2e89a9e0a50d42d1be8054a80530b836 | 10.0.0.158 | ACTIVE | amphora | - +--------------------------------------+--------------------+----------------------------------+-------------+---------------------+----------+ - - $ openstack loadbalancer show default/sctp-demo - +---------------------+--------------------------------------+ - | Field | Value | - +---------------------+--------------------------------------+ - | admin_state_up | True | - | availability_zone | None | - | created_at | 2021-01-11T10:01:15 | - | description | | - | flavor_id | None | - | id | 96b38be3-1183-41c5-a0db-d246ef1d07cb | - | listeners | eda5caa0-083a-4c45-a2e5-38c243b2c970 | - | name | default/sctp-demo | - | operating_status | ONLINE | - | pools | 0935f099-d901-4f39-8090-392a527cbc35 | - | project_id | 2e89a9e0a50d42d1be8054a80530b836 | - | provider | amphora | - | provisioning_status | ACTIVE | - | updated_at | 2021-01-11T10:05:30 | - | vip_address | 10.0.0.158 | - | vip_network_id | 13190422-869c-4259-ba3b-6a41be79a671 | - | vip_port_id | 64da8e72-8469-4ac6-a0e6-ec60ca02b96a | - | vip_qos_policy_id | None | - | vip_subnet_id | 0041469e-371c-417f-83df-94ca8f202eab | - +---------------------+--------------------------------------+ - -Checking the load balancer's details, we can see that the load balancer is -listening on SCTP port 90: - -.. code-block:: console - - $ openstack loadbalancer listener show eda5caa0-083a-4c45-a2e5-38c243b2c970 - +-----------------------------+--------------------------------------+ - | Field | Value | - +-----------------------------+--------------------------------------+ - | admin_state_up | True | - | connection_limit | -1 | - | created_at | 2021-01-11T10:04:31 | - | default_pool_id | 0935f099-d901-4f39-8090-392a527cbc35 | - | default_tls_container_ref | None | - | description | | - | id | eda5caa0-083a-4c45-a2e5-38c243b2c970 | - | insert_headers | None | - | l7policies | | - | loadbalancers | 96b38be3-1183-41c5-a0db-d246ef1d07cb | - | name | default/sctp-demo:SCTP:90 | - | operating_status | ONLINE | - | project_id | 2e89a9e0a50d42d1be8054a80530b836 | - | protocol | SCTP | - | protocol_port | 90 | - | provisioning_status | ACTIVE | - | sni_container_refs | [] | - | timeout_client_data | 50000 | - | timeout_member_connect | 5000 | - | timeout_member_data | 50000 | - | timeout_tcp_inspect | 0 | - | updated_at | 2021-01-11T10:05:30 | - | client_ca_tls_container_ref | None | - | client_authentication | NONE | - | client_crl_container_ref | None | - | allowed_cidrs | None | - | tls_ciphers | None | - | tls_versions | None | - | alpn_protocols | None | - +-----------------------------+--------------------------------------+ - -And the load balancer has a pool with two members listening on SCTP port 9090: - -.. 
code-block:: console

   $ openstack loadbalancer pool list
   +--------------------------------------+---------------------------+----------------------------------+---------------------+----------+--------------+----------------+
   | id                                   | name                      | project_id                       | provisioning_status | protocol | lb_algorithm | admin_state_up |
   +--------------------------------------+---------------------------+----------------------------------+---------------------+----------+--------------+----------------+
   | c69a87a5-078e-4c2b-84d4-0a2691c58f07 | default/kubernetes:443    | 2e89a9e0a50d42d1be8054a80530b836 | ACTIVE              | HTTPS    | ROUND_ROBIN  | True           |
   | 0935f099-d901-4f39-8090-392a527cbc35 | default/sctp-demo:SCTP:90 | 2e89a9e0a50d42d1be8054a80530b836 | ACTIVE              | SCTP     | ROUND_ROBIN  | True           |
   +--------------------------------------+---------------------------+----------------------------------+---------------------+----------+--------------+----------------+

   $ openstack loadbalancer member list default/sctp-demo:SCTP:90
   +--------------------------------------+-----------------------------------------+----------------------------------+---------------------+-----------+---------------+------------------+--------+
   | id                                   | name                                    | project_id                       | provisioning_status | address   | protocol_port | operating_status | weight |
   +--------------------------------------+-----------------------------------------+----------------------------------+---------------------+-----------+---------------+------------------+--------+
   | abeec334-56b1-4535-a238-71424d78590e | default/sctp-demo-65fcf85ddb-zg7nq:9090 | 2e89a9e0a50d42d1be8054a80530b836 | ACTIVE              | 10.0.0.75 | 9090          | NO_MONITOR       | 1      |
   | 826345b0-1264-421d-b9e0-8756f7bc0d21 | default/sctp-demo-65fcf85ddb-8vnrq:9090 | 2e89a9e0a50d42d1be8054a80530b836 | ACTIVE              | 10.0.0.88 | 9090          | NO_MONITOR       | 1      |
   +--------------------------------------+-----------------------------------------+----------------------------------+---------------------+-----------+---------------+------------------+--------+

At this point, we have both the Kubernetes service and the corresponding
OpenStack load balancer running, and we are ready to run the client
application.

For the client application we will use the `sctp_client`_ Python script.
The SCTP client script sends an SCTP message towards a specific IP and
port, and waits for a response from the server. The client application
communicates with the server by leveraging OpenStack load balancer
functionality.

For the client application to work, the Python SCTP module needs to be
installed in our environment, and we need an SCTP-aware kernel (most are).
First we install the following packages: libsctp-dev, libsctp1 and
lksctp-tools, and then install the module:

.. code-block:: console

   $ sudo apt-get install libsctp-dev libsctp1 lksctp-tools
   $ pip3 install pysctp


And we need the SCTP server service IP and port:

.. code-block:: console

   $ kubectl get svc sctp-demo
   NAME        TYPE        CLUSTER-IP   EXTERNAL-IP   PORT(S)   AGE
   sctp-demo   ClusterIP   10.0.0.158   <none>        90/SCTP   67m

The last step is to connect to the SCTP server service:

.. code-block:: console

   $ python3 sctp_client.py 10.0.0.158 90
   Sending Message
   sctp-demo-65fcf85ddb-zg7nq: HELLO, I AM ALIVE!!!

   $ python3 sctp_client.py 10.0.0.158 90
   Sending Message
   sctp-demo-65fcf85ddb-8vnrq: HELLO, I AM ALIVE!!!

.. _kuryr-sctp-demo: https://hub.docker.com/repository/docker/tabbie/kuryr-sctp-demo
.. 
_sctp_client: https://github.com/openstack/kuryr-kubernetes/blob/master/contrib/sctp_client.py diff --git a/doc/source/installation/testing_udp_services.rst b/doc/source/installation/testing_udp_services.rst deleted file mode 100644 index 50b23b68b..000000000 --- a/doc/source/installation/testing_udp_services.rst +++ /dev/null @@ -1,177 +0,0 @@ -==================== -Testing UDP Services -==================== - -In this example, we will use the `kuryr-udp-demo`_ image. This image -implements a simple UDP server that listens on port 9090, and replies towards -client when a packet is received. - -We first create a deployment named demo: - -.. code-block:: console - - $ kubectl create deployment --image=yboaron/kuryr-udp-demo demo - deployment "demo" created - -As the next step, we will scale the deployment to 2 pods: - -.. code-block:: console - - $ kubectl scale deploy/demo --replicas=2 - deployment "demo" scaled - -At this point we should have two pods running the `kuryr-udp-demo`_ image: - -.. code-block:: console - - $ kubectl get pods - NAME READY STATUS RESTARTS AGE - demo-fbb89f54c-92ttl 1/1 Running 0 31s - demo-fbb89f54c-q9fq7 1/1 Running 0 1m - -Next, we expose the deployment as a service, setting UDP port to 90: - -.. code-block:: console - - $ kubectl get svc - NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE - kubernetes ClusterIP 10.0.0.129 443/TCP 17m - - $ kubectl expose deploy/demo --protocol UDP --port 90 --target-port 9090 - service "demo" exposed - - $ kubectl get svc - NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE - demo ClusterIP 10.0.0.150 90/UDP 16s - kubernetes ClusterIP 10.0.0.129 443/TCP 17m - -Now, let's check the OpenStack load balancer created by Kuryr for **demo** -service: - -.. code-block:: console - - $ openstack loadbalancer list - +--------------------------------------+--------------------+----------------------------------+-------------+---------------------+----------+ - | id | name | project_id | vip_address | provisioning_status | provider | - +--------------------------------------+--------------------+----------------------------------+-------------+---------------------+----------+ - | eb5123e8-6bb5-4680-ac64-dcf25c57ced3 | default/kubernetes | fdc9ac3b36474fbf8c7ab77f4f783ec5 | 10.0.0.129 | ACTIVE | amphora | - | 67f19a39-dfb9-4a7a-bafe-7d6789982d91 | default/demo | fdc9ac3b36474fbf8c7ab77f4f783ec5 | 10.0.0.150 | ACTIVE | amphora | - +--------------------------------------+--------------------+----------------------------------+-------------+---------------------+----------+ - - $ openstack loadbalancer show default/demo - +---------------------+--------------------------------------+ - | Field | Value | - +---------------------+--------------------------------------+ - | admin_state_up | True | - | created_at | 2018-10-09T06:06:14 | - | description | | - | flavor | | - | id | 67f19a39-dfb9-4a7a-bafe-7d6789982d91 | - | listeners | 7b374ecf-80c4-44be-a725-9b0c3fa2d0fa | - | name | default/demo | - | operating_status | ONLINE | - | pools | d549df5b-e008-49a6-8695-b6578441553e | - | project_id | fdc9ac3b36474fbf8c7ab77f4f783ec5 | - | provider | amphora | - | provisioning_status | ACTIVE | - | updated_at | 2018-10-09T06:07:53 | - | vip_address | 10.0.0.150 | - | vip_network_id | eee6af72-9fbb-48b5-8e52-9f8bdf61cbab | - | vip_port_id | ccd8be94-c65e-4bb2-afe7-44aa3d0617ea | - | vip_qos_policy_id | None | - | vip_subnet_id | 3376291d-6c23-48cb-b6c6-37cefd57f914 | - +---------------------+--------------------------------------+ - -Checking the load balancer's 
details, we can see that the load balancer is -listening on UDP port 90: - -.. code-block:: console - - $ openstack loadbalancer listener show 7b374ecf-80c4-44be-a725-9b0c3fa2d0fa - +---------------------------+--------------------------------------+ - | Field | Value | - +---------------------------+--------------------------------------+ - | admin_state_up | True | - | connection_limit | -1 | - | created_at | 2018-10-09T06:07:37 | - | default_pool_id | d549df5b-e008-49a6-8695-b6578441553e | - | default_tls_container_ref | None | - | description | | - | id | 7b374ecf-80c4-44be-a725-9b0c3fa2d0fa | - | insert_headers | None | - | l7policies | | - | loadbalancers | 67f19a39-dfb9-4a7a-bafe-7d6789982d91 | - | name | default/demo:UDP:90 | - | operating_status | ONLINE | - | project_id | fdc9ac3b36474fbf8c7ab77f4f783ec5 | - | protocol | UDP | - | protocol_port | 90 | - | provisioning_status | ACTIVE | - | sni_container_refs | [] | - | timeout_client_data | 50000 | - | timeout_member_connect | 5000 | - | timeout_member_data | 50000 | - | timeout_tcp_inspect | 0 | - | updated_at | 2018-10-09T06:07:53 | - +---------------------------+--------------------------------------+ - -And the load balancer has two members listening on UDP port 9090: - -.. code-block:: console - - $ openstack loadbalancer member list d549df5b-e008-49a6-8695-b6578441553e - +--------------------------------------+-----------------------------------+----------------------------------+---------------------+-----------+---------------+------------------+--------+ - | id | name | project_id | provisioning_status | address | protocol_port | operating_status | weight | - +--------------------------------------+-----------------------------------+----------------------------------+---------------------+-----------+---------------+------------------+--------+ - | b2c63e7b-47ed-4a6f-b8bb-acaa6742a0ad | default/demo-fbb89f54c-q9fq7:9090 | fdc9ac3b36474fbf8c7ab77f4f783ec5 | ACTIVE | 10.0.0.74 | 9090 | ONLINE | 1 | - | 7fa773b1-cf76-4a0b-8004-153423e59ef6 | default/demo-fbb89f54c-92ttl:9090 | fdc9ac3b36474fbf8c7ab77f4f783ec5 | ACTIVE | 10.0.0.88 | 9090 | ONLINE | 1 | - +--------------------------------------+-----------------------------------+----------------------------------+---------------------+-----------+---------------+------------------+--------+ - -At this point, we have both the kubernetes **demo** service and corresponding -openstack load balancer running, and we are ready to run the client -application. - -For the client application we will use the `udp-client`_ python script. The UDP -client script sends UDP message towards specific IP and port, and waits for a -response from the server. The way that the client application can communicate -with the server is by leveraging the Kubernetes service functionality. - -First we clone the client script: - -.. code-block:: console - - $ git clone https://github.com/yboaron/udp-client-script.git - Cloning into 'udp-client-script'... - remote: Enumerating objects: 15, done. - remote: Counting objects: 100% (15/15), done. - remote: Compressing objects: 100% (13/13), done. - remote: Total 15 (delta 4), reused 3 (delta 1), pack-reused 0 - Unpacking objects: 100% (15/15), done. - $ - -And we need the UDP server service IP and port: - -.. code-block:: console - - $ kubectl get svc demo - NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE - demo ClusterIP 10.0.0.150 90/UDP 20m - $ - -Last step will be to ping the UDP server service: - -.. 
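note::

   The exchange the client performs boils down to a single UDP round
   trip, which can be sketched with the standard library alone. This is
   only an illustration, not the `udp-client`_ script itself; the address
   and port are the service values shown above:

   .. code-block:: python

      import socket

      SERVER = ('10.0.0.150', 90)

      # Send one datagram and wait for the answering pod's reply.
      with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as sock:
          sock.settimeout(5)
          sock.sendto(b'HELLO', SERVER)
          data, _ = sock.recvfrom(1024)
          print(data.decode())

.. 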
code-block:: console

   $ python udp-client-script/client.py 10.0.0.150 90
   demo-fbb89f54c-92ttl: HELLO, I AM ALIVE!!!

   $ python udp-client-script/client.py 10.0.0.150 90
   demo-fbb89f54c-q9fq7: HELLO, I AM ALIVE!!!

Since the `kuryr-udp-demo`_ application prefixes its reply with the pod's
name, it is plain to see that both of the service's pods are replying to
the requests from the client.


.. _kuryr-udp-demo: https://hub.docker.com/r/yboaron/kuryr-udp-demo/
.. _udp-client: https://github.com/yboaron/udp-client-script
diff --git a/doc/source/installation/trunk_ports.rst b/doc/source/installation/trunk_ports.rst
deleted file mode 100644
index f69803991..000000000
--- a/doc/source/installation/trunk_ports.rst
+++ /dev/null
@@ -1,53 +0,0 @@
=========================
Boot VM with a Trunk Port
=========================

To create a VM that makes use of the Neutron trunk port support, the
following steps can be used:

#. Use the demo tenant and create a key to be used to log in to the
   overcloud VM:

   .. code-block:: console

      $ source ~/devstack/openrc demo
      $ openstack keypair create demo > id_rsa_demo
      $ chmod 600 id_rsa_demo

#. Ensure the demo default security group allows ping and ssh access:

   .. code-block:: console

      $ openstack security group rule create --protocol icmp default
      $ openstack security group rule create --protocol tcp --dst-port 22 default

#. Download and import an image that supports VLANs, as CirrOS does not:

   .. code-block:: console

      $ wget http://cloud.centos.org/centos/7/images/CentOS-7-x86_64-GenericCloud.qcow2
      $ openstack image create --container-format bare --disk-format qcow2 --file CentOS-7-x86_64-GenericCloud.qcow2 centos7

#. Create a port for the overcloud VM and create the trunk with that port
   as the parent port (untagged traffic):

   .. code-block:: console

      $ openstack port create --network private --security-group default port0
      $ openstack network trunk create --parent-port port0 trunk0

#. Create the overcloud VM and assign a floating IP to it to be able to
   log in to it:

   .. code-block:: console

      $ openstack server create --image centos7 --flavor ds4G --nic port-id=port0 --key-name demo overcloud_vm
      $ openstack floating ip create --port port0 public

   Note that subports can be added to the trunk port and used inside the
   VM with a specific VLAN, 102 in the example, by doing:

   .. code-block:: console

      $ openstack network trunk set --subport port=subport0,segmentation-type=vlan,segmentation-id=102 trunk0
diff --git a/doc/source/installation/upgrades.rst b/doc/source/installation/upgrades.rst
deleted file mode 100644
index 43a92b1fb..000000000
--- a/doc/source/installation/upgrades.rst
+++ /dev/null
@@ -1,91 +0,0 @@
==========================
Upgrading kuryr-kubernetes
==========================

Kuryr-Kubernetes supports the standard OpenStack utility for checking that
an upgrade is possible and safe:

.. code-block:: console

   $ kuryr-k8s-status upgrade check
   +---------------------------------------+
   | Upgrade Check Results                 |
   +---------------------------------------+
   | Check: Pod annotations                |
   | Result: Success                       |
   | Details: All annotations are updated. |
   +---------------------------------------+

If any issues are found, the utility will give you an explanation and
possible remediations. Also note that *Warning* results aren't blocking an
upgrade, but are worth investigating.
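In automated upgrade pipelines the check is convenient to gate on; a
sketch, assuming the command exits non-zero when a check fails (verify
that against your release before relying on it):

.. code-block:: python

   import subprocess
   import sys

   # Run the standard upgrade check and abort the pipeline on failure.
   result = subprocess.run(['kuryr-k8s-status', 'upgrade', 'check'])
   if result.returncode != 0:
       sys.exit('kuryr-k8s-status reported failed checks, aborting')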
Stein (0.6.x) to T (0.7.x) upgrade
----------------------------------

In T we want to drop support for the old format of Pod annotations (the
switch was motivated by the multi-vif support feature implemented in
Rocky). To make sure that you don't have unsupported Pod annotations, you
need to run the ``kuryr-k8s-status upgrade check`` utility **before
upgrading Kuryr-Kubernetes services to T**.

.. note::

   In case of running Kuryr-Kubernetes containerized, you can use
   ``kubectl exec`` to run kuryr-k8s-status:

   .. code-block:: console

      $ kubectl -n kube-system exec -it <kuryr-controller-pod> -- kuryr-k8s-status upgrade check

.. code-block:: console

   $ kuryr-k8s-status upgrade check
   +---------------------------------------+
   | Upgrade Check Results                 |
   +---------------------------------------+
   | Check: Pod annotations                |
   | Result: Success                       |
   | Details: All annotations are updated. |
   +---------------------------------------+

In case of a *Failure* result of the *Pod annotations* check, you should
run the ``kuryr-k8s-status upgrade update-annotations`` command and check
again:

.. code-block:: console

   $ kuryr-k8s-status upgrade check
   +----------------------------------------------------------------------+
   | Upgrade Check Results                                                 |
   +----------------------------------------------------------------------+
   | Check: Pod annotations                                                |
   | Result: Failure                                                       |
   | Details: You have 3 Kuryr pod annotations in old format. You need to  |
   |          run `kuryr-k8s-status upgrade update-annotations`            |
   |          before proceeding with the upgrade.                          |
   +----------------------------------------------------------------------+
   $ kuryr-k8s-status upgrade update-annotations
   +-----------------------+--------+
   | Stat                  | Number |
   +-----------------------+--------+
   | Updated annotations   | 3      |
   +-----------------------+--------+
   | Malformed annotations | 0      |
   +-----------------------+--------+
   | Annotations left      | 0      |
   +-----------------------+--------+
   $ kuryr-k8s-status upgrade check
   +---------------------------------------+
   | Upgrade Check Results                 |
   +---------------------------------------+
   | Check: Pod annotations                |
   | Result: Success                       |
   | Details: All annotations are updated. |
   +---------------------------------------+

It's possible that some annotations were somehow malformed. That will
generate a warning that should be investigated, but it isn't blocking the
upgrade to T (it won't make things any worse).

If you ever need to roll back those changes, there is a
``kuryr-k8s-status upgrade downgrade-annotations`` command as well.
diff --git a/doc/source/nested_vlan_mode.rst b/doc/source/nested_vlan_mode.rst
deleted file mode 100644
index c32be36c9..000000000
--- a/doc/source/nested_vlan_mode.rst
+++ /dev/null
@@ -1,66 +0,0 @@
=================================
Kuryr-Kubernetes nested VLAN mode
=================================

Kuryr-Kubernetes can work in two basic modes - nested and standalone. The
main use case of the project, which is to support Kubernetes running on
OpenStack VMs, is implemented with the nested mode. The standalone mode is
mostly used for testing.

This document describes the nested VLAN mode.


Requirements
============

Nested VLAN mode requires Neutron to have the `trunk` extension enabled,
which adds trunk port functionality to the Neutron API.


Principle
=========

This mode targets the use case of kuryr-kubernetes providing networking
for a Kubernetes cluster running in VMs on OpenStack.
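Whether the trunk extension required above is actually enabled can be
checked with openstacksdk; a short sketch (assuming a configured
clouds.yaml with a cloud named ``devstack``):

.. code-block:: python

   import openstack

   conn = openstack.connect(cloud='devstack')

   # Neutron advertises trunk support as the 'trunk' API extension.
   if conn.network.find_extension('trunk') is None:
       raise SystemExit('Neutron trunk extension is not enabled')

.. 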
note::

   A natural consideration here is running kuryr-kubernetes in containers
   on that K8s cluster. For more on that, see the :ref:`containerized`
   section.

The principle of nested VLAN is that Kuryr-Kubernetes requires the main
interface of the K8s worker VMs to be a trunk port. Each pod then gets a
subport of that trunk attached into its network namespace.


How to configure
================

You need to set several options in kuryr.conf:

.. code-block:: ini

   [binding]
   default_driver = kuryr.lib.binding.drivers.vlan
   # Name of the trunk port interface on VMs. If not provided Kuryr will
   # try to autodetect it.
   link_iface = ens3

   [kubernetes]
   pod_vif_driver = nested-vlan
   vif_pool_driver = nested  # If using port pools.

   [pod_vif_nested]
   # ID of the subnet in which worker node VMs are running (if multiple,
   # join with a comma).
   worker_nodes_subnets = <id-of-worker-nodes-subnet>

Also, if you want to run several Kubernetes clusters in one OpenStack
tenant, you need to make sure Kuryr-Kubernetes instances are able to
distinguish their own resources from resources created by other
instances. In order to do that you need to configure Kuryr-Kubernetes to
tag resources with a unique ID:

.. code-block:: ini

   [neutron_defaults]
   resource_tags = <unique-id-of-this-kuryr-instance>
diff --git a/doc/source/readme.rst b/doc/source/readme.rst
deleted file mode 100644
index a6210d3d8..000000000
--- a/doc/source/readme.rst
+++ /dev/null
@@ -1 +0,0 @@
.. include:: ../../README.rst
diff --git a/doc/source/specs/pike/contrail_support.rst b/doc/source/specs/pike/contrail_support.rst
deleted file mode 100644
index 082696ffa..000000000
--- a/doc/source/specs/pike/contrail_support.rst
+++ /dev/null
@@ -1,80 +0,0 @@
=========================================
Kuryr Kubernetes OpenContrail Integration
=========================================

https://blueprints.launchpad.net/kuryr-kubernetes/+spec/kuryr-k8s-contrail-integration

This spec proposes how to integrate OpenContrail with Kuryr-Kubernetes.
OpenContrail is an open source project that provides network
virtualization functionality to OpenStack. Integrating the two will allow
Kuryr to be used to bridge container-VM networking in a Contrail-based
OpenStack deployment.

Problem Description
===================

OpenContrail is one of the largest SDN platforms, but it currently does
not work with Kuryr-Kubernetes. The goal of this blueprint is to provide
Kuryr with the correct driver, so that a Kubernetes-hosted container can
use Kuryr-Kubernetes to correctly interface with an OpenContrail-based
network. In this configuration, OpenContrail takes the place of Open
vSwitch, the L2/L3 functionality, etc., that normally come with using
Neutron as the default implementation.

Use Cases
---------

Kuryr will act as the container networking interface for OpenContrail.
This patch set will allow a bare-metal, Kubernetes-hosted container to
interact with VMs in an OpenStack virtual network. This means we have to
have a way to plug, unplug and bridge the container.

Use Case 1: Enable container based workloads to communicate with
OpenStack hosted VM workloads in Contrail SDN environments.

Use Case 2: Allow Kubernetes workloads to leverage advanced OpenContrail
based networking.

Use Case 3: Enable Kubernetes to create virtual networks via Contrail.

Proposed Change
===============

This change will add a driver to Kuryr-Kubernetes that has all of the
functionality of the CNI, specifically for OpenContrail.
The driver will feature the plug() and unplug() commands that grant the
container network access.

Community Impact
----------------

This spec invites the community to collaborate on a unified solution to
support Contrail integration within Kuryr-Kubernetes.

Implementation
==============

Assignee(s)
-----------

Darla Ahlert
Steve Kipp

Work Items
----------

1. Implement a bare-bones os-vif plugin similar to [1], only worrying
   about plug and unplug. We will implement this within Kuryr-Kubernetes
   for now and eventually merge it to openstack/os-vif.
2. Look into serialization for OpenContrail, using [2] as a reference if
   needed.
3. Look into binding for OpenContrail, similar to the OVS binding [3].
4. Implement unit tests for the added code.
5. Add a gate to install the OpenContrail components.

Added Paths for New Code:
   kuryr-kubernetes/cni/os-vif/opencontrail.py

References
==========

[1] https://github.com/openstack/os-vif/blob/master/vif_plug_ovs/ovs.py
[2] https://github.com/openstack/kuryr-kubernetes/blob/794ec706c5fbe0da6e49bf20ba2439d8eb39ae7e/kuryr_kubernetes/os_vif_util.py#L258-L281
[3] https://github.com/openstack/kuryr-kubernetes/blob/794ec706c5fbe0da6e49bf20ba2439d8eb39ae7e/kuryr_kubernetes/cni/binding/bridge.py
diff --git a/doc/source/specs/pike/fuxi_kubernetes.rst b/doc/source/specs/pike/fuxi_kubernetes.rst
deleted file mode 100644
index 887201a62..000000000
--- a/doc/source/specs/pike/fuxi_kubernetes.rst
+++ /dev/null
@@ -1,176 +0,0 @@
..
   This work is licensed under a Creative Commons Attribution 3.0 Unported
   License.

   http://creativecommons.org/licenses/by/3.0/legalcode

===========================
Fuxi Kubernetes Integration
===========================

https://blueprints.launchpad.net/kuryr-kubernetes/+spec/fuxi-kubernetes

This spec proposes an approach to integrate Kubernetes with various
OpenStack storage services, such as Cinder and Manila.

Kubernetes is a platform for automating deployment, scaling and
operations of application containers across clusters of hosts. Kubernetes
currently supports a Cinder plugin that manages Cinder volumes and makes
them available to pods. In addition, there are a number of third-party
implementations of Kubernetes volume plugins (i.e. Flocker, REX-Ray) that
allow Kubernetes to manage volumes backed by various cloud storage
providers, including OpenStack Cinder. All these solutions are mainly for
overcloud use cases, in which pods are deployed on a set of cloud
instances. In production, there is another class of use cases, referred
to as the undercloud use cases, in which Kubernetes is deployed as a
control plane service and pods run on servers other than cloud instances
(i.e. baremetal servers).

This spec proposes a solution that will address both overcloud and
undercloud use cases. The specific requirements are as follows:

- Integrate Kubernetes with various OpenStack storage services, such as
  Cinder and Manila.
- Support the various backends that are supported by the OpenStack
  storage services.
- Support various volume operations, including but not limited to
  provision, de-provision, attach, detach, mount, unmount, resize,
  snapshot, backup and restore.
- Support Kubernetes Persistent Volumes (PV) and Persistent Volume Claims
  (PVC) [1].
- Whenever possible, reuse existing services or frameworks. For example,
  Kuryr-kubernetes has the framework to watch the Kubernetes API and
  handle changes of resources.
Fuxi has implemented the logic of interfacing with various OpenStack
  storage services and their backends. This proposal suggests reusing
  Kuryr-kubernetes and Fuxi instead of re-inventing the equivalent
  functionality.
- Pluggable architecture: allow integrating with custom storage solutions
  via plugins.
- Massively scalable: support large-scale Kubernetes deployments (i.e.
  hundreds of nodes) with massive workloads (i.e. thousands of
  containers).

Problem Description
===================

There are several ways to integrate Kubernetes with OpenStack. If
Kubernetes is hosted on cloud servers provided by Nova, users can
leverage the Kubernetes cloud provider feature to provision and attach
Cinder volumes to hosts. However, if Kubernetes is hosted on servers
other than Nova instances, there is no perfect solution that connects
Kubernetes to the various OpenStack storage services.


Proposed Change
===============

In order to integrate Kubernetes with OpenStack and satisfy the
requirements above, this spec proposes to develop two components: a
volume provisioner and a FlexVolume driver. The volume provisioner runs
on the Kubernetes control plane and is responsible for watching the
Kubernetes API for changes of PVCs and provisioning PVs for them. The
FlexVolume driver will reside on each host that runs Kubelet, and it will
be called out by Kubelet to perform local operations, such as attaching
volumes to hosts. Both the volume provisioner and the FlexVolume driver
will consume OpenStack storage services via the Fuxi server.

.. image:: ../../../images/fuxi_k8s_components.png
   :alt: integration components
   :align: center
   :width: 100%


Volume Provisioner
------------------

The volume provisioner is responsible for watching the Kubernetes API for
PVCs and making sure the corresponding storage assets (i.e. Cinder
volumes) are provisioned, updated, or deleted in OpenStack. The volume
provisioner will implement the 'ResourceEventHandler' interface of
Kuryr-kubernetes for handling PVC events.

For each creation of a PVC in Kubernetes, the Kuryr-kubernetes API
watcher will trigger an event that will eventually be handled by the
volume provisioner. On receiving the event, the volume provisioner will
provision the appropriate storage asset in OpenStack and create a PV in
Kubernetes to represent it. The volume provisioning workflow will be in
compliance with Kubernetes's out-of-tree provisioning specification [2].
The provisioned PV will be populated with the information the volume
driver needs to connect to the provisioned storage asset later.

The volume provisioner will call the REST API of the Fuxi server to do
the actual provisioning, and the Fuxi server will in turn provision
storage assets by using a volume provider (i.e. the Cinder provider).
Note that Fuxi was originally designed to be a remote Docker volume
plugin, and this proposal proposes to reuse it for Fuxi Kubernetes.

Similarly, for each update or deletion of a PVC, the volume provisioner
will call the Fuxi server to update or delete the corresponding storage
assets in OpenStack and PVs in Kubernetes.


FlexVolume Driver
-----------------

FlexVolume [3] is a Kubernetes volume plugin that allows vendors to write
their own drivers to support custom storage solutions. This spec proposes
to implement a FlexVolume driver that enables Kubelet to consume the
provisioned storage assets.
The FlexVolume driver will implement the FlexVolume driver interface,
which consists of a set of 'call-outs'.

After the PVs are provisioned by the volume provisioner, they will be
picked up by Kubelet, and Kubelet will assign each PV to a volume plugin
based on its type. In our case, all PVs provisioned by our volume
provisioner will be set to the 'flexVolume' type, so FlexVolume will be
invoked to handle them. FlexVolume will parse the PVs to retrieve their
information and pass it down to our FlexVolume driver via 'call-outs'.

Generally speaking, PVs will serve as the medium through which the volume
provisioner communicates with the FlexVolume driver. The volume
provisioner is supposed to populate PVs with all the data that will be
consumed by the FlexVolume driver later. For example, the volume
provisioner might provision a Cinder volume and set the Cinder volume's
name in a field of the created PV, so that the name can be passed down to
the FlexVolume driver that will consume the Cinder volume.

The FlexVolume driver will leverage Fuxi to do the actual processing
(i.e. connecting to the volume). The initial implementation will assume
that an instance of the Fuxi server is deployed to each host that runs
the Kubelet/FlexVolume driver, so that the Fuxi server and the FlexVolume
driver can communicate via localhost. In a second phase, we will
investigate the possibility of a centralized Fuxi server that both the
volume provisioner and the FlexVolume driver consume. This might require
splitting Fuxi into a server and a library: the library would be
leveraged by the FlexVolume driver to perform local operations (i.e.
volume bind-mounting) and the server would serve cluster-wide requests.

Note that FlexVolume has several known drawbacks. For example, it invokes
drivers via shells, which requires the executables to be pre-installed in
the specified path. This deployment model doesn't work with operating
systems like CoreOS, in which the root file system is immutable. This
proposal suggests continuing to monitor the evolution of Kubernetes and
switching to a better solution if one shows up.


Alternatives
============

An alternative to the FlexVolume driver is to provide an implementation
of a Kubernetes volume plugin. An obstacle for this approach is that
Kubernetes doesn't support out-of-tree volume plugins (besides
FlexVolume) right now. Therefore, the Fuxi volume plugin would need to
reside in the Kubernetes tree and be released on a different schedule
from OpenStack.


Implementation
==============

Assignee(s)
-----------

Primary assignee:
Hongbin Lu


Work Items
----------

1. Implement a Kubernetes volume provisioner.
2. Implement a Kubernetes FlexVolume driver.


References
==========

[1] https://kubernetes.io/docs/concepts/storage/persistent-volumes/
[2] https://github.com/kubernetes/community/blob/master/contributors/design-proposals/volume-provisioning.md
[3] https://github.com/kubernetes/community/blob/master/contributors/devel/flexvolume.md
diff --git a/doc/source/specs/queens/network_policy.rst b/doc/source/specs/queens/network_policy.rst
deleted file mode 100644
index e90815039..000000000
--- a/doc/source/specs/queens/network_policy.rst
+++ /dev/null
@@ -1,613 +0,0 @@
======================
Network policy support
======================

By default all Kubernetes pods are non-isolated and they accept traffic
from any source.
"Network policy" is Kubernetes specification that defines how -groups of pods are allowed to communicate with each other and other network -endpoints [1]_. - -This Spec suggests a design for supporting Kubernetes "Network policy" in -Kuryr. - - -Problem Description -=================== - -Kubernetes "Network policies" define which traffic is allowed to be -sent or received by group of pods. - -Each network policy has 3 main parts [2]_: - -* Pod selector: Use kubernetes "label selector" [5]_ that defines on which - pods this policy should be applied. The relationship between pod and policy - is N to M. - Each pod can be selected by multiple network-policies (when an OR operator - is applied between the policies), and each policy can be attached to - multiple pods. -* Ingress section: defines which traffic can be received by selected pods. - It's defined by a "Cartesian product" of (allowed peers) and (protocol). - - * There are 3 ways to define Allowed peer: - - * Ip-address-block: allowed CIDR (also enable to exclude an inner CIDR). - * Pod selector: allowed set of pods defined by "label selector" [5]_. - * Namespace selector: list of namespaces; all pods that belong to that - namespaces are defined as allowed-peers. - - * Port is defined by 2 fields: - - * L4 protocol (TCP/UDP/ICMP..) - * L4 destination port - -* Egress section: defines which traffic can be sent from the selected pods. - This is defined in the same way as the ingress section. - -In order to support network-policy, kuryr-kubernetes should handle all the -events that are related to the network-policies and translate them into -Neutron objects for apply an equivalent network-topology to the one defined by -the Kubernetes policies. Neutron doesn't have a security API that is equivalent -to the kubernetes-network-policy. The translation should be done carefully -in order to achieve eventually consistent required topology, and avoid -corner-cases and race conditions. - -Proposed solution -================= - -This spec suggests to implement Kubernetes network-policy by leveraging Neutron -security-groups [4]_. -There is some similarity between security-groups and network-policy, but there -are also some definitions inside the network-policy that require some more -complex translation work. - -In order to provide a full translation between kubernetes-policies to security -groups, there are 3 main issues that need to be consider: - -* Translation of the kubernetes-policy to Neutron security-group object. - -* Attaching the security-group to the relevant-ports according to - the policy pod-selector. - -* Response to changes in the group of pods that selected by pod selectors and - namespace selector: - - * when pod is created and matches the queries - * when pod is updated with new label, and that label matches to the query - -The next paragraphs describe the implementation proposal for each of the tasks -described above, including new Handler and Drivers, that should be added to -the Kuryr controller. - - -Translate Kubernetes policy to Neutron security-group ------------------------------------------------------ - -'Allow all' policy [3]_ -####################### -Network policy that allows all traffic, should be translated to -Security group with one rule that allows all traffic. - -Example for allow all egress traffic: - -.. 
-.. code-block:: json
-
-    {
-        "security_group_rule": {
-            "direction": "egress",
-            "protocol": null,
-            "security_group_id": "[id]"
-        }
-    }
-
-'Deny all' policy [6]_
-######################
-
-Translates to an empty security-group.
-
-Ingress/egress
-##############
-
-Can be translated to the ingress/egress direction of security-group rules.
-
-IpBlock
-#######
-
-Can be handled with the "remote ip prefix" attribute of a
-security-group-rule, as both use CIDRs. In case of exceptions (inner CIDRs
-of the ipBlock that should be excluded from the rule), the ip-range should
-be broken into pieces that cover all of the ip-block except the exception.
-For example, for the ip-block "1.1.1.0/24 except 1.1.1.0/26", Kuryr should
-create security-group rules with 1.1.1.128/25 and 1.1.1.64/26.
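-
-This decomposition does not need to be hand-rolled; as a sketch, the Python
-standard library can compute it directly (shown here only to illustrate the
-expected result, not as part of any Kuryr module):
-
-.. code-block:: python
-
-    import ipaddress
-
-    block = ipaddress.ip_network('1.1.1.0/24')
-    exception = ipaddress.ip_network('1.1.1.0/26')
-
-    # address_exclude() yields the subnets of 'block' that remain once
-    # 'exception' is carved out.
-    remainder = sorted(block.address_exclude(exception))
-    print(remainder)  # [IPv4Network('1.1.1.64/26'), IPv4Network('1.1.1.128/25')]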
-
-podSelectors
-############
-
-The pod selector uses kubernetes label-selectors [5]_ for choosing a set of
-pods. It is used in the policy for 2 purposes:
-
-* Defining the pods the policy should be applied to.
-
-* Allowing ingress/egress traffic from/to this set of pods.
-
-The first one defines which ports the policy should be applied to, so it
-will be discussed in the next section. For the second, the translation
-mechanism can use the security-group attribute "remote_group_id", which
-allows defining as a valid source all ports that belong to another
-security-group. This means that we could create a security-group with no
-rules for each network-policy selector and attach all ports corresponding
-to pods selected by the pod query to this security-group. We assume that
-each port attached to this security-group will also be attached to at least
-one other group (the default security-group), so the attachment will not
-entirely block traffic to the port.
-
-namespaceSelector
-#################
-
-The namespace selector is used for choosing all the pods that belong to the
-namespaces selected by the query, for allowing ingress/egress traffic. The
-same security-group mechanism as for the pod selector should be used for
-allowing ingress/egress traffic from the selected namespaces.
-
-Port, protocol
-##############
-
-A port can be defined as a number or a name. When defined as a number, it is
-directly translated to a port and protocol in a security-group rule.
-However, when defined with a name, the containers of the relevant pods need
-to be examined, and when a container exposes a matching named port, it is
-translated to a security-group rule with the port number of the named port
-and the determined protocol.
-
-Which pods are selected for checking the containers depends on the
-direction of the rule being applied. In the case of an ingress rule, all the
-pods selected by the NetworkPolicySpec's podSelector are examined, in other
-words, the pods to which the Network Policy is applied. For an egress rule,
-the pods selected by the NetworkPolicyEgressRule's selector are examined.
-
-To keep track of the pods that have container(s) matching a named port, a
-new field, 'remote_ip_prefixes', needs to be added to the security-group
-rule of the KuryrNetPolicy CRD, containing the IP and the namespace of the
-affected resources. This way, the process of creating, deleting or updating
-a security-group rule on pod events is facilitated.
-
-Let's assume the following pod and network policy are created (based on
-Kubernetes upstream e2e tests [11]_):
-
- .. code-block:: yaml
-
-    apiVersion: v1
-    kind: Pod
-    metadata:
-      name: server
-      labels:
-        pod-name: server
-    spec:
-      containers:
-      - env:
-        - name: SERVE_PORT_80
-          value: foo
-        image: gcr.io/kubernetes-e2e-test-images/porter:1.0
-        imagePullPolicy: IfNotPresent
-        name: server-container-80
-        ports:
-        - containerPort: 80
-          name: serve-80
-          protocol: TCP
-      - env:
-        - name: SERVE_PORT_81
-          value: foo
-        image: gcr.io/kubernetes-e2e-test-images/porter:1.0
-        imagePullPolicy: IfNotPresent
-        name: server-container-81
-        ports:
-        - containerPort: 81
-          name: serve-81
-          protocol: TCP
-
-    ---
-
-    apiVersion: networking.k8s.io/v1
-    kind: NetworkPolicy
-    metadata:
-      name: allow-client-a-via-named-port-ingress-rule
-      namespace: default
-    spec:
-      podSelector:
-        matchLabels:
-          pod-name: server
-      policyTypes:
-      - Ingress
-      ingress:
-      - ports:
-        - protocol: TCP
-          port: serve-80
-
-The following custom resource is generated, containing all the Neutron
-resources created to ensure the policy is enforced. Note that a
-'remote_ip_prefixes' entry is added to keep track of the pod that matched
-the named port.
-
- .. code-block:: yaml
-
-    apiVersion: openstack.org/v1
-    kind: KuryrNetPolicy
-    metadata:
-      annotations:
-        networkpolicy_name: allow-client-a-via-named-port-ingress-rule
-        networkpolicy_namespace: default
-        networkpolicy_uid: 65d54bbb-70d5-11e9-9986-fa163e6aa097
-      creationTimestamp: "2019-05-07T14:35:46Z"
-      generation: 2
-      name: np-allow-client-a-via-named-port-ingress-rule
-      namespace: default
-      resourceVersion: "66522"
-      uid: 66eee462-70d5-11e9-9986-fa163e6aa097
-    spec:
-      egressSgRules:
-      - security_group_rule:
-          description: Kuryr-Kubernetes NetPolicy SG rule
-          direction: egress
-          ethertype: IPv4
-          id: e19eefd9-c543-44b8-b933-4a82f0c300b9
-          port_range_max: 65535
-          port_range_min: 1
-          protocol: tcp
-          security_group_id: f4b881ae-ce8f-4587-84ef-9d2867d00aec
-      ingressSgRules:
-      - remote_ip_prefixes:
-          "10.0.0.231:": default
-        security_group_rule:
-          description: Kuryr-Kubernetes NetPolicy SG rule
-          direction: ingress
-          ethertype: IPv4
-          id: f61ab507-cf8c-4720-9a70-c83505bc430f
-          port_range_max: 80
-          port_range_min: 80
-          protocol: tcp
-          security_group_id: f4b881ae-ce8f-4587-84ef-9d2867d00aec
-      networkpolicy_spec:
-        ingress:
-        - ports:
-          - port: serve-80
-            protocol: TCP
-        podSelector:
-          matchLabels:
-            pod-name: server
-        policyTypes:
-        - Ingress
-      podSelector:
-        matchLabels:
-          pod-name: server
-      securityGroupId: f4b881ae-ce8f-4587-84ef-9d2867d00aec
-      securityGroupName: sg-allow-client-a-via-named-port-ingress-rule
-
-Mix of ports and peers
-######################
-
-In this case a security-group rule should be created for each tuple of
-(peer, port). The number of rules will be the Cartesian product of the
-ports and the peers.
-
-
-Match between security-groups and their ports
----------------------------------------------
-
-A security-group that is derived from a kubernetes-policy should be
-tagged [7]_ with the network-policy UID. In case of length or character-set
-issues, hashing should be applied, so that a unique security-group tag can
-still be derived from the policy UID, as in the sketch below.
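-
-The fallback could look like this (the 60-character limit is an assumption
-of this example; check the tag length constraint of the deployed Neutron
-release):
-
-.. code-block:: python
-
-    import hashlib
-
-    MAX_TAG_LEN = 60  # assumed Neutron tag length limit
-
-    def sg_tag_for_policy(policy_uid, direction=''):
-        """Derive a Neutron tag from a network-policy UID.
-
-        Falls back to a SHA-1 digest when the raw tag would not fit.
-        """
-        tag = policy_uid + ('_' + direction if direction else '')
-        if len(tag) <= MAX_TAG_LEN:
-            return tag
-        return hashlib.sha1(tag.encode('utf-8')).hexdigest()
-
-    # Tag for the ingress helper group of the example policy above.
-    print(sg_tag_for_policy('65d54bbb-70d5-11e9-9986-fa163e6aa097', 'in'))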
-
-For defining the pods the policy should be applied to, Kubernetes defines a
-pod-selector query [5]_. For applying the policy to the relevant ports,
-Kuryr needs to know at any given moment which pods belong to that group.
-Membership can change when a pod is created, updated, or changes labels.
-
-When a policy is created, Kuryr should trigger a get query to apply the
-policy to all pods that already match, and add a watch to get an update
-whenever a pod is added to or removed from the network-policy, so it can
-apply/remove the translated policy on the pod's port.
-
-For applying the policy to the pod, an annotation with the
-security-group-id will be added to the pod. That will cause an "update pod"
-event, and the VIFHandler, via the security-group Driver, will attach the
-security-group to the pod's port.
-
-We can't attach the security-group directly in the watch callback, as that
-would create a race condition between the watch and the VIFHandler: the
-watch could be called before Kuryr is notified that the pod was created.
-With the annotation, when a new pod is created, if the watch was called
-before the VIFHandler's pod-creation processing, the VIFHandler will get
-the pod already carrying the annotation. Otherwise, it will get a pod with
-no security-group annotation and will attach it to the default
-security-group. When the watch later updates the annotation, the pod will
-be updated with the correct security-group.
-
-When a policy is updated and its pod-selectors changed, a diff between the
-old and new selected pod sets should be done, and the pods' security-group
-annotations should be updated accordingly. The selector watches should be
-updated with the new queries.
-
-
-Allow traffic from the pod ingress and egress selectors
--------------------------------------------------------
-
-As mentioned above, "remote_group_id" will be used to allow ingress and
-egress traffic from pods selected by the pod/namespace selectors.
-
-For the pod-selector and namespace-selector we need to create a
-security-group per policy (one for ingress and one for egress). The
-security-group should be tagged with a tag that is derived from the
-policy UID and the traffic direction (for example: [policy_UID]_EG for
-egress traffic). In case of character-set or allowed-length issues, a hash
-should be applied when updating these security-groups.
-
-For each selector (namespace or pod) a watch should be set. The watch
-callback will add/remove the relevant pods to/from the security-group.
-
-Controller Handlers and Drivers impact
---------------------------------------
-
-To support network-policy, a Handler that watches network_policy events
-will be added.
-
-Two new drivers will be added (a sketch of the handler/driver wiring
-follows this list):
-
-* On the network_policy Handler:
-
-  * A "Network Policy Apply Driver", which will have 3 responsibilities:
-
-    * Translate the network-policy to security-groups.
-    * Match the security-group to its relevant ports
-      (by setting the watch and annotating the pod as described above).
-    * Update the security-groups for the ingress/egress pod/namespace
-      selectors.
-
-* On the VIF handler:
-
-  * A security-group-policy Driver will be used instead of the default
-    security-groups Driver. It will be responsible for:
-
-    * Setting the port's security-group according to the annotation.
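-
-A minimal sketch of how the new handler could delegate to the apply driver
-(the class and method names here are illustrative, not a settled API):
-
-.. code-block:: python
-
-    class NetworkPolicyHandler(object):
-        """Watches NetworkPolicy events and delegates to the apply driver."""
-
-        OBJECT_KIND = 'NetworkPolicy'
-
-        def __init__(self, apply_driver):
-            self._driver = apply_driver
-
-        def on_added(self, policy):
-            sg_id = self._driver.translate(policy)      # policy -> SG + rules
-            self._driver.annotate_matching_pods(policy, sg_id)
-            self._driver.watch_selectors(policy, sg_id)
-
-        def on_updated(self, policy):
-            # Re-translate the rules and re-diff the selected pod set.
-            self._driver.sync(policy)
-
-        def on_deleted(self, policy):
-            self._driver.release(policy)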
-
-
-Controller startup
-------------------
-The following should be done on Controller startup:
-
-* Retrieve all network-policies from the Kubernetes API and set all the
-  relevant watches. This should happen before the Controller starts.
-
-* Perform a sync operation to make sure the Neutron topology is
-  synchronised with the Kubernetes Network Policy model:
-
-  * for every network-policy:
-
-    * Get its main security-group and check whether it is up to date. This
-      validation will be done with the generation tag. Generation is part
-      of the k8s metadata and is increased by one on every policy change.
-      When the policy is applied by kuryr, it should be annotated with the
-      current policy generation. If the generation in the policy metadata
-      is newer than the generation in the annotation, it means that the
-      policy has changed and the security-group rules need to be rebuilt.
-
-    * for each pod selector in the policy:
-
-      * get from the kubernetes-api all pods that are selected by this
-        query.
-      * get all ports of the relevant security-groups.
-      * do a diff against the ports that need to be attached to the SG,
-        and add/remove pod ports to/from the security-groups.
-
-
-Port pool impact
-----------------
-
-Changes in the security-policy can have a negative impact on the
-port-pools [9]_. The combination of a port's security-groups is part of the
-pool key, and changes in a network-policy could make some pools irrelevant.
-
-For example, let's assume that we have 2 policies "a" and "b", and both
-policies should be applied to pods with the label "role: db". When the
-first pod with the label "role: db" is created, a new port-pool is created
-and its pool key is composed from the security-groups of "a" and "b". If
-policy "b" is changed so that pods with the label "role: db" are no longer
-selected by it, then the port-pool that was created for the combination of
-"a" and "b" is no longer useful (see the sketch below).
-
-That can lead to a port leak, as the pool holds many ports that are no
-longer useful. To handle this issue a new cleanup task should be added.
-This task will release all ports from the pools that are no longer in use.
-
-Another issue that needs to be treated is that the policies of a pod can
-change while the pod is running. Currently, when a pod is deleted its port
-is returned to the pool it was taken from. If the pod's policies have
-changed, this behaviour is incorrect: when a port is released it should be
-returned to the pool that matches the current state of the pod.
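-
-A sketch of why the pool key goes stale (the key layout here is
-illustrative; the real key is defined by the port manager devref [9]_):
-
-.. code-block:: python
-
-    def pool_key(host, project_id, security_groups):
-        # The SG set is part of the key, so a policy change that alters a
-        # pod's SG set makes previously pooled ports unreachable via the
-        # new key; they linger in the pool until a cleanup task runs.
-        return (host, project_id, tuple(sorted(security_groups)))
-
-    old_key = pool_key('node-1', 'project-1', ['sg-a', 'sg-b'])
-    new_key = pool_key('node-1', 'project-1', ['sg-a'])
-    assert old_key != new_key  # ports pooled under old_key are now orphaned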
-
-
-Execution flow diagram
-----------------------
-
-See below the network-policy attachment to the pod after pod creation:
-
-.. image:: ../../../images/net-policy.svg
-   :alt: Ingress creation flow diagram
-   :align: left
-   :width: 100%
-
-
-Possible optimization
----------------------
-
-Kubernetes label-selectors are divided into 2 types of queries:
-"match-labels" and "match-expression" [10]_. "match-labels" selects a
-closed list of labels, while "match-expression" selects all pods that match
-a particular expression.
-
-This spec suggests creating a watch for each label-selector query, because
-for "match-expression" queries it is not possible to determine whether a
-pod matches the query without implementing a parser for each expression. By
-setting a watch we use the kubernetes-api-server for the matching between
-pods and queries.
-
-The spec treats the "match-labels" and "match-expression" queries in the
-same way for simplicity reasons, but a future optimization may distinguish
-between the query types: the watches for "match-labels" queries may be
-removed, and the matching between a pod and its "match-labels" queries
-could be done directly in the vif-handler.
-
-
-Assumptions
------------
-
-Security-groups are supported by the networking backend for all vif
-interfaces. In the case of special interfaces (SR-IOV, mac-vlan, etc.), the
-network-policy will be applied on the interface if and only if the
-networking backend enables security-groups on those interfaces.
-
-
-Execution flow example
-----------------------
-
-This section describes the system execution flow in the following
-scenarios:
-
-* A pod is deployed on an empty system.
-* A network-policy that should be applied to the first pod is deployed.
-* Another pod that belongs to the network-policy is deployed.
-
-A pod is deployed on an empty system:
-
-* The pod is created with the following details:
-
-  * name: p1, namespace: default, labels: {role: db}.
-
-* VIF Handler:
-
-  * Security-group-policy driver:
-
-    * Assigns the default policy to this pod (as we still do not have any
-      network-policy in the system).
-    * Creates a security-group for namespace 'default', and adds pod p1's
-      port to that security-group.
-
-A network policy is deployed:
-
-Let's assume that the following policy is created (taken from the k8s
-tutorial [8]_):
-
- .. code-block:: yaml
-
-    apiVersion: networking.k8s.io/v1
-    kind: NetworkPolicy
-    metadata:
-      name: test-network-policy
-      namespace: default
-    spec:
-      podSelector:
-        matchLabels:
-          role: db
-      policyTypes:
-      - Ingress
-      - Egress
-      ingress:
-      - from:
-        - ipBlock:
-            cidr: 172.17.0.0/16
-            except:
-            - 172.17.1.0/24
-        - namespaceSelector:
-            matchLabels:
-              project: myproject
-        - podSelector:
-            matchLabels:
-              role: frontend
-        ports:
-        - protocol: TCP
-          port: 6379
-      egress:
-      - to:
-        - ipBlock:
-            cidr: 10.0.0.0/24
-        ports:
-        - protocol: TCP
-          port: 5978
-
-
-* Network policy Handler:
-
-  * Network policy Driver:
-
-    * Creates a security-group with the following rules:
-
-      * Ingress, tcp:6379, 172.17.0.0/24
-      * Ingress, tcp:6379, 172.17.2.0/23
-      * Ingress, tcp:6379, 172.17.4.0/22
-      * Ingress, tcp:6379, 172.17.8.0/21
-      * Ingress, tcp:6379, 172.17.16.0/20
-      * Ingress, tcp:6379, 172.17.32.0/19
-      * Ingress, tcp:6379, 172.17.64.0/18
-      * Ingress, tcp:6379, 172.17.128.0/17
-      * Ingress, tcp:6379, remote_group_id: [test-network-policy-uid]_in
-      * Egress, tcp:5978, 10.0.0.0/24
-
-    * Creates a match for the policy:
-
-      * Queries the k8s-api for pods that match {role: db}.
-
-      * Attaches the annotation with the security-policy-id to p1.
-
-      * Sets a watch on the query "match-labels: {role: db}"; the callback
-        of this watch will update the security-group annotation on
-        updated/new pods that are selected by this query.
-
-    * Creates a match for the ingress/egress group:
-
-      * Sets a watch on the query "match-labels: {role: frontend}"; the
-        watch callback will add all pods that are selected by this query to
-        the security-group [test-network-policy-uid]_in.
-
-* VIF Handler:
-
-  * Will get an update event on p1 as its annotations have changed.
-
-  * Security-group-policy driver:
-
-    * Attaches the interface to its security-group.
-
-A second pod is created:
-
-* The pod is created with the details:
-
-  * name: p2, namespace: default, labels: {role: db}.
-
-* Let's assume that the VIF handler is called before the watch callback (as
-  this case is a little more complicated).
-
-* VIF Handler:
-
-  * Pod-created event (still no security-group annotation on the pod).
-
-  * Namespace security-group driver:
-
-    * Returns the default network-policy.
-
-  * The pod is created with the default policy.
-
-* Watch callback:
-
-  * p2 is selected by the security-group of net-policy-test; annotates the
-    pod with the security-group-id that matches the policy.
-
-* VIF Handler:
-
-  * Security-group-policy driver:
-
-    * Updates pod p2's port with the policy's security-group.
-
-
-References
-==========
-.. [1] https://kubernetes.io/docs/concepts/services-networking/network-policies/
-.. [2] https://kubernetes.io/docs/api-reference/v1.8/#networkpolicy-v1-networking/
-.. [3] https://kubernetes.io/docs/concepts/services-networking/network-policies/#default-allow-all-ingress-traffic
-.. [4] https://developer.openstack.org/api-ref/network/v2/index.html#security-group-rules-security-group-rules
-.. [5] https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/
-.. [6] https://kubernetes.io/docs/concepts/services-networking/network-policies/#default-deny-all-ingress-traffic
-.. [7] https://docs.openstack.org/neutron/latest/contributor/internals/tag.html
-.. [8] https://kubernetes.io/docs/concepts/services-networking/network-policies/#the-networkpolicy-resource
-.. [9] https://github.com/openstack/kuryr-kubernetes/blob/master/doc/source/devref/port_manager.rst
-.. [10] https://v1-8.docs.kubernetes.io/docs/api-reference/v1.8/#labelselector-v1-meta
-.. [11] https://github.com/kubernetes/kubernetes/blob/master/test/e2e/network/network_policy.go
diff --git a/doc/source/specs/rocky/npwg_spec_support.rst b/doc/source/specs/rocky/npwg_spec_support.rst
deleted file mode 100644
index 48ec4df13..000000000
--- a/doc/source/specs/rocky/npwg_spec_support.rst
+++ /dev/null
@@ -1,190 +0,0 @@
-==========================================================================
-Kubernetes Network Custom Resource Definition De-facto Standard v1 Support
-==========================================================================
-
-https://blueprints.launchpad.net/kuryr-kubernetes/+spec/kuryr-npwg-spec-support
-
-This spec proposes an approach to support the mechanism defined in the
-Kubernetes Network Custom Resource Definition De-facto Standard Version 1
-[#]_, which is used to attach multiple VIFs to Pods.
-
-Problem Description
--------------------
-
-In NFV use cases there is a persistent desire to attach Pods to multiple
-interfaces. However, the CNI plugins that have implemented this
-functionality each use a different way of defining the additional
-interfaces in Pods; there is no standard approach among those CNI plugins.
-
-Therefore, the Network Plumbing Working Group [#]_ drafted a spec (the NPWG
-spec) trying to standardize the way of attaching Pods to multiple networks.
-
-Proposed Change
----------------
-
-The NPWG spec defines a "Network" Custom Resource object which describes
-how to attach a Pod to the logical or physical network referenced by the
-object.
-
-The proposed change is based on the VIF-Handler And Vif Drivers Design
-[#]_. A new VIF driver, 'npwg_multiple_interfaces', will be created to
-parse the annotations of Pods and the Network CRDs. The new VIF driver will
-be invoked by the multi-vif driver as another sub-driver, and it should
-return a list of VIF objects. 'npwg_multiple_interfaces' should invoke
-other VIF drivers to create the VIF objects if necessary.
-
-The VIFHandler then updates the Pod annotation 'openstack.org/kuryr-vif'
-with the VIF objects, so that the Kuryr CNI can read these VIFs and attach
-each of them to the Pod's namespace. If any of the additional interfaces
-fails to be attached to the Pod, or any error happens during attachment,
-the CNI shall return an error.
-
-The option in the config file might look like this:
-
-.. code-block:: ini
-
-    [kubernetes]
-
-    enabled_vif_drivers = npwg_multiple_interfaces
-
-To define additional networks in Pods, the NPWG spec defines an annotation
-format. Here is how a Pod Spec with additional network requests might look:
-
-.. code-block:: yaml
-
-    kind: Pod
-    metadata:
-      name: my-pod
-      namespace: my-namespace
-      annotations:
-        k8s.v1.cni.cncf.io/networks: net-a,net-b,other-ns/net-c
-
-Or in JSON format:
-
-.. code-block:: yaml
-
-    kind: Pod
-    metadata:
-      name: my-pod
-      namespace: my-namespace
-      annotations:
-        k8s.v1.cni.cncf.io/networks: |
-          [
-            {"name":"net-a"},
-            {"name":"net-b"},
-            {
-              "name":"net-c",
-              "namespace":"other-ns"
-            }
-          ]
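-
-A minimal sketch of the parsing step for these two annotation forms (the
-helper name is illustrative; only the annotation formats themselves are
-given by the NPWG spec):
-
-.. code-block:: python
-
-    import json
-
-    NETWORKS_ANNOTATION = 'k8s.v1.cni.cncf.io/networks'
-
-    def parse_networks(pod, default_namespace):
-        """Return [(namespace, network-name)] requested by the Pod."""
-        value = pod['metadata']['annotations'].get(NETWORKS_ANNOTATION, '')
-        if not value:
-            return []
-        if value.lstrip().startswith('['):
-            # JSON list form.
-            return [(item.get('namespace', default_namespace), item['name'])
-                    for item in json.loads(value)]
-        # Comma-separated form; '<ns>/<name>' selects another namespace.
-        entries = []
-        for token in value.split(','):
-            ns, _, name = token.strip().rpartition('/')
-            entries.append((ns or default_namespace, name))
-        return entries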
-
-The VIF driver can then parse the network information defined in 'Network'
-objects. In the NPWG spec, the 'NetworkAttachmentDefinition' object
-definition is very flexible: implementations that are not CNI delegating
-plugins can add annotations to the Network object and use those to store
-non-CNI configuration, and it is up to the implementation to define the
-content it requires.
-
-Here is how the 'CustomResourceDefinition' is specified in the NPWG spec:
-
-.. code-block:: yaml
-
-    apiVersion: apiextensions.k8s.io/v1beta1
-    kind: CustomResourceDefinition
-    metadata:
-      name: network-attachment-definitions.k8s.cni.cncf.io
-    spec:
-      group: k8s.cni.cncf.io
-      version: v1
-      scope: Namespaced
-      names:
-        plural: network-attachment-definitions
-        singular: network-attachment-definition
-        kind: NetworkAttachmentDefinition
-        shortNames:
-        - net-attach-def
-      validation:
-        openAPIV3Schema:
-          properties:
-            spec:
-              properties:
-                config:
-                  type: string
-
-For Kuryr-kubernetes, users should define the 'Network' object with a
-previously created Neutron subnet, like:
-
-.. code-block:: yaml
-
-    apiVersion: "kubernetes.cni.cncf.io/v1"
-    kind: Network
-    metadata:
-      name: a-bridge-network
-      annotations:
-        openstack.org/kuryr-config: '{
-          "subnetId": "id_of_neutron_subnet_created_previously"
-        }'
-
-With the information read from the Pod annotation
-k8s.v1.cni.cncf.io/networks and the 'Network' objects, the Neutron ports
-can either be created or retrieved. The Pod annotation
-openstack.org/kuryr-vif will then be updated accordingly.
-
-Here is how the openstack.org/kuryr-vif annotation with additional networks
-might look:
-
-.. code-block:: yaml
-
-    kind: Pod
-    metadata:
-      name: my-pod
-      namespace: my-namespace
-      annotations:
-        openstack.org/kuryr-vif: {
-          # default interface remains intact
-          "eth0": {
-            ... Neutron vif object from default subnet ...
-          }
-          # additional interfaces appended by driver 'npwg_multiple_interfaces'
-          "eth1": {
-            ... Neutron vif object ...
-          }
-          "eth2": {
-            ... Neutron vif object ...
-          }
-        }
-
-Alternatives
-~~~~~~~~~~~~
-
-Kuryr-Kubernetes has already designed a way of defining additional VIFs;
-this spec will not change that part. Users can choose which format they
-want by configuring 'enabled_vif_drivers'.
-
-Other end user impact
-~~~~~~~~~~~~~~~~~~~~~
-Pods always attach to the default Kubernetes network, as is how
-Kuryr-Kubernetes works today, and all networks specified in the Pod
-annotation are sidecars.
-
-Assignee(s)
-~~~~~~~~~~~
-
-Primary assignee:
-Peng Liu
-
-Work Items
-~~~~~~~~~~
-
-* Implement a new NPWG spec compatible VIF driver.
-* Document the procedure of using this new VIF driver.
-
-Possible Further Work
-~~~~~~~~~~~~~~~~~~~~~
-
-* Keep track of the subsequent releases of the NPWG spec.
-* Allow defining new neutron networks/subnets in 'Network' objects, so that
-  kuryr can create them in Neutron first, then attach the Pod to them.
-
-References
-----------
-
-.. [#] https://docs.google.com/document/d/1Ny03h6IDVy_e_vmElOqR7UdTPAG_RNydhVE1Kx54kFQ/edit?usp=sharing
-.. [#] https://groups.google.com/forum/?_escaped_fragment_=topic/kubernetes-sig-network/ANAjTyqVosw
-.. [#] https://docs.openstack.org/kuryr-kubernetes/latest/devref/vif_handler_drivers_design.html
diff --git a/doc/source/specs/stein/vhostuser.rst b/doc/source/specs/stein/vhostuser.rst
deleted file mode 100644
index ec1eb5cf7..000000000
--- a/doc/source/specs/stein/vhostuser.rst
+++ /dev/null
@@ -1,197 +0,0 @@
-
-
-Kuryr Kubernetes vhost-user port integration
-============================================
-
-Open vSwitch, or any other virtual switch, can be built with a DPDK
-datapath [3]_; for this datapath the virtual switch provisions vhost-user
-ports. A DPDK application can use such a port just by accessing the UNIX
-domain socket of the vhost-user port. DPDK applications that use vhost-user
-ports get better network performance than applications that use a veth pair
-with tap interfaces. A DPDK application using a vhost-user socket is the
-typical use case for bare-metal installations in the NFV world. There is
-also another use case, where vhost-user ports are passed to a VM. In this
-case the DPDK application inside the VM works with the vhost-user port
-through a VirtIO device. This is the Nested DPDK Support [1]_ use case.
-
-Problem statement
------------------
-
-Currently kuryr-kubernetes doesn't support vhost-user port creation in
-bare-metal installations, although OpenStack can be configured to work with
-vhost-user ports. In the case of a vhost-user port in a bare-metal
-installation there is no device; DPDK applications use a unix domain socket
-created by the Open vSwitch daemon, its control plane socket.
-Kuryr-kubernetes has to move this vhost-user socket file to a path
-available to the pod.
-
-Proposed solution
------------------
-
-Kuryr-kubernetes should create the Neutron port as usual;
-NeutronPodVIFDriver will be used. Then kuryr-kubernetes should handle the
-vif_type vhost-user [2]_; it already handles ports with vif_type ovs for
-the non-DPDK datapath with a veth pair, as well as ports with
-ovs_hybrid_plug, where a linux bridge is used. No new pod VIF driver will
-be introduced.
-
-From the user's point of view there is no difference in the pod definition;
-it is the same as with tap-based ports. To request a vhost-user port as the
-main port there is no need to do anything special.
-
-When the vhost-user port is an additional interface, it can be defined with
-a Network Attachment Definition [6]_.
-
-The type of the port, i.e. whether a veth is plugged into the OVS bridge or
-a vhost-user port is used, is determined by the datapath_type option in the
-neutron-openvswitch-agent configuration file [2]_. That's why the datapath
-is not featured in the pod's definition; kuryr-kubernetes will rely on the
-pod's vif type.
-
-Open vSwitch supports DPDK ports only in special bridges of type netdev;
-therefore the integration bridge should have the netdev type, otherwise the
-OVS DPDK port will not work. Kuryr-kubernetes uses os-vif; this library
-does all the necessary work for a vif of VIFVHostUser type to set up the
-bridge and create the port.
-
-To be able to use that kind of port in a container, the socket of that port
-has to be placed on the container's file system. This is done with
-volumeMounts in the pod yaml file like this:
-
-.. _configuration:
-.. code-block:: yaml
-
-    volumeMounts:
-    - name: vhostuser
-      mountPath: /var/run/vhostuser
-    ...
-    volumes:
-    - name: vhostuser
-      hostPath:
-        path: /var/run/vhostuser
-        type: Directory
-
-
-mountPath is defined in kuryr.conf on the minion host:
-
-.. code-block:: ini
-
-    [vhostuser]
-    mount_point = /var/run/vhostuser
-    ovs_vhu_path = /var/run/openvswitch
-
-A single mount point (CONF.vhostuser.mount_point) will be provided for
-several pods. It is the place the vhost-user socket file is moved to from
-ovs_vhu_path. ovs_vhu_path is the path where Open vSwitch keeps vhost-user
-sockets by default, in the case when Open vSwitch creates the socket.
-mount_point and ovs_vhu_path should be on the same mount point, otherwise
-EXDEV (cross-device link) will be raised and connections over this socket
-will be refused. Unfortunately the Open vSwitch daemon can't remove a moved
-socket with the ovs-vsctl del-port command; in this case the socket file
-will be removed by VIFVHostUserDriver.
-
-A configuration file for the DPDK application will be created there. It
-will contain auxiliary information: the socket name, the MAC address, and
-the OVS port mode. It might look like:
-
-.. code-block:: json
-
-    {
-        "vhostname": "vhu9c9cc8f5-88",
-        "vhostmac": "fa:16:3e:ef:b0:ed",
-        "mode": "server"
-    }
-
-The name of the configuration file consists of the container id and the
-ifname of the interface, concatenated by a dash. The DPDK application will
-use vhostname to determine the vhost-user socket name.
-
-To get the vhost-user socket name inside the container, the user has to
-read the configuration file. The container identifier is required for that;
-the following bash command retrieves it inside the container:
-
-.. code-block:: bash
-
-    CONTAINER_ID=`sed -ne '/hostname/p' /proc/1/task/1/mountinfo |\
-                  awk -F '/' '{print $4}'`
-
-Kuryr-kubernetes can produce multiple ports per pod; the following command
-can be used to list all available ports:
-
-.. code-block:: bash
-
-    ls $MOUNT_POINT/$CONTAINER_ID-eth*
-
-Here $MOUNT_POINT is the volumeMount named vhostuser defined in the pod's
-configuration_ yaml file. The value of the vhostname field should be used
-when launching the DPDK application:
-
-.. code-block:: bash
-
-    testpmd -d librte_pmd_virtio.so.17.11 -m 1024 -c 0xC --file-prefix=testpmd_ \
-        --vdev=net_virtio_user0,path=/$MOUNT_POINT/$VHU_PORT \
-        --no-pci -- --no-lsc-interrupt --auto-start --tx-first \
-        --stats-period 1 --disable-hw-vlan;
-
-A vhost-user port has two different modes: client and server. The type of
-vhost-user port to create is defined in vif_details by the vhostuser_mode
-field [4]_. The vhost-user port's mode affects the socket life cycle. From
-the kuryr-kubernetes point of view, client mode is the mode in which
-ovs-vswitchd creates and listens on the vhost-user socket; such a port is
-created by the command below:
-
-.. code-block:: console
-
-    ovs-vsctl add-port ovsbr0 vhost-user0 -- set Interface vhost-user0 \
-        type=dpdkvhostuser
-
-In this case vhost_user_mode's value will be 'client'. This mode is not
-robust: after a restart, ovs-vswitchd will recreate the sockets at their
-initial path, all clients have to re-establish their connections, and
-kuryr-kubernetes has to move the sockets again. This leads to a more
-complicated solution, which is why another mode was invented in
-Open vSwitch. In that mode ovs-vswitchd acts as a client and tries to
-connect over a predefined path to the vhost-user server (the DPDK
-application in the container). From the kuryr-kubernetes point of view this
-is server mode, and vhost_user_mode's value is 'server'.
-
-This imposes some restrictions:
-
-- Kuryr-kubernetes can specify the socket path at socket creation time in
-  'server' mode, but can't in 'client' mode.
-- In the case of 'client' mode it is better to recreate the whole pod on a
-  restart of the Open vSwitch daemon.
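-
-A sketch of the binding step described above (the function and attribute
-names are assumptions of this example; only VIFVHostUserDriver itself is
-named by this spec):
-
-.. code-block:: python
-
-    import json
-    import os
-
-    def connect_vhostuser(vif, container_id, ifname,
-                          ovs_vhu_path='/var/run/openvswitch',
-                          mount_point='/var/run/vhostuser'):
-        """Move the socket to the pod-visible dir and drop the config file."""
-        sock = vif.vif_name  # e.g. 'vhu9c9cc8f5-88'
-        if vif.mode == 'client':
-            # ovs-vswitchd created the socket; relocate it. Both paths must
-            # be on the same filesystem or os.rename() fails with EXDEV.
-            os.rename(os.path.join(ovs_vhu_path, sock),
-                      os.path.join(mount_point, sock))
-        conf = {'vhostname': sock, 'vhostmac': vif.address,
-                'mode': vif.mode}
-        conf_path = os.path.join(mount_point,
-                                 '%s-%s' % (container_id, ifname))
-        with open(conf_path, 'w') as f:
-            json.dump(conf, f)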
-
-This feature doesn't depend on HA behavior, but it does affect the
-containerized CNI plugin, because it requires the same mount point for the
-source and destination of the vhost-user socket file.
-
-A vhost-user port is not a limited resource; it can be scheduled on any
-node without restrictions. The limited resource here is memory, in most
-cases the number of huge pages. To configure it see [5]_.
-
-The initial implementation doesn't cover security issues; DAC and MAC
-should be defined properly by the user.
-
-
-Implementation
-==============
-
-Work Items
-----------
-
-* check for vhostuser_mode in neutron_to_osvif_vif_ovs and create the
-  appropriate VIF
-* introduce a new binding driver, VIFVHostUserDriver
-* add unit tests for the new code
-* add a tempest test for vhostuser ports
-
-Assignee
---------
-
-Alexey Perevalov
-
-
-References
-----------
-.. [1] https://blueprints.launchpad.net/kuryr-kubernetes/+spec/nested-dpdk-support
-.. [2] https://docs.openstack.org/neutron/pike/contributor/internals/ovs_vhostuser.html
-.. [3] http://docs.openvswitch.org/en/latest/topics/dpdk/vhost-user/
-.. [4] https://specs.openstack.org/openstack/nova-specs/specs/kilo/implemented/libvirt_vif_vhostuser.html
-.. [5] https://kubernetes.io/docs/tasks/manage-hugepages/scheduling-hugepages/
-.. [6] https://docs.openstack.org/kuryr-kubernetes/latest/specs/rocky/npwg_spec_support.html
diff --git a/doc/source/usage.rst b/doc/source/usage.rst
deleted file mode 100644
index 627150d95..000000000
--- a/doc/source/usage.rst
+++ /dev/null
@@ -1,7 +0,0 @@
-=====
-Usage
-=====
-
-To use kuryr-kubernetes in a project::
-
-    import kuryr_kubernetes
diff --git a/etc/cni/net.d/10-kuryr.conflist b/etc/cni/net.d/10-kuryr.conflist
deleted file mode 100644
index 94ac4193b..000000000
--- a/etc/cni/net.d/10-kuryr.conflist
+++ /dev/null
@@ -1,11 +0,0 @@
-{
-  "name": "kuryr",
-  "cniVersion": "0.3.1",
-  "plugins": [
-    {
-      "type": "kuryr-cni",
-      "kuryr_conf": "/etc/kuryr/kuryr.conf",
-      "debug": true
-    }
-  ]
-}
diff --git a/etc/cni/net.d/kuryr.conflist.template b/etc/cni/net.d/kuryr.conflist.template
deleted file mode 100644
index 94ac4193b..000000000
--- a/etc/cni/net.d/kuryr.conflist.template
+++ /dev/null
@@ -1,11 +0,0 @@
-{
-  "name": "kuryr",
-  "cniVersion": "0.3.1",
-  "plugins": [
-    {
-      "type": "kuryr-cni",
-      "kuryr_conf": "/etc/kuryr/kuryr.conf",
-      "debug": true
-    }
-  ]
-}
diff --git a/etc/oslo-config-generator/kuryr.conf b/etc/oslo-config-generator/kuryr.conf
deleted file mode 100644
index 37ec8ff80..000000000
--- a/etc/oslo-config-generator/kuryr.conf
+++ /dev/null
@@ -1,4 +0,0 @@
-[DEFAULT]
-output_file = etc/kuryr.conf.sample
-wrap_width = 79
-namespace = kuryr_kubernetes
diff --git a/kubernetes_crds/kuryr_crds/kuryrloadbalancer.yaml b/kubernetes_crds/kuryr_crds/kuryrloadbalancer.yaml
deleted file mode 100644
index d3b1c5dde..000000000
--- a/kubernetes_crds/kuryr_crds/kuryrloadbalancer.yaml
+++ /dev/null
@@ -1,241 +0,0 @@
-apiVersion: apiextensions.k8s.io/v1
-kind: CustomResourceDefinition
-metadata:
-  name: kuryrloadbalancers.openstack.org
-spec:
-  group: openstack.org
-  scope: Namespaced
-  names:
-    plural: kuryrloadbalancers
-    singular: kuryrloadbalancer
-    kind: KuryrLoadBalancer
-    shortNames:
-    - klb
-  versions:
-  - name: v1
-    served: true
-    storage: true
-    additionalPrinterColumns:
-    - name: PROJECT-ID
-      type: string
-      description: The ID of the PROJECT associated to the loadbalancer
-      jsonPath: .spec.project_id
-    - name: Age
-      type: date
-      jsonPath: .metadata.creationTimestamp
-    schema:
-      openAPIV3Schema:
-        type: object
-        properties:
-          spec:
-            type: object
-            properties:
-              endpointSlices:
type: array - items: - type: object - properties: - endpoints: - type: array - items: - type: object - properties: - addresses: - type: array - items: - type: string - conditions: - type: object - properties: - ready: - type: boolean - hostname: - type: string - targetRef: - type: object - properties: - kind: - type: string - name: - type: string - namespace: - type: string - resourceVersion: - type: string - uid: - type: string - topology: - type: object - ports: - type: array - items: - type: object - properties: - name: - type: string - port: - type: integer - protocol: - type: string - ip: - type: string - lb_ip: - type: string - ports: - type: array - items: - type: object - required: - - port - - protocol - - targetPort - properties: - name: - type: string - port: - type: integer - protocol: - type: string - targetPort: - type: string - project_id: - type: string - security_groups_ids: - type: array - items: - type: string - subnet_id: - type: string - type: - type: string - provider: - type: string - timeout_client_data: - type: integer - timeout_member_data: - type: integer - status: - type: object - properties: - listeners: - type: array - items: - type: object - required: - - id - - loadbalancer_id - - name - - port - - project_id - - protocol - properties: - id: - type: string - loadbalancer_id: - type: string - name: - type: string - port: - type: integer - project_id: - type: string - protocol: - type: string - timeout_client_data: - type: integer - timeout_member_data: - type: integer - loadbalancer: - type: object - required: - - id - - ip - - name - - port_id - - project_id - - provider - - security_groups - - subnet_id - properties: - id: - type: string - ip: - type: string - name: - type: string - port_id: - type: string - project_id: - type: string - provider: - type: string - security_groups: - type: array - items: - type: string - subnet_id: - type: string - members: - type: array - items: - type: object - required: - - id - - ip - - name - - pool_id - - port - - project_id - - subnet_id - properties: - id: - type: string - ip: - type: string - name: - type: string - pool_id: - type: string - port: - type: integer - project_id: - type: string - subnet_id: - type: string - pools: - type: array - items: - type: object - required: - - id - - listener_id - - loadbalancer_id - - name - - project_id - - protocol - properties: - id: - type: string - listener_id: - type: string - loadbalancer_id: - type: string - name: - type: string - project_id: - type: string - protocol: - type: string - service_pub_ip_info: - type: object - required: - - ip_id - - ip_addr - - alloc_method - properties: - ip_id: - type: string - ip_addr: - type: string - alloc_method: - type: string diff --git a/kubernetes_crds/kuryr_crds/kuryrnetwork.yaml b/kubernetes_crds/kuryr_crds/kuryrnetwork.yaml deleted file mode 100644 index f8754548a..000000000 --- a/kubernetes_crds/kuryr_crds/kuryrnetwork.yaml +++ /dev/null @@ -1,59 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - name: kuryrnetworks.openstack.org -spec: - group: openstack.org - scope: Namespaced - names: - plural: kuryrnetworks - singular: kuryrnetwork - kind: KuryrNetwork - shortNames: - - kns - versions: - - name: v1 - served: true - storage: true - additionalPrinterColumns: - - name: SUBNET-CIDR - type: string - description: The subnet CIDR allocated to the namespace - jsonPath: .status.subnetCIDR - - name: Age - type: date - jsonPath: .metadata.creationTimestamp - schema: - openAPIV3Schema: - type: 
object - properties: - spec: - type: object - required: - - nsName - - projectId - - nsLabels - properties: - nsName: - type: string - projectId: - type: string - nsLabels: - x-kubernetes-preserve-unknown-fields: true - type: object - status: - type: object - properties: - netId: - type: string - populated: - type: boolean - routerId: - type: string - subnetCIDR: - type: string - subnetId: - type: string - nsLabels: - x-kubernetes-preserve-unknown-fields: true - type: object diff --git a/kubernetes_crds/kuryr_crds/kuryrnetworkpolicy.yaml b/kubernetes_crds/kuryr_crds/kuryrnetworkpolicy.yaml deleted file mode 100644 index 3726409d8..000000000 --- a/kubernetes_crds/kuryr_crds/kuryrnetworkpolicy.yaml +++ /dev/null @@ -1,158 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - name: kuryrnetworkpolicies.openstack.org -spec: - group: openstack.org - scope: Namespaced - names: - plural: kuryrnetworkpolicies - singular: kuryrnetworkpolicy - kind: KuryrNetworkPolicy - shortNames: - - knp - versions: - - name: v1 - served: true - storage: true - additionalPrinterColumns: - - name: SG-ID - type: string - description: The ID of the SG associated to the policy - jsonPath: .status.securityGroupId - - name: Age - type: date - jsonPath: .metadata.creationTimestamp - schema: - openAPIV3Schema: - type: object - required: - - status - - spec - properties: - spec: - type: object - required: - - egressSgRules - - ingressSgRules - - podSelector - - policyTypes - properties: - egressSgRules: - type: array - items: - type: object - required: - - sgRule - properties: - affectedPods: - type: array - items: - type: object - properties: - podIP: - type: string - podNamespace: - type: string - required: - - podIP - - podNamespace - namespace: - type: string - sgRule: - type: object - properties: - description: - type: string - direction: - type: string - ethertype: - type: string - port_range_max: - type: integer - port_range_min: - type: integer - protocol: - type: string - remote_ip_prefix: - type: string - ingressSgRules: - type: array - items: - type: object - required: - - sgRule - properties: - affectedPods: - type: array - items: - type: object - properties: - podIP: - type: string - podNamespace: - type: string - required: - - podIP - - podNamespace - namespace: - type: string - sgRule: - type: object - properties: - description: - type: string - direction: - type: string - ethertype: - type: string - port_range_max: - type: integer - port_range_min: - type: integer - protocol: - type: string - remote_ip_prefix: - type: string - podSelector: - x-kubernetes-preserve-unknown-fields: true - type: object - policyTypes: - type: array - items: - type: string - status: - type: object - required: - - securityGroupRules - properties: - securityGroupId: - type: string - securityGroupRules: - type: array - items: - type: object - required: - - id - properties: - id: - type: string - description: - type: string - direction: - type: string - ethertype: - type: string - port_range_max: - type: integer - port_range_min: - type: integer - protocol: - type: string - remote_ip_prefix: - type: string - security_group_id: - type: string - podSelector: - x-kubernetes-preserve-unknown-fields: true - type: object diff --git a/kubernetes_crds/kuryr_crds/kuryrport.yaml b/kubernetes_crds/kuryr_crds/kuryrport.yaml deleted file mode 100644 index e320e048a..000000000 --- a/kubernetes_crds/kuryr_crds/kuryrport.yaml +++ /dev/null @@ -1,54 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 -kind: 
CustomResourceDefinition -metadata: - name: kuryrports.openstack.org -spec: - group: openstack.org - scope: Namespaced - names: - plural: kuryrports - singular: kuryrport - kind: KuryrPort - shortNames: - - kp - versions: - - name: v1 - served: true - storage: true - schema: - openAPIV3Schema: - type: object - properties: - spec: - type: object - required: - - podUid - - podNodeName - properties: - podUid: - type: string - podNodeName: - type: string - podStatic: - type: boolean - status: - type: object - required: - - vifs - properties: - vifs: - type: object - x-kubernetes-preserve-unknown-fields: true - additionalPrinterColumns: - - name: PodUID - type: string - description: Pod UID - jsonPath: .spec.podUid - - name: Nodename - type: string - description: Name of the node corresponding pod lives in - jsonPath: .spec.podNodeName - - name: labels - type: string - description: Labels for the CRD - jsonPath: .metadata.labels diff --git a/kubernetes_crds/network_attachment_definition_crd.yaml b/kubernetes_crds/network_attachment_definition_crd.yaml deleted file mode 100644 index 0b0169711..000000000 --- a/kubernetes_crds/network_attachment_definition_crd.yaml +++ /dev/null @@ -1,26 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - name: network-attachment-definitions.k8s.cni.cncf.io -spec: - group: k8s.cni.cncf.io - scope: Namespaced - names: - plural: network-attachment-definitions - singular: network-attachment-definition - kind: NetworkAttachmentDefinition - shortNames: - - net-attach-def - versions: - - name: v1 - served: true - storage: true - schema: - openAPIV3Schema: - type: object - properties: - spec: - type: object - properties: - config: - type: string diff --git a/kuryr_cni/README b/kuryr_cni/README deleted file mode 100644 index dccc35499..000000000 --- a/kuryr_cni/README +++ /dev/null @@ -1,2 +0,0 @@ -This is golang part of Kuryr, that is the CNI plugin that gets injected into -the host. 
\ No newline at end of file diff --git a/kuryr_cni/go.mod b/kuryr_cni/go.mod deleted file mode 100644 index 37b3eb1db..000000000 --- a/kuryr_cni/go.mod +++ /dev/null @@ -1,9 +0,0 @@ -module main - -go 1.15 - -require ( - github.com/containernetworking/cni v0.8.1 - github.com/onsi/ginkgo v1.16.1 // indirect - github.com/onsi/gomega v1.11.0 // indirect -) diff --git a/kuryr_cni/go.sum b/kuryr_cni/go.sum deleted file mode 100644 index 8097d4b9b..000000000 --- a/kuryr_cni/go.sum +++ /dev/null @@ -1,89 +0,0 @@ -github.com/containernetworking/cni v0.8.1 h1:7zpDnQ3T3s4ucOuJ/ZCLrYBxzkg0AELFfII3Epo9TmI= -github.com/containernetworking/cni v0.8.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY= -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= -github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4= -github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= -github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= -github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= -github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= -github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= -github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= -github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= -github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.4.3 h1:JjCZWpVbqXDqFVmTfYWEVTMIYrL/NPdPSCHPJ0T/raM= -github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4= -github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= -github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= -github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= -github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= -github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= -github.com/onsi/ginkgo v1.16.1 h1:foqVmeWDD6yYpK+Yz3fHyNIxFYNxswxqNFjSKe+vI54= -github.com/onsi/ginkgo v1.16.1/go.mod h1:CObGmKUOKaSC0RjmoAK7tKyn4Azo5P2IWuoMnvwxz1E= -github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= -github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= -github.com/onsi/gomega v1.11.0 h1:+CqWgvj0OZycCaqclBD1pxKHAU+tOkHmQIWvDHq2aug= -github.com/onsi/gomega v1.11.0/go.mod h1:azGKhqFUon9Vuj0YmTfLSmx0FUwqXYSTl5re8lQLTUg= -github.com/pmezard/go-difflib 
v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= -github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb h1:eBmm0M9fYhWpKZLjQUUKka/LtIxf46G4fxeEz5KJr9U= -golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210112080510-489259a85091 h1:DMyOG0U+gKfu8JZzg2UQe9MeaC1X+xQWlAKcRnjxjCw= -golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k= -golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod 
h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= -golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= -google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= -google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= -google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= -google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= -google.golang.org/protobuf v1.23.0 h1:4MY060fB1DLGMB/7MBTLnwQUY6+F09GEiz6SsrNqyzM= -google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= -gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= -gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= -gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= diff --git a/kuryr_cni/hack/build-go.sh b/kuryr_cni/hack/build-go.sh deleted file mode 100755 index 71883e4af..000000000 --- a/kuryr_cni/hack/build-go.sh +++ /dev/null @@ -1,9 +0,0 @@ -#!/usr/bin/env bash -set -eu -cmd=kuryr-cni -eval $(go env | grep -e "GOHOSTOS" -e "GOHOSTARCH") -GOOS=${GOOS:-${GOHOSTOS}} -GOARCH=${GOACH:-${GOHOSTARCH}} -GOFLAGS=${GOFLAGS:-} -GLDFLAGS=${GLDFLAGS:-} -CGO_ENABLED=0 GOOS=${GOOS} GOARCH=${GOARCH} go build ${GOFLAGS} -ldflags "${GLDFLAGS}" -o bin/${cmd} pkg/* diff --git a/kuryr_cni/hack/update-deps.sh b/kuryr_cni/hack/update-deps.sh deleted file mode 100755 index dd0fa036e..000000000 --- a/kuryr_cni/hack/update-deps.sh +++ /dev/null @@ -1,3 +0,0 @@ -#!/bin/bash -go mod vendor -go mod tidy diff --git a/kuryr_cni/pkg/main.go b/kuryr_cni/pkg/main.go deleted file mode 100644 index a866aaee1..000000000 --- a/kuryr_cni/pkg/main.go +++ /dev/null @@ -1,235 +0,0 @@ -package main - -import ( - "bytes" - "encoding/json" - "fmt" - "io/ioutil" - "log" - "net" - "net/http" - - "github.com/containernetworking/cni/pkg/skel" - "github.com/containernetworking/cni/pkg/types" - cni "github.com/containernetworking/cni/pkg/types" - "github.com/containernetworking/cni/pkg/types/current" - "github.com/containernetworking/cni/pkg/version" -) - -const ( - // FIXME(dulek): We don't really have a good way to fetch current URL:port binding here. 
- // I'm hardcoding it for now, but in the future we should probably put it in - // the JSON config in 10-kuryr.conflist file that we will get passed on stdin. - urlBase = "http://localhost:5036/" - addPath = "addNetwork" - delPath = "delNetwork" - - ErrVif uint = 899 - ErrParsing uint = 799 -) - -type KuryrDaemonData struct { - IfName string `json:"CNI_IFNAME"` - Netns string `json:"CNI_NETNS"` - Path string `json:"CNI_PATH"` - Command string `json:"CNI_COMMAND"` - ContainerID string `json:"CNI_CONTAINERID"` - Args string `json:"CNI_ARGS"` - KuryrConf interface{} `json:"config_kuryr"` -} - -func transformData(args *skel.CmdArgs, command string) (KuryrDaemonData, error) { - var conf interface{} - err := json.Unmarshal(args.StdinData, &conf) - if err != nil { - newErr := types.Error{ - Code: types.ErrDecodingFailure, - Msg: fmt.Sprintf("Error when reading configuration: %v", err), - Details: "", - } - return KuryrDaemonData{}, &newErr - } - - return KuryrDaemonData{ - IfName: args.IfName, - Netns: args.Netns, - Path: args.Path, - Command: command, - ContainerID: args.ContainerID, - Args: args.Args, - KuryrConf: conf, - }, nil -} - -func makeDaemonRequest(data KuryrDaemonData, expectedCode int) ([]byte, error) { - log.Printf("Calling kuryr-daemon with %s request (CNI_ARGS=%s, CNI_NETNS=%s).", data.Command, data.Args, data.Netns) - - b, err := json.Marshal(data) - if err != nil { - return []byte{}, &types.Error{ - Code: types.ErrInvalidNetworkConfig, - Msg: fmt.Sprintf("Error when preparing payload for kuryr-daemon: %v", err), - Details: "", - } - } - - url := "" - switch data.Command { - case "ADD": - url = urlBase + addPath - case "DEL": - url = urlBase + delPath - default: - return []byte{}, &types.Error{ - Code: types.ErrInvalidEnvironmentVariables, - Msg: fmt.Sprintf("Cannot handle command %s", data.Command), - Details: "", - } - } - - resp, err := http.Post(url, "application/json", bytes.NewBuffer(b)) - if err != nil { - return []byte{}, &types.Error{ - Code: types.ErrTryAgainLater, - Msg: fmt.Sprintf("Looks like %s cannot be reached. 
Is kuryr-daemon running?", url), - Details: fmt.Sprintf("%v", err), - } - } - defer resp.Body.Close() - - body, _ := ioutil.ReadAll(resp.Body) - if resp.StatusCode != expectedCode { - if len(body) > 1 { - var err types.Error - json.Unmarshal(body, &err) - return []byte{}, &err - } - return []byte{}, &types.Error{ - Code: uint(resp.StatusCode), - Msg: fmt.Sprintf("CNI Daemon returned error %d %s", resp.StatusCode, body), - Details: "", - } - } - return body, nil -} - -func cmdAdd(args *skel.CmdArgs) error { - data, err := transformData(args, "ADD") - if err != nil { - return err - } - - body, err := makeDaemonRequest(data, 202) - if err != nil { - return err - } - - vif := VIF{} - er := json.Unmarshal(body, &vif) - if er != nil { - return &types.Error{ - Code: ErrVif, - Msg: fmt.Sprintf("Error when reading response from kuryr-daemon: %s", string(body)), - Details: fmt.Sprintf("%v", er), - } - } - - iface := current.Interface{} - iface.Name = args.IfName - iface.Mac = vif.Address - iface.Sandbox = args.ContainerID - - var ips []*current.IPConfig - var dns types.DNS - var routes []*types.Route - for _, subnet := range vif.Network.Subnets { - addrStr := subnet.Ips[0].Address - addr := net.ParseIP(addrStr) - if addr == nil { - return &types.Error{ - Code: ErrParsing, - Msg: fmt.Sprintf("Error when parsing IP address %s received from kuryr-daemon", addrStr), - Details: "", - } - } - _, cidr, err := net.ParseCIDR(subnet.Cidr) - if err != nil { - return &types.Error{ - Code: ErrParsing, - Msg: fmt.Sprintf("Error when parsing CIDR %s received from kuryr-daemon", subnet.Cidr), - Details: fmt.Sprintf("%v", err), - } - } - - ver := "4" - if addr.To4() == nil { - ver = "6" - } - - prefixSize, _ := cidr.Mask.Size() - ifaceCIDR := fmt.Sprintf("%s/%d", addr.String(), prefixSize) - ipAddress, err := cni.ParseCIDR(ifaceCIDR) - if err != nil { - return &types.Error{ - Code: ErrParsing, - Msg: fmt.Sprintf("Error when parsing CIDR %s received from kuryr-daemon", ifaceCIDR), - Details: fmt.Sprintf("%v", err), - } - } - ifaceNum := 0 - - ips = append(ips, &current.IPConfig{ - Version: ver, - Interface: &ifaceNum, - Gateway: net.ParseIP(subnet.Gateway), - Address: *ipAddress, - }) - - for _, route := range subnet.Routes { - _, dst, err := net.ParseCIDR(route.Cidr) - if err != nil { - return &types.Error{ - Code: ErrParsing, - Msg: fmt.Sprintf("Error when parsing CIDR %s received from kuryr-daemon", route.Cidr), - Details: fmt.Sprintf("%v", err), - } - } - - gw := net.ParseIP(route.Gateway) - if gw == nil { - return &types.Error{ - Code: ErrParsing, - Msg: fmt.Sprintf("Error when parsing IP address %s received from kuryr-daemon", route.Gateway), - Details: "", - } - } - - routes = append(routes, &types.Route{Dst: *dst, GW: gw}) - } - - dns.Nameservers = append(dns.Nameservers, subnet.DNS...)
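    // (Editor's aside, illustrative only; not part of the deleted file: the
    // address assembly above takes the IP verbatim from the VIF and only the
    // prefix length from the subnet CIDR, e.g.
    //
    //     addr := net.ParseIP("10.0.0.5")            // from subnet.Ips[0].Address
    //     _, cidr, _ := net.ParseCIDR("10.0.0.0/24") // from subnet.Cidr
    //     prefixSize, _ := cidr.Mask.Size()          // 24
    //     fmt.Sprintf("%s/%d", addr, prefixSize)     // "10.0.0.5/24"
    //
    // net.ParseCIDR alone would yield the network address 10.0.0.0, which is
    // why the combined string is re-parsed with cni.ParseCIDR, the vendored
    // variant below that preserves the host IP.)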
- } - - res := &current.Result{ - Interfaces: []*current.Interface{&iface}, - IPs: ips, - DNS: dns, - Routes: routes, - } - - return types.PrintResult(res, "0.3.1") -} - -func cmdDel(args *skel.CmdArgs) error { - data, err := transformData(args, "DEL") - if err != nil { - return err - } - _, err = makeDaemonRequest(data, 204) - return err -} - -func cmdCheck(args *skel.CmdArgs) error { - return nil -} - -func main() { - skel.PluginMain(cmdAdd, cmdCheck, cmdDel, version.All, "CNI Plugin Kuryr-Kubernetes v1.0.0") -} diff --git a/kuryr_cni/pkg/ovo.go b/kuryr_cni/pkg/ovo.go deleted file mode 100644 index ac67833f0..000000000 --- a/kuryr_cni/pkg/ovo.go +++ /dev/null @@ -1,120 +0,0 @@ -package main - -import ( - "encoding/json" - "reflect" -) - -const ( - vod = "versioned_object.data" - ob = "objects" -) - -type VIF struct { - Network Network `json:"network"` - Address string `json:"address"` - VifName string `json:"vif_name"` -} - -type Network struct { - Subnets []Subnet `json:"subnets"` -} - -type Route struct { - Cidr string `json:"cidr"` - Gateway string `json:"gateway"` -} - -type IP struct { - Address string `json:"address"` -} - -type Subnet struct { - Routes []Route `json:"routes"` - Ips []IP `json:"ips"` - Cidr string `json:"cidr"` - Gateway string `json:"gateway"` - DNS []string `json:"dns"` -} - -func UnmarshalOVO(data []byte, r interface{}) error { - // Unmarshal into a generic map - var i map[string]interface{} - if err := json.Unmarshal(data, &i); err != nil { - return err - } - - // Skip versioned_object.data level - d := i[vod].(map[string]interface{}) - - p := reflect.ValueOf(r) // this will be a pointer - v := p.Elem() // dereferences pointer - t := v.Type() // gets type of the struct - - // Go over fields of the struct - for i := 0; i < t.NumField(); i++ { - // Initial info - field := t.Field(i) - fieldVal := v.Field(i) - key := field.Tag.Get("json") - - var obj interface{} - - // Main switch - switch fieldVal.Kind() { - case reflect.String: - // In case of string let's just write it and we're done (hence continue) - fieldVal.SetString(d[key].(string)) - continue - case reflect.Slice: - if reflect.ValueOf(d[key]).Kind() != reflect.Slice { - // It's a list with next level of "versioned_object.data" and then "objects" keys. Let's flatten this. - listObj := d[key].(map[string]interface{}) - listData := listObj[vod].(map[string]interface{}) - obj = listData[ob].([]interface{}) - break - } - // If we have a slice and d[key] is just a simple list, then struct's approach will work fine, that's - // why there's this fallthrough. - fallthrough - case reflect.Struct: - // Treat it as struct - obj = d[key] - } - - // For slices and structs marshal that level of JSON, and unmarshal them into the result. The weird - // approach with reflect.New is forced by how reflections work in golang.
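    // (Editor's aside, illustrative only; not part of the deleted file: the
    // payload being flattened here is an OpenStack "versioned object", in
    // which every object is wrapped in a "versioned_object.data" dict and
    // every list in a further "objects" key, roughly:
    //
    //     {"versioned_object.data": {
    //         "address": "fa:16:3e:00:00:01",
    //         "network": {"versioned_object.data": {
    //             "subnets": {"versioned_object.data": {
    //                 "objects": [ ...subnet OVOs... ]}}}}}}
    //
    // reflect.New(fieldVal.Type()) allocates a fresh addressable value of the
    // field's static type so that json.Unmarshal has a pointer to fill; the
    // filled value is then copied back with fieldVal.Set.)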
- jsonBytes, err := json.Marshal(obj) - if err != nil { - return err - } - new := reflect.New(fieldVal.Type()) - inter := new.Interface() - if err := json.Unmarshal(jsonBytes, &inter); err != nil { - return err - } - fieldVal.Set(new.Elem()) - } - - return nil -} - -func (v *VIF) UnmarshalJSON(data []byte) error { - return UnmarshalOVO(data, v) -} - -func (v *Network) UnmarshalJSON(data []byte) error { - return UnmarshalOVO(data, v) -} - -func (v *Subnet) UnmarshalJSON(data []byte) error { - return UnmarshalOVO(data, v) -} - -func (v *IP) UnmarshalJSON(data []byte) error { - return UnmarshalOVO(data, v) -} - -func (v *Route) UnmarshalJSON(data []byte) error { - return UnmarshalOVO(data, v) -} diff --git a/kuryr_cni/vendor/github.com/containernetworking/cni/LICENSE b/kuryr_cni/vendor/github.com/containernetworking/cni/LICENSE deleted file mode 100644 index 8f71f43fe..000000000 --- a/kuryr_cni/vendor/github.com/containernetworking/cni/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. 
The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "{}" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright {yyyy} {name of copyright owner} - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - diff --git a/kuryr_cni/vendor/github.com/containernetworking/cni/pkg/skel/skel.go b/kuryr_cni/vendor/github.com/containernetworking/cni/pkg/skel/skel.go deleted file mode 100644 index da42db559..000000000 --- a/kuryr_cni/vendor/github.com/containernetworking/cni/pkg/skel/skel.go +++ /dev/null @@ -1,296 +0,0 @@ -// Copyright 2014-2016 CNI authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package skel provides skeleton code for a CNI plugin. -// In particular, it implements argument parsing and validation. 
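[Editor's note: for orientation, a minimal plugin built on this vendored package looks roughly like the sketch below; kuryr_cni/pkg/main.go above is a fleshed-out version of the same pattern. The names and the no-op CHECK/DEL handlers are illustrative, not part of the patch.]

    package main

    import (
        "github.com/containernetworking/cni/pkg/skel"
        "github.com/containernetworking/cni/pkg/types/current"
        "github.com/containernetworking/cni/pkg/version"
    )

    func cmdAdd(args *skel.CmdArgs) error {
        // ...wire up the interface, then report it back to the runtime...
        return (&current.Result{CNIVersion: "0.3.1"}).Print()
    }

    func cmdNoop(args *skel.CmdArgs) error { return nil }

    func main() {
        // skel handles env-var parsing, stdin reading and version negotiation.
        skel.PluginMain(cmdAdd, cmdNoop, cmdNoop, version.All, "example CNI plugin v0.1")
    }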
-package skel - -import ( - "bytes" - "encoding/json" - "fmt" - "io" - "io/ioutil" - "log" - "os" - "strings" - - "github.com/containernetworking/cni/pkg/types" - "github.com/containernetworking/cni/pkg/utils" - "github.com/containernetworking/cni/pkg/version" -) - -// CmdArgs captures all the arguments passed in to the plugin -// via both env vars and stdin -type CmdArgs struct { - ContainerID string - Netns string - IfName string - Args string - Path string - StdinData []byte -} - -type dispatcher struct { - Getenv func(string) string - Stdin io.Reader - Stdout io.Writer - Stderr io.Writer - - ConfVersionDecoder version.ConfigDecoder - VersionReconciler version.Reconciler -} - -type reqForCmdEntry map[string]bool - -func (t *dispatcher) getCmdArgsFromEnv() (string, *CmdArgs, *types.Error) { - var cmd, contID, netns, ifName, args, path string - - vars := []struct { - name string - val *string - reqForCmd reqForCmdEntry - }{ - { - "CNI_COMMAND", - &cmd, - reqForCmdEntry{ - "ADD": true, - "CHECK": true, - "DEL": true, - }, - }, - { - "CNI_CONTAINERID", - &contID, - reqForCmdEntry{ - "ADD": true, - "CHECK": true, - "DEL": true, - }, - }, - { - "CNI_NETNS", - &netns, - reqForCmdEntry{ - "ADD": true, - "CHECK": true, - "DEL": false, - }, - }, - { - "CNI_IFNAME", - &ifName, - reqForCmdEntry{ - "ADD": true, - "CHECK": true, - "DEL": true, - }, - }, - { - "CNI_ARGS", - &args, - reqForCmdEntry{ - "ADD": false, - "CHECK": false, - "DEL": false, - }, - }, - { - "CNI_PATH", - &path, - reqForCmdEntry{ - "ADD": true, - "CHECK": true, - "DEL": true, - }, - }, - } - - argsMissing := make([]string, 0) - for _, v := range vars { - *v.val = t.Getenv(v.name) - if *v.val == "" { - if v.reqForCmd[cmd] || v.name == "CNI_COMMAND" { - argsMissing = append(argsMissing, v.name) - } - } - } - - if len(argsMissing) > 0 { - joined := strings.Join(argsMissing, ",") - return "", nil, types.NewError(types.ErrInvalidEnvironmentVariables, fmt.Sprintf("required env variables [%s] missing", joined), "") - } - - if cmd == "VERSION" { - t.Stdin = bytes.NewReader(nil) - } - - stdinData, err := ioutil.ReadAll(t.Stdin) - if err != nil { - return "", nil, types.NewError(types.ErrIOFailure, fmt.Sprintf("error reading from stdin: %v", err), "") - } - - cmdArgs := &CmdArgs{ - ContainerID: contID, - Netns: netns, - IfName: ifName, - Args: args, - Path: path, - StdinData: stdinData, - } - return cmd, cmdArgs, nil -} - -func (t *dispatcher) checkVersionAndCall(cmdArgs *CmdArgs, pluginVersionInfo version.PluginInfo, toCall func(*CmdArgs) error) *types.Error { - configVersion, err := t.ConfVersionDecoder.Decode(cmdArgs.StdinData) - if err != nil { - return types.NewError(types.ErrDecodingFailure, err.Error(), "") - } - verErr := t.VersionReconciler.Check(configVersion, pluginVersionInfo) - if verErr != nil { - return types.NewError(types.ErrIncompatibleCNIVersion, "incompatible CNI versions", verErr.Details()) - } - - if err = toCall(cmdArgs); err != nil { - if e, ok := err.(*types.Error); ok { - // don't wrap Error in Error - return e - } - return types.NewError(types.ErrInternal, err.Error(), "") - } - - return nil -} - -func validateConfig(jsonBytes []byte) *types.Error { - var conf struct { - Name string `json:"name"` - } - if err := json.Unmarshal(jsonBytes, &conf); err != nil { - return types.NewError(types.ErrDecodingFailure, fmt.Sprintf("error unmarshall network config: %v", err), "") - } - if conf.Name == "" { - return types.NewError(types.ErrInvalidNetworkConfig, "missing network name", "") - } - if err := 
utils.ValidateNetworkName(conf.Name); err != nil { - return err - } - return nil -} - -func (t *dispatcher) pluginMain(cmdAdd, cmdCheck, cmdDel func(_ *CmdArgs) error, versionInfo version.PluginInfo, about string) *types.Error { - cmd, cmdArgs, err := t.getCmdArgsFromEnv() - if err != nil { - // Print the about string to stderr when no command is set - if err.Code == types.ErrInvalidEnvironmentVariables && t.Getenv("CNI_COMMAND") == "" && about != "" { - _, _ = fmt.Fprintln(t.Stderr, about) - return nil - } - return err - } - - if cmd != "VERSION" { - if err = validateConfig(cmdArgs.StdinData); err != nil { - return err - } - if err = utils.ValidateContainerID(cmdArgs.ContainerID); err != nil { - return err - } - if err = utils.ValidateInterfaceName(cmdArgs.IfName); err != nil { - return err - } - } - - switch cmd { - case "ADD": - err = t.checkVersionAndCall(cmdArgs, versionInfo, cmdAdd) - case "CHECK": - configVersion, err := t.ConfVersionDecoder.Decode(cmdArgs.StdinData) - if err != nil { - return types.NewError(types.ErrDecodingFailure, err.Error(), "") - } - if gtet, err := version.GreaterThanOrEqualTo(configVersion, "0.4.0"); err != nil { - return types.NewError(types.ErrDecodingFailure, err.Error(), "") - } else if !gtet { - return types.NewError(types.ErrIncompatibleCNIVersion, "config version does not allow CHECK", "") - } - for _, pluginVersion := range versionInfo.SupportedVersions() { - gtet, err := version.GreaterThanOrEqualTo(pluginVersion, configVersion) - if err != nil { - return types.NewError(types.ErrDecodingFailure, err.Error(), "") - } else if gtet { - if err := t.checkVersionAndCall(cmdArgs, versionInfo, cmdCheck); err != nil { - return err - } - return nil - } - } - return types.NewError(types.ErrIncompatibleCNIVersion, "plugin version does not allow CHECK", "") - case "DEL": - err = t.checkVersionAndCall(cmdArgs, versionInfo, cmdDel) - case "VERSION": - if err := versionInfo.Encode(t.Stdout); err != nil { - return types.NewError(types.ErrIOFailure, err.Error(), "") - } - default: - return types.NewError(types.ErrInvalidEnvironmentVariables, fmt.Sprintf("unknown CNI_COMMAND: %v", cmd), "") - } - - if err != nil { - return err - } - return nil -} - -// PluginMainWithError is the core "main" for a plugin. It accepts -// callback functions for add, check, and del CNI commands and returns an error. -// -// The caller must also specify what CNI spec versions the plugin supports. -// -// It is the responsibility of the caller to check for non-nil error return. -// -// For a plugin to comply with the CNI spec, it must print any error to stdout -// as JSON and then exit with nonzero status code. -// -// To let this package automatically handle errors and call os.Exit(1) for you, -// use PluginMain() instead. -func PluginMainWithError(cmdAdd, cmdCheck, cmdDel func(_ *CmdArgs) error, versionInfo version.PluginInfo, about string) *types.Error { - return (&dispatcher{ - Getenv: os.Getenv, - Stdin: os.Stdin, - Stdout: os.Stdout, - Stderr: os.Stderr, - }).pluginMain(cmdAdd, cmdCheck, cmdDel, versionInfo, about) -} - -// PluginMain is the core "main" for a plugin which includes automatic error handling. -// -// The caller must also specify what CNI spec versions the plugin supports. -// -// The caller can specify an "about" string, which is printed on stderr -// when no CNI_COMMAND is specified. The recommended output is "CNI plugin v" -// -// When an error occurs in either cmdAdd, cmdCheck, or cmdDel, PluginMain will print the error -// as JSON to stdout and call os.Exit(1). 
-// -// To have more control over error handling, use PluginMainWithError() instead. -func PluginMain(cmdAdd, cmdCheck, cmdDel func(_ *CmdArgs) error, versionInfo version.PluginInfo, about string) { - if e := PluginMainWithError(cmdAdd, cmdCheck, cmdDel, versionInfo, about); e != nil { - if err := e.Print(); err != nil { - log.Print("Error writing error JSON to stdout: ", err) - } - os.Exit(1) - } -} diff --git a/kuryr_cni/vendor/github.com/containernetworking/cni/pkg/types/020/types.go b/kuryr_cni/vendor/github.com/containernetworking/cni/pkg/types/020/types.go deleted file mode 100644 index 36f31678a..000000000 --- a/kuryr_cni/vendor/github.com/containernetworking/cni/pkg/types/020/types.go +++ /dev/null @@ -1,126 +0,0 @@ -// Copyright 2016 CNI authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package types020 - -import ( - "encoding/json" - "fmt" - "io" - "net" - "os" - - "github.com/containernetworking/cni/pkg/types" -) - -const ImplementedSpecVersion string = "0.2.0" - -var SupportedVersions = []string{"", "0.1.0", ImplementedSpecVersion} - -// Compatibility types for CNI version 0.1.0 and 0.2.0 - -func NewResult(data []byte) (types.Result, error) { - result := &Result{} - if err := json.Unmarshal(data, result); err != nil { - return nil, err - } - return result, nil -} - -func GetResult(r types.Result) (*Result, error) { - // We expect version 0.1.0/0.2.0 results - result020, err := r.GetAsVersion(ImplementedSpecVersion) - if err != nil { - return nil, err - } - result, ok := result020.(*Result) - if !ok { - return nil, fmt.Errorf("failed to convert result") - } - return result, nil -} - -// Result is what gets returned from the plugin (via stdout) to the caller -type Result struct { - CNIVersion string `json:"cniVersion,omitempty"` - IP4 *IPConfig `json:"ip4,omitempty"` - IP6 *IPConfig `json:"ip6,omitempty"` - DNS types.DNS `json:"dns,omitempty"` -} - -func (r *Result) Version() string { - return ImplementedSpecVersion -} - -func (r *Result) GetAsVersion(version string) (types.Result, error) { - for _, supportedVersion := range SupportedVersions { - if version == supportedVersion { - r.CNIVersion = version - return r, nil - } - } - return nil, fmt.Errorf("cannot convert version %q to %s", SupportedVersions, version) -} - -func (r *Result) Print() error { - return r.PrintTo(os.Stdout) -} - -func (r *Result) PrintTo(writer io.Writer) error { - data, err := json.MarshalIndent(r, "", " ") - if err != nil { - return err - } - _, err = writer.Write(data) - return err -} - -// IPConfig contains values necessary to configure an interface -type IPConfig struct { - IP net.IPNet - Gateway net.IP - Routes []types.Route -} - -// net.IPNet is not JSON (un)marshallable so this duality is needed -// for our custom IPNet type - -// JSON (un)marshallable types -type ipConfig struct { - IP types.IPNet `json:"ip"` - Gateway net.IP `json:"gateway,omitempty"` - Routes []types.Route `json:"routes,omitempty"` -} - -func (c *IPConfig) MarshalJSON() ([]byte, error) { - ipc := ipConfig{ - IP: 
types.IPNet(c.IP), - Gateway: c.Gateway, - Routes: c.Routes, - } - - return json.Marshal(ipc) -} - -func (c *IPConfig) UnmarshalJSON(data []byte) error { - ipc := ipConfig{} - if err := json.Unmarshal(data, &ipc); err != nil { - return err - } - - c.IP = net.IPNet(ipc.IP) - c.Gateway = ipc.Gateway - c.Routes = ipc.Routes - return nil -} diff --git a/kuryr_cni/vendor/github.com/containernetworking/cni/pkg/types/args.go b/kuryr_cni/vendor/github.com/containernetworking/cni/pkg/types/args.go deleted file mode 100644 index 4eac64899..000000000 --- a/kuryr_cni/vendor/github.com/containernetworking/cni/pkg/types/args.go +++ /dev/null @@ -1,112 +0,0 @@ -// Copyright 2015 CNI authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package types - -import ( - "encoding" - "fmt" - "reflect" - "strings" -) - -// UnmarshallableBool typedef for builtin bool -// because builtin type's methods can't be declared -type UnmarshallableBool bool - -// UnmarshalText implements the encoding.TextUnmarshaler interface. -// Returns boolean true if the string is "1" or "[Tt]rue" -// Returns boolean false if the string is "0" or "[Ff]alse" -func (b *UnmarshallableBool) UnmarshalText(data []byte) error { - s := strings.ToLower(string(data)) - switch s { - case "1", "true": - *b = true - case "0", "false": - *b = false - default: - return fmt.Errorf("boolean unmarshal error: invalid input %s", s) - } - return nil -} - -// UnmarshallableString typedef for builtin string -type UnmarshallableString string - -// UnmarshalText implements the encoding.TextUnmarshaler interface. -// Returns the string -func (s *UnmarshallableString) UnmarshalText(data []byte) error { - *s = UnmarshallableString(data) - return nil -} - -// CommonArgs contains the IgnoreUnknown argument -// and must be embedded by all Arg structs -type CommonArgs struct { - IgnoreUnknown UnmarshallableBool `json:"ignoreunknown,omitempty"` -} - -// GetKeyField is a helper function to receive Values -// Values that represent a pointer to a struct -func GetKeyField(keyString string, v reflect.Value) reflect.Value { - return v.Elem().FieldByName(keyString) -} - -// UnmarshalableArgsError is used to indicate error unmarshalling args -// from the args-string in the form "K=V;K2=V2;..." -type UnmarshalableArgsError struct { - error -} - -// LoadArgs parses args from a string in the form "K=V;K2=V2;..." 
-func LoadArgs(args string, container interface{}) error { - if args == "" { - return nil - } - - containerValue := reflect.ValueOf(container) - - pairs := strings.Split(args, ";") - unknownArgs := []string{} - for _, pair := range pairs { - kv := strings.Split(pair, "=") - if len(kv) != 2 { - return fmt.Errorf("ARGS: invalid pair %q", pair) - } - keyString := kv[0] - valueString := kv[1] - keyField := GetKeyField(keyString, containerValue) - if !keyField.IsValid() { - unknownArgs = append(unknownArgs, pair) - continue - } - keyFieldIface := keyField.Addr().Interface() - u, ok := keyFieldIface.(encoding.TextUnmarshaler) - if !ok { - return UnmarshalableArgsError{fmt.Errorf( - "ARGS: cannot unmarshal into field '%s' - type '%s' does not implement encoding.TextUnmarshaler", - keyString, reflect.TypeOf(keyFieldIface))} - } - err := u.UnmarshalText([]byte(valueString)) - if err != nil { - return fmt.Errorf("ARGS: error parsing value of pair %q: %v)", pair, err) - } - } - - isIgnoreUnknown := GetKeyField("IgnoreUnknown", containerValue).Bool() - if len(unknownArgs) > 0 && !isIgnoreUnknown { - return fmt.Errorf("ARGS: unknown args %q", unknownArgs) - } - return nil -} diff --git a/kuryr_cni/vendor/github.com/containernetworking/cni/pkg/types/current/types.go b/kuryr_cni/vendor/github.com/containernetworking/cni/pkg/types/current/types.go deleted file mode 100644 index 754cc6e72..000000000 --- a/kuryr_cni/vendor/github.com/containernetworking/cni/pkg/types/current/types.go +++ /dev/null @@ -1,276 +0,0 @@ -// Copyright 2016 CNI authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
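[Editor's note: a usage sketch for LoadArgs above. The K8sArgs struct is the conventional way Kubernetes runtimes pass pod metadata through CNI_ARGS; it is illustrative here, not defined in this repository.]

    package main

    import (
        "fmt"

        "github.com/containernetworking/cni/pkg/types"
    )

    // Field names must match the K=V keys verbatim, since LoadArgs looks
    // them up with reflect's FieldByName.
    type K8sArgs struct {
        types.CommonArgs
        K8S_POD_NAMESPACE types.UnmarshallableString
        K8S_POD_NAME      types.UnmarshallableString
    }

    func main() {
        a := K8sArgs{}
        err := types.LoadArgs("IgnoreUnknown=1;K8S_POD_NAMESPACE=default;K8S_POD_NAME=nginx", &a)
        fmt.Println(err, a.K8S_POD_NAME) // <nil> nginx
    }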
- -package current - -import ( - "encoding/json" - "fmt" - "io" - "net" - "os" - - "github.com/containernetworking/cni/pkg/types" - "github.com/containernetworking/cni/pkg/types/020" -) - -const ImplementedSpecVersion string = "0.4.0" - -var SupportedVersions = []string{"0.3.0", "0.3.1", ImplementedSpecVersion} - -func NewResult(data []byte) (types.Result, error) { - result := &Result{} - if err := json.Unmarshal(data, result); err != nil { - return nil, err - } - return result, nil -} - -func GetResult(r types.Result) (*Result, error) { - resultCurrent, err := r.GetAsVersion(ImplementedSpecVersion) - if err != nil { - return nil, err - } - result, ok := resultCurrent.(*Result) - if !ok { - return nil, fmt.Errorf("failed to convert result") - } - return result, nil -} - -var resultConverters = []struct { - versions []string - convert func(types.Result) (*Result, error) -}{ - {types020.SupportedVersions, convertFrom020}, - {SupportedVersions, convertFrom030}, -} - -func convertFrom020(result types.Result) (*Result, error) { - oldResult, err := types020.GetResult(result) - if err != nil { - return nil, err - } - - newResult := &Result{ - CNIVersion: ImplementedSpecVersion, - DNS: oldResult.DNS, - Routes: []*types.Route{}, - } - - if oldResult.IP4 != nil { - newResult.IPs = append(newResult.IPs, &IPConfig{ - Version: "4", - Address: oldResult.IP4.IP, - Gateway: oldResult.IP4.Gateway, - }) - for _, route := range oldResult.IP4.Routes { - newResult.Routes = append(newResult.Routes, &types.Route{ - Dst: route.Dst, - GW: route.GW, - }) - } - } - - if oldResult.IP6 != nil { - newResult.IPs = append(newResult.IPs, &IPConfig{ - Version: "6", - Address: oldResult.IP6.IP, - Gateway: oldResult.IP6.Gateway, - }) - for _, route := range oldResult.IP6.Routes { - newResult.Routes = append(newResult.Routes, &types.Route{ - Dst: route.Dst, - GW: route.GW, - }) - } - } - - return newResult, nil -} - -func convertFrom030(result types.Result) (*Result, error) { - newResult, ok := result.(*Result) - if !ok { - return nil, fmt.Errorf("failed to convert result") - } - newResult.CNIVersion = ImplementedSpecVersion - return newResult, nil -} - -func NewResultFromResult(result types.Result) (*Result, error) { - version := result.Version() - for _, converter := range resultConverters { - for _, supportedVersion := range converter.versions { - if version == supportedVersion { - return converter.convert(result) - } - } - } - return nil, fmt.Errorf("unsupported CNI result22 version %q", version) -} - -// Result is what gets returned from the plugin (via stdout) to the caller -type Result struct { - CNIVersion string `json:"cniVersion,omitempty"` - Interfaces []*Interface `json:"interfaces,omitempty"` - IPs []*IPConfig `json:"ips,omitempty"` - Routes []*types.Route `json:"routes,omitempty"` - DNS types.DNS `json:"dns,omitempty"` -} - -// Convert to the older 0.2.0 CNI spec Result type -func (r *Result) convertTo020() (*types020.Result, error) { - oldResult := &types020.Result{ - CNIVersion: types020.ImplementedSpecVersion, - DNS: r.DNS, - } - - for _, ip := range r.IPs { - // Only convert the first IP address of each version as 0.2.0 - // and earlier cannot handle multiple IP addresses - if ip.Version == "4" && oldResult.IP4 == nil { - oldResult.IP4 = &types020.IPConfig{ - IP: ip.Address, - Gateway: ip.Gateway, - } - } else if ip.Version == "6" && oldResult.IP6 == nil { - oldResult.IP6 = &types020.IPConfig{ - IP: ip.Address, - Gateway: ip.Gateway, - } - } - - if oldResult.IP4 != nil && oldResult.IP6 != nil { - break - } - 
} - - for _, route := range r.Routes { - is4 := route.Dst.IP.To4() != nil - if is4 && oldResult.IP4 != nil { - oldResult.IP4.Routes = append(oldResult.IP4.Routes, types.Route{ - Dst: route.Dst, - GW: route.GW, - }) - } else if !is4 && oldResult.IP6 != nil { - oldResult.IP6.Routes = append(oldResult.IP6.Routes, types.Route{ - Dst: route.Dst, - GW: route.GW, - }) - } - } - - if oldResult.IP4 == nil && oldResult.IP6 == nil { - return nil, fmt.Errorf("cannot convert: no valid IP addresses") - } - - return oldResult, nil -} - -func (r *Result) Version() string { - return ImplementedSpecVersion -} - -func (r *Result) GetAsVersion(version string) (types.Result, error) { - switch version { - case "0.3.0", "0.3.1", ImplementedSpecVersion: - r.CNIVersion = version - return r, nil - case types020.SupportedVersions[0], types020.SupportedVersions[1], types020.SupportedVersions[2]: - return r.convertTo020() - } - return nil, fmt.Errorf("cannot convert version 0.3.x to %q", version) -} - -func (r *Result) Print() error { - return r.PrintTo(os.Stdout) -} - -func (r *Result) PrintTo(writer io.Writer) error { - data, err := json.MarshalIndent(r, "", " ") - if err != nil { - return err - } - _, err = writer.Write(data) - return err -} - -// Convert this old version result to the current CNI version result -func (r *Result) Convert() (*Result, error) { - return r, nil -} - -// Interface contains values about the created interfaces -type Interface struct { - Name string `json:"name"` - Mac string `json:"mac,omitempty"` - Sandbox string `json:"sandbox,omitempty"` -} - -func (i *Interface) String() string { - return fmt.Sprintf("%+v", *i) -} - -// Int returns a pointer to the int value passed in. Used to -// set the IPConfig.Interface field. -func Int(v int) *int { - return &v -} - -// IPConfig contains values necessary to configure an IP address on an interface -type IPConfig struct { - // IP version, either "4" or "6" - Version string - // Index into Result structs Interfaces list - Interface *int - Address net.IPNet - Gateway net.IP -} - -func (i *IPConfig) String() string { - return fmt.Sprintf("%+v", *i) -} - -// JSON (un)marshallable types -type ipConfig struct { - Version string `json:"version"` - Interface *int `json:"interface,omitempty"` - Address types.IPNet `json:"address"` - Gateway net.IP `json:"gateway,omitempty"` -} - -func (c *IPConfig) MarshalJSON() ([]byte, error) { - ipc := ipConfig{ - Version: c.Version, - Interface: c.Interface, - Address: types.IPNet(c.Address), - Gateway: c.Gateway, - } - - return json.Marshal(ipc) -} - -func (c *IPConfig) UnmarshalJSON(data []byte) error { - ipc := ipConfig{} - if err := json.Unmarshal(data, &ipc); err != nil { - return err - } - - c.Version = ipc.Version - c.Interface = ipc.Interface - c.Address = net.IPNet(ipc.Address) - c.Gateway = ipc.Gateway - return nil -} diff --git a/kuryr_cni/vendor/github.com/containernetworking/cni/pkg/types/types.go b/kuryr_cni/vendor/github.com/containernetworking/cni/pkg/types/types.go deleted file mode 100644 index 3fa757a5d..000000000 --- a/kuryr_cni/vendor/github.com/containernetworking/cni/pkg/types/types.go +++ /dev/null @@ -1,207 +0,0 @@ -// Copyright 2015 CNI authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package types - -import ( - "encoding/json" - "fmt" - "io" - "net" - "os" -) - -// like net.IPNet but adds JSON marshalling and unmarshalling -type IPNet net.IPNet - -// ParseCIDR takes a string like "10.2.3.1/24" and -// return IPNet with "10.2.3.1" and /24 mask -func ParseCIDR(s string) (*net.IPNet, error) { - ip, ipn, err := net.ParseCIDR(s) - if err != nil { - return nil, err - } - - ipn.IP = ip - return ipn, nil -} - -func (n IPNet) MarshalJSON() ([]byte, error) { - return json.Marshal((*net.IPNet)(&n).String()) -} - -func (n *IPNet) UnmarshalJSON(data []byte) error { - var s string - if err := json.Unmarshal(data, &s); err != nil { - return err - } - - tmp, err := ParseCIDR(s) - if err != nil { - return err - } - - *n = IPNet(*tmp) - return nil -} - -// NetConf describes a network. -type NetConf struct { - CNIVersion string `json:"cniVersion,omitempty"` - - Name string `json:"name,omitempty"` - Type string `json:"type,omitempty"` - Capabilities map[string]bool `json:"capabilities,omitempty"` - IPAM IPAM `json:"ipam,omitempty"` - DNS DNS `json:"dns"` - - RawPrevResult map[string]interface{} `json:"prevResult,omitempty"` - PrevResult Result `json:"-"` -} - -type IPAM struct { - Type string `json:"type,omitempty"` -} - -// NetConfList describes an ordered list of networks. -type NetConfList struct { - CNIVersion string `json:"cniVersion,omitempty"` - - Name string `json:"name,omitempty"` - DisableCheck bool `json:"disableCheck,omitempty"` - Plugins []*NetConf `json:"plugins,omitempty"` -} - -type ResultFactoryFunc func([]byte) (Result, error) - -// Result is an interface that provides the result of plugin execution -type Result interface { - // The highest CNI specification result version the result supports - // without having to convert - Version() string - - // Returns the result converted into the requested CNI specification - // result version, or an error if conversion failed - GetAsVersion(version string) (Result, error) - - // Prints the result in JSON format to stdout - Print() error - - // Prints the result in JSON format to provided writer - PrintTo(writer io.Writer) error -} - -func PrintResult(result Result, version string) error { - newResult, err := result.GetAsVersion(version) - if err != nil { - return err - } - return newResult.Print() -} - -// DNS contains values interesting for DNS resolvers -type DNS struct { - Nameservers []string `json:"nameservers,omitempty"` - Domain string `json:"domain,omitempty"` - Search []string `json:"search,omitempty"` - Options []string `json:"options,omitempty"` -} - -type Route struct { - Dst net.IPNet - GW net.IP -} - -func (r *Route) String() string { - return fmt.Sprintf("%+v", *r) -} - -// Well known error codes -// see https://github.com/containernetworking/cni/blob/master/SPEC.md#well-known-error-codes -const ( - ErrUnknown uint = iota // 0 - ErrIncompatibleCNIVersion // 1 - ErrUnsupportedField // 2 - ErrUnknownContainer // 3 - ErrInvalidEnvironmentVariables // 4 - ErrIOFailure // 5 - ErrDecodingFailure // 6 - ErrInvalidNetworkConfig // 7 - ErrTryAgainLater uint = 11 - ErrInternal uint = 999 -) - -type Error 
struct { - Code uint `json:"code"` - Msg string `json:"msg"` - Details string `json:"details,omitempty"` -} - -func NewError(code uint, msg, details string) *Error { - return &Error{ - Code: code, - Msg: msg, - Details: details, - } -} - -func (e *Error) Error() string { - details := "" - if e.Details != "" { - details = fmt.Sprintf("; %v", e.Details) - } - return fmt.Sprintf("%v%v", e.Msg, details) -} - -func (e *Error) Print() error { - return prettyPrint(e) -} - -// net.IPNet is not JSON (un)marshallable so this duality is needed -// for our custom IPNet type - -// JSON (un)marshallable types -type route struct { - Dst IPNet `json:"dst"` - GW net.IP `json:"gw,omitempty"` -} - -func (r *Route) UnmarshalJSON(data []byte) error { - rt := route{} - if err := json.Unmarshal(data, &rt); err != nil { - return err - } - - r.Dst = net.IPNet(rt.Dst) - r.GW = rt.GW - return nil -} - -func (r Route) MarshalJSON() ([]byte, error) { - rt := route{ - Dst: IPNet(r.Dst), - GW: r.GW, - } - - return json.Marshal(rt) -} - -func prettyPrint(obj interface{}) error { - data, err := json.MarshalIndent(obj, "", " ") - if err != nil { - return err - } - _, err = os.Stdout.Write(data) - return err -} diff --git a/kuryr_cni/vendor/github.com/containernetworking/cni/pkg/utils/utils.go b/kuryr_cni/vendor/github.com/containernetworking/cni/pkg/utils/utils.go deleted file mode 100644 index b8ec38874..000000000 --- a/kuryr_cni/vendor/github.com/containernetworking/cni/pkg/utils/utils.go +++ /dev/null @@ -1,84 +0,0 @@ -// Copyright 2019 CNI authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
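[Editor's note: a small sketch of the error convention defined above; the message strings are illustrative. A failing plugin prints the well-known error structure to stdout and exits non-zero, and the runtime parses it back into types.Error.]

    package main

    import (
        "os"

        "github.com/containernetworking/cni/pkg/types"
    )

    func main() {
        e := types.NewError(types.ErrTryAgainLater, "kuryr-daemon unreachable", "connection refused")
        _ = e.Print() // emits {"code": 11, "msg": "kuryr-daemon unreachable", ...} on stdout
        os.Exit(1)
    }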
- -package utils - -import ( - "bytes" - "fmt" - "regexp" - "unicode" - - "github.com/containernetworking/cni/pkg/types" -) - -const ( - // cniValidNameChars is the regexp used to validate valid characters in - // containerID and networkName - cniValidNameChars = `[a-zA-Z0-9][a-zA-Z0-9_.\-]` - - // maxInterfaceNameLength is the length max of a valid interface name - maxInterfaceNameLength = 15 -) - -var cniReg = regexp.MustCompile(`^` + cniValidNameChars + `*$`) - -// ValidateContainerID will validate that the supplied containerID is not empty does not contain invalid characters -func ValidateContainerID(containerID string) *types.Error { - - if containerID == "" { - return types.NewError(types.ErrUnknownContainer, "missing containerID", "") - } - if !cniReg.MatchString(containerID) { - return types.NewError(types.ErrInvalidEnvironmentVariables, "invalid characters in containerID", containerID) - } - return nil -} - -// ValidateNetworkName will validate that the supplied networkName does not contain invalid characters -func ValidateNetworkName(networkName string) *types.Error { - - if networkName == "" { - return types.NewError(types.ErrInvalidNetworkConfig, "missing network name:", "") - } - if !cniReg.MatchString(networkName) { - return types.NewError(types.ErrInvalidNetworkConfig, "invalid characters found in network name", networkName) - } - return nil -} - -// ValidateInterfaceName will validate the interface name based on the three rules below -// 1. The name must not be empty -// 2. The name must be less than 16 characters -// 3. The name must not be "." or ".." -// 3. The name must not contain / or : or any whitespace characters -// ref to https://github.com/torvalds/linux/blob/master/net/core/dev.c#L1024 -func ValidateInterfaceName(ifName string) *types.Error { - if len(ifName) == 0 { - return types.NewError(types.ErrInvalidEnvironmentVariables, "interface name is empty", "") - } - if len(ifName) > maxInterfaceNameLength { - return types.NewError(types.ErrInvalidEnvironmentVariables, "interface name is too long", fmt.Sprintf("interface name should be less than %d characters", maxInterfaceNameLength+1)) - } - if ifName == "." || ifName == ".." { - return types.NewError(types.ErrInvalidEnvironmentVariables, "interface name is . or ..", "") - } - for _, r := range bytes.Runes([]byte(ifName)) { - if r == '/' || r == ':' || unicode.IsSpace(r) { - return types.NewError(types.ErrInvalidEnvironmentVariables, "interface name contains / or : or whitespace characters", "") - } - } - - return nil -} diff --git a/kuryr_cni/vendor/github.com/containernetworking/cni/pkg/version/conf.go b/kuryr_cni/vendor/github.com/containernetworking/cni/pkg/version/conf.go deleted file mode 100644 index 3cca58bbe..000000000 --- a/kuryr_cni/vendor/github.com/containernetworking/cni/pkg/version/conf.go +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright 2016 CNI authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
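[Editor's note: a quick illustration of the validators above; inputs are made up. Interface names are capped at 15 characters, matching the kernel limit referenced in the comment.]

    package main

    import (
        "fmt"

        "github.com/containernetworking/cni/pkg/utils"
    )

    func main() {
        fmt.Println(utils.ValidateInterfaceName("eth0"))               // <nil>
        fmt.Println(utils.ValidateInterfaceName("much-too-long-name")) // "interface name is too long"
        fmt.Println(utils.ValidateNetworkName("kuryr"))                // <nil>
    }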
- -package version - -import ( - "encoding/json" - "fmt" -) - -// ConfigDecoder can decode the CNI version available in network config data -type ConfigDecoder struct{} - -func (*ConfigDecoder) Decode(jsonBytes []byte) (string, error) { - var conf struct { - CNIVersion string `json:"cniVersion"` - } - err := json.Unmarshal(jsonBytes, &conf) - if err != nil { - return "", fmt.Errorf("decoding version from network config: %s", err) - } - if conf.CNIVersion == "" { - return "0.1.0", nil - } - return conf.CNIVersion, nil -} diff --git a/kuryr_cni/vendor/github.com/containernetworking/cni/pkg/version/plugin.go b/kuryr_cni/vendor/github.com/containernetworking/cni/pkg/version/plugin.go deleted file mode 100644 index 1df427243..000000000 --- a/kuryr_cni/vendor/github.com/containernetworking/cni/pkg/version/plugin.go +++ /dev/null @@ -1,144 +0,0 @@ -// Copyright 2016 CNI authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package version - -import ( - "encoding/json" - "fmt" - "io" - "strconv" - "strings" -) - -// PluginInfo reports information about CNI versioning -type PluginInfo interface { - // SupportedVersions returns one or more CNI spec versions that the plugin - // supports. If input is provided in one of these versions, then the plugin - // promises to use the same CNI version in its response - SupportedVersions() []string - - // Encode writes this CNI version information as JSON to the given Writer - Encode(io.Writer) error -} - -type pluginInfo struct { - CNIVersion_ string `json:"cniVersion"` - SupportedVersions_ []string `json:"supportedVersions,omitempty"` -} - -// pluginInfo implements the PluginInfo interface -var _ PluginInfo = &pluginInfo{} - -func (p *pluginInfo) Encode(w io.Writer) error { - return json.NewEncoder(w).Encode(p) -} - -func (p *pluginInfo) SupportedVersions() []string { - return p.SupportedVersions_ -} - -// PluginSupports returns a new PluginInfo that will report the given versions -// as supported -func PluginSupports(supportedVersions ...string) PluginInfo { - if len(supportedVersions) < 1 { - panic("programmer error: you must support at least one version") - } - return &pluginInfo{ - CNIVersion_: Current(), - SupportedVersions_: supportedVersions, - } -} - -// PluginDecoder can decode the response returned by a plugin's VERSION command -type PluginDecoder struct{} - -func (*PluginDecoder) Decode(jsonBytes []byte) (PluginInfo, error) { - var info pluginInfo - err := json.Unmarshal(jsonBytes, &info) - if err != nil { - return nil, fmt.Errorf("decoding version info: %s", err) - } - if info.CNIVersion_ == "" { - return nil, fmt.Errorf("decoding version info: missing field cniVersion") - } - if len(info.SupportedVersions_) == 0 { - if info.CNIVersion_ == "0.2.0" { - return PluginSupports("0.1.0", "0.2.0"), nil - } - return nil, fmt.Errorf("decoding version info: missing field supportedVersions") - } - return &info, nil -} - -// ParseVersion parses a version string like "3.0.1" or "0.4.5" into major, -// minor, and micro numbers or returns an error 
-func ParseVersion(version string) (int, int, int, error) { - var major, minor, micro int - if version == "" { - return -1, -1, -1, fmt.Errorf("invalid version %q: the version is empty", version) - } - - parts := strings.Split(version, ".") - if len(parts) >= 4 { - return -1, -1, -1, fmt.Errorf("invalid version %q: too many parts", version) - } - - major, err := strconv.Atoi(parts[0]) - if err != nil { - return -1, -1, -1, fmt.Errorf("failed to convert major version part %q: %v", parts[0], err) - } - - if len(parts) >= 2 { - minor, err = strconv.Atoi(parts[1]) - if err != nil { - return -1, -1, -1, fmt.Errorf("failed to convert minor version part %q: %v", parts[1], err) - } - } - - if len(parts) >= 3 { - micro, err = strconv.Atoi(parts[2]) - if err != nil { - return -1, -1, -1, fmt.Errorf("failed to convert micro version part %q: %v", parts[2], err) - } - } - - return major, minor, micro, nil -} - -// GreaterThanOrEqualTo takes two string versions, parses them into major/minor/micro -// numbers, and compares them to determine whether the first version is greater -// than or equal to the second -func GreaterThanOrEqualTo(version, otherVersion string) (bool, error) { - firstMajor, firstMinor, firstMicro, err := ParseVersion(version) - if err != nil { - return false, err - } - - secondMajor, secondMinor, secondMicro, err := ParseVersion(otherVersion) - if err != nil { - return false, err - } - - if firstMajor > secondMajor { - return true, nil - } else if firstMajor == secondMajor { - if firstMinor > secondMinor { - return true, nil - } else if firstMinor == secondMinor && firstMicro >= secondMicro { - return true, nil - } - } - return false, nil -} diff --git a/kuryr_cni/vendor/github.com/containernetworking/cni/pkg/version/reconcile.go b/kuryr_cni/vendor/github.com/containernetworking/cni/pkg/version/reconcile.go deleted file mode 100644 index 25c3810b2..000000000 --- a/kuryr_cni/vendor/github.com/containernetworking/cni/pkg/version/reconcile.go +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright 2016 CNI authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
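[Editor's note: a sketch of how the version helpers above behave; inputs are illustrative.]

    package main

    import (
        "fmt"

        "github.com/containernetworking/cni/pkg/version"
    )

    func main() {
        // A config without cniVersion is treated as 0.1.0 by ConfigDecoder.
        v, _ := (&version.ConfigDecoder{}).Decode([]byte(`{"name": "kuryr"}`))
        fmt.Println(v) // 0.1.0

        // Comparison is numeric per component, not lexical.
        ok, _ := version.GreaterThanOrEqualTo("0.4.0", "0.3.1")
        fmt.Println(ok) // true
    }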
- -package version - -import "fmt" - -type ErrorIncompatible struct { - Config string - Supported []string -} - -func (e *ErrorIncompatible) Details() string { - return fmt.Sprintf("config is %q, plugin supports %q", e.Config, e.Supported) -} - -func (e *ErrorIncompatible) Error() string { - return fmt.Sprintf("incompatible CNI versions: %s", e.Details()) -} - -type Reconciler struct{} - -func (r *Reconciler) Check(configVersion string, pluginInfo PluginInfo) *ErrorIncompatible { - return r.CheckRaw(configVersion, pluginInfo.SupportedVersions()) -} - -func (*Reconciler) CheckRaw(configVersion string, supportedVersions []string) *ErrorIncompatible { - for _, supportedVersion := range supportedVersions { - if configVersion == supportedVersion { - return nil - } - } - - return &ErrorIncompatible{ - Config: configVersion, - Supported: supportedVersions, - } -} diff --git a/kuryr_cni/vendor/github.com/containernetworking/cni/pkg/version/version.go b/kuryr_cni/vendor/github.com/containernetworking/cni/pkg/version/version.go deleted file mode 100644 index 8f3508e61..000000000 --- a/kuryr_cni/vendor/github.com/containernetworking/cni/pkg/version/version.go +++ /dev/null @@ -1,83 +0,0 @@ -// Copyright 2016 CNI authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package version - -import ( - "encoding/json" - "fmt" - - "github.com/containernetworking/cni/pkg/types" - "github.com/containernetworking/cni/pkg/types/020" - "github.com/containernetworking/cni/pkg/types/current" -) - -// Current reports the version of the CNI spec implemented by this library -func Current() string { - return "0.4.0" -} - -// Legacy PluginInfo describes a plugin that is backwards compatible with the -// CNI spec version 0.1.0. In particular, a runtime compiled against the 0.1.0 -// library ought to work correctly with a plugin that reports support for -// Legacy versions. -// -// Any future CNI spec versions which meet this definition should be added to -// this list. -var Legacy = PluginSupports("0.1.0", "0.2.0") -var All = PluginSupports("0.1.0", "0.2.0", "0.3.0", "0.3.1", "0.4.0") - -var resultFactories = []struct { - supportedVersions []string - newResult types.ResultFactoryFunc -}{ - {current.SupportedVersions, current.NewResult}, - {types020.SupportedVersions, types020.NewResult}, -} - -// Finds a Result object matching the requested version (if any) and asks -// that object to parse the plugin result, returning an error if parsing failed. -func NewResult(version string, resultBytes []byte) (types.Result, error) { - reconciler := &Reconciler{} - for _, resultFactory := range resultFactories { - err := reconciler.CheckRaw(version, resultFactory.supportedVersions) - if err == nil { - // Result supports this version - return resultFactory.newResult(resultBytes) - } - } - - return nil, fmt.Errorf("unsupported CNI result version %q", version) -} - -// ParsePrevResult parses a prevResult in a NetConf structure and sets -// the NetConf's PrevResult member to the parsed Result object. 
-func ParsePrevResult(conf *types.NetConf) error {
-    if conf.RawPrevResult == nil {
-        return nil
-    }
-
-    resultBytes, err := json.Marshal(conf.RawPrevResult)
-    if err != nil {
-        return fmt.Errorf("could not serialize prevResult: %v", err)
-    }
-
-    conf.RawPrevResult = nil
-    conf.PrevResult, err = NewResult(conf.CNIVersion, resultBytes)
-    if err != nil {
-        return fmt.Errorf("could not parse prevResult: %v", err)
-    }
-
-    return nil
-}
diff --git a/kuryr_cni/vendor/modules.txt b/kuryr_cni/vendor/modules.txt
deleted file mode 100644
index f1c720678..000000000
--- a/kuryr_cni/vendor/modules.txt
+++ /dev/null
@@ -1,12 +0,0 @@
-# github.com/containernetworking/cni v0.8.1
-## explicit
-github.com/containernetworking/cni/pkg/skel
-github.com/containernetworking/cni/pkg/types
-github.com/containernetworking/cni/pkg/types/020
-github.com/containernetworking/cni/pkg/types/current
-github.com/containernetworking/cni/pkg/utils
-github.com/containernetworking/cni/pkg/version
-# github.com/onsi/ginkgo v1.16.1
-## explicit
-# github.com/onsi/gomega v1.11.0
-## explicit
diff --git a/kuryr_kubernetes/__init__.py b/kuryr_kubernetes/__init__.py
deleted file mode 100644
index e69de29bb..000000000
diff --git a/kuryr_kubernetes/clients.py b/kuryr_kubernetes/clients.py
deleted file mode 100644
index 2a3635db0..000000000
--- a/kuryr_kubernetes/clients.py
+++ /dev/null
@@ -1,116 +0,0 @@
-# Copyright (c) 2016 Mirantis, Inc.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import ipaddress
-import os
-
-from keystoneauth1 import session as k_session
-from kuryr.lib import utils
-from openstack import connection
-from openstack import exceptions as os_exc
-
-from kuryr_kubernetes import config
-from kuryr_kubernetes import k8s_client
-
-_clients = {}
-_NEUTRON_CLIENT = 'neutron-client'
-_KUBERNETES_CLIENT = 'kubernetes-client'
-_OPENSTACKSDK = 'openstacksdk'
-
-
-def get_network_client():
-    return _clients[_OPENSTACKSDK].network
-
-
-def get_loadbalancer_client():
-    return _clients[_OPENSTACKSDK].load_balancer
-
-
-def get_kubernetes_client() -> k8s_client.K8sClient:
-    return _clients[_KUBERNETES_CLIENT]
-
-
-def get_compute_client():
-    return _clients[_OPENSTACKSDK].compute
-
-
-def setup_clients():
-    setup_kubernetes_client()
-    setup_openstacksdk()
-
-
-def setup_kubernetes_client():
-    if config.CONF.kubernetes.api_root:
-        api_root = config.CONF.kubernetes.api_root
-    else:
-        # NOTE(dulek): This is for containerized deployments, i.e. running in
-        # K8s Pods.
-        host = os.environ['KUBERNETES_SERVICE_HOST']
-        port = os.environ['KUBERNETES_SERVICE_PORT_HTTPS']
-        try:
-            addr = ipaddress.ip_address(host)
-            if addr.version == 6:
-                host = '[%s]' % host
-        except ValueError:
-            # It's not an IP address but a hostname, it's fine, move along.
- pass - api_root = "https://%s:%s" % (host, port) - _clients[_KUBERNETES_CLIENT] = k8s_client.K8sClient(api_root) - - -def get_neutron_error_type(ex): - try: - response = ex.response.json() - except (ValueError, AttributeError): - return None - - if response: - try: - return response['NeutronError']['type'] - except KeyError: - pass - return None - - -def handle_neutron_errors(method, *args, **kwargs): - """Handle errors on openstacksdk router methods""" - result = method(*args, **kwargs) - if 'NeutronError' in result: - error = result['NeutronError'] - if error['type'] in ('RouterNotFound', - 'RouterInterfaceNotFoundForSubnet', - 'SubnetNotFound'): - raise os_exc.NotFoundException(message=error['message']) - else: - raise os_exc.SDKException(error['type'] + ": " + error['message']) - - return result - - -def setup_openstacksdk(): - auth_plugin = utils.get_auth_plugin('neutron') - session = utils.get_keystone_session('neutron', auth_plugin) - - # NOTE(mdulko): To get rid of warnings about connection pool being full - # we need to "tweak" the keystoneauth's adapters increasing - # the maximum pool size. - for scheme in list(session.session.adapters): - session.session.mount(scheme, k_session.TCPKeepAliveAdapter( - pool_maxsize=1000)) - - conn = connection.Connection( - session=session, - region_name=getattr(config.CONF.neutron, 'region_name', None)) - _clients[_OPENSTACKSDK] = conn diff --git a/kuryr_kubernetes/cmd/__init__.py b/kuryr_kubernetes/cmd/__init__.py deleted file mode 100644 index 172274c13..000000000 --- a/kuryr_kubernetes/cmd/__init__.py +++ /dev/null @@ -1,28 +0,0 @@ -# Copyright 2017 NEC Corporation. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import logging as sys_logging - -from oslo_reports import guru_meditation_report as gmr - -from kuryr_kubernetes import version - -# During the call to gmr.TextGuruMeditation.setup_autorun(), Guru Meditation -# Report tries to start logging. Set a handler here to accommodate this. -logger = sys_logging.getLogger(None) -if not logger.handlers: - logger.addHandler(sys_logging.StreamHandler()) - -_version_string = version.version_info.release_string() -gmr.TextGuruMeditation.setup_autorun(version=_version_string) diff --git a/kuryr_kubernetes/cmd/cni.py b/kuryr_kubernetes/cmd/cni.py deleted file mode 100644 index f19f30f6f..000000000 --- a/kuryr_kubernetes/cmd/cni.py +++ /dev/null @@ -1,22 +0,0 @@ -# Copyright (c) 2016 Mirantis, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
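setup_kubernetes_client() above falls back to the in-cluster KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT_HTTPS environment variables when kubernetes.api_root is not configured, bracketing IPv6 literals so that the resulting URL stays valid. A standalone sketch of that address handling; the environment values below are assumptions for illustration:

```python
# Standalone sketch of the in-cluster API root construction used above.
# The environment values below are assumptions for illustration.
import ipaddress
import os

os.environ.setdefault('KUBERNETES_SERVICE_HOST', 'fd00::1')
os.environ.setdefault('KUBERNETES_SERVICE_PORT_HTTPS', '443')

host = os.environ['KUBERNETES_SERVICE_HOST']
port = os.environ['KUBERNETES_SERVICE_PORT_HTTPS']
try:
    if ipaddress.ip_address(host).version == 6:
        host = '[%s]' % host  # IPv6 literals must be bracketed in URLs
except ValueError:
    pass  # not an IP address but a hostname, which is fine as-is

print('https://%s:%s' % (host, port))  # -> https://[fd00::1]:443
```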
- -from kuryr_kubernetes.cni import main - - -run = main.run - -if __name__ == '__main__': - run() diff --git a/kuryr_kubernetes/cmd/daemon.py b/kuryr_kubernetes/cmd/daemon.py deleted file mode 100644 index 0f572ad6d..000000000 --- a/kuryr_kubernetes/cmd/daemon.py +++ /dev/null @@ -1,22 +0,0 @@ -# Copyright 2017 NEC Technologies India Pvt. Ltd. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from kuryr_kubernetes.cni.daemon import service - - -start = service.start - -if __name__ == '__main__': - start() diff --git a/kuryr_kubernetes/cmd/eventlet/__init__.py b/kuryr_kubernetes/cmd/eventlet/__init__.py deleted file mode 100644 index db423fc99..000000000 --- a/kuryr_kubernetes/cmd/eventlet/__init__.py +++ /dev/null @@ -1,18 +0,0 @@ -# Copyright (c) 2016 Mirantis, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import eventlet - -eventlet.monkey_patch() diff --git a/kuryr_kubernetes/cmd/eventlet/controller.py b/kuryr_kubernetes/cmd/eventlet/controller.py deleted file mode 100644 index ef560d4ea..000000000 --- a/kuryr_kubernetes/cmd/eventlet/controller.py +++ /dev/null @@ -1,22 +0,0 @@ -# Copyright (c) 2016 Mirantis, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from kuryr_kubernetes.controller import service - - -start = service.start - -if __name__ == '__main__': - start() diff --git a/kuryr_kubernetes/cmd/sanity/__init__.py b/kuryr_kubernetes/cmd/sanity/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/kuryr_kubernetes/cmd/sanity/checks.py b/kuryr_kubernetes/cmd/sanity/checks.py deleted file mode 100644 index 9c3c9b009..000000000 --- a/kuryr_kubernetes/cmd/sanity/checks.py +++ /dev/null @@ -1,85 +0,0 @@ -# Copyright (c) 2021 OpenStack Foundation. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -from oslo_config import cfg -from oslo_log import log as logging - -from kuryr_kubernetes import config - -CONF = config.CONF -LOG = logging.getLogger(__name__) - - -def _logger(): - if cfg.CONF.sanity_check_error: - return LOG.error - else: - return LOG.warning - - -def ports_pool_min_max(): - try: - if not cfg.CONF.vif_pool.ports_pool_max: - return True - pool_max = cfg.CONF.vif_pool.ports_pool_max - pool_min = cfg.CONF.vif_pool.ports_pool_min - if pool_max < pool_min: - _logger()(f'The current configuration of ports_pool_min ' - f'"{pool_min}" and ports_pool_max "{pool_max}" ' - f'may cause infinite loop of creating ' - f'and deleting ports.') - return False - except (OSError, RuntimeError, IndexError, ValueError) as e: - LOG.debug("Exception while checking ports_pool_max. " - "Exception: %s", e) - return False - return True - - -def ports_pool_min_batch(): - try: - pool_min = cfg.CONF.vif_pool.ports_pool_min - pool_batch = cfg.CONF.vif_pool.ports_pool_batch - if pool_min > pool_batch: - _logger()(f'The current configuration of ports_pool_min ' - f'"{pool_min}" and ports_pool_batch "{pool_batch}" ' - f'may cause kuryr to send multiple unnecessary ' - f'bulk ports creation requests. ') - return False - except (OSError, RuntimeError, IndexError, ValueError) as e: - LOG.debug("Exception while checking ports_pool_batch. " - "Exception: %s", e) - return False - return True - - -def ports_pool_max_batch(): - try: - if not cfg.CONF.vif_pool.ports_pool_max: - return True - pool_max = cfg.CONF.vif_pool.ports_pool_max - pool_batch = cfg.CONF.vif_pool.ports_pool_batch - if pool_max < pool_batch: - _logger()(f'The current configuration of ports_pool_max ' - f'"{pool_max}" and ports_pool_batch "{pool_batch}" ' - f'may cause kuryr to create the ' - f'ports and then delete them immediately.') - return False - except (OSError, RuntimeError, IndexError, ValueError) as e: - LOG.debug("Exception while checking ports_pool_batch. " - "Exception: %s", e) - return False - return True diff --git a/kuryr_kubernetes/cmd/sanity_checks.py b/kuryr_kubernetes/cmd/sanity_checks.py deleted file mode 100644 index 237b9b02d..000000000 --- a/kuryr_kubernetes/cmd/sanity_checks.py +++ /dev/null @@ -1,105 +0,0 @@ -# Copyright (c) 2021 OpenStack Foundation. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -CLI interface for kuryr sanity commands. 
-"""
-
-import sys
-
-from oslo_config import cfg
-from oslo_log import log as logging
-
-from kuryr_kubernetes.cmd.sanity import checks
-from kuryr_kubernetes import config
-from kuryr_kubernetes.controller.drivers import vif_pool  # noqa
-
-LOG = logging.getLogger(__name__)
-
-
-class BoolOptCallback(cfg.BoolOpt):
-    def __init__(self, name, callback, **kwargs):
-        if 'default' not in kwargs:
-            kwargs['default'] = False
-        self.callback = callback
-        super(BoolOptCallback, self).__init__(name, **kwargs)
-
-
-def check_ports_pool_min_max():
-    result = checks.ports_pool_min_max()
-    if not result:
-        LOG.warning("The ports_pool_max is enabled, "
-                    "the ports_pool_min should be smaller than "
-                    "ports_pool_max. Either disable ports_pool_max "
-                    "by setting it to 0 or increase its value.")
-    return result
-
-
-def check_ports_pool_min_batch():
-    result = checks.ports_pool_min_batch()
-    if not result:
-        LOG.warning("The ports_pool_min should be lower than "
-                    "ports_pool_batch. Please decrease its value.")
-    return result
-
-
-def check_ports_pool_max_batch():
-    result = checks.ports_pool_max_batch()
-    if not result:
-        LOG.warning("The ports_pool_max is enabled, "
-                    "the ports_pool_max should be higher than "
-                    "ports_pool_batch. Either disable ports_pool_max "
-                    "by setting it to 0 or decrease its value.")
-    return result
-
-
-# Define CLI opts to test specific features, with a callback for the test
-OPTS = [
-    BoolOptCallback('vif_pool_min_max', check_ports_pool_min_max,
-                    default=False,
-                    help='Check configuration sanity of ports_pool_min and '
-                         'ports_pool_max.'),
-    BoolOptCallback('vif_pool_min_batch', check_ports_pool_min_batch,
-                    default=False,
-                    help='Check configuration sanity of ports_pool_min and '
-                         'ports_pool_batch.'),
-    BoolOptCallback('vif_pool_max_batch', check_ports_pool_max_batch,
-                    default=False,
-                    help='Check configuration sanity of ports_pool_max and '
-                         'ports_pool_batch.'),
-]
-
-CLI_OPTS = [
-    cfg.BoolOpt('sanity_check_error', default=False,
-                help='If this flag is configured, the sanity command fails '
-                     'if any of the sanity tests fails.'),
-]
-
-
-def all_tests_passed():
-    results = [opt.callback() for opt in OPTS if cfg.CONF.get(opt.name)]
-    return all(results)
-
-
-def main():
-    cfg.CONF.register_cli_opts(OPTS)
-    cfg.CONF.register_cli_opts(CLI_OPTS)
-    config.init(sys.argv[1:], default_config_files=['/etc/kuryr/kuryr.conf'])
-    config.setup_logging()
-    return 0 if all_tests_passed() else 1
-
-
-if __name__ == '__main__':
-    main()
diff --git a/kuryr_kubernetes/cmd/status.py b/kuryr_kubernetes/cmd/status.py
deleted file mode 100644
index 6b2f0636b..000000000
--- a/kuryr_kubernetes/cmd/status.py
+++ /dev/null
@@ -1,204 +0,0 @@
-# Copyright 2018 Red Hat
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""
-CLI interface for kuryr status commands.
-""" - -import sys -import textwrap -import traceback - -import prettytable - -import os_vif -from os_vif.objects import base -from oslo_config import cfg -from oslo_serialization import jsonutils - -from kuryr_kubernetes import clients -from kuryr_kubernetes import config -from kuryr_kubernetes import constants -from kuryr_kubernetes import objects -from kuryr_kubernetes import version - -CONF = config.CONF - -UPGRADE_CHECK_SUCCESS = 0 -UPGRADE_CHECK_WARNING = 1 -UPGRADE_CHECK_FAILURE = 2 - -UPGRADE_CHECK_MSG_MAP = { - UPGRADE_CHECK_SUCCESS: 'Success', - UPGRADE_CHECK_WARNING: 'Warning', - UPGRADE_CHECK_FAILURE: 'Failure', -} - - -class UpgradeCheckResult(object): - """Class used for 'kuryr-k8s-status upgrade check' results. - - The 'code' attribute is an UpgradeCheckCode enum. - The 'details' attribute is a message generally only used for - checks that result in a warning or failure code. The details should provide - information on what issue was discovered along with any remediation. - """ - - def __init__(self, code, details=None): - super(UpgradeCheckResult, self).__init__() - self.code = code - self.details = details - - def get_details(self): - if self.details is not None: - # wrap the text on the details to 60 characters - return '\n'.join(textwrap.wrap(self.details, 60, - subsequent_indent=' ' * 9)) - - -class UpgradeCommands(object): - def __init__(self): - self.check_methods = { - 'Pod annotations': self._check_annotations, # Stein - } - clients.setup_kubernetes_client() - self.k8s = clients.get_kubernetes_client() - - def _get_annotation(self, pod): - annotations = pod['metadata']['annotations'] - if constants.K8S_ANNOTATION_VIF not in annotations: - # NOTE(dulek): We ignore pods without annotation, those - # probably are hostNetworking. - return None - k_ann = annotations[constants.K8S_ANNOTATION_VIF] - k_ann = jsonutils.loads(k_ann) - obj = base.VersionedObject.obj_from_primitive(k_ann) - return obj - - def _check_annotations(self): - old_count = 0 - malformed_count = 0 - pods = self.k8s.get('/api/v1/pods')['items'] - for pod in pods: - try: - obj = self._get_annotation(pod) - if not obj: - # NOTE(dulek): We ignore pods without annotation, those - # probably are hostNetworking. - continue - except Exception: - # TODO(dulek): We might want to print this exception. - malformed_count += 1 - continue - - if obj.obj_name() != objects.vif.PodState.obj_name(): - old_count += 1 - - if malformed_count == 0 and old_count == 0: - return UpgradeCheckResult(0, 'All annotations are updated.') - elif malformed_count > 0 and old_count == 0: - msg = ('You have %d malformed Kuryr pod annotations in your ' - 'deployment. This is not blocking the upgrade, but ' - 'consider investigating it.' % malformed_count) - return UpgradeCheckResult(1, msg) - elif old_count > 0: - msg = ('You have %d Kuryr pod annotations in old format. You need ' - 'to run `kuryr-k8s-status upgrade update-annotations` ' - 'before proceeding with the upgrade.' 
% old_count) - return UpgradeCheckResult(2, msg) - - def upgrade_check(self): - check_results = [] - - t = prettytable.PrettyTable(['Upgrade Check Results'], - hrules=prettytable.ALL) - t.align = 'l' - - for name, method in self.check_methods.items(): - result = method() - check_results.append(result) - cell = ( - 'Check: %(name)s\n' - 'Result: %(result)s\n' - 'Details: %(details)s' % - { - 'name': name, - 'result': UPGRADE_CHECK_MSG_MAP[result.code], - 'details': result.get_details(), - } - ) - t.add_row([cell]) - print(t) - - return max(res.code for res in check_results) - - def update_annotations(self): - pass - - def downgrade_annotations(self): - pass - - -def print_version(): - print(version.version_info.version_string()) - - -def add_parsers(subparsers): - upgrade_cmds = UpgradeCommands() - - upgrade = subparsers.add_parser( - 'upgrade', help='Actions related to upgrades between releases.') - sub = upgrade.add_subparsers() - - check = sub.add_parser('check', help='Check if upgrading is possible.') - check.set_defaults(action_fn=upgrade_cmds.upgrade_check) - - ann_update = sub.add_parser( - 'update-annotations', - help='Update annotations in K8s API to newest version.') - ann_update.set_defaults(action_fn=upgrade_cmds.update_annotations) - - ann_downgrade = sub.add_parser( - 'downgrade-annotations', - help='Downgrade annotations in K8s API to previous version (useful ' - 'when reverting a failed upgrade).') - ann_downgrade.set_defaults(action_fn=upgrade_cmds.downgrade_annotations) - - version_action = subparsers.add_parser('version') - version_action.set_defaults(action_fn=print_version) - - -def main(): - opt = cfg.SubCommandOpt( - 'category', title='command', - description='kuryr-k8s-status command or category to execute', - handler=add_parsers) - - conf = cfg.ConfigOpts() - conf.register_cli_opt(opt) - conf(sys.argv[1:]) - - os_vif.initialize() - objects.register_locally_defined_vifs() - - try: - return conf.category.action_fn() - except Exception: - print('Error:\n%s' % traceback.format_exc()) - # This is 255 so it's not confused with the upgrade check exit codes. - return 255 - - -if __name__ == '__main__': - main() diff --git a/kuryr_kubernetes/cni/__init__.py b/kuryr_kubernetes/cni/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/kuryr_kubernetes/cni/api.py b/kuryr_kubernetes/cni/api.py deleted file mode 100644 index b5d696522..000000000 --- a/kuryr_kubernetes/cni/api.py +++ /dev/null @@ -1,169 +0,0 @@ -# Copyright (c) 2016 Mirantis, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
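The kuryr-k8s-status tool above reports each check in a table and exits with the worst result code (0 Success, 1 Warning, 2 Failure, and 255 when the tool itself crashes), so deployment tooling can gate an upgrade on the exit status. A sketch of how a wrapper might interpret those codes; invoking the tool through subprocess is an assumption for illustration, not something this repository prescribes:

```python
# Sketch: gating an upgrade on the `kuryr-k8s-status upgrade check` exit
# code. Invoking the tool through subprocess is an assumption for
# illustration; the code meanings follow the constants defined above.
import subprocess

rc = subprocess.run(['kuryr-k8s-status', 'upgrade', 'check']).returncode
if rc == 0:
    print('All checks passed, safe to upgrade.')
elif rc == 1:
    print('Warnings reported, review the results table before upgrading.')
elif rc == 2:
    print('Failures reported, e.g. run `kuryr-k8s-status upgrade '
          'update-annotations` before upgrading.')
else:
    print('The status tool itself failed (rc=%d).' % rc)
```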
- - -import abc -from http import client as httplib -import traceback - -from kuryr.lib._i18n import _ -from os_vif.objects import base -from oslo_log import log as logging -from oslo_serialization import jsonutils -import requests - -from kuryr_kubernetes import config -from kuryr_kubernetes import constants as k_const -from kuryr_kubernetes import exceptions as k_exc - -LOG = logging.getLogger(__name__) - - -class CNIRunner(object, metaclass=abc.ABCMeta): - # TODO(ivc): extend SUPPORTED_VERSIONS and format output based on - # requested params.CNI_VERSION and/or params.config.cniVersion - VERSION = '0.3.1' - SUPPORTED_VERSIONS = ['0.3.1'] - - @abc.abstractmethod - def _add(self, params): - raise NotImplementedError() - - @abc.abstractmethod - def _delete(self, params): - raise NotImplementedError() - - def _write_dict(self, fout, dct): - output = {'cniVersion': self.VERSION} - output.update(dct) - LOG.debug("CNI output: %s", output) - jsonutils.dump(output, fout, sort_keys=True) - - def _write_exception(self, fout, msg): - self._write_dict(fout, { - 'msg': msg, - 'code': k_const.CNI_EXCEPTION_CODE, - 'details': traceback.format_exc(), - }) - - def _write_version(self, fout): - self._write_dict(fout, {'supportedVersions': self.SUPPORTED_VERSIONS}) - - @abc.abstractmethod - def prepare_env(self, env, stdin): - raise NotImplementedError() - - @abc.abstractmethod - def get_container_id(self, params): - raise NotImplementedError() - - def run(self, env, fin, fout): - try: - # Prepare params according to calling Object - params = self.prepare_env(env, fin) - if env.get('CNI_COMMAND') == 'ADD': - vif = self._add(params) - self._write_dict(fout, vif) - elif env.get('CNI_COMMAND') == 'DEL': - self._delete(params) - elif env.get('CNI_COMMAND') == 'VERSION': - self._write_version(fout) - else: - raise k_exc.CNIError(_("unknown CNI_COMMAND: %s") - % env['CNI_COMMAND']) - return 0 - except Exception as ex: - # LOG.exception - self._write_exception(fout, str(ex)) - return 1 - - def _vif_data(self, vif, params): - result = {} - nameservers = [] - - cni_ip_list = result.setdefault("ips", []) - cni_routes_list = result.setdefault("routes", []) - result["interfaces"] = [ - { - "name": params["CNI_IFNAME"], - "mac": vif.address, - "sandbox": self.get_container_id(params)}] - for subnet in vif.network.subnets.objects: - cni_ip = {} - nameservers.extend(subnet.dns) - - ip = subnet.ips.objects[0].address - - cni_ip['version'] = str(ip.version) - cni_ip['address'] = "%s/%s" % (ip, subnet.cidr.prefixlen) - cni_ip['interface'] = len(result["interfaces"]) - 1 - - if hasattr(subnet, 'gateway'): - cni_ip['gateway'] = str(subnet.gateway) - - if subnet.routes.objects: - routes = [ - {'dst': str(route.cidr), 'gw': str(route.gateway)} - for route in subnet.routes.objects] - cni_routes_list.extend(routes) - cni_ip_list.append(cni_ip) - - if nameservers: - result['dns'] = {'nameservers': nameservers} - return result - - -class CNIDaemonizedRunner(CNIRunner): - - def _add(self, params): - resp = self._make_request('addNetwork', params, httplib.ACCEPTED) - vif = base.VersionedObject.obj_from_primitive(resp.json()) - return self._vif_data(vif, params) - - def _delete(self, params): - self._make_request('delNetwork', params, httplib.NO_CONTENT) - - def prepare_env(self, env, stdin): - cni_envs = {} - cni_envs.update( - {k: v for k, v in env.items() if k.startswith('CNI_')}) - cni_envs['config_kuryr'] = dict(stdin) - return cni_envs - - def get_container_id(self, params): - return params["CNI_CONTAINERID"] - - def 
_make_request(self, path, cni_envs, expected_status=None): - method = 'POST' - - address = config.CONF.cni_daemon.bind_address - url = 'http://%s/%s' % (address, path) - try: - LOG.debug('Making request to CNI Daemon. %(method)s %(path)s\n' - '%(body)s', - {'method': method, 'path': url, 'body': cni_envs}) - resp = requests.post(url, json=cni_envs, - headers={'Connection': 'close'}) - except requests.ConnectionError: - LOG.exception('Looks like %s cannot be reached. Is kuryr-daemon ' - 'running?', address) - raise - LOG.debug('CNI Daemon returned "%(status)d %(reason)s".', - {'status': resp.status_code, 'reason': resp.reason}) - if expected_status and resp.status_code != expected_status: - LOG.error('CNI daemon returned error "%(status)d %(reason)s".', - {'status': resp.status_code, 'reason': resp.reason}) - raise k_exc.CNIError('Got invalid status code from CNI daemon.') - return resp diff --git a/kuryr_kubernetes/cni/binding/__init__.py b/kuryr_kubernetes/cni/binding/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/kuryr_kubernetes/cni/binding/base.py b/kuryr_kubernetes/cni/binding/base.py deleted file mode 100644 index b46b5e55f..000000000 --- a/kuryr_kubernetes/cni/binding/base.py +++ /dev/null @@ -1,166 +0,0 @@ -# Copyright (c) 2016 Mirantis, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import abc -import errno - -import os_vif -from os_vif.objects import vif as osv_objects -from oslo_log import log as logging -import pyroute2 -from pyroute2 import netns as pyroute_netns -from stevedore import driver as stv_driver - -from kuryr_kubernetes.cni import utils as cni_utils -from kuryr_kubernetes import utils - -_BINDING_NAMESPACE = 'kuryr_kubernetes.cni.binding' -LOG = logging.getLogger(__name__) - - -class BaseBindingDriver(object, metaclass=abc.ABCMeta): - """Interface to attach ports to pods.""" - - def _remove_ifaces(self, ipdb, ifnames, netns='host'): - """Check if any of `ifnames` exists and remove it. - - :param ipdb: ipdb of the network namespace to check - :param ifnames: iterable of interface names to remove - :param netns: network namespace name (used for logging) - """ - for ifname in ifnames: - if ifname in ipdb.interfaces: - LOG.warning('Found hanging interface %(ifname)s inside ' - '%(netns)s netns. Most likely it is a leftover ' - 'from a kuryr-daemon restart. 
Trying to delete '
-                        'it.', {'ifname': ifname, 'netns': netns})
-                with ipdb.interfaces[ifname] as iface:
-                    iface.remove()
-
-    @abc.abstractmethod
-    def connect(self, vif, ifname, netns, container_id):
-        raise NotImplementedError()
-
-    @abc.abstractmethod
-    def disconnect(self, vif, ifname, netns, container_id):
-        raise NotImplementedError()
-
-
-def _get_binding_driver(vif):
-    mgr = stv_driver.DriverManager(namespace=_BINDING_NAMESPACE,
-                                   name=type(vif).__name__,
-                                   invoke_on_load=True)
-    return mgr.driver
-
-
-def get_ipdb(netns=None):
-    if netns:
-        netns = utils.convert_netns(netns)
-        ipdb = pyroute2.IPDB(nl=pyroute2.NetNS(netns))
-    else:
-        ipdb = pyroute2.IPDB()
-    return ipdb
-
-
-def _enable_ipv6(netns):
-    # Docker disables IPv6 for --net=none containers
-    # TODO(apuimedo) remove when it is no longer the case
-    try:
-        netns = utils.convert_netns(netns)
-        path = utils.convert_netns('/proc/self/ns/net')
-        self_ns_fd = open(path)
-        pyroute_netns.setns(netns)
-        path = utils.convert_netns('/proc/sys/net/ipv6/conf/all/disable_ipv6')
-        with open(path, 'w') as disable_ipv6:
-            disable_ipv6.write('0')
-    except Exception:
-        raise
-    finally:
-        pyroute_netns.setns(self_ns_fd)
-
-
-def _configure_l3(vif, ifname, netns, is_default_gateway):
-    with get_ipdb(netns) as ipdb:
-        with ipdb.interfaces[ifname] as iface:
-            for subnet in vif.network.subnets.objects:
-                if subnet.cidr.version == 6:
-                    _enable_ipv6(netns)
-                for fip in subnet.ips.objects:
-                    iface.add_ip('%s/%s' % (fip.address,
-                                            subnet.cidr.prefixlen))
-
-        routes = ipdb.routes
-        for subnet in vif.network.subnets.objects:
-            for route in subnet.routes.objects:
-                routes.add(gateway=str(route.gateway),
-                           dst=str(route.cidr)).commit()
-            if is_default_gateway and hasattr(subnet, 'gateway'):
-                try:
-                    routes.add(gateway=str(subnet.gateway),
-                               dst='default').commit()
-                except pyroute2.NetlinkError as ex:
-                    if ex.code != errno.EEXIST:
-                        raise
-                    LOG.debug("Default route already exists in pod for vif=%s."
-                              " Did not overwrite with requested gateway=%s",
-                              vif, subnet.gateway)
-
-
-def _need_configure_l3(vif):
-    if isinstance(vif, osv_objects.VIFVHostUser):
-        return False
-    if not hasattr(vif, 'physnet'):
-        # NOTE(danil): non-sriov vif. Figure out if it is nested-dpdk
-        if vif.obj_attr_is_set('port_profile') and hasattr(vif.port_profile,
-                                                           'l3_setup'):
-            return vif.port_profile.l3_setup
-        # NOTE(danil): by default kuryr-kubernetes has to set up l3
-        return True
-    return True
-
-
-@cni_utils.log_ipdb
-def connect(vif, instance_info, ifname, netns=None, report_health=None,
-            is_default_gateway=True, container_id=None):
-    driver = _get_binding_driver(vif)
-    if report_health:
-        report_health(driver.is_alive())
-    os_vif.plug(vif, instance_info)
-    driver.connect(vif, ifname, netns, container_id)
-    if _need_configure_l3(vif):
-        _configure_l3(vif, ifname, netns, is_default_gateway)
-
-
-@cni_utils.log_ipdb
-def disconnect(vif, instance_info, ifname, netns=None, report_health=None,
               container_id=None, **kwargs):
-    driver = _get_binding_driver(vif)
-    if report_health:
-        report_health(driver.is_alive())
-    driver.disconnect(vif, ifname, netns, container_id)
-    os_vif.unplug(vif, instance_info)
-
-
-@cni_utils.log_ipdb
-def cleanup(ifname, netns):
-    try:
-        with get_ipdb(netns) as c_ipdb:
-            if ifname in c_ipdb.interfaces:
-                with c_ipdb.interfaces[ifname] as iface:
-                    iface.remove()
-    except Exception:
-        # Just ignore cleanup errors, there's not much we can do anyway.
-        LOG.warning('Error occurred when attempting to clean up netns %s. 
' - 'Ignoring.', netns) diff --git a/kuryr_kubernetes/cni/binding/bridge.py b/kuryr_kubernetes/cni/binding/bridge.py deleted file mode 100644 index 06a36f238..000000000 --- a/kuryr_kubernetes/cni/binding/bridge.py +++ /dev/null @@ -1,115 +0,0 @@ -# Copyright (c) 2016 Mirantis, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -import os -from oslo_config import cfg -from oslo_log import log - -from kuryr_kubernetes.cni.binding import base as b_base -from kuryr_kubernetes.handlers import health -from kuryr_kubernetes import linux_net_utils as net_utils - -LOG = log.getLogger(__name__) -CONF = cfg.CONF - - -class BaseBridgeDriver(health.HealthHandler, b_base.BaseBindingDriver): - - def __init__(self): - super(BaseBridgeDriver, self).__init__() - - def connect(self, vif, ifname, netns, container_id): - host_ifname = vif.vif_name - - # NOTE(dulek): Check if we already run connect for this iface and if - # there's a leftover host-side vif. If so we need to - # remove it, its peer should get deleted automatically by - # the kernel. - with b_base.get_ipdb() as h_ipdb: - self._remove_ifaces(h_ipdb, (host_ifname,)) - - interface_mtu = vif.network.mtu - mtu_cfg = CONF.neutron_defaults.network_device_mtu - if mtu_cfg and mtu_cfg < interface_mtu: - interface_mtu = CONF.neutron_defaults.network_device_mtu - - with b_base.get_ipdb(netns) as c_ipdb: - with c_ipdb.create(ifname=ifname, peer=host_ifname, - kind='veth') as c_iface: - c_iface.mtu = interface_mtu - c_iface.address = str(vif.address) - c_iface.up() - - if netns: - with c_ipdb.interfaces[host_ifname] as h_iface: - h_iface.net_ns_pid = os.getpid() - - with b_base.get_ipdb() as h_ipdb: - with h_ipdb.interfaces[host_ifname] as h_iface: - h_iface.mtu = interface_mtu - h_iface.up() - - def disconnect(self, vif, ifname, netns, container_id): - pass - - -class BridgeDriver(BaseBridgeDriver): - def __init__(self): - super(BridgeDriver, self).__init__() - - def connect(self, vif, ifname, netns, container_id): - super(BridgeDriver, self).connect(vif, ifname, netns, container_id) - host_ifname = vif.vif_name - bridge_name = vif.bridge_name - - with b_base.get_ipdb() as h_ipdb: - with h_ipdb.interfaces[bridge_name] as h_br: - h_br.add_port(host_ifname) - - def disconnect(self, vif, ifname, netns, container_id): - # NOTE(ivc): veth pair is destroyed automatically along with the - # container namespace - pass - - -class VIFOpenVSwitchDriver(BaseBridgeDriver): - - def __init__(self): - super(VIFOpenVSwitchDriver, self).__init__() - - def connect(self, vif, ifname, netns, container_id): - super(VIFOpenVSwitchDriver, self).connect(vif, ifname, netns, - container_id) - # FIXME(irenab) use pod_id (neutron port device_id) - instance_id = 'kuryr' - net_utils.create_ovs_vif_port(vif.bridge_name, vif.vif_name, - vif.port_profile.interface_id, - vif.address, instance_id) - - def disconnect(self, vif, ifname, netns, container_id): - super(VIFOpenVSwitchDriver, self).disconnect(vif, ifname, netns, - container_id) - 
net_utils.delete_ovs_vif_port(vif.bridge_name, vif.vif_name)
-
-    def is_alive(self):
-        bridge_name = CONF.neutron_defaults.ovs_bridge
-        try:
-            with b_base.get_ipdb() as h_ipdb:
-                h_ipdb.interfaces[bridge_name]
-            return True
-        except Exception:
-            LOG.error("The configured ovs_bridge=%s integration interface "
-                      "does not exist. Reporting that driver is not healthy.",
-                      bridge_name)
-            return False
diff --git a/kuryr_kubernetes/cni/binding/dpdk.py b/kuryr_kubernetes/cni/binding/dpdk.py
deleted file mode 100644
index a95b0459d..000000000
--- a/kuryr_kubernetes/cni/binding/dpdk.py
+++ /dev/null
@@ -1,196 +0,0 @@
-# Copyright (C) 2020 Intel Corporation
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import os
-
-from os_vif import objects
-from oslo_config import cfg
-from oslo_log import log as logging
-from oslo_serialization import jsonutils
-
-from kuryr_kubernetes import clients
-from kuryr_kubernetes.cni.binding import base as b_base
-from kuryr_kubernetes import constants
-from kuryr_kubernetes.handlers import health
-
-from kuryr.lib._i18n import _
-
-
-LOG = logging.getLogger(__name__)
-CONF = cfg.CONF
-
-NET_DEV_PATH = "/sys/class/net/{}/device"
-VIRTIO_DEVS_PATH = "/sys/bus/virtio/devices"
-PCI_PATH = "/sys/bus/pci/devices"
-PCI_DRVS_PATH = "/sys/bus/pci/drivers"
-
-
-# TODO(garyloug) These should probably eventually move to config.py
-# TODO(garyloug) Would be nice if dpdk_driver is set as CNI arg
-nested_dpdk_opts = [
-    cfg.StrOpt('dpdk_driver',
-               help=_('The DPDK driver that the device will be bound to after '
-                      'it is unbound from the kernel driver'),
-               default='uio_pci_generic'),
-    cfg.StrOpt('pci_mount_point',
-               help=_('Absolute path to directory containing pci address of '
-                      'devices to be used by DPDK application'),
-               default='/var/pci_address'),
-]
-
-CONF.register_opts(nested_dpdk_opts, "nested_dpdk")
-
-
-class DpdkDriver(health.HealthHandler, b_base.BaseBindingDriver):
-
-    def __init__(self):
-        super(DpdkDriver, self).__init__()
-
-    def connect(self, vif, ifname, netns, container_id):
-        name = self._get_iface_name_by_mac(vif.address)
-        driver, pci_addr = self._get_device_info(name)
-
-        vif.dev_driver = driver
-        vif.pci_address = pci_addr
-        dpdk_driver = CONF.nested_dpdk.dpdk_driver
-        self._change_driver_binding(pci_addr, dpdk_driver)
-        self._create_pci_file(pci_addr, container_id, ifname)
-        self._set_vif(vif)
-
-    def disconnect(self, vif, ifname, netns, container_id):
-        self._remove_pci_file(container_id, ifname)
-
-    def _get_iface_name_by_mac(self, mac_address):
-        with b_base.get_ipdb() as h_ipdb:
-            for name, data in h_ipdb.interfaces.items():
-                if data['address'] == mac_address:
-                    return data['ifname']
-
-    def _get_device_info(self, ifname):
-        """Get driver and PCI addr by using sysfs"""
-
-        # TODO(garyloug): check the type (virtio)
-        dev = os.path.basename(os.readlink(NET_DEV_PATH.format(ifname)))
-        pci_link = os.readlink(os.path.join(VIRTIO_DEVS_PATH, dev))
-        pci_addr = os.path.basename(os.path.dirname(pci_link))
-        pci_driver_link = 
os.readlink(os.path.join(PCI_PATH, pci_addr,
-                                                     'driver'))
-        pci_driver = os.path.basename(pci_driver_link)
-
-        return pci_driver, pci_addr
-
-    def _change_driver_binding(self, pci, driver):
-        old_driver_path = os.path.join(PCI_PATH, pci, 'driver')
-        old_driver_link = os.readlink(old_driver_path)
-        old_driver = os.path.basename(old_driver_link)
-
-        unbind_path = os.path.join(PCI_DRVS_PATH, old_driver, 'unbind')
-        bind_path = os.path.join(PCI_DRVS_PATH, driver, 'bind')
-
-        with open(unbind_path, 'w') as unbind_fd:
-            unbind_fd.write(pci)
-
-        override = os.path.join(PCI_PATH, pci, 'driver_override')
-        # NOTE(danil): to change the driver for a device it is necessary to
-        # write the name of the new driver into driver_override. Before that
-        # a null byte should be written there. This process is described
-        # properly in the dpdk-devbind.py script shipped with DPDK
-        with open(override, 'w') as override_fd:
-            override_fd.write("\00")
-
-        with open(override, 'w') as override_fd:
-            override_fd.write(driver)
-
-        with open(bind_path, 'w') as bind_fd:
-            bind_fd.write(pci)
-
-        LOG.info("Device %s was bound to driver %s. Old driver was %s", pci,
-                 driver, old_driver)
-
-    def _create_pci_file(self, pci_addr, container_id, ifname):
-        # NOTE(danil): writing used pci addresses is necessary so that DPDK
-        # applications inside containers know which device to use
-        file_path = os.path.join(CONF.nested_dpdk.pci_mount_point,
-                                 container_id + '-' + ifname)
-        try:
-            os.makedirs(CONF.nested_dpdk.pci_mount_point, exist_ok=True)
-            with open(file_path, 'w') as fd:
-                fd.write(pci_addr)
-        except OSError as err:
-            LOG.exception('Cannot create file %s. Error message: (%d) %s',
-                          file_path, err.errno, err.strerror)
-
-    def _remove_pci_file(self, container_id, ifname):
-        file_path = os.path.join(CONF.nested_dpdk.pci_mount_point,
-                                 container_id + '-' + ifname)
-        try:
-            os.remove(file_path)
-        except OSError as err:
-            LOG.warning('Cannot remove file %s. 
Error message: (%d) %s',
-                        file_path, err.errno, err.strerror)
-
-    def _set_vif(self, vif):
-        # TODO(ivc): extract annotation interactions
-        vifs, labels, resource_version, kp_link = self._get_pod_details(
-            vif.port_profile.selflink)
-        for ifname, data in vifs.items():
-            if vif.id == data['vif'].id:
-                vifs[ifname]['vif'] = vif
-                break
-        self._set_pod_details(vifs, vif.port_profile.selflink, labels,
-                              resource_version, kp_link)
-
-    def _get_pod_details(self, selflink):
-        k8s = clients.get_kubernetes_client()
-        pod = k8s.get(selflink)
-        kp = k8s.get(f'{constants.K8S_API_CRD_NAMESPACES}/'
-                     f'{pod["metadata"]["namespace"]}/kuryrports/'
-                     f'{pod["metadata"]["name"]}')
-
-        try:
-            vifs = {k: {'default': v['default'],
-                        'vif': objects.base.VersionedObject
-                        .obj_from_primitive(v['vif'])}
-                    for k, v in kp['status']['vifs'].items()}
-        except (KeyError, AttributeError):
-            LOG.exception(f"No vifs found on KuryrPort: {kp}")
-            raise
-        LOG.info(f"Got VIFs from Kuryrport: {vifs}")
-
-        resource_version = pod['metadata']['resourceVersion']
-        labels = pod['metadata'].get('labels')
-        return vifs, labels, resource_version, kp['metadata']['selfLink']
-
-    def _set_pod_details(self, vifs, selflink, labels, resource_version,
-                         kp_link):
-        k8s = clients.get_kubernetes_client()
-        if vifs:
-            vif_dict = {k: {'default': v['default'],
-                            'vif': v['vif'].obj_to_primitive()}
-                        for k, v in vifs.items()}
-
-            LOG.info("Setting VIFs in KuryrPort %r", vif_dict)
-            k8s.patch_crd('status', kp_link, {'vifs': vif_dict})
-
-        if not labels:
-            LOG.info("Removing Label annotation: %r", labels)
-            labels_annotation = None
-        else:
-            labels_annotation = jsonutils.dumps(labels, sort_keys=True)
-            LOG.info("Setting Labels annotation: %r", labels_annotation)
-
-        k8s.annotate(selflink,
-                     {constants.K8S_ANNOTATION_LABEL: labels_annotation},
-                     resource_version=resource_version)
diff --git a/kuryr_kubernetes/cni/binding/nested.py b/kuryr_kubernetes/cni/binding/nested.py
deleted file mode 100644
index 9389b62e9..000000000
--- a/kuryr_kubernetes/cni/binding/nested.py
+++ /dev/null
@@ -1,239 +0,0 @@
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
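_change_driver_binding() above follows the standard sysfs sequence for moving a PCI device to a different driver: write the address to the old driver's unbind file, clear and then set driver_override, and finally write the address to the new driver's bind file. A condensed sketch of that sequence; the PCI address and driver names are placeholders, while the sysfs paths match the constants defined above:

```python
# Condensed sketch of the sysfs PCI rebind sequence used above.
# The PCI address and driver names below are placeholders.
import os

PCI_PATH = '/sys/bus/pci/devices'
PCI_DRVS_PATH = '/sys/bus/pci/drivers'


def rebind(pci_addr, new_driver):
    old_driver = os.path.basename(
        os.readlink(os.path.join(PCI_PATH, pci_addr, 'driver')))
    # 1. Detach the device from its current kernel driver.
    with open(os.path.join(PCI_DRVS_PATH, old_driver, 'unbind'), 'w') as f:
        f.write(pci_addr)
    # 2. Clear, then set, driver_override so the kernel accepts the bind.
    override = os.path.join(PCI_PATH, pci_addr, 'driver_override')
    with open(override, 'w') as f:
        f.write('\00')
    with open(override, 'w') as f:
        f.write(new_driver)
    # 3. Attach the device to the target driver.
    with open(os.path.join(PCI_DRVS_PATH, new_driver, 'bind'), 'w') as f:
        f.write(pci_addr)


# rebind('0000:00:04.0', 'uio_pci_generic')  # example invocation
```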
-
-import abc
-import errno
-import os
-
-from oslo_log import log as logging
-import psutil
-import pyroute2
-from pyroute2 import netlink as pyroute_netlink
-
-from kuryr_kubernetes.cni.binding import base as b_base
-from kuryr_kubernetes import config
-from kuryr_kubernetes import exceptions
-from kuryr_kubernetes.handlers import health
-from kuryr_kubernetes import utils
-
-VLAN_KIND = 'vlan'
-MACVLAN_KIND = 'macvlan'
-MACVLAN_MODE_BRIDGE = 'bridge'
-KUBELET_PORT = 10250
-
-LOG = logging.getLogger(__name__)
-
-
-class NestedDriver(health.HealthHandler, b_base.BaseBindingDriver,
-                   metaclass=abc.ABCMeta):
-
-    def __init__(self):
-        super(NestedDriver, self).__init__()
-
-    @abc.abstractmethod
-    def _get_iface_create_args(self, vif):
-        raise NotImplementedError()
-
-    def _detect_iface_name(self, h_ipdb):
-        # Let's try config first
-        if config.CONF.binding.link_iface in h_ipdb.interfaces:
-            LOG.debug(f'Using configured interface '
-                      f'{config.CONF.binding.link_iface} as bridge interface.')
-            return config.CONF.binding.link_iface
-
-        # Then let's try choosing the one that kubelet listens on
-        conns = [x for x in psutil.net_connections()
-                 if x.status == psutil.CONN_LISTEN
-                 and x.laddr.port == KUBELET_PORT]
-        if len(conns) == 1:
-            lookup_addr = conns[0].laddr.ip
-            for name, iface in h_ipdb.interfaces.items():
-                if type(name) is int:  # Skip ones duplicated by id
-                    continue
-
-                for addr in iface['ipaddr']:
-                    if addr[0] == lookup_addr:
-                        LOG.debug(f'Using kubelet bind interface {name} as '
-                                  f'bridge interface.')
-                        return name
-
-        # Alright, just try the first non-loopback interface
-        for name, iface in h_ipdb.interfaces.items():
-            if type(name) is int:  # Skip ones duplicated by id
-                continue
-
-            if iface['flags'] & pyroute_netlink.rtnl.ifinfmsg.IFF_LOOPBACK:
-                continue  # Skip loopback
-
-            LOG.debug(f'Using interface {name} as bridge interface.')
-            return name
-
-        raise exceptions.CNIBindingFailure('Cannot find bridge interface for '
-                                           'nested driver to use. Please set '
-                                           '[binding]link_iface option.')
-
-    def connect(self, vif, ifname, netns, container_id):
-        # NOTE(vikasc): Ideally 'ifname' should be used here but instead a
-        # temporary name is being used while creating the device for
-        # container in host network namespace. This is because cni expects
-        # only 'eth0' as interface name and if host already has an
-        # interface named 'eth0', device creation will fail with 'already
-        # exists' error.
-        temp_name = vif.vif_name
-
-        # First let's take a peek into the pod namespace and try to remove any
-        # leftover interface in case we got restarted before CNI returned to
-        # kubelet.
-        with b_base.get_ipdb(netns) as c_ipdb:
-            self._remove_ifaces(c_ipdb, (temp_name, ifname), netns)
-
-        # We might also have a leftover interface in the host netns, let's try
-        # to remove it too. This is outside of the main host's IPDB context
-        # manager to make sure removal is committed before starting the next
-        # transaction.
-        with b_base.get_ipdb() as h_ipdb:
-            self._remove_ifaces(h_ipdb, (temp_name,))
-
-        with b_base.get_ipdb() as h_ipdb:
-            # TODO(vikasc): evaluate whether we should have stevedore
-            #               driver for getting the link device.
-            vm_iface_name = self._detect_iface_name(h_ipdb)
-            mtu = h_ipdb.interfaces[vm_iface_name].mtu
-            if mtu < vif.network.mtu:
-                # NOTE(dulek): This might happen if Neutron and DHCP agent
-                # have different MTU settings. 
See
-                # https://bugs.launchpad.net/kuryr-kubernetes/+bug/1863212
-                raise exceptions.CNIBindingFailure(
-                    f'MTU of interface {vm_iface_name} ({mtu}) is smaller '
-                    f'than MTU of pod network {vif.network.id} '
-                    f'({vif.network.mtu}). Please make sure pod network '
-                    f'has the same or smaller MTU as node (VM) network.')
-
-            args = self._get_iface_create_args(vif)
-            with h_ipdb.create(ifname=temp_name,
-                               link=h_ipdb.interfaces[vm_iface_name],
-                               **args) as iface:
-                iface.net_ns_fd = utils.convert_netns(netns)
-
-        with b_base.get_ipdb(netns) as c_ipdb:
-            with c_ipdb.interfaces[temp_name] as iface:
-                iface.ifname = ifname
-                iface.mtu = vif.network.mtu
-                iface.address = str(vif.address)
-                iface.up()
-
-    def disconnect(self, vif, ifname, netns, container_id):
-        # NOTE(dulek): Interfaces should get deleted with the netns, but it
-        #              may happen that kubelet or crio will call new CNI ADD
-        #              before the old netns is deleted. This might result in a
-        #              VLAN ID conflict. In order to protect against that
-        #              let's remove the netns ifaces here anyway.
-        with b_base.get_ipdb(netns) as c_ipdb:
-            self._remove_ifaces(c_ipdb, (vif.vif_name, ifname), netns)
-
-
-class VlanDriver(NestedDriver):
-
-    def __init__(self):
-        super(VlanDriver, self).__init__()
-
-    def connect(self, vif, ifname, netns, container_id):
-        try:
-            super().connect(vif, ifname, netns, container_id)
-        except pyroute2.NetlinkError as e:
-            if e.code == errno.EEXIST:
-                args = self._get_iface_create_args(vif)
-                LOG.warning(
-                    f'Creation of pod interface failed due to VLAN ID '
-                    f'(vlan_info={args}) conflict. Probably the CRI had not '
-                    f'cleaned up the network namespace of deleted pods. '
-                    f'Attempting to find and delete offending interface and '
-                    f'retry.')
-                self._cleanup_conflicting_vlan(netns, args['vlan_id'])
-                super().connect(vif, ifname, netns, container_id)
-                return
-            raise
-
-    def _get_iface_create_args(self, vif):
-        return {'kind': VLAN_KIND, 'vlan_id': vif.vlan_id}
-
-    def _cleanup_conflicting_vlan(self, netns, vlan_id):
-        if vlan_id is None:
-            # Better to not attempt that, might remove way too much.
-            return
-
-        netns_paths = []
-        handled_netns = set()
-        with b_base.get_ipdb() as h_ipdb:
-            vm_iface_name = self._detect_iface_name(h_ipdb)
-            vm_iface_index = h_ipdb.interfaces[vm_iface_name].index
-
-        if netns.startswith('/proc'):
-            # Paths have /proc/<pid>/ns/net pattern, we need to iterate
-            # over /proc.
-            netns_dir = utils.convert_netns('/proc')
-            for pid in os.listdir(netns_dir):
-                if not pid.isdigit():
-                    # Ignore all the non-pid stuff in /proc
-                    continue
-                netns_paths.append(os.path.join(netns_dir, pid, 'ns/net'))
-        else:
-            # cri-o manages netns, they're in /var/run/netns/* or similar.
-            netns_dir = os.path.dirname(netns)
-            netns_paths = os.listdir(netns_dir)
-            netns_paths = [os.path.join(netns_dir, netns_path)
-                           for netns_path in netns_paths]
-
-        for netns_path in netns_paths:
-            netns_path = os.fsdecode(netns_path)
-            try:
-                # NOTE(dulek): inode can be used to clearly distinguish the
-                # netns' as `man namespaces` says:
-                #
-                # Since Linux 3.8, they appear as symbolic links. If two
-                # processes are in the same namespace, then the device IDs and
-                # inode numbers of their /proc/[pid]/ns/xxx symbolic links will
-                # be the same; an application can check this using the
-                # stat.st_dev and stat.st_ino fields returned by stat(2). 
- netns_stat = os.stat(netns_path) - netns_id = netns_stat.st_dev, netns_stat.st_ino - except OSError: - continue - if netns_id in handled_netns: - continue - handled_netns.add(netns_id) - - try: - with b_base.get_ipdb(netns_path) as c_ipdb: - for ifname, iface in c_ipdb.interfaces.items(): - if (iface.vlan_id == vlan_id - and iface.link == vm_iface_index): - LOG.warning( - f'Found offending interface {ifname} with ' - f'VLAN ID {vlan_id} in netns {netns_path}. ' - f'Trying to remove it.') - with c_ipdb.interfaces[ifname] as found_iface: - found_iface.remove() - break - except OSError: - continue - - -class MacvlanDriver(NestedDriver): - - def __init__(self): - super(MacvlanDriver, self).__init__() - - def _get_iface_create_args(self, vif): - return {'kind': MACVLAN_KIND, 'macvlan_mode': MACVLAN_MODE_BRIDGE} diff --git a/kuryr_kubernetes/cni/binding/vhostuser.py b/kuryr_kubernetes/cni/binding/vhostuser.py deleted file mode 100644 index ea1a28b10..000000000 --- a/kuryr_kubernetes/cni/binding/vhostuser.py +++ /dev/null @@ -1,131 +0,0 @@ -# Copyright (c) 2020 Samsung Electronics Co., Ltd. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -import os.path -import stat - -from os_vif.objects import fields as osv_fields -from oslo_config import cfg -from oslo_log import log -from oslo_serialization import jsonutils -from vif_plug_ovs import constants -from vif_plug_ovs import ovs - -from kuryr_kubernetes.cni.binding import base -from kuryr_kubernetes import config -from kuryr_kubernetes import exceptions as k_exc -from kuryr_kubernetes.handlers import health - -LOG = log.getLogger(__name__) - - -def _get_vhostuser_port_name(vif): - return ovs.OvsPlugin.gen_port_name(constants.OVS_VHOSTUSER_PREFIX, vif.id) - - -def _get_vhu_sock(config_file_path): - with open(config_file_path, 'r') as f: - conf = jsonutils.load(f) - return conf['vhostname'] - - -def _check_sock_file(vhostuser_socket): - mode = os.stat(vhostuser_socket).st_mode - return stat.S_ISSOCK(mode) - - -class VIFVHostUserDriver(health.HealthHandler, base.BaseBindingDriver): - - def __init__(self): - super(VIFVHostUserDriver, self).__init__() - self.mount_path = config.CONF.vhostuser.mount_point - self.ovs_vu_path = config.CONF.vhostuser.ovs_vhu_path - if not self.mount_path: - raise cfg.RequiredOptError('mount_point', 'vhostuser') - - def _write_config(self, container_id, ifname, port_name, vif): - """Write vhostuser configuration file - - This function writes configuration file, this file will be used by - application inside container and for cleanup (def disconnect) - procedure. 
- """ - vhost_conf = {} - vhost_conf["vhostname"] = port_name - vhost_conf["vhostmac"] = vif.address - vhost_conf["mode"] = vif.mode - with open(self._config_file_path(container_id, ifname), "w") as f: - jsonutils.dump(vhost_conf, f) - - def _config_file_path(self, container_id, ifname): - return os.path.join(self.mount_path, f'{container_id}-{ifname}') - - def connect(self, vif, ifname, netns, container_id): - port_name = _get_vhostuser_port_name(vif) - self._write_config(container_id, ifname, port_name, vif) - # no need to copy in case of SERVER mode - if vif.mode == osv_fields.VIFVHostUserMode.SERVER: - return - - src_vhu_sock = os.path.join(self.ovs_vu_path, port_name) - - if _check_sock_file(src_vhu_sock): - dst_vhu_sock = os.path.join(vif.path, port_name) - LOG.debug("Moving %s to %s while processing VIF %s", src_vhu_sock, - dst_vhu_sock, vif.id) - os.rename(src_vhu_sock, dst_vhu_sock) - else: - error_msg = ("Socket %s required for VIF %s doesn't exist" % - (src_vhu_sock, vif.id)) - LOG.error(error_msg) - raise k_exc.CNIError(error_msg) - - def disconnect(self, vif, ifname, netns, container_id): - # This function removes configuration file and appropriate - # socket file. Unfortunatelly Open vSwitch daemon can't remove - # moved socket, so we have to do it - config_file_path = self._config_file_path(container_id, ifname) - - if not os.path.exists(config_file_path): - LOG.warning("Configuration file: %s for VIF %s doesn't exist!", - config_file_path, vif.id) - return - vhu_sock_path = os.path.join(self.mount_path, - _get_vhu_sock(config_file_path)) - LOG.debug("remove: %s, %s", config_file_path, vhu_sock_path) - try: - os.remove(vhu_sock_path) - except Exception: - LOG.exception("Failed to delete socket %s when processing VIF %s.", - vhu_sock_path, vif.id) - os.remove(config_file_path) - - def is_alive(self): - healthy = False - try: - healthy = (os.path.exists(self.ovs_vu_path) - and os.path.exists(self.mount_path)) - except Exception: - LOG.exception('Error when determining health status of vhostuser ' - 'CNI driver.') - - if not healthy: - LOG.error('Directory %s or %s does not exist or Kuryr has no ' - 'permissions to access it. Marking vhostuser binding ' - 'driver as unhealthy.', self.ovs_vu_path, - self.mount_path) - - return healthy diff --git a/kuryr_kubernetes/cni/daemon/__init__.py b/kuryr_kubernetes/cni/daemon/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/kuryr_kubernetes/cni/daemon/service.py b/kuryr_kubernetes/cni/daemon/service.py deleted file mode 100644 index ac459db25..000000000 --- a/kuryr_kubernetes/cni/daemon/service.py +++ /dev/null @@ -1,375 +0,0 @@ -# Copyright 2017 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from ctypes import c_bool -import errno -from http import client as httplib -import multiprocessing -import os -import queue -import sys -import threading -import time -import urllib3 - -import cotyledon -import flask -import pyroute2 -from pyroute2.ipdb import transactional -from werkzeug import serving - -import os_vif -from oslo_config import cfg -from oslo_log import log as logging -from oslo_serialization import jsonutils - -from kuryr_kubernetes import clients -from kuryr_kubernetes.cni.daemon import watcher_service -from kuryr_kubernetes.cni import health -from kuryr_kubernetes.cni.plugins import k8s_cni_registry -from kuryr_kubernetes.cni import prometheus_exporter -from kuryr_kubernetes.cni import utils as cni_utils -from kuryr_kubernetes import config -from kuryr_kubernetes import exceptions -from kuryr_kubernetes import objects - -LOG = logging.getLogger(__name__) -CONF = cfg.CONF -ErrContainerUnknown = 3 -ErrInvalidEnvironmentVariables = 4 -ErrTryAgainLater = 11 -ErrInternal = 999 - - -class DaemonServer(object): - def __init__(self, plugin, healthy, metrics): - self.ctx = None - self.plugin = plugin - self.healthy = healthy - self.metrics = metrics - self.failure_count = multiprocessing.Value('i', 0) - self.application = flask.Flask('kuryr-daemon') - self.application.add_url_rule( - '/addNetwork', methods=['POST'], view_func=self.add) - self.application.add_url_rule( - '/delNetwork', methods=['POST'], view_func=self.delete) - self.headers = {'ContentType': 'application/json', - 'Connection': 'close'} - self._server = None - - def _prepare_request(self): - params = cni_utils.CNIParameters(flask.request.get_json()) - LOG.debug('Received %s request. CNI Params: %s', - params.CNI_COMMAND, params) - return params - - def _error(self, error_code, message, details=""): - template = { - "code": error_code, - "msg": message, - "details": details - } - data = jsonutils.dumps(template) - return data - - def _update_metrics(self, command, error, duration): - """Add a new metric value to the shared metrics dict""" - labels = {'command': command, 'error': error} - self.metrics.put({'labels': labels, 'duration': duration}) - - @cni_utils.measure_time('ADD') - def add(self): - try: - params = self._prepare_request() - except Exception: - self._check_failure() - LOG.exception('Exception when reading CNI params.') - error = self._error(ErrInvalidEnvironmentVariables, - "Required CNI params missing.") - return error, httplib.BAD_REQUEST, self.headers - - try: - vif = self.plugin.add(params) - data = jsonutils.dumps(vif.obj_to_primitive()) - except (exceptions.CNIPodGone, exceptions.CNIPodUidMismatch) as e: - LOG.warning('Pod deleted while processing ADD request') - error = self._error(ErrContainerUnknown, str(e)) - return error, httplib.GONE, self.headers - except exceptions.CNITimeout as e: - LOG.exception('Timeout on ADD request') - error = self._error(ErrTryAgainLater, f"{e}. Try Again Later.") - return error, httplib.GATEWAY_TIMEOUT, self.headers - except pyroute2.NetlinkError as e: - if e.code == errno.EEXIST: - self._check_failure() - LOG.warning( - f'Creation of pod interface failed due to VLAN ID ' - f'conflict. Probably the CRI had not cleaned up the ' - f'network namespace of deleted pods. Attempting to retry.') - error = self._error(ErrTryAgainLater, - "Creation of pod interface failed due to " - "VLAN ID conflict. 
Try Again Later") - return error, httplib.GATEWAY_TIMEOUT, self.headers - raise - except Exception: - if not self.healthy.value: - error = self._error(ErrInternal, - "Maximum CNI ADD Failures Reached.", - "Error when processing addNetwork request." - " CNI Params: {}".format(params)) - else: - self._check_failure() - error = self._error(ErrInternal, - "Error processing request", - "Failure processing addNetwork request. " - "CNI Params: {}".format(params)) - LOG.exception('Error when processing addNetwork request. CNI ' - 'Params: %s', params) - return error, httplib.INTERNAL_SERVER_ERROR, self.headers - - return data, httplib.ACCEPTED, self.headers - - @cni_utils.measure_time('DEL') - def delete(self): - try: - params = self._prepare_request() - except Exception: - LOG.exception('Exception when reading CNI params.') - error = self._error(ErrInvalidEnvironmentVariables, - "Required CNI params missing.") - return error, httplib.BAD_REQUEST, self.headers - - try: - self.plugin.delete(params) - except (exceptions.CNIKuryrPortTimeout, exceptions.CNIPodUidMismatch): - # NOTE(dulek): It's better to ignore these errors - most of the - # time it will happen when pod is long gone and CRI - # overzealously tries to delete it from the network. - # We cannot really do anything without VIF annotation, - # so let's just tell CRI to move along. - LOG.warning('Error when processing delNetwork request. ' - 'Ignoring this error, pod is most likely gone') - return '', httplib.NO_CONTENT, self.headers - except Exception: - if not self.healthy.value: - error = self._error(ErrInternal, - "Maximum CNI DEL Failures Reached.", - "Error processing delNetwork request. " - "CNI Params: {}".format(params)) - else: - self._check_failure() - error = self._error(ErrInternal, - "Error processing request", - "Failure processing delNetwork request. " - "CNI Params: {}".format(params)) - LOG.exception('Error when processing delNetwork request. 
CNI ' - 'Params: %s.', params) - return error, httplib.INTERNAL_SERVER_ERROR, self.headers - return '', httplib.NO_CONTENT, self.headers - - def run(self): - server_pair = CONF.cni_daemon.bind_address - LOG.info('Starting server on %s.', server_pair) - try: - address, port = server_pair.split(':') - port = int(port) - except ValueError: - LOG.exception('Cannot start server on %s.', server_pair) - raise - - if CONF.cni_daemon.worker_num <= 1: - msg = ('[cni_daemon]worker_num needs to be set to a value higher ' - 'than 1') - LOG.critical(msg) - raise exceptions.InvalidKuryrConfiguration(msg) - - try: - self._server = serving.make_server( - address, port, self.application, threaded=False, - processes=CONF.cni_daemon.worker_num) - self._server.serve_forever() - except Exception: - LOG.exception('Failed to start kuryr-daemon.') - raise - - def stop(self): - LOG.info("Waiting for DaemonServer worker processes to exit...") - self._server._block_on_close = True - self._server.shutdown() - self._server.server_close() - LOG.info("All DaemonServer workers finished gracefully.") - - def _check_failure(self): - with self.failure_count.get_lock(): - if self.failure_count.value < CONF.cni_daemon.cni_failures_count: - self.failure_count.value += 1 - else: - with self.healthy.get_lock(): - LOG.debug("Reporting maximum CNI ADD/DEL failures " - "reached.") - self.healthy.value = False - - -class CNIDaemonServerService(cotyledon.Service): - name = "server" - - def __init__(self, worker_id, registry, healthy, metrics): - super(CNIDaemonServerService, self).__init__(worker_id) - self.registry = registry - self.healthy = healthy - self.plugin = k8s_cni_registry.K8sCNIRegistryPlugin(registry, - self.healthy) - self.metrics = metrics - self.server = DaemonServer(self.plugin, self.healthy, self.metrics) - - def run(self): - # NOTE(dulek): We might do a *lot* of pyroute2 operations, let's - # make the pyroute2 timeout configurable to make sure - # kernel will have chance to catch up. - transactional.SYNC_TIMEOUT = CONF.cni_daemon.pyroute2_timeout - - # Run HTTP server - self.server.run() - - def terminate(self): - self.server.stop() - - -class CNIDaemonHealthServerService(cotyledon.Service): - name = "health" - - def __init__(self, worker_id, healthy): - super(CNIDaemonHealthServerService, self).__init__(worker_id) - self.health_server = health.CNIHealthServer(healthy) - - def run(self): - self.health_server.run() - - -class CNIDaemonExporterService(cotyledon.Service): - name = "Prometheus Exporter" - - def __init__(self, worker_id, metrics): - super(CNIDaemonExporterService, self).__init__(worker_id) - self.prometheus_exporter = prometheus_exporter.CNIPrometheusExporter() - self.is_running = True - self.metrics = metrics - self.exporter_thread = threading.Thread( - target=self._start_metric_updater) - self.exporter_thread.start() - - def _start_metric_updater(self): - while self.is_running: - try: - metric = self.metrics.get(timeout=1) - except queue.Empty: - continue - labels = metric['labels'] - duration = metric['duration'] - self.prometheus_exporter.update_metric(labels, duration) - - def terminate(self): - self.is_running = False - if self.exporter_thread: - self.exporter_thread.join() - - def run(self): - self.prometheus_exporter.run() - - -class CNIDaemonServiceManager(cotyledon.ServiceManager): - def __init__(self): - # NOTE(mdulko): Default shutdown timeout is 60 seconds and K8s won't - # wait more by default anyway. 
- super(CNIDaemonServiceManager, self).__init__() - self._server_service = None - # TODO(dulek): Use cotyledon.oslo_config_glue to support conf reload. - - # TODO(vikasc): Should be done using dynamically loadable OVO types - # plugin. - objects.register_locally_defined_vifs() - - os_vif.initialize() - clients.setup_kubernetes_client() - - self.manager = multiprocessing.Manager() - registry = self.manager.dict() # For Watcher->Server communication. - healthy = multiprocessing.Value(c_bool, True) - metrics = self.manager.Queue() - self.add(watcher_service.KuryrPortWatcherService, workers=1, - args=(registry, healthy,)) - self.add(watcher_service.PodWatcherService, workers=1, - args=(registry, healthy,)) - self._server_service = self.add(CNIDaemonServerService, workers=1, - args=(registry, healthy, metrics,)) - self.add(CNIDaemonHealthServerService, workers=1, args=(healthy,)) - self.add(CNIDaemonExporterService, workers=1, args=(metrics,)) - - def shutdown_hook(service_id, worker_id, exit_code): - LOG.critical(f'Child Service {service_id} had exited with code ' - f'{exit_code}, stopping kuryr-daemon') - self.shutdown() - - self.register_hooks(on_terminate=self.terminate, - on_dead_worker=shutdown_hook) - - def run(self): - # FIXME(darshna): Remove pyroute2 IPDB deprecation warning, remove - # once we stop using pyroute2.IPDB. - logging.getLogger('pyroute2').setLevel(logging.ERROR) - logging.getLogger('pr2modules.ipdb.main').setLevel(logging.ERROR) - - reaper_thread = threading.Thread(target=self._zombie_reaper, - daemon=True) - self._terminate_called = threading.Event() - reaper_thread.start() - super(CNIDaemonServiceManager, self).run() - - def _zombie_reaper(self): - while True: - try: - res = os.waitpid(-1, os.WNOHANG) - # don't sleep or stop if a zombie process was found - # as there could be more - if res != (0, 0): - continue - except ChildProcessError: - # There are no child processes yet (or they have been killed) - pass - except os.error: - LOG.exception("Got OS error while reaping zombie processes") - if self._terminate_called.isSet(): - break - time.sleep(1) - - def terminate(self): - self._terminate_called.set() - if self._server_service: - LOG.info("Gracefully stopping DaemonServer service..") - self.reconfigure(self._server_service, 0) - for worker in self._running_services[self._server_service]: - worker.terminate() - for worker in self._running_services[self._server_service]: - worker.join() - LOG.info("Stopping registry manager...") - self.manager.shutdown() - LOG.info("Continuing with shutdown") - - -def start(): - urllib3.disable_warnings() - config.init(sys.argv[1:]) - config.setup_logging() - - CNIDaemonServiceManager().run() diff --git a/kuryr_kubernetes/cni/daemon/watcher_service.py b/kuryr_kubernetes/cni/daemon/watcher_service.py deleted file mode 100644 index cb6987a36..000000000 --- a/kuryr_kubernetes/cni/daemon/watcher_service.py +++ /dev/null @@ -1,94 +0,0 @@ -# Copyright 2022 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
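The daemon above shares state between its cotyledon services through objects created before fork: a multiprocessing.Value of c_bool for health and a Manager dict/queue for the registry and metrics. A toy sketch of the flag-flipping discipline used by DaemonServer._check_failure(), with an illustrative threshold standing in for CONF.cni_daemon.cni_failures_count:

from ctypes import c_bool
import multiprocessing

healthy = multiprocessing.Value(c_bool, True)
failures = multiprocessing.Value('i', 0)
MAX_FAILURES = 3  # stand-in for CONF.cni_daemon.cni_failures_count

def check_failure():
    # Count failures under the value's lock; once the limit is hit,
    # trip the shared health flag so the health server reports it.
    with failures.get_lock():
        if failures.value < MAX_FAILURES:
            failures.value += 1
        else:
            with healthy.get_lock():
                healthy.value = False

for _ in range(5):
    check_failure()
assert not healthy.value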
- -import threading -import time -import urllib.parse - -import cotyledon -from oslo_config import cfg -from oslo_log import log as logging - -from kuryr_kubernetes.cni import handlers -from kuryr_kubernetes import constants as k_const -from kuryr_kubernetes import utils -from kuryr_kubernetes import watcher as k_watcher - - -HEALTH_CHECKER_DELAY = 5 -LOG = logging.getLogger(__name__) -CONF = cfg.CONF - - -class BaseCNIDaemonWatcherService(cotyledon.Service): - name = "watcher" - - def __init__(self, worker_id, handler, path, registry, healthy): - super().__init__(worker_id) - self.pipeline = None - self.watcher = None - self.health_thread = None - self.handler = handler - self.registry = registry - self.healthy = healthy - self.path = path - self.is_running = False - - def run(self): - self.pipeline = handlers.CNIPipeline() - self.pipeline.register(self.handler) - self.watcher = k_watcher.Watcher(self.pipeline) - self.watcher.add(self.path) - - self.is_running = True - - self.health_thread = threading.Thread( - target=self._start_watcher_health_checker) - self.health_thread.start() - - self.watcher.start() - - def _start_watcher_health_checker(self): - while self.is_running: - if not self.watcher.is_alive(): - LOG.warning(f"Reporting watcher {self.__class__.__name__} is " - f"not healthy because it's not running anymore.") - with self.healthy.get_lock(): - self.healthy.value = False - time.sleep(HEALTH_CHECKER_DELAY) - - def terminate(self): - self.is_running = False - if self.health_thread: - self.health_thread.join() - if self.watcher: - self.watcher.stop() - - -class KuryrPortWatcherService(BaseCNIDaemonWatcherService): - def __init__(self, worker_id, registry, healthy): - query_label = urllib.parse.quote_plus(f'{k_const.KURYRPORT_LABEL}=' - f'{utils.get_nodename()}') - path = f'{k_const.K8S_API_CRD_KURYRPORTS}?labelSelector={query_label}' - handler = handlers.CNIKuryrPortHandler(registry) - super().__init__(worker_id, handler, path, registry, healthy) - - -class PodWatcherService(BaseCNIDaemonWatcherService): - def __init__(self, worker_id, registry, healthy): - query_label = urllib.parse.quote_plus(f'spec.nodeName=' - f'{utils.get_nodename()}') - path = f'{k_const.K8S_API_PODS}?fieldSelector={query_label}' - handler = handlers.CNIPodHandler(registry) - super().__init__(worker_id, handler, path, registry, healthy) diff --git a/kuryr_kubernetes/cni/handlers.py b/kuryr_kubernetes/cni/handlers.py deleted file mode 100644 index 5e90f0dd6..000000000 --- a/kuryr_kubernetes/cni/handlers.py +++ /dev/null @@ -1,116 +0,0 @@ -# Copyright (c) 2016 Mirantis, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
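Both watcher services above build their watch endpoints by URL-quoting a selector: the KuryrPort watcher filters on the kuryr.openstack.org/nodeName label, the Pod watcher on a spec.nodeName field selector. A quick sketch of the resulting paths, using the constants from constants.py and an invented node name:

import urllib.parse

node = 'worker-0.example'  # normally utils.get_nodename()

label = urllib.parse.quote_plus(f'kuryr.openstack.org/nodeName={node}')
kp_path = f'/apis/openstack.org/v1/kuryrports?labelSelector={label}'

field = urllib.parse.quote_plus(f'spec.nodeName={node}')
pod_path = f'/api/v1/pods?fieldSelector={field}'

print(kp_path)
print(pod_path)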
- -from os_vif import objects as obj_vif -from oslo_concurrency import lockutils -from oslo_log import log as logging - -from kuryr_kubernetes import constants as k_const -from kuryr_kubernetes.handlers import dispatch as k_dis -from kuryr_kubernetes.handlers import k8s_base -from kuryr_kubernetes import utils - - -LOG = logging.getLogger(__name__) - - -class CNIKuryrPortHandler(k8s_base.ResourceEventHandler): - OBJECT_KIND = k_const.K8S_OBJ_KURYRPORT - - def __init__(self, registry): - super().__init__() - self.registry = registry - - def on_vif(self, kuryrport, vifs): - kp_name = utils.get_res_unique_name(kuryrport) - with lockutils.lock(kp_name, external=True): - if (kp_name not in self.registry or - self.registry[kp_name] == k_const.CNI_DELETED_POD_SENTINEL - or self.registry[kp_name]['kp']['metadata']['uid'] != - kuryrport['metadata']['uid']): - self.registry[kp_name] = {'kp': kuryrport, - 'vifs': vifs, - 'containerid': None, - 'vif_unplugged': False, - 'del_received': False} - else: - old_vifs = self.registry[kp_name]['vifs'] - for iface in vifs: - if old_vifs[iface].active != vifs[iface].active: - kp_dict = self.registry[kp_name] - kp_dict['vifs'] = vifs - self.registry[kp_name] = kp_dict - - def on_deleted(self, kuryrport, *args, **kwargs): - kp_name = utils.get_res_unique_name(kuryrport) - try: - if (kp_name in self.registry and self.registry[kp_name] - != k_const.CNI_DELETED_POD_SENTINEL): - # NOTE(ndesh): We need to lock here to avoid a race condition - # with the deletion code for CNI DEL so that - # we delete the registry entry exactly once - with lockutils.lock(kp_name, external=True): - if self.registry[kp_name]['vif_unplugged']: - del self.registry[kp_name] - else: - kp_dict = self.registry[kp_name] - kp_dict['del_received'] = True - self.registry[kp_name] = kp_dict - except KeyError: - # This means someone else removed it. It's odd but safe to ignore. - LOG.debug('KuryrPort %s entry already removed from registry while ' - 'handling DELETED event. Ignoring.', kp_name) - pass - - def on_present(self, kuryrport, *args, **kwargs): - LOG.debug('MODIFIED event for KuryrPort %s', - utils.get_res_unique_name(kuryrport)) - vifs = self._get_vifs(kuryrport) - if vifs: - self.on_vif(kuryrport, vifs) - - def _get_vifs(self, kuryrport): - vifs_dict = { - k: obj_vif.base.VersionedObject.obj_from_primitive(v['vif']) - for k, v in kuryrport['status']['vifs'].items()} - LOG.debug("Got vifs: %r", vifs_dict) - - return vifs_dict - - -class CNIPodHandler(k8s_base.ResourceEventHandler): - OBJECT_KIND = k_const.K8S_OBJ_POD - - def __init__(self, registry): - super().__init__() - self.registry = registry - - def on_finalize(self, pod, *args, **kwargs): - # TODO(dulek): Verify if this is the handler for such a case. - kp_name = utils.get_res_unique_name(pod) - with lockutils.lock(kp_name, external=True): - # If there was no KP and the Pod got deleted, we need to inform the - # thread waiting for it about that. We'll insert a sentinel value.
- if kp_name not in self.registry: - self.registry[kp_name] = k_const.CNI_DELETED_POD_SENTINEL - - -class CNIPipeline(k_dis.EventPipeline): - - def _wrap_dispatcher(self, dispatcher): - return dispatcher - - def _wrap_consumer(self, consumer): - return consumer diff --git a/kuryr_kubernetes/cni/health.py b/kuryr_kubernetes/cni/health.py deleted file mode 100644 index 0a206c7d5..000000000 --- a/kuryr_kubernetes/cni/health.py +++ /dev/null @@ -1,140 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from http import client as httplib -import os - -from oslo_config import cfg -from oslo_log import log as logging -from pyroute2 import IPDB - -from kuryr.lib._i18n import _ -from kuryr_kubernetes.cni import utils -from kuryr_kubernetes import health as base_server - -LOG = logging.getLogger(__name__) -CONF = cfg.CONF - -cni_health_server_opts = [ - cfg.IntOpt('port', - help=_('Port for CNI Health HTTP Server.'), - default=8090), - cfg.IntOpt('max_memory_usage', - help=_('Maximum memory usage (MiB) for CNI Health Server ' - 'process. If this value is exceeded kuryr-daemon ' - 'will be marked as unhealthy.'), - default=-1), - cfg.StrOpt( - 'cg_path', - help=_('sysfs path to the CNI cgroup. This is used for resource ' - 'tracking and as such should point to the cgroup hierarchy ' - 'leaf. It only applies when non containerized'), - default='/sys/fs/cgroup/memory/system.slice/kuryr-cni.service') -] - -CONF.register_opts(cni_health_server_opts, "cni_health_server") - -TOP_CGROUP_MEMORY_PATH = '/sys/fs/cgroup/memory' -MEMSW_FILENAME = 'memory.memsw.usage_in_bytes' -BYTES_AMOUNT = 1048576 -CAP_NET_ADMIN = 12 # Taken from linux/capabilities.h -EFFECTIVE_CAPS = 'CapEff:\t' - - -def _has_cap(capability, entry, proc_status_path='/proc/self/status'): - """Returns true iff the process has the specified capability. - - :param capability: the bit number for the capability to check as seen - in linux/capabilities.h. - :param entry: Whether to check CapInh, CapEff or CapBnd. - :param proc_status_path: Which process status should be checked. If none - is passed, it will check the current process. - :return: Whether the specified process has the capability bit set - """ - with open(proc_status_path, 'r') as pstat: - for line in pstat: - if line.startswith(entry): - caps = int(line[len(entry):], 16) - return (caps & (1 << capability)) != 0 - - -def _get_cni_cgroup_path(): - """Returns the path to the CNI process cgroup memory directory.""" - if utils.running_under_container_runtime(): - # We are running inside a container. 
This means the root cgroup - # is the one we need to track as it will be the CNI parent proc - cg_memsw_path = TOP_CGROUP_MEMORY_PATH - else: - cg_memsw_path = CONF.cni_health_server.cg_path - - return cg_memsw_path - - -def _get_memsw_usage(cgroup_mem_path): - """Returns the group's resident memory plus swap usage.""" - with open(os.path.join(cgroup_mem_path, MEMSW_FILENAME)) as memsw: - memsw_in_bytes = int(memsw.read()) - return memsw_in_bytes / BYTES_AMOUNT - - -class CNIHealthServer(base_server.BaseHealthServer): - """Server used by readiness and liveness probes to manage CNI health checks. - - Verifies presence of NET_ADMIN capabilities, IPDB in working order, - connectivity to Kubernetes API, quantity of CNI ADD failures, health of - CNI components and existence of memory leaks. - """ - - def __init__(self, components_healthy): - - super().__init__('daemon-health', CONF.cni_health_server.port) - self._components_healthy = components_healthy - - def readiness_status(self): - k8s_conn = self.verify_k8s_connection() - - if not _has_cap(CAP_NET_ADMIN, EFFECTIVE_CAPS): - error_message = 'NET_ADMIN capabilities not present.' - LOG.error(error_message) - return error_message, httplib.INTERNAL_SERVER_ERROR, {} - if not k8s_conn: - error_message = 'K8s API healthz endpoint failed.' - LOG.error(error_message) - return error_message, httplib.INTERNAL_SERVER_ERROR, {} - - return 'ok', httplib.OK, {} - - def liveness_status(self): - no_limit = -1 - try: - with IPDB(): - pass - except Exception: - error_message = 'IPDB not in working order.' - LOG.error(error_message) - return error_message, httplib.INTERNAL_SERVER_ERROR, {} - - if CONF.cni_health_server.max_memory_usage != no_limit: - mem_usage = _get_memsw_usage(_get_cni_cgroup_path()) - - if mem_usage > CONF.cni_health_server.max_memory_usage: - err_message = 'CNI daemon exceeded maximum memory usage.' - LOG.error(err_message) - return err_message, httplib.INTERNAL_SERVER_ERROR, {} - - with self._components_healthy.get_lock(): - if not self._components_healthy.value: - err_message = 'Kuryr CNI components not healthy.' - LOG.error(err_message) - return err_message, httplib.INTERNAL_SERVER_ERROR, {} - - return 'ok', httplib.OK, {} diff --git a/kuryr_kubernetes/cni/main.py b/kuryr_kubernetes/cni/main.py deleted file mode 100644 index 0957cd384..000000000 --- a/kuryr_kubernetes/cni/main.py +++ /dev/null @@ -1,69 +0,0 @@ -# Copyright (c) 2016 Mirantis, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License.
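The _has_cap() helper in health.py above reduces to one bit test: read the CapEff hex mask from the process status file and check bit 12 (CAP_NET_ADMIN, per linux/capabilities.h). A self-contained version of that check, runnable on any Linux host:

CAP_NET_ADMIN = 12  # bit number from linux/capabilities.h

def has_cap(capability, entry='CapEff:\t', status_path='/proc/self/status'):
    # /proc/[pid]/status carries capability masks as hex strings,
    # e.g. "CapEff:\t0000003fffffffff"; test the requested bit.
    with open(status_path) as pstat:
        for line in pstat:
            if line.startswith(entry):
                caps = int(line[len(entry):], 16)
                return bool(caps & (1 << capability))
    return False

print(has_cap(CAP_NET_ADMIN))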
- -import os -import signal -import sys - -import os_vif -from oslo_config import cfg -from oslo_log import log as logging -from oslo_serialization import jsonutils - -from kuryr_kubernetes.cni import api as cni_api -from kuryr_kubernetes.cni import utils -from kuryr_kubernetes import config -from kuryr_kubernetes import constants as k_const -from kuryr_kubernetes import objects as k_objects - -CONF = cfg.CONF -LOG = logging.getLogger(__name__) -_CNI_TIMEOUT = 180 - - -def run(): - d = jsonutils.load(sys.stdin.buffer) - cni_conf = utils.CNIConfig(d) - args = (['--config-file', cni_conf.kuryr_conf] if 'kuryr_conf' in d - else []) - - try: - if cni_conf.debug: - args.append('-d') - except AttributeError: - pass - config.init(args) - config.setup_logging() - - # Initialize o.vo registry. - k_objects.register_locally_defined_vifs() - os_vif.initialize() - - runner = cni_api.CNIDaemonizedRunner() - - def _timeout(signum, frame): - runner._write_dict(sys.stdout, { - 'msg': 'timeout', - 'code': k_const.CNI_TIMEOUT_CODE, - }) - LOG.debug('timed out') - sys.exit(1) - - signal.signal(signal.SIGALRM, _timeout) - signal.alarm(_CNI_TIMEOUT) - status = runner.run(os.environ, cni_conf, sys.stdout) - LOG.debug("Exiting with status %s", status) - if status: - sys.exit(status) diff --git a/kuryr_kubernetes/cni/plugins/__init__.py b/kuryr_kubernetes/cni/plugins/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/kuryr_kubernetes/cni/plugins/base.py b/kuryr_kubernetes/cni/plugins/base.py deleted file mode 100644 index 5d9ccb51e..000000000 --- a/kuryr_kubernetes/cni/plugins/base.py +++ /dev/null @@ -1,28 +0,0 @@ -# Copyright (c) 2016 Mirantis, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -import abc - - -class CNIPlugin(object, metaclass=abc.ABCMeta): - - @abc.abstractmethod - def add(self, params): - raise NotImplementedError() - - @abc.abstractmethod - def delete(self, params): - raise NotImplementedError() diff --git a/kuryr_kubernetes/cni/plugins/k8s_cni_registry.py b/kuryr_kubernetes/cni/plugins/k8s_cni_registry.py deleted file mode 100644 index c04791ab7..000000000 --- a/kuryr_kubernetes/cni/plugins/k8s_cni_registry.py +++ /dev/null @@ -1,246 +0,0 @@ -# Copyright 2017 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
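main.py above guards the whole CNI invocation with signal.alarm(): if the daemon does not answer within _CNI_TIMEOUT, the SIGALRM handler emits a CNI error dict on stdout and exits non-zero. The same pattern in isolation, with a 2-second budget and a toy payload purely for demonstration:

import json
import signal
import sys
import time

CNI_TIMEOUT_CODE = 200  # same constant the real handler reports

def _timeout(signum, frame):
    # CNI errors are a JSON dict on stdout plus a non-zero exit code.
    json.dump({'msg': 'timeout', 'code': CNI_TIMEOUT_CODE}, sys.stdout)
    sys.exit(1)

signal.signal(signal.SIGALRM, _timeout)
signal.alarm(2)    # main.py uses 180 seconds
time.sleep(1)      # pretend to do the CNI work
signal.alarm(0)    # finished in time, cancel the pending alarm
print('done')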
- -import retrying - -from os_vif import objects as obj_vif -from oslo_concurrency import lockutils -from oslo_config import cfg -from oslo_log import log as logging - -from kuryr_kubernetes import clients -from kuryr_kubernetes.cni.binding import base as b_base -from kuryr_kubernetes.cni.plugins import base as base_cni -from kuryr_kubernetes.cni import utils -from kuryr_kubernetes import constants as k_const -from kuryr_kubernetes import exceptions -from kuryr_kubernetes import utils as k_utils - -LOG = logging.getLogger(__name__) -CONF = cfg.CONF -RETRY_DELAY = 1000 # 1 second in milliseconds - - -class K8sCNIRegistryPlugin(base_cni.CNIPlugin): - def __init__(self, registry, healthy): - self.healthy = healthy - self.registry = registry - self.k8s = clients.get_kubernetes_client() - - def _get_obj_name(self, params): - return f'{params.args.K8S_POD_NAMESPACE}/{params.args.K8S_POD_NAME}' - - def _get_pod(self, params): - namespace = params.args.K8S_POD_NAMESPACE - name = params.args.K8S_POD_NAME - - try: - return self.k8s.get( - f'{k_const.K8S_API_NAMESPACES}/{namespace}/pods/{name}') - except exceptions.K8sResourceNotFound: - return None - except exceptions.K8sClientException: - uniq_name = self._get_obj_name(params) - LOG.exception('Error when getting Pod %s', uniq_name) - raise - - def add(self, params): - kp_name = self._get_obj_name(params) - timeout = CONF.cni_daemon.vif_annotation_timeout - - # In order to fight race conditions when pods get recreated with the - # same name (think StatefulSet), we're trying to get pod UID either - # from the request or the API in order to use it as the ID to compare. - if 'K8S_POD_UID' not in params.args: - # CRI doesn't pass K8S_POD_UID, get it from the API. - pod = self._get_pod(params) - if not pod: - raise exceptions.CNIPodGone(kp_name) - params.args.K8S_POD_UID = pod['metadata']['uid'] - - vifs = self._do_work(params, b_base.connect, timeout) - - # NOTE(dulek): Saving containerid to be able to distinguish old DEL - # requests that we should ignore. We need a lock to - # prevent race conditions and replace whole object in the - # dict for multiprocessing.Manager to notice that. - with lockutils.lock(kp_name, external=True): - d = self.registry[kp_name] - d['containerid'] = params.CNI_CONTAINERID - self.registry[kp_name] = d - LOG.debug('Saved containerid = %s for CRD %s', - params.CNI_CONTAINERID, kp_name) - - # Wait for timeout sec, 1 sec between tries, retry when even one - # vif is not active. 
- @retrying.retry(stop_max_delay=timeout * 1000, wait_fixed=RETRY_DELAY, - retry_on_result=utils.any_vif_inactive) - def wait_for_active(kp_name): - return self.registry[kp_name]['vifs'] - - data = {'metadata': {'name': params.args.K8S_POD_NAME, - 'namespace': params.args.K8S_POD_NAMESPACE}} - pod = k_utils.get_referenced_object(data, 'Pod') - - try: - self.k8s.add_event(pod, 'CNIWaitingForActiveVIFs', - f'Waiting for Neutron ports of {kp_name} to ' - f'become ACTIVE after binding.', - component='kuryr-daemon') - vifs = wait_for_active(kp_name) - except retrying.RetryError: - self.k8s.add_event(pod, 'CNITimedOutWaitingForActiveVIFs', - f'Timed out waiting for Neutron ports of ' - f'{kp_name} to become ACTIVE after binding.', - 'Warning', 'kuryr-daemon') - raise exceptions.CNINeutronPortActivationTimeout( - kp_name, self.registry[kp_name]['vifs']) - - return vifs[k_const.DEFAULT_IFNAME] - - def delete(self, params): - kp_name = self._get_obj_name(params) - try: - with lockutils.lock(kp_name, external=True): - kp = self.registry[kp_name] - if kp == k_const.CNI_DELETED_POD_SENTINEL: - LOG.warning( - 'Received DEL request for deleted Pod %s without a ' - 'KuryrPort. Ignoring.', kp_name) - del self.registry[kp_name] - return - reg_ci = self.registry[kp_name]['containerid'] - LOG.debug('Read containerid = %s for KuryrPort %s', reg_ci, - kp_name) - if reg_ci and reg_ci != params.CNI_CONTAINERID: - # NOTE(dulek): This is a DEL request for some older (probably - # failed) ADD call. We should ignore it or we'll - # unplug a running pod. - LOG.warning('Received DEL request for unknown ADD call for ' - 'KuryrPort %s (CNI_CONTAINERID=%s). Ignoring.', - kp_name, params.CNI_CONTAINERID) - return - except KeyError: - pass - - # Passing arbitrary 5 seconds as timeout, as it does not make any sense - # to wait on CNI DEL. If kuryrport got deleted from API - VIF info is - # gone. If kuryrport got the vif info removed - it is now gone too. - # The number's not 0, because we need to anticipate restarts and the - # delay before the registry is populated by the watcher. - try: - self._do_work(params, b_base.disconnect, 5) - except (exceptions.CNIKuryrPortTimeout, exceptions.CNIPodUidMismatch): - # So the VIF info seems to be lost at this point; we don't even - # know what binding driver was used to plug it. Let's at least - # try to remove the interface we created from the netns to prevent - # possible VLAN ID conflicts. - b_base.cleanup(params.CNI_IFNAME, params.CNI_NETNS) - raise - - # NOTE(ndesh): We need to lock here to avoid a race condition - # with the deletion code in the watcher to ensure that - # we delete the registry entry exactly once - try: - with lockutils.lock(kp_name, external=True): - if self.registry[kp_name]['del_received']: - del self.registry[kp_name] - else: - kp_dict = self.registry[kp_name] - kp_dict['vif_unplugged'] = True - self.registry[kp_name] = kp_dict - except KeyError: - # This means the kuryrport was removed before the vif was - # unplugged. This shouldn't happen, but we can't do anything - # about it now. - LOG.debug('KuryrPort %s not found in registry while handling DEL ' - 'request. Ignoring.', kp_name) - pass - - def report_drivers_health(self, driver_healthy): - if not driver_healthy: - with self.healthy.get_lock(): - LOG.debug("Reporting CNI driver not healthy.") - self.healthy.value = driver_healthy - - def _get_vifs_from_registry(self, params, timeout): - kp_name = self._get_obj_name(params) - - # In case of KeyError retry for `timeout` s, wait 1 s between tries.
- @retrying.retry(stop_max_delay=timeout * 1000, wait_fixed=RETRY_DELAY, - retry_on_exception=lambda e: isinstance( - e, (KeyError, exceptions.CNIPodUidMismatch))) - def find(): - d = self.registry[kp_name] - if d == k_const.CNI_DELETED_POD_SENTINEL: - # Pod got deleted meanwhile - raise exceptions.CNIPodGone(kp_name) - - static = d['kp']['spec'].get('podStatic', None) - uid = d['kp']['spec']['podUid'] - # FIXME(dulek): This is weirdly structured for upgrades support. - # If podStatic is not set (KuryrPort created by old - # Kuryr version), then on uid mismatch we're fetching - # pod from API and check if it's static here. Pods - # are quite ephemeral, so will gradually get replaced - # after the upgrade and in a while all should have - # the field set and the performance penalty should - # be resolved. Remove in the future. - if 'K8S_POD_UID' in params.args and uid != params.args.K8S_POD_UID: - if static is None: - pod = self._get_pod(params) - static = k_utils.is_pod_static(pod) - - # Static pods have mirror pod UID in API, so it's always - # mismatched. We don't raise in that case. See [1] for more. - # [1] https://github.com/k8snetworkplumbingwg/multus-cni/ - # issues/773 - if not static: - raise exceptions.CNIPodUidMismatch( - kp_name, params.args.K8S_POD_UID, uid) - return d - - try: - d = find() - return d['kp'], d['vifs'] - except KeyError: - data = {'metadata': {'name': params.args.K8S_POD_NAME, - 'namespace': params.args.K8S_POD_NAMESPACE}} - pod = k_utils.get_referenced_object(data, 'Pod') - self.k8s.add_event(pod, 'CNITimeoutKuryrPortRegistry', - f'Timed out waiting for Neutron ports to be ' - f'created for {kp_name}. Check ' - f'kuryr-controller logs.', 'Warning', - 'kuryr-daemon') - raise exceptions.CNIKuryrPortTimeout(kp_name) - - def _do_work(self, params, fn, timeout): - kp, vifs = self._get_vifs_from_registry(params, timeout) - - for ifname, vif in vifs.items(): - is_default_gateway = (ifname == k_const.DEFAULT_IFNAME) - if is_default_gateway: - # NOTE(ygupta): if this is the default interface, we should - # use the ifname supplied in the CNI ADD request - ifname = params.CNI_IFNAME - - fn(vif, self._get_inst(kp), ifname, params.CNI_NETNS, - report_health=self.report_drivers_health, - is_default_gateway=is_default_gateway, - container_id=params.CNI_CONTAINERID) - return vifs - - def _get_inst(self, kp): - return (obj_vif.instance_info - .InstanceInfo(uuid=kp['spec']['podUid'], - name=kp['metadata']['name'])) diff --git a/kuryr_kubernetes/cni/prometheus_exporter.py b/kuryr_kubernetes/cni/prometheus_exporter.py deleted file mode 100644 index d04b39621..000000000 --- a/kuryr_kubernetes/cni/prometheus_exporter.py +++ /dev/null @@ -1,71 +0,0 @@ -# Copyright 2020 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
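k8s_cni_registry.py above leans on the retrying library twice: wait_for_active() retries while any VIF is still inactive (retry_on_result) and find() retries on KeyError or UID mismatch (retry_on_exception). A compact illustration of the result-based variant, using fake dict-based VIFs instead of os_vif objects and a 5-second budget:

import time

import retrying

start = time.time()

def any_inactive(vifs):
    # Plays the role of utils.any_vif_inactive() for these fake VIFs.
    return any(not v['active'] for v in vifs.values())

@retrying.retry(stop_max_delay=5 * 1000, wait_fixed=1000,
                retry_on_result=any_inactive)
def wait_for_active():
    # Stand-in for reading self.registry[kp_name]['vifs']; the fake
    # VIF flips to active roughly 2 seconds in.
    return {'eth0': {'active': time.time() - start > 2}}

print(wait_for_active())

If the budget runs out first, retrying raises RetryError, which is exactly the signal the real add() turns into CNINeutronPortActivationTimeout.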
- -import flask -import prometheus_client -from prometheus_client.exposition import generate_latest - -from oslo_config import cfg -from oslo_log import log as logging - - -LOG = logging.getLogger(__name__) -CONF = cfg.CONF -_INF = float("inf") - - -class CNIPrometheusExporter(object): - """Provides metrics to Prometheus""" - - def __init__(self): - self.application = flask.Flask('prometheus-exporter') - self.ctx = None - self.application.add_url_rule( - '/metrics', methods=['GET'], view_func=self.metrics) - self.headers = {'Connection': 'close'} - self._create_metric() - - def update_metric(self, labels, duration): - """Observes the request duration value and count it in buckets""" - self.cni_requests_duration.labels(**labels).observe(duration) - - def metrics(self): - """Provides the registered metrics""" - collected_metric = generate_latest(self.registry) - return flask.Response(collected_metric, mimetype='text/plain') - - def run(self): - # Disable obtrusive werkzeug logs. - logging.getLogger('werkzeug').setLevel(logging.WARNING) - - address = '::' - try: - LOG.info('Starting CNI Prometheus exporter') - self.application.run( - address, CONF.prometheus_exporter.cni_exporter_port) - except Exception: - LOG.exception('Failed to start Prometheus exporter') - raise - - def _create_metric(self): - """Creates a registry and records a new Histogram metric.""" - self.registry = prometheus_client.CollectorRegistry() - metric_name = 'kuryr_cni_request_duration_seconds' - metric_description = 'The duration of CNI requests' - buckets = (10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 110, 120, - 130, 140, 150, 160, 170, 180, _INF) - self.cni_requests_duration = prometheus_client.Histogram( - metric_name, metric_description, - labelnames={'command', 'error'}, buckets=buckets, - registry=self.registry) diff --git a/kuryr_kubernetes/cni/utils.py b/kuryr_kubernetes/cni/utils.py deleted file mode 100644 index 2e2f796cb..000000000 --- a/kuryr_kubernetes/cni/utils.py +++ /dev/null @@ -1,111 +0,0 @@ -# Copyright (c) 2017 NEC Technologies India Pvt Ltd. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
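The exporter above keeps its own CollectorRegistry with one labelled Histogram; update_metric() simply calls observe() and the /metrics view renders the registry with generate_latest(). A minimal equivalent, with a shortened bucket list for brevity:

import prometheus_client
from prometheus_client.exposition import generate_latest

registry = prometheus_client.CollectorRegistry()
duration = prometheus_client.Histogram(
    'kuryr_cni_request_duration_seconds', 'The duration of CNI requests',
    labelnames=('command', 'error'), buckets=(10, 60, 120, float('inf')),
    registry=registry)

# One observation per finished CNI request, as in update_metric().
duration.labels(command='ADD', error=False).observe(12.5)

print(generate_latest(registry).decode())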
- -import functools -import time - -from http import client as httplib -from oslo_log import log as logging - - -PROC_ONE_CGROUP_PATH = '/proc/1/cgroup' -CONTAINER_RUNTIME_CGROUP_IDS = ( - 'docker', # This is set by docker/moby - 'libpod', # This is set by podman -) - -LOG = logging.getLogger(__name__) -SUCCESSFUL_REQUEST_CODE = (httplib.NO_CONTENT, httplib.ACCEPTED) - - -def running_under_container_runtime(proc_one_cg_path=PROC_ONE_CGROUP_PATH): - """Returns True iff the CNI process is under a known container runtime.""" - with open(proc_one_cg_path, 'r') as cgroup_info: - proc_one_cg_info = cgroup_info.read() - return any(runtime in proc_one_cg_info for runtime in - CONTAINER_RUNTIME_CGROUP_IDS) - - -def any_vif_inactive(vifs): - """Return True if there is at least one VIF that's not ACTIVE.""" - return any(not vif.active for vif in vifs.values()) - - -class CNIConfig(dict): - def __init__(self, cfg): - super(CNIConfig, self).__init__(cfg) - - for k, v in self.items(): - if not k.startswith('_'): - setattr(self, k, v) - - -class CNIArgs(object): - def __init__(self, value): - for item in value.split(';'): - k, v = item.split('=', 1) - if not k.startswith('_'): - setattr(self, k, v) - - def __contains__(self, key): - return hasattr(self, key) - - -class CNIParameters(object): - def __init__(self, env, cfg=None): - for k, v in env.items(): - if k.startswith('CNI_'): - setattr(self, k, v) - if cfg is None: - self.config = CNIConfig(env['config_kuryr']) - else: - self.config = cfg - self.args = CNIArgs(self.CNI_ARGS) - - def __repr__(self): - return repr({key: value for key, value in self.__dict__.items() if - key.startswith('CNI_')}) - - -def log_ipdb(func): - @functools.wraps(func) - def with_logging(*args, **kwargs): - try: - return func(*args, **kwargs) - except RuntimeError as e: - try: - LOG.error('Error when manipulating network interfaces') - LOG.error(e.debug['traceback']) - LOG.debug('Full debugging info: %s', e.debug) - except AttributeError: - pass - raise - return with_logging - - -def measure_time(command): - """Measures CNI ADD/DEL request duration""" - def decorator(method): - def wrapper(obj, *args, **kwargs): - start_time = time.time() - result = method(obj, *args, **kwargs) - cni_request_error = ( - result[1] not in SUCCESSFUL_REQUEST_CODE) - obj._update_metrics( - command, cni_request_error, time.time() - start_time) - return result - wrapper.__name__ = method.__name__ - return wrapper - return decorator diff --git a/kuryr_kubernetes/config.py b/kuryr_kubernetes/config.py deleted file mode 100644 index 3ca014fa9..000000000 --- a/kuryr_kubernetes/config.py +++ /dev/null @@ -1,352 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License.
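The CNIArgs class in utils.py above is the piece that turns the semicolon-separated CNI_ARGS environment string into attribute access; CNIParameters then wraps every CNI_* variable around it. The parsing itself is tiny, shown here standalone with a made-up argument string:

class CNIArgs:
    def __init__(self, value):
        # 'A=1;B=2' -> self.A == '1', self.B == '2'
        for item in value.split(';'):
            k, v = item.split('=', 1)
            if not k.startswith('_'):
                setattr(self, k, v)

    def __contains__(self, key):
        return hasattr(self, key)

args = CNIArgs('IgnoreUnknown=1;K8S_POD_NAMESPACE=default;K8S_POD_NAME=nginx')
assert args.K8S_POD_NAME == 'nginx'
assert 'K8S_POD_UID' not in args  # exactly the check add() performs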
-import os -import sys - -from kuryr.lib._i18n import _ -from kuryr.lib import config as lib_config -from oslo_config import cfg -from oslo_log import log as logging - -from kuryr_kubernetes import version - -LOG = logging.getLogger(__name__) - -kuryr_k8s_opts = [ - cfg.StrOpt('pybasedir', - help=_('Directory where Kuryr-kubernetes python module is ' - 'installed.'), - default=os.path.abspath( - os.path.join(os.path.dirname(__file__), - '../../'))), -] - -daemon_opts = [ - cfg.StrOpt('bind_address', - help=_('Bind address for CNI daemon HTTP server. It is ' - 'recommended to allow only local connections.'), - default='127.0.0.1:5036'), - cfg.IntOpt('worker_num', - help=_('Maximum number of processes that will be spawned to ' - 'process requests from the CNI driver.'), - default=30), - cfg.IntOpt('vif_annotation_timeout', - help=_('Time (in seconds) the CNI daemon will wait for VIF ' - 'annotation to appear in pod metadata before failing ' - 'the CNI request.'), - default=60), - cfg.IntOpt('pyroute2_timeout', - help=_('Kuryr uses pyroute2 library to manipulate networking ' - 'interfaces. When processing a high number of Kuryr ' - 'requests in parallel, it may take the kernel more time ' - 'to process all networking stack changes. This option ' - 'allows tuning the internal pyroute2 timeout.'), - default=10), - cfg.BoolOpt('docker_mode', - help=_('Set to True when you are running kuryr-daemon inside ' - 'a Docker container on the Kubernetes host, e.g. as a ' - 'DaemonSet on the Kubernetes cluster Kuryr is supposed ' - 'to provide networking for. This mainly means that ' - 'kuryr-daemon will look for network namespaces in ' - '$netns_proc_dir instead of /proc.'), - default=False), - cfg.StrOpt('netns_proc_dir', - help=_("When docker_mode is set to True, this config option " - "should be set to where host's /proc directory is " - "mounted. Please note that mounting it is necessary to " - "allow Kuryr-Kubernetes to move host interfaces between " - "host network namespaces, which is essential for Kuryr " - "to work."), - default=None), - cfg.IntOpt('cni_failures_count', - help=_('Maximum number of consecutive failures of kuryr-daemon ' - 'when processing requests. 
If this number is exceeded, ' - 'kuryr-daemon will be marked as unhealthy.'), - default=3), -] - -k8s_opts = [ - cfg.StrOpt('api_root', - help=_("The root URL of the Kubernetes API"), - default=os.environ.get('K8S_API', 'https://localhost:6443')), - cfg.StrOpt('ssl_client_crt_file', - help=_("Absolute path to client cert to " - "connect to HTTPS K8S_API")), - cfg.StrOpt('ssl_client_key_file', - help=_("Absolute path client key file to " - "connect to HTTPS K8S_API")), - cfg.StrOpt('ssl_ca_crt_file', - help=_("Absolute path to ca cert file to " - "connect to HTTPS K8S_API"), - default='/var/run/secrets/kubernetes.io/serviceaccount/ca.crt'), - cfg.BoolOpt('ssl_verify_server_crt', - help=_("HTTPS K8S_API server identity verification"), - default=False), - cfg.StrOpt('token_file', - help=_("The token to talk to the k8s API"), - default='/var/run/secrets/kubernetes.io/serviceaccount/token'), - cfg.StrOpt('pod_project_driver', - help=_("The driver to determine OpenStack project for pod " - "ports (default or annotation)"), - default='default'), - cfg.StrOpt('service_project_driver', - help=_("The driver to determine OpenStack project for " - "services (default or annotation)"), - default='default'), - cfg.StrOpt('namespace_project_driver', - help=_("The driver to determine OpenStack project for " - "namespaces (default or annotation)"), - default='default'), - cfg.StrOpt('network_policy_project_driver', - help=_("The driver to determine OpenStack project for network " - "policies (default or annotation)"), - default='default'), - cfg.StrOpt('pod_subnets_driver', - help=_("The driver to determine Neutron " - "subnets for pod ports"), - default='default'), - cfg.StrOpt('service_subnets_driver', - help=_("The driver to determine Neutron " - "subnets for services"), - default='default'), - cfg.StrOpt('pod_security_groups_driver', - help=_("The driver to determine Neutron " - "security groups for pods"), - default='default'), - cfg.StrOpt('service_security_groups_driver', - help=_("The driver to determine Neutron " - "security groups for services"), - default='default'), - cfg.StrOpt('pod_vif_driver', - help=_("The driver that provides VIFs for Kubernetes Pods."), - default='neutron-vif'), - cfg.StrOpt('endpoints_lbaas_driver', - help=_("The driver that provides LoadBalancers for " - "Kubernetes Endpoints"), - default='lbaasv2'), - cfg.StrOpt('endpoints_driver_octavia_provider', - help=_("The Octavia load balancer provider that will be used " - "to support Kubernetes Endpoints"), - default='default'), - cfg.StrOpt('vif_pool_driver', - help=_("The driver that manages VIFs pools for " - "Kubernetes Pods"), - default='noop'), - cfg.StrOpt('nodes_subnets_driver', - help=_("The driver that manages listing K8s nodes subnet_ids."), - default='config'), - cfg.BoolOpt('port_debug', - help=_('Enable port debug to force kuryr port names to be ' - 'set to their corresponding pod names.'), - default=False), - cfg.StrOpt('service_public_ip_driver', - help=_("The driver that provides external IP for LB at " - "Kubernetes"), - default='neutron_floating_ip'), - cfg.BoolOpt('enable_manager', - help=_("Enable Manager to manage the pools."), - default=False), - cfg.IntOpt('watch_retry_timeout', - help=_('Time (in seconds) the watcher retries watching for.'), - default=60), - cfg.IntOpt('watch_connection_timeout', - help=_('TCP connection timeout (in seconds) for the watcher ' - 'connections to K8s API.'), - default=30), - cfg.IntOpt('watch_read_timeout', - help=_('TCP read timeout (in seconds) for the watcher ' - 
'connections to K8s API. This affects reaction to time ' - 'when there are no events being streamed from K8s API. ' - 'When too low, Kuryr will reconnect more often. When ' - 'too high, Kuryr will take longer to reconnect when K8s ' - 'API stream was being silently broken.'), - default=60), - cfg.IntOpt('watch_reconcile_period', - help=_('Period (in seconds) between iterations of fetching ' - 'full list of watched K8s API resources and putting ' - 'them into the enabled handlers. Setting 0 disables the ' - 'periodic reconciling. The reconciliation is done to ' - 'prevent Kuryr from missing events due to K8s API or ' - 'etcd issues.'), - default=120), - cfg.ListOpt('enabled_handlers', - help=_("The comma-separated handlers that should be " - "registered for watching in the pipeline."), - default=['vif', 'endpoints', 'service', 'kuryrloadbalancer', - 'kuryrport']), - cfg.BoolOpt('controller_ha', - help=_('Enable kuryr-controller active/passive HA. Only ' - 'supported in containerized deployments on Kubernetes ' - 'or OpenShift.'), - default=False), - cfg.PortOpt('controller_ha_elector_port', - help=_('Port on which leader-elector pod is listening to.'), - default=16401), - cfg.StrOpt('network_policy_driver', - help=_("Driver for network policies"), - default='default'), - cfg.ListOpt('multi_vif_drivers', - help=_("The drivers that provide additional VIFs for " - "Kubernetes Pods."), - default='noop'), - cfg.StrOpt('additional_ifname_prefix', - help=_("The prefix to use for additional vifs created by " - " multi_vif drivers"), - default='eth'), - cfg.BoolOpt('use_events', - help=_('Use Kubernetes Events objects to indicate status of ' - 'Kuryr created OpenStack objects like networking for ' - 'pods (Neutron ports) or services (Octavia ' - 'loadbalancers). It might have impact on performance ' - 'on Kubernetes cluster, since all objects (so the ' - 'Event objects too) are stored on etcd.'), - default=True), -] - -neutron_defaults = [ - cfg.StrOpt('project', - help=_("Default OpenStack project ID for " - "Kubernetes resources")), - cfg.StrOpt('pod_subnet', - help=_("Default Neutron subnet ID for Kubernetes pods")), - cfg.ListOpt('pod_security_groups', - help=_("Default Neutron security groups' IDs " - "for Kubernetes pods")), - cfg.StrOpt('ovs_bridge', - help=_("Default OpenVSwitch integration bridge"), - sample_default="br-int"), - cfg.StrOpt('service_subnet', - help=_("Default Neutron subnet ID for Kubernetes services")), - cfg.StrOpt('external_svc_net', - help=_("Default external network ID for Kubernetes services")), - cfg.StrOpt('external_svc_subnet', - help=_("Optional external subnet ID for Kubernetes services"), - default=None), - cfg.IntOpt('network_device_mtu', - help='Default MTU setting for network interface.', - default=0,), - cfg.IntOpt('lbaas_activation_timeout', - help=_("Time (in seconds) that kuryr controller waits for " - "neutron LBaaS to be activated"), - default=300), - cfg.DictOpt('subnet_mapping', - help=_("A mapping of default subnets for certain driverType " - "in a form of :"), - default={}), - cfg.ListOpt('resource_tags', - help=_("List of tags that will be applied to all OpenStack " - "(Neutron and Octavia) resources created by Kuryr. 
" - "This can be used to identify and garbage-collect " - "them when Kubernetes cluster Kuryr was serving is no " - "longer needed."), - default=[]), -] - -octavia_defaults = [ - cfg.StrOpt('member_mode', - help=_("Define the communication mode between load balanacer " - "and its members"), - default='L3'), - cfg.BoolOpt('enforce_sg_rules', - help=_("Enable the enforcement of SG rules at the LB SG " - "in case the LB does not maintain the source IP " - "of the caller resource"), - default=True), - cfg.StrOpt('lb_algorithm', - help=_("The load-balancer algorithm that distributed traffic " - "to the pool members. The options are: ROUND_ROBIN, " - "LEAST_CONNECTIONS, SOURCE_IP and SOURCE_IP_PORT."), - default='ROUND_ROBIN'), - cfg.IntOpt('timeout_client_data', - help=_("Frontend client inactivity timeout in milliseconds. " - "When set to 0, the default timeout value set by " - "Octavia is used."), - default=0), - cfg.IntOpt('timeout_member_data', - help=_("Backend member inactivity timeout in milliseconds. " - "When set to 0, the default timeout value set by " - "Octavia is used."), - default=0), -] - -cache_defaults = [ - cfg.BoolOpt('enabled', - help=_("Enable caching."), - default=True), - cfg.StrOpt('backend', - help=_("Select backend cache option."), - default="dogpile.cache.memory"), -] - -nested_vif_driver_opts = [ - cfg.ListOpt('worker_nodes_subnets', - help=_("Neutron subnet IDs for k8s worker node VMs."), - default=[]), - cfg.IntOpt('rev_update_attempts', - help=_("How many time to try to re-update the neutron resource " - "when revision has been changed by other thread"), - default=3), -] - -vhostuser = [ - cfg.StrOpt('mount_point', - help=_("Path where vhost-user port will be created " - "also it should be mount point for pod"), - default='/var/cni/vhostuser'), - cfg.StrOpt('ovs_vhu_path', - help=_("Path where OVS keeps socket files for vhost-user " - "ports"), - default='/var/run/openvswitch/') -] - -prometheus_exporter_opts = [ - cfg.IntOpt('controller_exporter_port', - help=_('port for the Controller Prometheus exporter.'), - default=9654), - cfg.IntOpt('cni_exporter_port', - help=_('port for the CNI Prometheus exporter.'), - default=9655) -] - -CONF = cfg.CONF -CONF.register_opts(kuryr_k8s_opts) -CONF.register_opts(daemon_opts, group='cni_daemon') -CONF.register_opts(k8s_opts, group='kubernetes') -CONF.register_opts(neutron_defaults, group='neutron_defaults') -CONF.register_opts(octavia_defaults, group='octavia_defaults') -CONF.register_opts(cache_defaults, group='cache_defaults') -CONF.register_opts(nested_vif_driver_opts, group='pod_vif_nested') -CONF.register_opts(vhostuser, group='vhostuser') -CONF.register_opts(prometheus_exporter_opts, "prometheus_exporter") - -CONF.register_opts(lib_config.core_opts) -CONF.register_opts(lib_config.binding_opts, 'binding') -lib_config.register_neutron_opts(CONF) - -logging.register_options(CONF) - - -def init(args, **kwargs): - version_k8s = version.version_info.version_string() - CONF(args=args, project='kuryr-k8s', version=version_k8s, **kwargs) - if os.environ.get('CNI_COMMAND') == 'VERSION': - CONF.set_default('use_stderr', True) - - -def setup_logging(): - - logging.setup(CONF, 'kuryr-kubernetes') - logging.set_defaults(default_log_levels=logging.get_default_log_levels()) - version_k8s = version.version_info.version_string() - LOG.info("Logging enabled!") - LOG.info("%(prog)s version %(version)s", - {'prog': sys.argv[0], 'version': version_k8s}) diff --git a/kuryr_kubernetes/constants.py b/kuryr_kubernetes/constants.py deleted file 
mode 100644 index 927967244..000000000 --- a/kuryr_kubernetes/constants.py +++ /dev/null @@ -1,113 +0,0 @@ -# Copyright (c) 2016 Mirantis, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -KURYR_FQDN = 'kuryr.openstack.org' - -K8S_API_BASE = '/api/v1' -K8S_API_PODS = K8S_API_BASE + '/pods' -K8S_API_NAMESPACES = K8S_API_BASE + '/namespaces' -K8S_API_CRD_VERSION = 'openstack.org/v1' -K8S_API_CRD = '/apis/' + K8S_API_CRD_VERSION -K8S_API_CRD_NAMESPACES = K8S_API_CRD + '/namespaces' -K8S_API_CRD_KURYRNETWORKS = K8S_API_CRD + '/kuryrnetworks' -K8S_API_CRD_KURYRNETWORKPOLICIES = K8S_API_CRD + '/kuryrnetworkpolicies' -K8S_API_CRD_KURYRLOADBALANCERS = K8S_API_CRD + '/kuryrloadbalancers' -K8S_API_CRD_KURYRPORTS = K8S_API_CRD + '/kuryrports' -K8S_API_POLICIES = '/apis/networking.k8s.io/v1/networkpolicies' -K8S_API_NETWORKING = '/apis/networking.k8s.io/v1' -K8S_API_NETWORKING_NAMESPACES = K8S_API_NETWORKING + '/namespaces' - -K8S_API_NPWG_CRD = '/apis/k8s.cni.cncf.io/v1' - -K8S_OBJ_NAMESPACE = 'Namespace' -K8S_OBJ_POD = 'Pod' -K8S_OBJ_SERVICE = 'Service' -K8S_OBJ_ENDPOINTS = 'Endpoints' -K8S_OBJ_POLICY = 'NetworkPolicy' -K8S_OBJ_KURYRNETWORK = 'KuryrNetwork' -K8S_OBJ_KURYRNETWORKPOLICY = 'KuryrNetworkPolicy' -K8S_OBJ_KURYRLOADBALANCER = 'KuryrLoadBalancer' -K8S_OBJ_KURYRPORT = 'KuryrPort' - -OPENSHIFT_OBJ_MACHINE = 'Machine' -OPENSHIFT_API_CRD_MACHINES = '/apis/machine.openshift.io/v1beta1/machines' - -K8S_POD_STATUS_PENDING = 'Pending' -K8S_POD_STATUS_SUCCEEDED = 'Succeeded' -K8S_POD_STATUS_FAILED = 'Failed' - -K8S_NODE_ADDRESS_INTERNAL = 'InternalIP' - -K8S_ANNOTATION_PREFIX = 'openstack.org/kuryr' -K8S_ANNOTATION_VIF = K8S_ANNOTATION_PREFIX + '-vif' -K8S_ANNOTATION_LABEL = K8S_ANNOTATION_PREFIX + '-pod-label' -K8S_ANNOTATION_IP = K8S_ANNOTATION_PREFIX + '-pod-ip' -K8S_ANNOTATION_NAMESPACE_LABEL = K8S_ANNOTATION_PREFIX + '-namespace-label' -K8S_ANNOTATION_LBAAS_SPEC = K8S_ANNOTATION_PREFIX + '-lbaas-spec' -K8S_ANNOTATION_LBAAS_STATE = K8S_ANNOTATION_PREFIX + '-lbaas-state' -K8S_ANNOTATION_NET_CRD = K8S_ANNOTATION_PREFIX + '-net-crd' -K8S_ANNOTATION_NETPOLICY_CRD = K8S_ANNOTATION_PREFIX + '-netpolicy-crd' -K8S_ANNOTATION_POLICY = K8S_ANNOTATION_PREFIX + '-counter' -K8s_ANNOTATION_PROJECT = K8S_ANNOTATION_PREFIX + '-project' - -K8S_ANNOTATION_CLIENT_TIMEOUT = K8S_ANNOTATION_PREFIX + '-timeout-client-data' -K8S_ANNOTATION_MEMBER_TIMEOUT = K8S_ANNOTATION_PREFIX + '-timeout-member-data' - -K8S_ANNOTATION_NPWG_PREFIX = 'k8s.v1.cni.cncf.io' -K8S_ANNOTATION_NPWG_NETWORK = K8S_ANNOTATION_NPWG_PREFIX + '/networks' -K8S_ANNOTATION_NPWG_CRD_SUBNET_ID = 'subnetId' -K8S_ANNOTATION_NPWG_CRD_DRIVER_TYPE = 'driverType' - -K8S_ANNOTATION_HEADLESS_SERVICE = 'service.kubernetes.io/headless' -K8S_ANNOTATION_CONFIG_SOURCE = 'kubernetes.io/config.source' - -POD_FINALIZER = KURYR_FQDN + '/pod-finalizer' -KURYRNETWORK_FINALIZER = 'kuryrnetwork.finalizers.kuryr.openstack.org' -KURYRLB_FINALIZER = 'kuryr.openstack.org/kuryrloadbalancer-finalizers' -SERVICE_FINALIZER = 
'kuryr.openstack.org/service-finalizer' -NETWORKPOLICY_FINALIZER = 'kuryr.openstack.org/networkpolicy-finalizer' - -KURYRPORT_FINALIZER = KURYR_FQDN + '/kuryrport-finalizer' -KURYRPORT_LABEL = KURYR_FQDN + '/nodeName' - -K8S_OS_VIF_NOOP_PLUGIN = "noop" - -CNI_EXCEPTION_CODE = 100 -CNI_TIMEOUT_CODE = 200 -CNI_DELETED_POD_SENTINEL = None - -KURYR_PORT_NAME = 'kuryr-pool-port' - -OCTAVIA_L2_MEMBER_MODE = "L2" -OCTAVIA_L3_MEMBER_MODE = "L3" -NEUTRON_LBAAS_HAPROXY_PROVIDER = 'haproxy' -IPv4 = 'IPv4' -IPv6 = 'IPv6' -IP_VERSION_4 = 4 -IP_VERSION_6 = 6 - -VIF_POOL_POPULATE = '/populatePool' -VIF_POOL_FREE = '/freePool' -VIF_POOL_LIST = '/listPools' -VIF_POOL_SHOW = '/showPool' - -DEFAULT_IFNAME = 'eth0' - -K8S_OPERATOR_IN = 'in' -K8S_OPERATOR_NOT_IN = 'notin' -K8S_OPERATOR_DOES_NOT_EXIST = 'doesnotexist' -K8S_OPERATOR_EXISTS = 'exists' - -LEFTOVER_RM_POOL_SIZE = 5 diff --git a/kuryr_kubernetes/controller/__init__.py b/kuryr_kubernetes/controller/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/kuryr_kubernetes/controller/drivers/__init__.py b/kuryr_kubernetes/controller/drivers/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/kuryr_kubernetes/controller/drivers/annotation_project.py b/kuryr_kubernetes/controller/drivers/annotation_project.py deleted file mode 100644 index 04acc359d..000000000 --- a/kuryr_kubernetes/controller/drivers/annotation_project.py +++ /dev/null @@ -1,69 +0,0 @@ -# Copyright (c) 2022 Troila -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
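For orientation before the driver below, a minimal sketch of how the project annotation defined in constants.py above (K8s_ANNOTATION_PROJECT, i.e. 'openstack.org/kuryr-project') sits on a Namespace and is read back; the namespace dict and the project UUID are hypothetical, shaped like the Kubernetes API objects the driver receives:

# Hypothetical Namespace object, as returned by the Kubernetes API.
namespace = {
    'metadata': {
        'name': 'payments',
        'annotations': {
            # A made-up OpenStack (Keystone) project ID.
            'openstack.org/kuryr-project': '9a6de952452f4c2a9bcbd10a704e8090',
        },
    },
}

# Essentially the lookup performed by AnnotationProjectBaseDriver below,
# before it falls back to [neutron_defaults]/project from kuryr.conf.
project_id = namespace['metadata'].get('annotations', {}).get(
    'openstack.org/kuryr-project')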
- -from oslo_config import cfg -from oslo_log import log as logging - -from kuryr_kubernetes import config -from kuryr_kubernetes import constants -from kuryr_kubernetes.controller.drivers import base -from kuryr_kubernetes.controller.drivers import utils as driver_utils - -LOG = logging.getLogger(__name__) - - -class AnnotationProjectBaseDriver( - base.PodProjectDriver, base.ServiceProjectDriver, - base.NamespaceProjectDriver, base.NetworkPolicyProjectDriver): - """Provides project ID based on resource's annotation.""" - - project_annotation = constants.K8s_ANNOTATION_PROJECT - - def _get_namespace_project(self, namespace): - ns_md = namespace['metadata'] - project = ns_md.get('annotations', {}).get(self.project_annotation) - if not project: - LOG.debug("Namespace %s has no project annotation, try to get " - "project id from the configuration option.", - namespace['metadata']['name']) - project = config.CONF.neutron_defaults.project - if not project: - raise cfg.RequiredOptError('project', - cfg.OptGroup('neutron_defaults')) - return project - - def get_project(self, resource): - res_ns = resource['metadata']['namespace'] - namespace_path = f"{constants.K8S_API_NAMESPACES}/{res_ns}" - namespace = driver_utils.get_k8s_resource(namespace_path) - return self._get_namespace_project(namespace) - - -class AnnotationPodProjectDriver(AnnotationProjectBaseDriver): - pass - - -class AnnotationServiceProjectDriver(AnnotationProjectBaseDriver): - pass - - -class AnnotationNamespaceProjectDriver(AnnotationProjectBaseDriver): - - def get_project(self, namespace): - return self._get_namespace_project(namespace) - - -class AnnotationNetworkPolicyProjectDriver(AnnotationProjectBaseDriver): - pass diff --git a/kuryr_kubernetes/controller/drivers/base.py b/kuryr_kubernetes/controller/drivers/base.py deleted file mode 100644 index 16d11660a..000000000 --- a/kuryr_kubernetes/controller/drivers/base.py +++ /dev/null @@ -1,781 +0,0 @@ -# Copyright (c) 2016 Mirantis, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import abc - -from kuryr.lib._i18n import _ -from stevedore import driver as stv_driver - -from kuryr_kubernetes import config - -_DRIVER_NAMESPACE_BASE = 'kuryr_kubernetes.controller.drivers' -_DRIVER_MANAGERS = {} -_MULTI_VIF_DRIVERS = [] - - -class DriverBase(object): - """Base class for controller drivers. - - Subclasses must define an *ALIAS* attribute that is used to find a driver - implementation by `get_instance` class method which utilises - `stevedore.driver.DriverManager` with the namespace set to - 'kuryr_kubernetes.controller.drivers.*ALIAS*' and the name of - the driver determined from the '[kubernetes]/*ALIAS*_driver' configuration - parameter. 
- - Usage example: - - class SomeDriverInterface(DriverBase, metaclass=abc.ABCMeta): - ALIAS = 'driver_alias' - - @abc.abstractmethod - def some_method(self): - pass - - driver = SomeDriverInterface.get_instance() - driver.some_method() - """ - - @classmethod - def get_instance(cls, specific_driver=None, scope='default'): - """Get an implementing driver instance. - - :param specific_driver: Loads a specific driver instead of using conf. - Uses separate manager entry so that loading of - default/other drivers is not affected. - :param scope: Loads the driver in the given scope (if independent - instances of a driver are required) - """ - - alias = cls.ALIAS - - if specific_driver: - driver_key = '{}:{}:{}'.format(alias, specific_driver, scope) - else: - driver_key = '{}:_from_cfg:{}'.format(alias, scope) - - try: - manager = _DRIVER_MANAGERS[driver_key] - except KeyError: - driver_name = (specific_driver or - config.CONF.kubernetes[alias + '_driver']) - - manager = stv_driver.DriverManager( - namespace="%s.%s" % (_DRIVER_NAMESPACE_BASE, alias), - name=driver_name, - invoke_on_load=True) - _DRIVER_MANAGERS[driver_key] = manager - - driver = manager.driver - if not isinstance(driver, cls): - raise TypeError(_("Invalid %(alias)r driver type: %(driver)s, " - "must be a subclass of %(type)s") % { - 'alias': alias, - 'driver': driver.__class__.__name__, - 'type': cls}) - return driver - - def __str__(self): - return self.__class__.__name__ - - -class PodProjectDriver(DriverBase, metaclass=abc.ABCMeta): - """Provides an OpenStack project ID for Kubernetes Pod ports.""" - - ALIAS = 'pod_project' - - @abc.abstractmethod - def get_project(self, pod): - """Get an OpenStack project ID for Kubernetes Pod ports. - - :param pod: dict containing Kubernetes Pod object - :return: project ID - """ - - raise NotImplementedError() - - -class ServiceProjectDriver(DriverBase, metaclass=abc.ABCMeta): - """Provides an OpenStack project ID for Kubernetes Services.""" - - ALIAS = 'service_project' - - @abc.abstractmethod - def get_project(self, service): - """Get an OpenStack project ID for Kubernetes Service. - - :param service: dict containing Kubernetes Service object - :return: project ID - """ - - raise NotImplementedError() - - -class NamespaceProjectDriver(DriverBase, metaclass=abc.ABCMeta): - """Provides an OpenStack project ID for Kubernetes Namespace.""" - - ALIAS = 'namespace_project' - - @abc.abstractmethod - def get_project(self, namespace): - """Get an OpenStack project ID for Kubernetes Namespace. - - :param namespace: dict containing Kubernetes Namespace object - :return: project ID - """ - - raise NotImplementedError() - - -class PodSubnetsDriver(DriverBase, metaclass=abc.ABCMeta): - """Provides subnets for Kubernetes Pods.""" - - ALIAS = 'pod_subnets' - - @abc.abstractmethod - def get_subnets(self, pod, project_id): - """Get subnets for Pod. - - :param pod: dict containing Kubernetes Pod object - :param project_id: OpenStack project ID - :return: dict containing the mapping 'subnet_id' -> 'network' for all - the subnets we want to create ports on, where 'network' is an - `os_vif.network.Network` object containing a single - `os_vif.subnet.Subnet` object corresponding to the 'subnet_id' - """ - raise NotImplementedError() - - def create_namespace_network(self, namespace, project_id): - """Create network resources for a namespace.
- - :param namespace: string with the namespace name - :param project_id: OpenStack project ID - :return: dict with the keys and values for the CRD spec, such as - routerId or subnetId - """ - raise NotImplementedError() - - def delete_namespace_subnet(self, kuryr_net_crd): - """Delete network resources associated to a namespace. - - :param kuryr_net_crd: kuryrnetwork CRD obj dict that contains Neutron's - network resources associated to a namespace - """ - raise NotImplementedError() - - def rollback_network_resources(self, crd_spec, namespace): - """Rollback created network resources for a namespace. - - :param crd_spec: dict with the keys and values for the CRD spec, such - as routerId or subnetId - :param namespace: name of the Kubernetes namespace object - """ - raise NotImplementedError() - - def cleanup_namespace_networks(self, namespace): - """Clean up network leftovers on the namespace. - - Due to Kuryr controller restarts it may happen that some network - resources are left over. This method ensures they are deleted upon - retries. - - :param namespace: name of the Kubernetes namespace object - """ - raise NotImplementedError() - - -class ServiceSubnetsDriver(DriverBase, metaclass=abc.ABCMeta): - """Provides subnets for Kubernetes Services.""" - - ALIAS = 'service_subnets' - - @abc.abstractmethod - def get_subnets(self, service, project_id): - """Get subnets for Service. - - :param service: dict containing Kubernetes Service object - :param project_id: OpenStack project ID - :return: dict containing the mapping 'subnet_id' -> 'network' for all - the subnets we want to create ports on, where 'network' is an - `os_vif.network.Network` object containing a single - `os_vif.subnet.Subnet` object corresponding to the 'subnet_id' - """ - raise NotImplementedError() - - -class PodSecurityGroupsDriver(DriverBase, metaclass=abc.ABCMeta): - """Provides security groups for Kubernetes Pods.""" - - ALIAS = 'pod_security_groups' - - @abc.abstractmethod - def get_security_groups(self, pod, project_id): - """Get a list of security groups' IDs for Pod. - - :param pod: dict containing Kubernetes Pod object - :param project_id: OpenStack project ID - :return: list containing security groups' IDs - """ - raise NotImplementedError() - - def create_sg_rules(self, pod): - """Create security group rules for a pod. - - :param pod: dict containing Kubernetes Pod object - :return: a list containing podSelectors of CRDs - that had security group rules created - """ - raise NotImplementedError() - - def delete_sg_rules(self, pod): - """Delete security group rules for a pod - - :param pod: dict containing Kubernetes Pod object - :return: a list containing podSelectors of CRDs - that had security group rules deleted - """ - raise NotImplementedError() - - def update_sg_rules(self, pod): - """Update security group rules for a pod - - :param pod: dict containing Kubernetes Pod object - :return: a list containing podSelectors of CRDs - that had security group rules updated - """ - raise NotImplementedError() - - def delete_namespace_sg_rules(self, namespace): - """Delete security group rule associated to a namespace. - - :param namespace: dict containing K8S Namespace object - """ - raise NotImplementedError() - - def create_namespace_sg_rules(self, namespace): - """Create security group rule associated to a namespace.
- - :param namespace: dict containing K8S Namespace object - """ - raise NotImplementedError() - - def update_namespace_sg_rules(self, namespace): - """Update security group rule associated to a namespace. - - :param namespace: dict containing K8S Namespace object - """ - raise NotImplementedError() - - -class ServiceSecurityGroupsDriver(DriverBase, metaclass=abc.ABCMeta): - """Provides security groups for Kubernetes Services.""" - - ALIAS = 'service_security_groups' - - @abc.abstractmethod - def get_security_groups(self, service, project_id): - """Get a list of security groups' IDs for Service. - - :param service: dict containing Kubernetes Service object - :param project_id: OpenStack project ID - :return: list containing security groups' IDs - """ - raise NotImplementedError() - - -class PodVIFDriver(DriverBase, metaclass=abc.ABCMeta): - """Manages Neutron ports to provide VIFs for Kubernetes Pods.""" - - ALIAS = 'pod_vif' - - @abc.abstractmethod - def request_vif(self, pod, project_id, subnets, security_groups): - """Links Neutron port to pod and returns it as VIF object. - - Implementing drivers must ensure the Neutron port satisfying the - requested parameters is present and is valid for specified `pod`. It - is up to the implementing drivers to either create new ports on each - request or reuse available ports when possible. - - Implementing drivers may return a VIF object with its `active` field - set to 'False' to indicate that Neutron port requires additional - actions to enable network connectivity after VIF is plugged (e.g. - setting up OpenFlow and/or iptables rules by OpenVSwitch agent). In - that case the Controller will call driver's `activate_vif` method - and the CNI plugin will block until it receives activation - confirmation from the Controller. - - :param pod: dict containing Kubernetes Pod object - :param project_id: OpenStack project ID - :param subnets: dict containing subnet mapping as returned by - `PodSubnetsDriver.get_subnets`. If multiple entries - are present in that mapping, it is guaranteed that - all entries have the same value of `Network.id`. - :param security_groups: list containing security groups' IDs as - returned by - `PodSecurityGroupsDriver.get_security_groups` - :return: VIF object - """ - raise NotImplementedError() - - def request_vifs(self, pod, project_id, subnets, security_groups, - num_ports, semaphore): - """Creates Neutron ports for pods and returns them as VIF objects list. - - It follows the same pattern as request_vif, but creates the number of - ports and VIF objects given by the num_ports parameter. - - The port creation request is generic, as the ports are not created for - a specific pod -- at least not all of them. Additionally, in order to - save Neutron calls, the port creation is handled in a bulk request. - - :param pod: dict containing Kubernetes Pod object - :param project_id: OpenStack project ID - :param subnets: dict containing subnet mapping as returned by - `PodSubnetsDriver.get_subnets`. If multiple entries - are present in that mapping, it is guaranteed that - all entries have the same value of `Network.id`.
- :param security_groups: list containing security groups' IDs as - returned by - `PodSecurityGroupsDriver.get_security_groups` - :param num_ports: number of ports to be created - :param semaphore: an eventlet Semaphore to limit the number of bulk - port creations running in parallel - :return: VIF objects list - """ - raise NotImplementedError() - - @abc.abstractmethod - def release_vif(self, pod, vif, project_id=None): - """Unlinks Neutron port corresponding to VIF object from pod. - - Implementing drivers must ensure the port is either deleted or made - available for reuse by `PodVIFDriver.request_vif`. - - :param pod: dict containing Kubernetes Pod object - :param vif: VIF object as returned by `PodVIFDriver.request_vif` - :param project_id: OpenStack project ID - """ - raise NotImplementedError() - - def release_vifs(self, pods, vifs, project_id=None): - """Unlinks Neutron ports corresponding to VIF objects. - - It follows the same pattern as release_vif, but releases multiple - ports. Ideally it will also make use of bulk requests to save Neutron - calls in the release/recycle process. - :param pods: list of dict containing Kubernetes Pod objects - :param vifs: list of VIF objects as returned by - `PodVIFDriver.request_vif` - :param project_id: (optional) OpenStack project ID - """ - raise NotImplementedError() - - @abc.abstractmethod - def activate_vif(self, vif, **kwargs): - """Updates VIF to become active. - - Implementing drivers should update the specified `vif` object's - `active` field to 'True' but must ensure that the corresponding - Neutron port is fully configured (i.e. the container using the `vif` - can access the requested network resources). - - Implementing drivers may raise `ResourceNotReady` exception to - indicate that port activation should be retried later which will - cause `activate_vif` to be called again with the same arguments. - - This method may be called before, after or while the VIF is being - plugged by the CNI plugin. - - :param vif: VIF object as returned by `PodVIFDriver.request_vif` - """ - raise NotImplementedError() - - @abc.abstractmethod - def update_vif_sgs(self, pod, security_groups): - """Update VIF security groups. - - Implementing drivers should update the port associated to the pod - with the specified security groups. - - :param pod: dict containing Kubernetes Pod object - :param security_groups: list containing security groups' IDs as - returned by - `PodSecurityGroupsDriver.get_security_groups` - """ - raise NotImplementedError() - - -class MultiVIFDriver(DriverBase, metaclass=abc.ABCMeta): - """Manages additional ports of Kubernetes Pods.""" - - ALIAS = 'multi_vif' - - @abc.abstractmethod - def request_additional_vifs( - self, pod, project_id, security_groups): - """Links Neutron ports to pod and returns them as a list of VIF objects. - - Implementing drivers must be able to parse the additional interface - definition from pod. The format of the definition is up to the - implementation of each driver. Then implementing drivers must invoke - the VIF drivers to either create new Neutron ports on each request or - reuse available ports when possible.
- - :param pod: dict containing Kubernetes Pod object - :param project_id: OpenStack project ID - :param security_groups: list containing security groups' IDs as - returned by - `PodSecurityGroupsDriver.get_security_groups` - :return: VIF object list - """ - raise NotImplementedError() - - @classmethod - def get_enabled_drivers(cls): - if _MULTI_VIF_DRIVERS: - pass - else: - drivers = config.CONF.kubernetes['multi_vif_drivers'] - for driver in drivers: - _MULTI_VIF_DRIVERS.append(cls.get_instance(driver)) - return _MULTI_VIF_DRIVERS - - -class LBaaSDriver(DriverBase): - """Base class for OpenStack load balancer services.""" - - ALIAS = 'endpoints_lbaas' - - @abc.abstractmethod - def get_service_loadbalancer_name(self, namespace, svc_name): - """Generate name of a load balancer that represents K8S service. - - In case a load balancer represents K8S service/ep, the handler - should first call this API to get the load balancer name and use the - return value of this function as the 'name' parameter for the - 'ensure_loadbalancer' function - - :param namespace: K8S service namespace - :param svc_name: K8S service name - """ - raise NotImplementedError() - - @abc.abstractmethod - def ensure_loadbalancer(self, name, project_id, subnet_id, ip, - security_groups_ids, service_type, provider): - """Get or create load balancer. - - :param name: load balancer name - :param project_id: OpenStack project ID - :param subnet_id: Neutron subnet ID to host load balancer - :param ip: IP of the load balancer - :param security_groups_ids: security groups that should be allowed - access to the load balancer - :param service_type: K8s service type (ClusterIP or LoadBalancer) - :param provider: load balancer backend service - """ - raise NotImplementedError() - - @abc.abstractmethod - def release_loadbalancer(self, loadbalancer): - """Release load balancer. - - Should return without errors if load balancer does not exist (e.g. - already deleted). - - :param loadbalancer: `LBaaSLoadBalancer` object - """ - raise NotImplementedError() - - @abc.abstractmethod - def ensure_listener(self, loadbalancer, protocol, port): - """Get or create listener. - - :param loadbalancer: `LBaaSLoadBalancer` object - :param protocol: listener's protocol (only TCP is supported for now) - :param port: listener's port - """ - raise NotImplementedError() - - @abc.abstractmethod - def release_listener(self, loadbalancer, listener): - """Release listener. - - Should return without errors if listener or load balancer does not - exist (e.g. already deleted). - - :param loadbalancer: `LBaaSLoadBalancer` object - :param listener: `LBaaSListener` object - """ - raise NotImplementedError() - - @abc.abstractmethod - def ensure_pool(self, loadbalancer, listener): - """Get or create pool attached to Listener. - - :param loadbalancer: `LBaaSLoadBalancer` object - :param listener: `LBaaSListener` object - """ - raise NotImplementedError() - - @abc.abstractmethod - def ensure_pool_attached_to_lb(self, loadbalancer, namespace, - svc_name, protocol): - """Get or create pool attached to LoadBalancer. - - :param loadbalancer: `LBaaSLoadBalancer` object - :param namespace: K8S service's namespace - :param svc_name: K8S service's name - :param protocol: pool's protocol - """ - raise NotImplementedError() - - @abc.abstractmethod - def get_loadbalancer_pool_name(self, loadbalancer, namespace, svc_name): - """Get name of a load balancer's pool attached to LB.
- - The pool's name should be unique per K8S service - - :param loadbalancer: `LBaaSLoadBalancer` object - :param namespace: K8S service's namespace - :param svc_name: K8S service's name - """ - raise NotImplementedError() - - @abc.abstractmethod - def release_pool(self, loadbalancer, pool): - """Release pool. - - Should return without errors if pool or load balancer does not exist - (e.g. already deleted). - - :param loadbalancer: `LBaaSLoadBalancer` object - :param pool: `LBaaSPool` object - """ - raise NotImplementedError() - - @abc.abstractmethod - def ensure_member(self, loadbalancer, pool, - subnet_id, ip, port, target_ref_namespace, - target_ref_name): - """Get or create member. - - :param loadbalancer: `LBaaSLoadBalancer` object - :param pool: `LBaaSPool` object - :param subnet_id: Neutron subnet ID of the target - :param ip: target's IP (e.g. Pod's IP) - :param port: target port - :param target_ref_namespace: Kubernetes EP target_ref namespace - :param target_ref_name: Kubernetes EP target_ref name - """ - raise NotImplementedError() - - @abc.abstractmethod - def release_member(self, loadbalancer, member): - """Release member. - - Should return without errors if member or load balancer does not exist - (e.g. already deleted). - - :param loadbalancer: `LBaaSLoadBalancer` object - :param member: `LBaaSMember` object - """ - raise NotImplementedError() - - @abc.abstractmethod - def update_lbaas_sg(self, service, sgs): - """Update security group rules associated to the loadbalancer - - :param service: k8s service object - :param sgs: list of security group ids to use for updating the rules - """ - raise NotImplementedError() - - @abc.abstractmethod - def add_tags(self, resource, req): - """Add tags to a request if the resource supports it""" - raise NotImplementedError() - - -class VIFPoolDriver(PodVIFDriver, metaclass=abc.ABCMeta): - """Manages Pool of Neutron ports to provide VIFs for Kubernetes Pods.""" - - ALIAS = 'vif_pool' - - @abc.abstractmethod - def set_vif_driver(self, driver): - """Sets the driver the Pool should use to manage resources - - The driver will be used for acquiring, releasing and updating the - vif resources. - """ - raise NotImplementedError() - - @abc.abstractmethod - def remove_sg_from_pools(self, sg_id, net_id): - """Remove the SG from the ports associated to the pools. - - This method ensures that ports on net_id that belong to pools and have - the referenced SG are updated to clean up their SGs and put back on - the default pool for that network. - - :param sg_id: Security Group ID that needs to be removed from pool - ports - :param net_id: Network ID associated to the pools to clean up, and - to which the ports must belong.
- """ - raise NotImplementedError() - - -class ServicePubIpDriver(DriverBase, metaclass=abc.ABCMeta): - """Manages loadbalancerIP/public ip for neutron lbaas.""" - - ALIAS = 'service_public_ip' - - @abc.abstractmethod - def acquire_service_pub_ip_info(self, spec_type, spec_lb_ip, project_id, - port_id_to_be_associated=None): - """Get k8s service loadbalancer IP info based on service spec - - :param spec_type: service.spec.type field - :param spec_lb_ip: service spec LoadBlaceIP field - :param project_id: openstack project id - :param port_id_to_be_associated: port id to associate - - """ - raise NotImplementedError() - - @abc.abstractmethod - def release_pub_ip(self, service_pub_ip_info): - """Release (if needed) based on service_pub_ip_info content - - :param service_pub_ip_info: service loadbalancer IP info - :returns True/False - - """ - raise NotImplementedError() - - @abc.abstractmethod - def associate_pub_ip(self, service_pub_ip_info, vip_port_id): - """Associate loadbalancer IP to lbaas VIP port ID - - :param service_pub_ip_info: service loadbalancer IP info - :param vip_port_id: Lbaas VIP port id - - """ - raise NotImplementedError() - - @abc.abstractmethod - def disassociate_pub_ip(self, service_pub_ip_info): - """Disassociate loadbalancer IP and lbaas VIP port ID - - :param service_pub_ip_info: service loadbalancer IP info - - """ - - -class NetworkPolicyDriver(DriverBase, metaclass=abc.ABCMeta): - """Provide network-policy for pods""" - - ALIAS = 'network_policy' - - @abc.abstractmethod - def ensure_network_policy(self, policy): - """Policy created or updated - - :param policy: dict containing Kubernetes NP object - """ - raise NotImplementedError() - - @abc.abstractmethod - def release_network_policy(self, kuryrnetpolicy): - """Delete a network policy - - :param kuryrnetpolicy: dict containing NetworkPolicy object - """ - raise NotImplementedError() - - @abc.abstractmethod - def affected_pods(self, policy, selector=None): - """Return affected pods by the policy - - This method returns the list of pod objects affected by the policy, or - by the selector if it is specified. - - :param policy: dict containing Kubernetes NP object - :param selector: (optional) specifc pod selector - :returns: list of Pods objects affected by the policy or the selector - if it is passed - """ - raise NotImplementedError() - - @abc.abstractmethod - def namespaced_pods(self, policy): - """Return pods on the policy namespace - - This method returns the pods on the network policy namespace - - :param policy: dict containing Kubernetes NP object - :returns: list of Pods objects on the policy namespace - """ - raise NotImplementedError() - - -class NetworkPolicyProjectDriver(DriverBase, metaclass=abc.ABCMeta): - """Get an OpenStack project id for K8s network policies""" - - ALIAS = 'network_policy_project' - - @abc.abstractmethod - def get_project(self, policy): - """Get an OpenStack project id for K8s pod ports. - - :param policy: dict containing Kubernetes NP object - :returns: OpenStack project_id - """ - raise NotImplementedError() - - -class NodesSubnetsDriver(DriverBase, metaclass=abc.ABCMeta): - """Keeps list of subnet_ids of the OpenShift Nodes.""" - - ALIAS = 'nodes_subnets' - - @abc.abstractmethod - def get_nodes_subnets(self, raise_on_empty=False): - """Gets list of subnet_ids of OpenShift Nodes. - - :param raise_on_empty: whether it should raise if list is empty. 
- :return: list of subnets - """ - - raise NotImplementedError() - - @abc.abstractmethod - def add_node(self, node): - """Handles node addition. - - :param node: Node object - """ - pass - - @abc.abstractmethod - def delete_node(self, node): - """Handles node removal - - :param node: Node object - """ - pass diff --git a/kuryr_kubernetes/controller/drivers/default_project.py b/kuryr_kubernetes/controller/drivers/default_project.py deleted file mode 100644 index 03ed860e9..000000000 --- a/kuryr_kubernetes/controller/drivers/default_project.py +++ /dev/null @@ -1,77 +0,0 @@ -# Copyright (c) 2016 Mirantis, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_config import cfg - -from kuryr_kubernetes import config -from kuryr_kubernetes.controller.drivers import base - - -class DefaultPodProjectDriver(base.PodProjectDriver): - """Provides project ID for Pod port based on a configuration option.""" - - def get_project(self, pod): - project_id = config.CONF.neutron_defaults.project - - if not project_id: - raise cfg.RequiredOptError('project', - cfg.OptGroup('neutron_defaults')) - - return project_id - - -class DefaultServiceProjectDriver(base.ServiceProjectDriver): - """Provides project ID for Service based on a configuration option.""" - - def get_project(self, service): - project_id = config.CONF.neutron_defaults.project - - if not project_id: - # NOTE(ivc): this option is only required for - # DefaultServiceProjectDriver and its subclasses, but it may be - # optional for other drivers (e.g. when each namespace has own - # project) - raise cfg.RequiredOptError('project', - cfg.OptGroup('neutron_defaults')) - - return project_id - - -class DefaultNamespaceProjectDriver(base.NamespaceProjectDriver): - """Provides project ID for Namespace based on a configuration option.""" - - def get_project(self, namespace): - project_id = config.CONF.neutron_defaults.project - - if not project_id: - # NOTE(ivc): this option is only required for - # DefaultNamespaceProjectDriver and its subclasses, but it may be - # optional for other drivers (e.g. when each namespace has own - # project) - raise cfg.RequiredOptError('project', - cfg.OptGroup('neutron_defaults')) - - return project_id - - -class DefaultNetworkPolicyProjectDriver(base.NetworkPolicyProjectDriver): - - def get_project(self, policy): - project_id = config.CONF.neutron_defaults.project - - if not project_id: - raise cfg.RequiredOptError('project', - cfg.OptGroup('neutron_defaults')) - return project_id diff --git a/kuryr_kubernetes/controller/drivers/default_security_groups.py b/kuryr_kubernetes/controller/drivers/default_security_groups.py deleted file mode 100644 index d88b6e723..000000000 --- a/kuryr_kubernetes/controller/drivers/default_security_groups.py +++ /dev/null @@ -1,81 +0,0 @@ -# Copyright (c) 2016 Mirantis, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_config import cfg -from oslo_log import log as logging - -from kuryr_kubernetes import config -from kuryr_kubernetes.controller.drivers import base - -LOG = logging.getLogger(__name__) - - -class DefaultPodSecurityGroupsDriver(base.PodSecurityGroupsDriver): - """Provides security groups for Pod based on a configuration option.""" - - def get_security_groups(self, pod, project_id): - sg_list = config.CONF.neutron_defaults.pod_security_groups - - if not sg_list: - # NOTE(ivc): this option is only required for - # Default{Pod,Service}SecurityGroupsDriver and its subclasses, - # but it may be optional for other drivers (e.g. when each - # namespace has own set of security groups) - raise cfg.RequiredOptError('pod_security_groups', - cfg.OptGroup('neutron_defaults')) - - return sg_list[:] - - def create_sg_rules(self, pod): - LOG.debug("Security group driver does not create SG rules for " - "the pods.") - - def delete_sg_rules(self, pod): - LOG.debug("Security group driver does not delete SG rules for " - "the pods.") - - def update_sg_rules(self, pod): - LOG.debug("Security group driver does not update SG rules for " - "the pods.") - - def delete_namespace_sg_rules(self, namespace): - LOG.debug("Security group driver does not delete SG rules for " - "namespace.") - - def create_namespace_sg_rules(self, namespace): - LOG.debug("Security group driver does not create SG rules for " - "namespace.") - - def update_namespace_sg_rules(self, namespace): - LOG.debug("Security group driver does not update SG rules for " - "namespace.") - - -class DefaultServiceSecurityGroupsDriver(base.ServiceSecurityGroupsDriver): - """Provides security groups for Service based on a configuration option.""" - - def get_security_groups(self, service, project_id): - # NOTE(ivc): use the same option as DefaultPodSecurityGroupsDriver - sg_list = config.CONF.neutron_defaults.pod_security_groups - - if not sg_list: - # NOTE(ivc): this option is only required for - # Default{Pod,Service}SecurityGroupsDriver and its subclasses, - # but it may be optional for other drivers (e.g. when each - # namespace has own set of security groups) - raise cfg.RequiredOptError('pod_security_groups', - cfg.OptGroup('neutron_defaults')) - - return sg_list[:] diff --git a/kuryr_kubernetes/controller/drivers/default_subnet.py b/kuryr_kubernetes/controller/drivers/default_subnet.py deleted file mode 100644 index e39294c58..000000000 --- a/kuryr_kubernetes/controller/drivers/default_subnet.py +++ /dev/null @@ -1,54 +0,0 @@ -# Copyright (c) 2016 Mirantis, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
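To make the pattern of these Default* drivers concrete, here is a small self-contained sketch (plain oslo.config usage with a made-up SG UUID; in Kuryr itself the option is registered by kuryr_kubernetes.config):

from oslo_config import cfg

CONF = cfg.CONF
# Register the option so the sketch runs standalone.
CONF.register_opts([cfg.ListOpt('pod_security_groups', default=[])],
                   group='neutron_defaults')
# Normally configured in kuryr.conf under [neutron_defaults];
# the UUID below is hypothetical.
CONF.set_override('pod_security_groups',
                  ['8a9c4e0c-ff3d-4794-ae3e-9f0b0960ca24'],
                  group='neutron_defaults')

sg_list = CONF.neutron_defaults.pod_security_groups
# The drivers above return sg_list[:] -- a copy, so callers cannot mutate
# the configured value -- and raise cfg.RequiredOptError when it is unset.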
- -from oslo_config import cfg - -from kuryr_kubernetes import config -from kuryr_kubernetes.controller.drivers import base -from kuryr_kubernetes import utils - - -class DefaultPodSubnetDriver(base.PodSubnetsDriver): - """Provides subnet for Pod port based on a configuration option.""" - - def get_subnets(self, pod, project_id): - subnet_id = config.CONF.neutron_defaults.pod_subnet - - if not subnet_id: - # NOTE(ivc): this option is only required for - # DefaultPodSubnetDriver and its subclasses, but it may be - # optional for other drivers (e.g. when each namespace has own - # subnet) - raise cfg.RequiredOptError('pod_subnet', - cfg.OptGroup('neutron_defaults')) - - return {subnet_id: utils.get_subnet(subnet_id)} - - -class DefaultServiceSubnetDriver(base.ServiceSubnetsDriver): - """Provides subnet for Service's LBaaS based on a configuration option.""" - - def get_subnets(self, service, project_id): - subnet_id = config.CONF.neutron_defaults.service_subnet - - if not subnet_id: - # NOTE(ivc): this option is only required for - # DefaultServiceSubnetDriver and its subclasses, but it may be - # optional for other drivers (e.g. when each namespace has own - # subnet) - raise cfg.RequiredOptError('service_subnet', - cfg.OptGroup('neutron_defaults')) - - return {subnet_id: utils.get_subnet(subnet_id)} diff --git a/kuryr_kubernetes/controller/drivers/lb_public_ip.py b/kuryr_kubernetes/controller/drivers/lb_public_ip.py deleted file mode 100644 index 88912257f..000000000 --- a/kuryr_kubernetes/controller/drivers/lb_public_ip.py +++ /dev/null @@ -1,113 +0,0 @@ -# Copyright (c) 2017 RedHat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -from kuryr_kubernetes import config -from kuryr_kubernetes.controller.drivers import base -from kuryr_kubernetes.controller.drivers import public_ip -from oslo_log import log as logging - -LOG = logging.getLogger(__name__) - - -class FloatingIpServicePubIPDriver(base.ServicePubIpDriver): - """Manages floating IPs for Neutron LBaaS. - - Service loadBalancerIP supports the following: - 1. No loadbalancer IP - k8s service.spec.type != 'LoadBalancer' - 2. Floating IP allocated from pool - - k8s service.spec.type = 'LoadBalancer' and - service.spec.loadBalancerIP NOT defined - 3. Floating IP specified by the user - - k8s service.spec.type = 'LoadBalancer' and - service.spec.loadBalancerIP is defined. - """ - - def __init__(self): - super(FloatingIpServicePubIPDriver, self).__init__() - self._drv_pub_ip = public_ip.FipPubIpDriver() - - def acquire_service_pub_ip_info(self, spec_type, spec_lb_ip, project_id, - port_id_to_be_associated=None): - - if spec_type != 'LoadBalancer': - return None - - # get public network/subnet ids from kuryr.conf - public_network_id = config.CONF.neutron_defaults.external_svc_net - public_subnet_id = config.CONF.neutron_defaults.external_svc_subnet - if not public_network_id: - LOG.warning('Skipping Floating IP allocation on port: %s.
' - 'Missing value for external_svc_net config.', - port_id_to_be_associated) - return None - - if spec_lb_ip: - user_specified_ip = spec_lb_ip.format() - res_id = self._drv_pub_ip.is_ip_available(user_specified_ip, - port_id_to_be_associated) - if res_id: - service_pub_ip_info = { - 'ip_id': res_id, - 'ip_addr': str(user_specified_ip), - 'alloc_method': 'user' - } - - return service_pub_ip_info - else: - # user specified IP is not valid - LOG.error("IP=%s is not available", user_specified_ip) - return None - else: - LOG.debug("Trying to allocate public ip from pool") - - try: - res_id, alloc_ip_addr = (self._drv_pub_ip.allocate_ip( - public_network_id, project_id, pub_subnet_id=public_subnet_id, - description='kuryr_lb', - port_id_to_be_associated=port_id_to_be_associated)) - except Exception: - LOG.exception("Failed to allocate public IP - net_id:%s", - public_network_id) - return None - service_pub_ip_info = { - 'ip_id': res_id, - 'ip_addr': alloc_ip_addr, - 'alloc_method': 'pool' - } - - return service_pub_ip_info - - def release_pub_ip(self, service_pub_ip_info): - if not service_pub_ip_info: - return True - if service_pub_ip_info['alloc_method'] == 'pool': - retcode = self._drv_pub_ip.free_ip(service_pub_ip_info['ip_id']) - if not retcode: - LOG.error("Failed to delete public_ip_id =%s !", - service_pub_ip_info['ip_id']) - return False - return True - - def associate_pub_ip(self, service_pub_ip_info, vip_port_id): - if (not service_pub_ip_info or - not vip_port_id or - not service_pub_ip_info['ip_id']): - return - self._drv_pub_ip.associate( - service_pub_ip_info['ip_id'], vip_port_id) - - def disassociate_pub_ip(self, service_pub_ip_info): - if not service_pub_ip_info or not service_pub_ip_info['ip_id']: - return - self._drv_pub_ip.disassociate(service_pub_ip_info['ip_id']) diff --git a/kuryr_kubernetes/controller/drivers/lbaasv2.py b/kuryr_kubernetes/controller/drivers/lbaasv2.py deleted file mode 100644 index 521b7b2c6..000000000 --- a/kuryr_kubernetes/controller/drivers/lbaasv2.py +++ /dev/null @@ -1,940 +0,0 @@ -# Copyright (c) 2016 Mirantis, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import random -import time - -from openstack import exceptions as os_exc -from oslo_config import cfg -from oslo_log import log as logging -from oslo_utils import timeutils -from oslo_utils import versionutils - -from kuryr_kubernetes import clients -from kuryr_kubernetes import config -from kuryr_kubernetes import constants as k_const -from kuryr_kubernetes.controller.drivers import base -from kuryr_kubernetes.controller.drivers import utils as c_utils -from kuryr_kubernetes import exceptions as k_exc -from kuryr_kubernetes import utils - - -CONF = cfg.CONF -LOG = logging.getLogger(__name__) - -_ACTIVATION_TIMEOUT = CONF.neutron_defaults.lbaas_activation_timeout -# NOTE(yboaron):Prior to sending create request to Octavia, LBaaS driver -# verifies that LB is in a stable state by polling LB's provisioning_status -# using backoff timer. 
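# (Illustrative sketch of that polling loop, not part of the original file;
# 'lbaas' is an openstacksdk load-balancer proxy and 'interval' is taken
# from the constants below:
#     while time.time() < deadline:
#         lb = lbaas.get_load_balancer(lb_id)
#         if lb.provisioning_status == 'ACTIVE':
#             break
#         time.sleep(interval)  # interval backs off between retries
# )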
-# A similar method is also used for the delete flow. -# Unlike LB creation, the rest of the Octavia operations usually complete -# within a few seconds. The next constants define the interval values for -# 'fast' and 'slow' polling ('slow' is used for LB creation). -_LB_STS_POLL_FAST_INTERVAL = 1 -_LB_STS_POLL_SLOW_INTERVAL = 3 -_OCTAVIA_TAGGING_VERSION = 2, 5 -# NOTE(ltomasbo): amphora supports it on 2.11, but ovn-octavia only on 2.13 -# In order to make it simpler, we assume this is supported only from 2.13 -_OCTAVIA_DL_VERSION = 2, 13 -_OCTAVIA_ACL_VERSION = 2, 12 -_OCTAVIA_PROVIDER_VERSION = 2, 6 -_OCTAVIA_SCTP_VERSION = 2, 23 - -# HTTP Codes raised by Octavia when a Resource already exists -OKAY_CODES = (409, 500) - - -class LBaaSv2Driver(base.LBaaSDriver): - """LBaaSv2Driver implements LBaaSDriver for Neutron LBaaSv2 API.""" - - def __init__(self): - super(LBaaSv2Driver, self).__init__() - - self._octavia_tags = False - self._octavia_acls = False - self._octavia_double_listeners = False - self._octavia_providers = False - self._octavia_sctp = False - # Check if Octavia API supports tagging. - # TODO(dulek): *Maybe* this can be replaced with - # lbaas.get_api_major_version(version=_OCTAVIA_TAGGING_VERSION) - # if bug https://storyboard.openstack.org/#!/story/2007040 gets - # fixed one day. - v = self.get_octavia_version() - if v >= _OCTAVIA_ACL_VERSION: - self._octavia_acls = True - LOG.info('Octavia supports ACLs for Amphora provider.') - if v >= _OCTAVIA_DL_VERSION: - self._octavia_double_listeners = True - LOG.info('Octavia supports double listeners (different ' - 'protocol, same port) for Amphora provider.') - if v >= _OCTAVIA_TAGGING_VERSION: - LOG.info('Octavia supports resource tags.') - self._octavia_tags = True - else: - v_str = '%d.%d' % v - LOG.warning('[neutron_defaults]resource_tags is set, but Octavia ' - 'API %s does not support resource tagging. Kuryr ' - 'will put requested tags in the description field of ' - 'Octavia resources.', v_str) - if v >= _OCTAVIA_PROVIDER_VERSION: - self._octavia_providers = True - if v >= _OCTAVIA_SCTP_VERSION: - LOG.info('Octavia API supports SCTP protocol.') - self._octavia_sctp = True - - def double_listeners_supported(self): - return self._octavia_double_listeners - - def providers_supported(self): - return self._octavia_providers - - def sctp_supported(self): - return self._octavia_sctp - - def get_octavia_version(self): - lbaas = clients.get_loadbalancer_client() - region_name = getattr(CONF.neutron, 'region_name', None) - - regions = lbaas.get_all_version_data() - # If a region was specified take it, otherwise just take the first - # as default - endpoints = regions.get(region_name, list(regions.values())[0]) - # Take the first endpoint - services = list(endpoints.values())[0] - # Try the load-balancer service; if not found, take the first - versions = services.get('load-balancer', list(services.values())[0]) - # Lookup the latest version. For safety, we won't look for - # version['status'] == 'CURRENT' and assume it's the maximum. Also we - # won't assume this dict is sorted.
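# (Illustrative: given versions like [{'version': '2.5'}, {'version': '2.23'}],
# versionutils.convert_version_to_tuple() yields (2, 5) and (2, 23), and the
# scan below selects (2, 23) as max_ver.)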
- max_ver = 0, 0 - for version in versions: - if version.get('version') is None: - raise k_exc.UnreachableOctavia('Unable to reach Octavia API') - v_tuple = versionutils.convert_version_to_tuple( - version['version']) - if v_tuple > max_ver: - max_ver = v_tuple - - LOG.debug("Detected Octavia version %d.%d", *max_ver) - return max_ver - - def get_service_loadbalancer_name(self, namespace, svc_name): - return "%s/%s" % (namespace, svc_name) - - def get_loadbalancer_pool_name(self, loadbalancer, namespace, svc_name): - return "%s/%s/%s" % (loadbalancer['name'], namespace, svc_name) - - def add_tags(self, resource, req): - if CONF.neutron_defaults.resource_tags: - if self._octavia_tags: - req['tags'] = CONF.neutron_defaults.resource_tags - else: - if resource in ('loadbalancer', 'listener', 'pool'): - req['description'] = ','.join( - CONF.neutron_defaults.resource_tags) - - def ensure_loadbalancer(self, name, project_id, subnet_id, ip, - security_groups_ids=None, service_type=None, - provider=None): - request = { - 'name': name, - 'project_id': project_id, - 'subnet_id': subnet_id, - 'ip': ip, - 'security_groups': security_groups_ids, - 'provider': provider - } - - response = self._ensure_loadbalancer(request) - - if not response: - # NOTE(ivc): load balancer was present before 'create', but got - # deleted externally between 'create' and 'find' - # NOTE(ltomasbo): or it is in ERROR status, so we deleted it and - # trigger the retry - raise k_exc.ResourceNotReady(request) - - return response - - def release_loadbalancer(self, loadbalancer): - lbaas = clients.get_loadbalancer_client() - self._release( - loadbalancer, - loadbalancer, - lbaas.delete_load_balancer, - loadbalancer['id'], - cascade=True) - self._wait_for_deletion(loadbalancer, _ACTIVATION_TIMEOUT) - - def _create_listeners_acls(self, loadbalancer, port, target_port, - protocol, lb_sg, new_sgs, listener_id): - all_pod_rules = [] - add_default_rules = False - os_net = clients.get_network_client() - sgs = [] - - if new_sgs: - sgs = new_sgs - elif loadbalancer['security_groups']: - sgs = loadbalancer['security_groups'] - else: - # NOTE(gryf): in case there are no new SG rules and the - # loadbalancer has the SG removed, just add default ones. - add_default_rules = True - - # Check if Network Policy allows listener on the pods - for sg in sgs: - if sg != lb_sg: - if sg in config.CONF.neutron_defaults.pod_security_groups: - # If default sg is set, this means there is no NP - # associated to the service, thus falling back to the - # default listener rules - add_default_rules = True - break - rules = os_net.security_group_rules(security_group_id=sg) - for rule in rules: - # NOTE(ltomasbo): NP sg can only have rules with - # or without remote_ip_prefix. Rules with remote_group_id - # are not possible, therefore only applying the ones - # with or without remote_ip_prefix.
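# (Illustrative: an NP-generated ingress rule such as protocol='tcp',
# port_range_min=8080, port_range_max=8090, remote_ip_prefix='10.0.0.0/24'
# passes the checks below for a listener whose target_port is 8085 and
# contributes '10.0.0.0/24' to the allowed CIDRs, while any rule carrying
# remote_group_id is skipped, per the note above.)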
- if rule.remote_group_id: - continue - if ((not rule.protocol or - rule.protocol == protocol.lower()) - and rule.direction == 'ingress'): - # If listener port not in allowed range, skip - min_port = rule.port_range_min - max_port = rule.port_range_max - if (min_port and target_port not in range(min_port, - max_port+1)): - continue - if rule.remote_ip_prefix: - all_pod_rules.append(rule.remote_ip_prefix) - else: - add_default_rules = True - - if add_default_rules: - # update the listener without allowed-cidr - self._update_listener_acls(loadbalancer, listener_id, None) - else: - self._update_listener_acls(loadbalancer, listener_id, - all_pod_rules) - - def _apply_members_security_groups(self, loadbalancer, port, target_port, - protocol, sg_rule_name, listener_id, - new_sgs=None): - LOG.debug("Applying members security groups.") - os_net = clients.get_network_client() - lb_sg = None - if CONF.octavia_defaults.enforce_sg_rules: - vip_port = self._get_vip_port(loadbalancer) - if vip_port: - try: - lb_sg = vip_port.security_group_ids[0] - except IndexError: - LOG.warning("We are still waiting for the SG to be " - "created for VIP %s", vip_port) - raise k_exc.ResourceNotReady(listener_id) - else: - LOG.debug("Skipping sg update for lb %s", loadbalancer['name']) - return - - # NOTE (maysams) It might happen that the update of LBaaS SG - # has been triggered and the LBaaS SG was not created yet. - # This update is skipped until the LBaaS members are created. - if not lb_sg: - return - - if self._octavia_acls: - self._create_listeners_acls(loadbalancer, port, target_port, - protocol, lb_sg, new_sgs, listener_id) - return - - lbaas_sg_rules = os_net.security_group_rules( - security_group_id=lb_sg, project_id=loadbalancer['project_id']) - all_pod_rules = [] - add_default_rules = False - - if new_sgs: - sgs = new_sgs - else: - sgs = loadbalancer['security_groups'] - - sg_rule_ethertype = k_const.IPv4 - if utils.get_service_subnet_version() == k_const.IP_VERSION_6: - sg_rule_ethertype = k_const.IPv6 - # Check if Network Policy allows listener on the pods - for sg in sgs: - if sg != lb_sg: - if sg in config.CONF.neutron_defaults.pod_security_groups: - # If default sg is set, this means there is no NP - # associated to the service, thus falling back to the - # default listener rules - add_default_rules = True - break - rules = os_net.security_group_rules(security_group_id=sg) - for rule in rules: - # copying ingress rules with the same protocol onto the - # loadbalancer sg rules - # NOTE(ltomasbo): NP sg can only have rules with - # or without remote_ip_prefix. Rules with remote_group_id - # are not possible, therefore only applying the ones - # with or without remote_ip_prefix.
- if (rule.protocol == protocol.lower() and - rule.direction == 'ingress'): - # If listener port not in allowed range, skip - min_port = rule.port_range_min - max_port = rule.port_range_max - if (min_port and target_port not in range(min_port, - max_port+1)): - continue - all_pod_rules.append(rule) - try: - LOG.debug("Creating LBaaS sg rule for sg: %r", - lb_sg) - os_net.create_security_group_rule( - direction='ingress', - ether_type=sg_rule_ethertype, - port_range_min=port, - port_range_max=port, - protocol=protocol, - remote_ip_prefix=rule.remote_ip_prefix, - security_group_id=lb_sg, - description=sg_rule_name) - except os_exc.ConflictException: - pass - except os_exc.SDKException: - LOG.exception('Failed when creating security ' - 'group rule for listener %s.', - sg_rule_name) - - # Delete LBaaS sg rules that do not match NP - for rule in lbaas_sg_rules: - if (rule.protocol != protocol.lower() or - rule.port_range_min != port or - rule.direction != 'ingress'): - if all_pod_rules and self._is_default_rule(rule): - LOG.debug("Removing default LBaaS sg rule for sg: %r", - lb_sg) - os_net.delete_security_group_rule(rule.id) - continue - self._delete_rule_if_no_match(rule, all_pod_rules) - - if add_default_rules: - try: - LOG.debug("Restoring default LBaaS sg rule for sg: %r", lb_sg) - os_net.create_security_group_rule(direction='ingress', - ether_type=sg_rule_ethertype, - port_range_min=port, - port_range_max=port, - protocol=protocol, - security_group_id=lb_sg, - description=sg_rule_name) - except os_exc.ConflictException: - pass - except os_exc.SDKException: - LOG.exception('Failed when creating security group rule for ' - 'listener %s.', sg_rule_name) - - def _delete_rule_if_no_match(self, rule, all_pod_rules): - for pod_rule in all_pod_rules: - if pod_rule['remote_ip_prefix'] == rule['remote_ip_prefix']: - return - os_net = clients.get_network_client() - LOG.debug("Deleting sg rule: %r", rule.id) - os_net.delete_security_group_rule(rule.id) - - def _is_default_rule(self, rule): - return (rule.get('direction') == 'ingress' and - not rule.get('remote_ip_prefix') and - 'network-policy' not in rule.get('description')) - - def ensure_listener(self, loadbalancer, protocol, port, - service_type='ClusterIP', timeout_client_data=0, - timeout_member_data=0): - name = "%s:%s:%s" % (loadbalancer['name'], protocol, port) - listener = { - 'name': name, - 'project_id': loadbalancer['project_id'], - 'loadbalancer_id': loadbalancer['id'], - 'protocol': protocol, - 'port': port - } - if timeout_client_data: - listener['timeout_client_data'] = timeout_client_data - if timeout_member_data: - listener['timeout_member_data'] = timeout_member_data - try: - result = self._ensure_provisioned( - loadbalancer, listener, self._create_listener, - self._find_listener, interval=_LB_STS_POLL_SLOW_INTERVAL) - except os_exc.SDKException: - LOG.exception("Failed when creating listener for loadbalancer " - "%r", loadbalancer['id']) - return None - - # NOTE(maysams): When ovn-octavia provider is used - # there is no need to set a security group for - # the load balancer as it wouldn't be enforced. 
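# (That is: when [octavia_defaults]enforce_sg_rules is False -- e.g. with
# the ovn-octavia provider -- the step below clears the security groups on
# the LB VIP port, since Octavia would not enforce them anyway.)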
-        if not CONF.octavia_defaults.enforce_sg_rules and result:
-            os_net = clients.get_network_client()
-            vip_port = self._get_vip_port(loadbalancer)
-            if vip_port:
-                os_net.update_port(vip_port.id, security_groups=[])
-                loadbalancer['security_groups'] = []
-
-        return result
-
-    def release_listener(self, loadbalancer, listener):
-        os_net = clients.get_network_client()
-        lbaas = clients.get_loadbalancer_client()
-        self._release(loadbalancer, listener,
-                      lbaas.delete_listener,
-                      listener['id'])
-
-        # NOTE(maysams): since LBs created with the ovn-octavia provider
-        # do not have an SG in place, we only need to delete SG rules
-        # when enforcing SG rules on the LB SG, i.e. when the Octavia
-        # Amphora provider is configured.
-        if CONF.octavia_defaults.enforce_sg_rules:
-            try:
-                sg_id = self._get_vip_port(loadbalancer).security_group_ids[0]
-            except AttributeError:
-                sg_id = None
-            if sg_id:
-                rules = os_net.security_group_rules(
-                    security_group_id=sg_id, description=listener['name'])
-                try:
-                    os_net.delete_security_group_rule(next(rules).id)
-                except StopIteration:
-                    LOG.warning('Cannot find SG rule for %s (%s) listener.',
-                                listener['id'], listener['name'])
-
-    def ensure_pool(self, loadbalancer, listener):
-        pool = {
-            'name': listener['name'],
-            'project_id': loadbalancer['project_id'],
-            'loadbalancer_id': loadbalancer['id'],
-            'listener_id': listener['id'],
-            'protocol': listener['protocol']
-        }
-        return self._ensure_provisioned(loadbalancer, pool,
-                                        self._create_pool,
-                                        self._find_pool)
-
-    def ensure_pool_attached_to_lb(self, loadbalancer, namespace,
-                                   svc_name, protocol):
-        name = self.get_loadbalancer_pool_name(loadbalancer,
-                                               namespace, svc_name)
-        pool = {
-            'name': name,
-            'project_id': loadbalancer['project_id'],
-            'loadbalancer_id': loadbalancer['id'],
-            'listener_id': None,
-            'protocol': protocol
-        }
-        return self._ensure_provisioned(loadbalancer, pool,
-                                        self._create_pool,
-                                        self._find_pool_by_name)
-
-    def release_pool(self, loadbalancer, pool):
-        lbaas = clients.get_loadbalancer_client()
-        self._release(loadbalancer, pool, lbaas.delete_pool, pool['id'])
-
-    def ensure_member(self, loadbalancer, pool,
-                      subnet_id, ip, port, target_ref_namespace,
-                      target_ref_name, listener_port=None):
-        lbaas = clients.get_loadbalancer_client()
-        name = ("%s/%s" % (target_ref_namespace, target_ref_name))
-        name += ":%s" % port
-        member = {
-            'name': name,
-            'project_id': loadbalancer['project_id'],
-            'pool_id': pool['id'],
-            'subnet_id': subnet_id,
-            'ip': ip,
-            'port': port
-        }
-        result = self._ensure_provisioned(loadbalancer, member,
-                                          self._create_member,
-                                          self._find_member,
-                                          update=lbaas.update_member)
-
-        network_policy = (
-            'policy' in CONF.kubernetes.enabled_handlers and
-            CONF.kubernetes.service_security_groups_driver == 'policy')
-        if (network_policy and CONF.octavia_defaults.enforce_sg_rules and
-                listener_port):
-            protocol = pool['protocol']
-            sg_rule_name = pool['name']
-            listener_id = pool['listener_id']
-            self._apply_members_security_groups(loadbalancer, listener_port,
-                                                port, protocol, sg_rule_name,
-                                                listener_id)
-        return result
-
-    def release_member(self, loadbalancer, member):
-        lbaas = clients.get_loadbalancer_client()
-        self._release(loadbalancer, member, lbaas.delete_member, member['id'],
-                      member['pool_id'])
-
-    def _get_vip_port(self, loadbalancer):
-        os_net = clients.get_network_client()
-        try:
-            fixed_ips = ['subnet_id=%s' % str(loadbalancer['subnet_id']),
-                         'ip_address=%s' % str(loadbalancer['ip'])]
-            ports = os_net.ports(fixed_ips=fixed_ips)
-        except os_exc.SDKException:
-
LOG.error("Port with fixed ips %s not found!", fixed_ips) - raise - - try: - return next(ports) - except StopIteration: - return None - - def _create_loadbalancer(self, loadbalancer): - request = { - 'name': loadbalancer['name'], - 'project_id': loadbalancer['project_id'], - 'vip_address': str(loadbalancer['ip']), - 'vip_subnet_id': loadbalancer['subnet_id'], - } - - if loadbalancer['provider'] is not None: - request['provider'] = loadbalancer['provider'] - - self.add_tags('loadbalancer', request) - - lbaas = clients.get_loadbalancer_client() - response = lbaas.create_load_balancer(**request) - loadbalancer['id'] = response.id - loadbalancer['port_id'] = self._get_vip_port(loadbalancer).id - if (loadbalancer['provider'] is not None and - loadbalancer['provider'] != response.provider): - LOG.error("Request provider(%s) != Response provider(%s)", - loadbalancer['provider'], response.provider) - return None - loadbalancer['provider'] = response.provider - return loadbalancer - - def _find_loadbalancer(self, loadbalancer): - lbaas = clients.get_loadbalancer_client() - response = lbaas.load_balancers( - name=loadbalancer['name'], - project_id=loadbalancer['project_id'], - vip_address=str(loadbalancer['ip']), - vip_subnet_id=loadbalancer['subnet_id'], - provider=loadbalancer['provider']) - - try: - os_lb = next(response) # openstacksdk returns a generator - loadbalancer['id'] = os_lb.id - loadbalancer['port_id'] = self._get_vip_port(loadbalancer).id - loadbalancer['provider'] = os_lb.provider - if os_lb.provisioning_status == 'ERROR': - self.release_loadbalancer(loadbalancer) - utils.clean_lb_crd_status(loadbalancer['name']) - return None - except (KeyError, StopIteration): - return None - - return loadbalancer - - def _create_listener(self, listener): - request = { - 'name': listener['name'], - 'project_id': listener['project_id'], - 'loadbalancer_id': listener['loadbalancer_id'], - 'protocol': listener['protocol'], - 'protocol_port': listener['port'], - } - timeout_cli = listener.get('timeout_client_data') - timeout_mem = listener.get('timeout_member_data') - if timeout_cli: - request['timeout_client_data'] = timeout_cli - if timeout_mem: - request['timeout_member_data'] = timeout_mem - self.add_tags('listener', request) - lbaas = clients.get_loadbalancer_client() - response = lbaas.create_listener(**request) - listener['id'] = response.id - if timeout_cli: - listener['timeout_client_data'] = response.timeout_client_data - if timeout_mem: - listener['timeout_member_data'] = response.timeout_member_data - return listener - - def _update_listener_acls(self, loadbalancer, listener_id, allowed_cidrs): - admin_state_up = True - if allowed_cidrs is None: - # World accessible, no restriction on the listeners - pass - elif len(allowed_cidrs) == 0: - # Prevent any traffic as no CIDR is allowed - admin_state_up = False - - request = { - 'allowed_cidrs': allowed_cidrs, - 'admin_state_up': admin_state_up, - } - - # Wait for the loadbalancer to be ACTIVE - if not self._wait_for_provisioning( - loadbalancer, _ACTIVATION_TIMEOUT, - _LB_STS_POLL_FAST_INTERVAL): - LOG.debug('Skipping ACLs update. 
'
-                      'No Load Balancer Provisioned.')
-            return
-
-        lbaas = clients.get_loadbalancer_client()
-        try:
-            lbaas.update_listener(listener_id, **request)
-        except os_exc.SDKException:
-            LOG.error('Error when updating listener %s', listener_id)
-            raise k_exc.ResourceNotReady(listener_id)
-
-    def _find_listener(self, listener, loadbalancer):
-        lbaas = clients.get_loadbalancer_client()
-        timeout_cli = listener.get('timeout_client_data')
-        timeout_mb = listener.get('timeout_member_data')
-        response = lbaas.listeners(
-            name=listener['name'],
-            project_id=listener['project_id'],
-            load_balancer_id=listener['loadbalancer_id'],
-            protocol=listener['protocol'],
-            protocol_port=listener['port'])
-
-        request = {}
-        request['timeout_client_data'] = timeout_cli
-        request['timeout_member_data'] = timeout_mb
-        try:
-            os_listener = next(response)
-            listener['id'] = os_listener.id
-            if os_listener.provisioning_status == 'ERROR':
-                LOG.debug("Releasing listener %s", os_listener.id)
-                self.release_listener(loadbalancer, listener)
-                return None
-            if (timeout_cli and (
-                    os_listener.timeout_client_data != timeout_cli)) or (
-                    timeout_mb and (
-                    os_listener.timeout_member_data != timeout_mb)):
-                LOG.debug("Updating listener %s", os_listener.id)
-                n_listen = lbaas.update_listener(os_listener.id, **request)
-                listener['timeout_client_data'] = n_listen.timeout_client_data
-                listener['timeout_member_data'] = n_listen.timeout_member_data
-            elif not timeout_cli or not timeout_mb:
-                LOG.debug("Updating listener %s", os_listener.id)
-                lbaas.update_listener(os_listener.id, **request)
-
-        except (KeyError, StopIteration):
-            return None
-        except os_exc.SDKException:
-            LOG.error('Error when updating listener %s', listener['id'])
-            raise k_exc.ResourceNotReady(listener['id'])
-        return listener
-
-    def _create_pool(self, pool):
-        # TODO(ivc): make lb_algorithm configurable
-        lb_algorithm = CONF.octavia_defaults.lb_algorithm
-        request = {
-            'name': pool['name'],
-            'project_id': pool['project_id'],
-            'listener_id': pool['listener_id'],
-            'loadbalancer_id': pool['loadbalancer_id'],
-            'protocol': pool['protocol'],
-            'lb_algorithm': lb_algorithm,
-        }
-        self.add_tags('pool', request)
-        lbaas = clients.get_loadbalancer_client()
-        response = lbaas.create_pool(**request)
-        pool['id'] = response.id
-        return pool
-
-    def _find_pool(self, pool, loadbalancer, by_listener=True):
-        lbaas = clients.get_loadbalancer_client()
-        response = lbaas.pools(
-            name=pool['name'],
-            project_id=pool['project_id'],
-            loadbalancer_id=pool['loadbalancer_id'],
-            protocol=pool['protocol'])
-        # TODO(scavnic) check response
-        try:
-            if by_listener:
-                pools = [p for p in response if pool['listener_id']
-                         in {listener['id'] for listener in p.listeners}]
-            else:
-                pools = [p for p in response if pool['name'] == p.name]
-            pool['id'] = pools[0].id
-            if pools[0].provisioning_status == 'ERROR':
-                LOG.debug("Releasing pool %s", pool['id'])
-                self.release_pool(loadbalancer, pool)
-                return None
-        except (KeyError, IndexError):
-            return None
-        return pool
-
-    def _find_pool_by_name(self, pool, loadbalancer):
-        return self._find_pool(pool, loadbalancer, by_listener=False)
-
-    def _create_member(self, member):
-        request = {
-            'name': member['name'],
-            'project_id': member['project_id'],
-            'subnet_id': member['subnet_id'],
-            'address': str(member['ip']),
-            'protocol_port': member['port'],
-        }
-        self.add_tags('member', request)
-        lbaas = clients.get_loadbalancer_client()
-        try:
-            response = lbaas.create_member(member['pool_id'], **request)
-        except os_exc.BadRequestException as e:
-            details = e.response.json()
-            if (details['faultstring'] == f'Subnet {member["subnet_id"]} not '
-                                          f'found.'):
-                # Most likely the subnet is already deleted because the
-                # namespace is being deleted. Ignore, we'll delete that LB
-                # soon anyway.
-                LOG.warning('Member %s not created as subnet %s is being '
-                            'deleted.', member['name'], member['subnet_id'])
-                return None
-            raise
-        member['id'] = response.id
-        return member
-
-    def _find_member(self, member, loadbalancer):
-        lbaas = clients.get_loadbalancer_client()
-        member = dict(member)
-        response = lbaas.members(
-            member['pool_id'],
-            project_id=member['project_id'],
-            subnet_id=member['subnet_id'],
-            address=member['ip'],
-            protocol_port=member['port'])
-
-        try:
-            os_members = next(response)
-            member['id'] = os_members.id
-            member['name'] = os_members.name
-            if os_members.provisioning_status == 'ERROR':
-                LOG.debug("Releasing Member %s", os_members.id)
-                self.release_member(loadbalancer, member)
-                return None
-        except (KeyError, StopIteration):
-            return None
-
-        return member
-
-    def _ensure(self, create, find, obj, loadbalancer, update=None):
-        try:
-            result = create(obj)
-            LOG.debug("Created %(obj)s", {'obj': result})
-            return result
-        except os_exc.HttpException as e:
-            if e.status_code not in OKAY_CODES:
-                raise
-        result = find(obj, loadbalancer)
-        # NOTE(maysams): A conflict may happen when a member is a
-        # leftover and a new pod uses the same address. Let's
-        # attempt to update the member name if it already exists.
-        if result and obj['name'] != result.get('name') and update:
-            update(result['id'], obj['pool_id'], name=obj['name'])
-            result['name'] = obj['name']
-        if result:
-            LOG.debug("Found %(obj)s", {'obj': result})
-        return result
-
-    def _ensure_loadbalancer(self, loadbalancer):
-        result = self._find_loadbalancer(loadbalancer)
-        if result:
-            LOG.debug("Found %(obj)s", {'obj': result})
-            return result
-
-        result = self._create_loadbalancer(loadbalancer)
-        LOG.debug("Created %(obj)s", {'obj': result})
-        return result
-
-    def _ensure_provisioned(self, loadbalancer, obj, create, find,
-                            interval=_LB_STS_POLL_FAST_INTERVAL, **kwargs):
-        for remaining in self._provisioning_timer(_ACTIVATION_TIMEOUT,
-                                                  interval):
-            if not self._wait_for_provisioning(
-                    loadbalancer, remaining, interval):
-                return None
-            try:
-                result = self._ensure(
-                    create, find, obj, loadbalancer, **kwargs)
-                if result:
-                    return result
-            except os_exc.BadRequestException:
-                raise
-            except os_exc.HttpException as e:
-                if e.status_code == 501:
-                    LOG.exception("Listener creation failed, most probably "
-                                  "because protocol %(prot)s is not supported",
-                                  {'prot': obj['protocol']})
-                    return None
-                else:
-                    raise
-            except os_exc.SDKException:
-                pass
-
-        raise k_exc.ResourceNotReady(obj)
-
-    def _release(self, loadbalancer, obj, delete, *args, **kwargs):
-        for remaining in self._provisioning_timer(_ACTIVATION_TIMEOUT):
-            try:
-                try:
-                    delete(*args, **kwargs)
-                    return
-                except (os_exc.ConflictException, os_exc.BadRequestException):
-                    if not self._wait_for_provisioning(
-                            loadbalancer, remaining):
-                        return
-            except os_exc.NotFoundException:
-                return
-
-        raise k_exc.ResourceNotReady(obj)
-
-    def _wait_for_provisioning(self, loadbalancer, timeout,
-                               interval=_LB_STS_POLL_FAST_INTERVAL):
-        lbaas = clients.get_loadbalancer_client()
-
-        for remaining in self._provisioning_timer(timeout, interval):
-            try:
-                response = lbaas.get_load_balancer(loadbalancer['id'])
-            except os_exc.ResourceNotFound:
-                LOG.debug("Cleaning CRD status for deleted "
-                          "loadbalancer %s", loadbalancer['name'])
-
utils.clean_lb_crd_status(loadbalancer['name']) - return None - - status = response.provisioning_status - if status == 'ACTIVE': - LOG.debug("Provisioning complete for %(lb)s", { - 'lb': loadbalancer}) - return loadbalancer - elif status == 'ERROR': - LOG.debug("Releasing loadbalancer %s with error status", - loadbalancer['id']) - self.release_loadbalancer(loadbalancer) - utils.clean_lb_crd_status(loadbalancer['name']) - return None - elif status == 'DELETED': - LOG.debug("Cleaning CRD status for deleted " - "loadbalancer %s", loadbalancer['name']) - utils.clean_lb_crd_status(loadbalancer['name']) - return None - else: - LOG.debug("Provisioning status %(status)s for %(lb)s, " - "%(rem).3gs remaining until timeout", - {'status': status, 'lb': loadbalancer, - 'rem': remaining}) - - raise k_exc.LoadBalancerNotReady(loadbalancer['id'], status) - - def _wait_for_deletion(self, loadbalancer, timeout, - interval=_LB_STS_POLL_FAST_INTERVAL): - lbaas = clients.get_loadbalancer_client() - - status = 'PENDING_DELETE' - for remaining in self._provisioning_timer(timeout, interval): - try: - lb = lbaas.get_load_balancer(loadbalancer['id']) - status = lb.provisioning_status - except os_exc.NotFoundException: - return - - raise k_exc.LoadBalancerNotReady(loadbalancer['id'], status) - - def _provisioning_timer(self, timeout, - interval=_LB_STS_POLL_FAST_INTERVAL): - # REVISIT(ivc): consider integrating with Retry - max_interval = 15 - with timeutils.StopWatch(duration=timeout) as timer: - while not timer.expired(): - yield timer.leftover() - interval = interval * 2 * random.gauss(0.8, 0.05) - interval = min(interval, max_interval) - interval = min(interval, timer.leftover()) - if interval: - time.sleep(interval) - - def update_lbaas_sg(self, service, sgs): - LOG.debug('Setting SG for LBaaS VIP port') - - svc_namespace = service['metadata']['namespace'] - svc_name = service['metadata']['name'] - svc_ports = service['spec'].get('ports', []) - - lbaas_name = c_utils.get_resource_name(svc_name, - prefix=svc_namespace + "/") - - endpoints_link = utils.get_endpoints_link(service) - k8s = clients.get_kubernetes_client() - try: - k8s.get(endpoints_link) - except k_exc.K8sResourceNotFound: - LOG.debug("Endpoint not Found. 
Skipping LB SG update for "
-                      "%s as the LB resources are not present", lbaas_name)
-            return
-
-        try:
-            klb = k8s.get(f'{k_const.K8S_API_CRD_NAMESPACES}/{svc_namespace}/'
-                          f'kuryrloadbalancers/{svc_name}')
-        except k_exc.K8sResourceNotFound:
-            LOG.debug('No KuryrLoadBalancer for service %s created yet.',
-                      lbaas_name)
-            raise k_exc.ResourceNotReady(svc_name)
-
-        if (not klb.get('status', {}).get('loadbalancer') or
-                klb.get('status', {}).get('listeners') is None):
-            LOG.debug('KuryrLoadBalancer for service %s not populated yet.',
-                      lbaas_name)
-            raise k_exc.ResourceNotReady(svc_name)
-
-        klb['status']['loadbalancer']['security_groups'] = sgs
-
-        lb = klb['status']['loadbalancer']
-        try:
-            k8s.patch_crd('status/loadbalancer', utils.get_res_link(klb),
-                          {'security_groups': sgs})
-        except k_exc.K8sResourceNotFound:
-            LOG.debug('KuryrLoadBalancer CRD not found %s', lbaas_name)
-            return
-        except k_exc.K8sClientException:
-            LOG.exception('Error updating KuryrLoadBalancer CRD %s',
-                          lbaas_name)
-            raise
-
-        lsnr_ids = {(listener['protocol'], listener['port']): listener['id']
-                    for listener in klb['status']['listeners']}
-
-        for port in svc_ports:
-            port_protocol = port['protocol']
-            lbaas_port = port['port']
-            target_port = port['targetPort']
-            suffix = f"{port_protocol}:{lbaas_port}"
-            sg_rule_name = c_utils.get_resource_name(lbaas_name,
-                                                     suffix=':' + suffix)
-            listener_id = lsnr_ids.get((port_protocol, lbaas_port))
-            if listener_id is None:
-                LOG.warning("There is no listener associated with the "
-                            "protocol %s and port %s. Skipping",
-                            port_protocol, lbaas_port)
-                continue
-            self._apply_members_security_groups(lb, lbaas_port,
-                                                target_port, port_protocol,
-                                                sg_rule_name, listener_id, sgs)
diff --git a/kuryr_kubernetes/controller/drivers/multi_vif.py b/kuryr_kubernetes/controller/drivers/multi_vif.py
deleted file mode 100644
index 16752dce9..000000000
--- a/kuryr_kubernetes/controller/drivers/multi_vif.py
+++ /dev/null
@@ -1,117 +0,0 @@
-# Copyright (c) 2018 RedHat, Inc.
-# All Rights Reserved.
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
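For orientation on the driver below: the NPWG multi-VIF driver reads a pod annotation (constants.K8S_ANNOTATION_NPWG_NETWORK) that may hold either a JSON list or a comma-separated string. A minimal, self-contained sketch of that fallback parsing, mirroring the _get_networks method further down; the function name and sample values here are assumptions:

import json


def parse_networks_annotation(value):
    """Parse a NPWG 'networks' annotation into a list of network dicts.

    Accepts either a JSON list ('[{"name": "net-a"}]') or the short
    comma-separated form ('net-a,other-ns/net-b').
    """
    try:
        return json.loads(value)
    except ValueError:
        networks = []
        for net in value.split(','):
            parts = net.split('/')
            if len(parts) == 1:
                networks.append({'name': parts[0]})
            elif len(parts) == 2:
                networks.append({'namespace': parts[0], 'name': parts[1]})
            else:
                raise ValueError('invalid networks annotation: %s' % net)
        return networks


assert parse_networks_annotation('net-a,other-ns/net-b') == [
    {'name': 'net-a'}, {'namespace': 'other-ns', 'name': 'net-b'}]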
- -from oslo_log import log as logging -from oslo_serialization import jsonutils - -from kuryr_kubernetes import clients -from kuryr_kubernetes import config as kuryr_config -from kuryr_kubernetes import constants -from kuryr_kubernetes.controller.drivers import base -from kuryr_kubernetes import exceptions -from kuryr_kubernetes import utils - -LOG = logging.getLogger(__name__) - - -class NoopMultiVIFDriver(base.MultiVIFDriver): - - def request_additional_vifs( - self, pod, project_id, security_groups): - return [] - - -class NPWGMultiVIFDriver(base.MultiVIFDriver): - def __init__(self): - super(NPWGMultiVIFDriver, self).__init__() - self._drv_vif_pool = base.VIFPoolDriver.get_instance( - specific_driver='multi_pool') - self._drv_vif_pool.set_vif_driver() - - def request_additional_vifs(self, pod, project_id, security_groups): - vifs = [] - networks = self._get_networks(pod) - if not networks: - return vifs - - kubernetes = clients.get_kubernetes_client() - namespace = pod['metadata']['namespace'] - - for network in networks: - if 'name' not in network: - raise exceptions.InvalidKuryrNetworkAnnotation() - - if 'namespace' in network: - namespace = network['namespace'] - - try: - url = '%s/namespaces/%s/network-attachment-definitions/%s' % ( - constants.K8S_API_NPWG_CRD, namespace, network['name']) - nad_obj = kubernetes.get(url) - except exceptions.K8sClientException: - LOG.exception("Kubernetes Client Exception") - raise - - config = jsonutils.loads(nad_obj['metadata']['annotations'] - ['openstack.org/kuryr-config']) - - subnet_id = config.get(constants.K8S_ANNOTATION_NPWG_CRD_SUBNET_ID) - neutron_defaults = kuryr_config.CONF.neutron_defaults - if constants.K8S_ANNOTATION_NPWG_CRD_DRIVER_TYPE not in config: - vif_drv = self._drv_vif_pool - if not subnet_id: - subnet_id = neutron_defaults.pod_subnet - else: - alias = config[constants.K8S_ANNOTATION_NPWG_CRD_DRIVER_TYPE] - vif_drv = base.PodVIFDriver.get_instance( - specific_driver=alias) - if not subnet_id: - try: - subnet_id = neutron_defaults.subnet_mapping[alias] - except KeyError: - subnet_id = neutron_defaults.pod_subnet - LOG.debug("Default subnet mapping in config file " - "doesn't contain any subnet for %s driver " - "alias. Default pod_subnet was used.", alias) - subnet = {subnet_id: utils.get_subnet(subnet_id)} - vif = vif_drv.request_vif(pod, project_id, subnet, security_groups) - if vif: - vifs.append(vif) - return vifs - - def _get_networks(self, pod): - networks = [] - try: - annotations = pod['metadata']['annotations'] - key = constants.K8S_ANNOTATION_NPWG_NETWORK - networks_annotation = annotations[key] - except KeyError: - return [] - - try: - networks = jsonutils.loads(networks_annotation) - except ValueError: - # if annotation is not in json format, convert it to json. - net_list = networks_annotation.split(',') - for net in net_list: - net_details = net.split('/') - if len(net_details) == 1: - networks.append({'name': net_details[0]}) - elif len(net_details) == 2: - networks.append( - {'namespace': net_details[0], 'name': net_details[1]} - ) - else: - raise exceptions.InvalidKuryrNetworkAnnotation() - return networks diff --git a/kuryr_kubernetes/controller/drivers/namespace_subnet.py b/kuryr_kubernetes/controller/drivers/namespace_subnet.py deleted file mode 100644 index c1c2e78be..000000000 --- a/kuryr_kubernetes/controller/drivers/namespace_subnet.py +++ /dev/null @@ -1,214 +0,0 @@ -# Copyright 2018 Red Hat, Inc. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from kuryr.lib._i18n import _ -from kuryr.lib import constants as kl_const -from oslo_config import cfg as oslo_cfg -from oslo_log import log as logging - -from kuryr_kubernetes import clients -from kuryr_kubernetes import constants -from kuryr_kubernetes.controller.drivers import default_subnet -from kuryr_kubernetes.controller.drivers import utils as c_utils -from kuryr_kubernetes import exceptions -from kuryr_kubernetes import utils - -from openstack import exceptions as os_exc - -LOG = logging.getLogger(__name__) - -namespace_subnet_driver_opts = [ - oslo_cfg.StrOpt('pod_router', - help=_("Default Neutron router ID where pod subnet(s) is " - "connected")), - oslo_cfg.StrOpt('pod_subnet_pool', - help=_("Default Neutron subnet pool ID where pod subnets " - "get their cidr from")), -] - -oslo_cfg.CONF.register_opts(namespace_subnet_driver_opts, "namespace_subnet") -TAGS = oslo_cfg.CONF.neutron_defaults.resource_tags - - -class NamespacePodSubnetDriver(default_subnet.DefaultPodSubnetDriver): - """Provides subnet for Pod port based on a Pod's namespace.""" - - def get_subnets(self, pod, project_id): - pod_namespace = pod['metadata']['namespace'] - return self.get_namespace_subnet(pod_namespace) - - def get_namespace_subnet(self, namespace, subnet_id=None): - if not subnet_id: - subnet_id = self._get_namespace_subnet_id(namespace) - return {subnet_id: utils.get_subnet(subnet_id)} - - def _get_namespace_subnet_id(self, namespace): - kubernetes = clients.get_kubernetes_client() - try: - net_crd_path = (f"{constants.K8S_API_CRD_NAMESPACES}/" - f"{namespace}/kuryrnetworks/{namespace}") - net_crd = kubernetes.get(net_crd_path) - except exceptions.K8sResourceNotFound: - LOG.debug("Kuryrnetwork resource not yet created, retrying...") - raise exceptions.ResourceNotReady(namespace) - except exceptions.K8sClientException: - LOG.exception("Kubernetes Client Exception.") - raise - - try: - subnet_id = net_crd['status']['subnetId'] - except KeyError: - LOG.debug("Subnet for namespace %s not yet created, retrying.", - namespace) - raise exceptions.ResourceNotReady(namespace) - return subnet_id - - def delete_namespace_subnet(self, net_crd): - subnet_id = net_crd['status'].get('subnetId') - net_id = net_crd['status'].get('netId') - - if net_id: - self._delete_namespace_network_resources(subnet_id, net_id) - - def _delete_namespace_network_resources(self, subnet_id, net_id): - os_net = clients.get_network_client() - if subnet_id: - router_id = oslo_cfg.CONF.namespace_subnet.pod_router - try: - clients.handle_neutron_errors( - os_net.remove_interface_from_router, router_id, - subnet_id=subnet_id) - except os_exc.NotFoundException as e: - # Nothing to worry about, either router or subnet is no more, - # or subnet is already detached. 
-                LOG.debug(e.message)
-                pass
-            except os_exc.SDKException:
-                LOG.exception("Error deleting subnet %(subnet)s from router "
-                              "%(router)s.",
-                              {'subnet': subnet_id, 'router': router_id})
-                raise
-
-        try:
-            os_net.delete_network(net_id)
-        except os_exc.ConflictException:
-            LOG.warning("One or more ports in use on the network %s. "
-                        "Deleting leftover ports before retrying", net_id)
-            # NOTE(dulek): '' is there because Neutron seems to unset
-            #              device_owner on detach.
-            leftover_ports = [p for p in os_net.ports(network_id=net_id)
-                              if p.device_owner in
-                              ['', 'trunk:subport', kl_const.DEVICE_OWNER]]
-            c_utils.delete_ports(leftover_ports)
-            raise exceptions.ResourceNotReady(net_id)
-        except os_exc.SDKException:
-            LOG.exception("Error deleting network %s.", net_id)
-            raise
-
-    def create_network(self, ns, project_id):
-        os_net = clients.get_network_client()
-        ns_name = ns['metadata']['name']
-        ns_uid = ns['metadata']['uid']
-        net_name = c_utils.get_resource_name(ns_name, ns_uid)
-        old_net_name = c_utils.get_resource_name(ns_name, prefix='ns/',
-                                                 suffix='-net')
-        # TODO(gryf): remove old_net_name support in the next release, and
-        # make the query more precise by adding the additional query
-        # parameter 'description', which should contain the namespace uid.
-        networks = os_net.networks(name=(net_name, old_net_name))
-        tags = ",".join(TAGS)
-
-        try:
-            # NOTE(ltomasbo): only one network must exist
-            net = next(networks)
-            # NOTE(gryf): It might happen that the network has been created,
-            # but for some reason tagging has failed.
-            if TAGS and not set(TAGS).issubset(set(net.tags)):
-                c_utils.tag_neutron_resources([net], exceptions=True)
-            return net.id
-        except (StopIteration, ValueError):
-            LOG.debug('Network does not exist. Creating.')
-
-        mtu_cfg = oslo_cfg.CONF.neutron_defaults.network_device_mtu
-        attrs = {'name': net_name, 'project_id': project_id,
-                 'description': tags}
-        if mtu_cfg:
-            attrs['mtu'] = mtu_cfg
-
-        try:
-            net = os_net.create_network(**attrs)
-        except os_exc.SDKException:
-            LOG.exception("Error creating neutron resources for the "
-                          "namespace %s", ns_name)
-            raise
-        c_utils.tag_neutron_resources([net], exceptions=True)
-        return net.id
-
-    def create_subnet(self, ns, project_id, net_id):
-        os_net = clients.get_network_client()
-        ns_name = ns['metadata']['name']
-        ns_uid = ns['metadata']['uid']
-        tags = ",".join(TAGS)
-
-        # NOTE(gryf): the assumption is that all the subnets (well, currently
-        # only one) in a specific k8s namespace are under exactly one network,
-        # which has the proper namespace uid in its description, so there is
-        # no need to put it on the subnet as well.
-        subnet_name = c_utils.get_resource_name(ns_name, ns_uid)
-        subnets = os_net.subnets(network_id=net_id)
-
-        try:
-            # NOTE(ltomasbo): only one subnet must exist
-            subnet = next(subnets)
-            # NOTE(gryf): same situation as with networks.
-            if TAGS and not set(TAGS).issubset(set(subnet.tags)):
-                c_utils.tag_neutron_resources([subnet], exceptions=True)
-            return subnet.id, subnet.cidr
-        except StopIteration:
-            LOG.debug('Subnet does not exist. 
Creating.') - - # create subnet with namespace as name - subnet_pool_id = oslo_cfg.CONF.namespace_subnet.pod_subnet_pool - ip_version = utils.get_subnetpool_version(subnet_pool_id) - try: - neutron_subnet = (os_net - .create_subnet(network_id=net_id, - description=tags, - ip_version=ip_version, - name=subnet_name, - enable_dhcp=False, - subnetpool_id=subnet_pool_id, - project_id=project_id)) - except os_exc.ConflictException: - LOG.debug("Max number of retries on neutron side achieved, " - "raising ResourceNotReady to retry subnet creation " - "for %s", subnet_name) - raise exceptions.ResourceNotReady(subnet_name) - c_utils.tag_neutron_resources([neutron_subnet], exceptions=True) - - return neutron_subnet.id, neutron_subnet.cidr - - def add_subnet_to_router(self, subnet_id): - os_net = clients.get_network_client() - router_id = oslo_cfg.CONF.namespace_subnet.pod_router - try: - # connect the subnet to the router - os_net.add_interface_to_router(router_id, subnet_id=subnet_id) - except os_exc.BadRequestException: - LOG.debug("Subnet %s already connected to the router", subnet_id) - except os_exc.SDKException: - LOG.exception("Error attaching the subnet %s to the router", - subnet_id) - raise - return router_id diff --git a/kuryr_kubernetes/controller/drivers/nested_dpdk_vif.py b/kuryr_kubernetes/controller/drivers/nested_dpdk_vif.py deleted file mode 100644 index 307a2ef44..000000000 --- a/kuryr_kubernetes/controller/drivers/nested_dpdk_vif.py +++ /dev/null @@ -1,75 +0,0 @@ -# Copyright (C) 2020 Intel Corporation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from openstack import exceptions as o_exc -from oslo_log import log as logging - -from kuryr_kubernetes import clients -from kuryr_kubernetes.controller.drivers import nested_vif -from kuryr_kubernetes.controller.drivers import utils -from kuryr_kubernetes import os_vif_util as ovu - - -LOG = logging.getLogger(__name__) - - -class NestedDpdkPodVIFDriver(nested_vif.NestedPodVIFDriver): - """Manages ports for DPDK based nested-containers to provide VIFs.""" - - # TODO(garyloug): maybe log a warning if the vswitch is not ovs-dpdk? 
-
-    def request_vif(self, pod, project_id, subnets, security_groups):
-        os_net = clients.get_network_client()
-        compute = clients.get_compute_client()
-
-        vm_id = self._get_parent_port(pod).device_id
-        net_id = utils.get_network_id(subnets)
-
-        try:
-            result = compute.create_server_interface(vm_id, net_id=net_id)
-        except o_exc.SDKException:
-            LOG.warning("Unable to create interface for server %s.",
-                        vm_id)
-            raise
-        port = os_net.get_port(result.port_id)
-        return ovu.neutron_to_osvif_vif_dpdk(port, subnets, pod)
-
-    def request_vifs(self, pod, project_id, subnets, security_groups,
-                     num_ports):
-        # TODO(garyloug): provide an implementation
-        raise NotImplementedError()
-
-    def release_vif(self, pod, vif, project_id=None):
-        compute = clients.get_compute_client()
-
-        vm_id = self._get_parent_port(pod).device_id
-        LOG.debug("release_vif for vm_id %s %s", vm_id, vif.id)
-
-        try:
-            compute.delete_server_interface(vif.id, server=vm_id)
-        except o_exc.SDKException:
-            LOG.warning("Unable to delete interface %s for server %s.",
-                        vif.id, vm_id)
-            raise
-
-    def activate_vif(self, vif, **kwargs):
-        # NOTE(danil): the new virtual interface was created in the Nova
-        # instance during the request_vif call, so if it had not been created
-        # successfully an o_exc.SDKException would have been thrown. During
-        # the binding process only a rebind of the interface to the userspace
-        # driver is done. There is no way to check the state of the rebound
-        # interface. Thus just set 'active' immediately to let the CNI driver
-        # make progress.
-        vif.active = True
diff --git a/kuryr_kubernetes/controller/drivers/nested_macvlan_vif.py b/kuryr_kubernetes/controller/drivers/nested_macvlan_vif.py
deleted file mode 100755
index 7493ae24f..000000000
--- a/kuryr_kubernetes/controller/drivers/nested_macvlan_vif.py
+++ /dev/null
@@ -1,179 +0,0 @@
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
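Background for the MACVLAN driver below: it never plugs the container port itself; instead it records the container's IP and MAC in the parent VM port's allowed_address_pairs. A standalone sketch of that bookkeeping under assumed data (no Neutron calls, names invented for illustration):

def add_address_pairs(address_pairs, ip_addresses, mac):
    """Return a new allowed_address_pairs list with ip/mac entries added."""
    for pair in address_pairs:
        if pair['ip_address'] in ip_addresses and pair['mac_address'] == mac:
            raise ValueError('pair already present: %s' % pair)
    return address_pairs + [{'ip_address': ip, 'mac_address': mac}
                            for ip in sorted(ip_addresses)]


pairs = add_address_pairs([], {'10.0.0.5'}, 'fa:16:3e:00:00:01')
assert pairs == [{'ip_address': '10.0.0.5',
                  'mac_address': 'fa:16:3e:00:00:01'}]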
-
-import threading
-
-from openstack import exceptions as o_exc
-from oslo_config import cfg
-from oslo_log import log as logging
-
-from kuryr_kubernetes import clients
-from kuryr_kubernetes import config as kuryr_config
-from kuryr_kubernetes.controller.drivers import nested_vif
-from kuryr_kubernetes.controller.drivers import utils
-from kuryr_kubernetes import exceptions as k_exc
-from kuryr_kubernetes import os_vif_util as ovu
-
-LOG = logging.getLogger(__name__)
-CONF = cfg.CONF
-
-
-class NestedMacvlanPodVIFDriver(nested_vif.NestedPodVIFDriver):
-    """Manages ports for nested-containers using MACVLAN to provide VIFs."""
-
-    def __init__(self):
-        self.lock = threading.Lock()
-
-    def request_vif(self, pod, project_id, subnets, security_groups):
-        os_net = clients.get_network_client()
-        req = self._get_port_request(pod, project_id, subnets,
-                                     security_groups)
-        attempts = kuryr_config.CONF.pod_vif_nested.rev_update_attempts
-        container_port = None
-        while attempts > 0:
-            vm_port = self._get_parent_port(pod)
-
-            if not container_port:
-                container_port = os_net.create_port(**req)
-                self._check_port_binding([container_port])
-                if not self._tag_on_creation:
-                    utils.tag_neutron_resources([container_port])
-
-            container_mac = container_port.mac_address
-            container_ips = frozenset(entry['ip_address'] for entry in
-                                      container_port.fixed_ips)
-
-            attempts = self._try_update_port(
-                attempts, self._add_to_allowed_address_pairs, vm_port,
-                container_ips, container_mac)
-
-        return ovu.neutron_to_osvif_vif_nested_macvlan(container_port, subnets)
-
-    def request_vifs(self, pod, project_id, subnets, security_groups,
-                     num_ports):
-        # TODO(mchiappe): provide an implementation
-        raise NotImplementedError()
-
-    def release_vif(self, pod, vif, project_id=None):
-        os_net = clients.get_network_client()
-
-        attempts = kuryr_config.CONF.pod_vif_nested.rev_update_attempts
-        while attempts > 0:
-            container_port = os_net.get_port(vif.id)
-
-            container_mac = container_port.mac_address
-            container_ips = frozenset(entry['ip_address'] for entry in
-                                      container_port.fixed_ips)
-            vm_port = self._get_parent_port(pod)
-            attempts = self._try_update_port(
-                attempts, self._remove_from_allowed_address_pairs,
-                vm_port, container_ips, container_mac)
-
-        try:
-            os_net.delete_port(vif.id, ignore_missing=False)
-        except o_exc.ResourceNotFound:
-            LOG.warning("Unable to release port %s as it no longer exists.",
-                        vif.id)
-
-    def activate_vif(self, vif, **kwargs):
-        # NOTE(mchiappe): there is no way to get feedback on the actual
-        # interface creation or activation as no plugging can happen for this
-        # interface type. However the status of the port is not relevant as
-        # it is used for IPAM purposes only, thus just set 'active'
-        # immediately to let the CNI driver make progress.
-        vif.active = True
-
-    def _add_to_allowed_address_pairs(self, port, ip_addresses,
-                                      mac_address=None):
-        if not ip_addresses:
-            raise k_exc.IntegrityError(
-                "Cannot add a pair to the "
-                "allowed_address_pairs of port %s: missing IP address" %
-                port.id)
-
-        mac = mac_address if mac_address else port.mac_address
-        address_pairs = port.allowed_address_pairs
-
-        # look for duplicates or near-matches
-        for pair in address_pairs:
-            if pair['ip_address'] in ip_addresses:
-                if pair['mac_address'] == mac:
-                    raise k_exc.AllowedAddressAlreadyPresent(
-                        "Pair %s already present in the "
-                        "'allowed_address_pair' list. This is due to a "
-                        "misconfiguration or a bug" % str(pair))
-                else:
-                    LOG.warning(
-                        "A pair with IP %s but a different MAC address "
-                        "is already present in the 'allowed_address_pair'. "
-                        "This could indicate a misconfiguration or a "
-                        "bug", pair['ip_address'])
-
-        for ip in ip_addresses:
-            address_pairs.append({'ip_address': ip, 'mac_address': mac})
-
-        self._update_port_address_pairs(
-            port.id, address_pairs,
-            revision_number=port.revision_number)
-
-        LOG.debug("Added allowed_address_pair %s %s",
-                  str(ip_addresses), mac_address)
-
-    def _remove_from_allowed_address_pairs(self, port, ip_addresses,
-                                           mac_address=None):
-        if not ip_addresses:
-            raise k_exc.IntegrityError(
-                "Cannot remove a pair from the "
-                "allowed_address_pairs of port %s: missing IP address" %
-                port.id)
-
-        mac = mac_address if mac_address else port.mac_address
-        address_pairs = port.allowed_address_pairs
-        updated = False
-
-        for ip in ip_addresses:
-            try:
-                address_pairs.remove({'ip_address': ip, 'mac_address': mac})
-                updated = True
-            except ValueError:
-                LOG.error("No {'ip_address': %s, 'mac_address': %s} pair "
-                          "found in the 'allowed_address_pair' list while "
-                          "trying to remove it.", ip, mac)
-
-        if updated:
-            self._update_port_address_pairs(
-                port.id,
-                address_pairs,
-                revision_number=port.revision_number)
-
-    def _update_port_address_pairs(self, port_id, address_pairs,
-                                   revision_number=None):
-        os_net = clients.get_network_client()
-        os_net.update_port(port_id, allowed_address_pairs=address_pairs,
-                           if_revision=revision_number)
-
-    def _try_update_port(self, attempts, f,
-                         vm_port, container_ips, container_mac):
-        try:
-            with self.lock:
-                f(vm_port, container_ips, container_mac)
-                attempts = 0
-        except o_exc.SDKException:
-            attempts -= 1
-            if attempts == 0:
-                LOG.exception("Error happened while updating port %s",
-                              vm_port['id'] if vm_port else None)
-                raise
-
-        return attempts
diff --git a/kuryr_kubernetes/controller/drivers/nested_vif.py b/kuryr_kubernetes/controller/drivers/nested_vif.py
deleted file mode 100644
index b6c3c9c88..000000000
--- a/kuryr_kubernetes/controller/drivers/nested_vif.py
+++ /dev/null
@@ -1,75 +0,0 @@
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
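Background for the base class below: the parent VM port is located by listing ports filtered by the node's fixed IP and keeping the one whose fixed IP sits on a known node subnet. A toy version of that matching with made-up data:

def find_parent_port(ports, node_ip, node_subnet_ids):
    """Pick the port that holds node_ip on one of the known node subnets."""
    for port in ports:
        for fip in port.get('fixed_ips', []):
            if (fip.get('ip_address') == node_ip and
                    fip.get('subnet_id') in node_subnet_ids):
                return port
    return None


ports = [{'id': 'parent-1', 'fixed_ips': [
    {'ip_address': '192.168.5.10', 'subnet_id': 'worker-subnet'}]}]
assert find_parent_port(ports, '192.168.5.10',
                        {'worker-subnet'})['id'] == 'parent-1'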
- -import abc - -from kuryr.lib import exceptions as kl_exc -from openstack import exceptions as os_exc -from oslo_config import cfg as oslo_cfg -from oslo_log import log as logging - -from kuryr_kubernetes import clients -from kuryr_kubernetes.controller.drivers import base -from kuryr_kubernetes.controller.drivers import neutron_vif - - -CONF = oslo_cfg.CONF -LOG = logging.getLogger(__name__) - - -class NestedPodVIFDriver(neutron_vif.NeutronPodVIFDriver, - metaclass=abc.ABCMeta): - """Skeletal handler driver for VIFs for Nested Pods.""" - - def __init__(self): - super().__init__() - self.nodes_subnets_driver = base.NodesSubnetsDriver.get_instance() - - def _get_parent_port_by_host_ip(self, node_fixed_ip): - os_net = clients.get_network_client() - node_subnet_ids = self.nodes_subnets_driver.get_nodes_subnets( - raise_on_empty=True) - - fixed_ips = ['ip_address=%s' % str(node_fixed_ip)] - filters = {'fixed_ips': fixed_ips} - tags = CONF.neutron_defaults.resource_tags - if tags: - filters['tags'] = tags - try: - ports = os_net.ports(**filters) - except os_exc.SDKException: - LOG.error("Parent VM port with fixed IPs %s not found!", fixed_ips) - raise - - for port in ports: - for fip in port.fixed_ips: - if fip.get('subnet_id') in node_subnet_ids: - return port - - LOG.error("Neutron port for VM port with fixed IPs %s not found!", - fixed_ips) - raise kl_exc.NoResourceException() - - def _get_parent_port(self, pod): - try: - # REVISIT(vikasc): Assumption is being made that hostIP is the IP - # of trunk interface on the node(vm). - node_fixed_ip = pod['status']['hostIP'] - except KeyError: - if pod['status']['conditions'][0]['type'] != "Initialized": - LOG.debug("Pod condition type is not 'Initialized'") - - LOG.error("Failed to get parent vm port ip") - raise - return self._get_parent_port_by_host_ip(node_fixed_ip) diff --git a/kuryr_kubernetes/controller/drivers/nested_vlan_vif.py b/kuryr_kubernetes/controller/drivers/nested_vlan_vif.py deleted file mode 100644 index e3dabea10..000000000 --- a/kuryr_kubernetes/controller/drivers/nested_vlan_vif.py +++ /dev/null @@ -1,311 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
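Background for the VLAN driver below: every subport needs a segmentation ID that is free on the trunk, so the driver computes the set of IDs already used by existing subports and allocates outside it (the real allocation is delegated to kuryr-lib's segmentation driver; this simplified sketch only illustrates the idea):

def allocate_vlan_id(in_use, minimum=1, maximum=4094):
    """Return the first VLAN ID not already used by a trunk subport."""
    for vlan_id in range(minimum, maximum + 1):
        if vlan_id not in in_use:
            return vlan_id
    raise RuntimeError('no free VLAN IDs left on the trunk')


sub_ports = [{'segmentation_id': 1}, {'segmentation_id': 2}]
in_use = {sp['segmentation_id'] for sp in sub_ports}
assert allocate_vlan_id(in_use) == 3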
-from time import sleep
-
-from kuryr.lib import constants as kl_const
-from kuryr.lib import exceptions as kl_exc
-from kuryr.lib import segmentation_type_drivers as seg_driver
-from openstack import exceptions as os_exc
-from oslo_config import cfg
-from oslo_log import log as logging
-
-from kuryr_kubernetes import clients
-from kuryr_kubernetes import config
-from kuryr_kubernetes import constants
-from kuryr_kubernetes.controller.drivers import nested_vif
-from kuryr_kubernetes.controller.drivers import utils
-from kuryr_kubernetes import exceptions as k_exc
-from kuryr_kubernetes import os_vif_util as ovu
-
-
-LOG = logging.getLogger(__name__)
-
-DEFAULT_MAX_RETRY_COUNT = 3
-DEFAULT_RETRY_INTERVAL = 1
-ACTIVE_TIMEOUT = 90
-
-CONF = cfg.CONF
-
-
-class NestedVlanPodVIFDriver(nested_vif.NestedPodVIFDriver):
-    """Manages ports for nested-containers using VLANs to provide VIFs."""
-
-    def request_vif(self, pod, project_id, subnets, security_groups):
-        os_net = clients.get_network_client()
-        parent_port = self._get_parent_port(pod)
-        trunk_id = self._get_trunk_id(parent_port)
-
-        rq = self._get_port_request(pod, project_id, subnets, security_groups)
-        port = os_net.create_port(**rq)
-        self._check_port_binding([port])
-        if not self._tag_on_creation:
-            utils.tag_neutron_resources([port])
-        vlan_id = self._add_subport(trunk_id, port.id)
-
-        return ovu.neutron_to_osvif_vif_nested_vlan(port, subnets, vlan_id)
-
-    def request_vifs(self, pod, project_id, subnets, security_groups,
-                     num_ports, semaphore, trunk_ip=None):
-        """This method creates subports and returns a list with their vifs.
-
-        It creates up to num_ports subports and attaches them to the trunk
-        port.
-
-        If not enough VLAN IDs are available to create all the requested
-        subports, it creates as many subports as there are available VLAN
-        IDs.
-
-        Note that os_net.add_trunk_subports is an atomic operation that will
-        either attach all or none of the subports. Therefore, if there is a
-        VLAN ID collision, all the created ports will be deleted and an
-        exception is raised.
-        """
-        os_net = clients.get_network_client()
-        if trunk_ip:
-            parent_port = self._get_parent_port_by_host_ip(trunk_ip)
-        else:
-            parent_port = self._get_parent_port(pod)
-        trunk_id = self._get_trunk_id(parent_port)
-
-        port_rq, subports_info = self._create_subports_info(
-            pod, project_id, subnets, security_groups,
-            trunk_id, num_ports, unbound=True)
-
-        if not subports_info:
-            LOG.error("There are no VLAN IDs available to create subports")
-            return []
-
-        bulk_port_rq = [port_rq] * len(subports_info)
-        # Restrict the amount of bulk port creations that might be running
-        # in parallel.
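# (Sketch of the guard below, with assumed sizing: `semaphore` is a
#  semaphore shared across handler threads, created along the lines of
#      semaphore = eventlet.semaphore.Semaphore(8)
#  so that at most eight bulk create_ports() calls hit Neutron at once.)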
- with semaphore: - try: - ports = list(os_net.create_ports(bulk_port_rq)) - except os_exc.SDKException: - for subport_info in subports_info: - self._release_vlan_id(subport_info['segmentation_id']) - LOG.exception("Error creating bulk ports: %s", bulk_port_rq) - raise - self._check_port_binding(ports) - if not self._tag_on_creation: - utils.tag_neutron_resources(ports) - - for index, port in enumerate(ports): - subports_info[index]['port_id'] = port['id'] - - try: - try: - os_net.add_trunk_subports(trunk_id, subports_info) - except os_exc.ConflictException: - LOG.error("vlan ids already in use on trunk") - utils.delete_ports(ports) - for subport_info in subports_info: - self._release_vlan_id(subport_info['segmentation_id']) - return [] - except os_exc.SDKException: - LOG.exception("Error happened during subport addition to trunk") - utils.delete_ports(ports) - for subport_info in subports_info: - self._release_vlan_id(subport_info['segmentation_id']) - return [] - - vifs = [] - for index, port in enumerate(ports): - vlan_id = subports_info[index]['segmentation_id'] - vif = ovu.neutron_to_osvif_vif_nested_vlan(port, subnets, vlan_id) - vifs.append(vif) - return vifs - - def activate_vif(self, vif, pod=None, retry_info=None): - try: - super().activate_vif(vif) - except k_exc.PortNotReady: - if retry_info and retry_info.get('elapsed', 0) > ACTIVE_TIMEOUT: - parent_port = self._get_parent_port(pod) - trunk_id = self._get_trunk_id(parent_port) - # NOTE(dulek): We don't need a lock to prevent VLAN ID from - # being taken over because the SegmentationDriver - # will keep it reserved in memory unless we - # release it. And we won't. - LOG.warning('Subport %s is in DOWN status for more than %d ' - 'seconds. This is a Neutron issue. Attempting to ' - 'reattach the subport to trunk %s using VLAN ID %s' - ' to fix it.', vif.id, retry_info['elapsed'], - trunk_id, vif.vlan_id) - try: - self._remove_subport(trunk_id, vif.id) - except os_exc.NotFoundException: - # NOTE(dulek): This may happen when _add_subport() failed - # or Kuryr crashed between the calls. Let's - # try to fix it hoping that VLAN ID is still - # free. - LOG.warning('Subport %s was not attached to the trunk. 
'
-                                'Trying to attach it anyway.', vif.id)
-                self._add_subport(trunk_id, vif.id,
-                                  requested_vlan_id=vif.vlan_id)
-                LOG.warning("Reattached subport %s, its state will be "
-                            "rechecked when the event is retried.", vif.id)
-            raise
-
-    def release_vif(self, pod, vif, project_id=None):
-        os_net = clients.get_network_client()
-        parent_port = self._get_parent_port(pod)
-        trunk_id = self._get_trunk_id(parent_port)
-        try:
-            self._remove_subport(trunk_id, vif.id)
-        except os_exc.NotFoundException:
-            pass
-        self._release_vlan_id(vif.vlan_id)
-        os_net.delete_port(vif.id)
-
-    def _get_port_request(self, pod, project_id, subnets, security_groups,
-                          unbound=False):
-        port_req_body = {'project_id': project_id,
-                         'network_id': utils.get_network_id(subnets),
-                         'fixed_ips': ovu.osvif_to_neutron_fixed_ips(subnets),
-                         'device_owner': kl_const.DEVICE_OWNER,
-                         'admin_state_up': True}
-
-        # only set the name if port_debug is enabled
-        if config.CONF.kubernetes.port_debug:
-            if unbound:
-                port_req_body['name'] = constants.KURYR_PORT_NAME
-            else:
-                port_req_body['name'] = utils.get_port_name(pod)
-
-        if security_groups:
-            port_req_body['security_groups'] = security_groups
-
-        if self._tag_on_creation:
-            tags = CONF.neutron_defaults.resource_tags
-            if tags:
-                port_req_body['tags'] = tags
-
-        return port_req_body
-
-    def _create_subports_info(self, pod, project_id, subnets,
-                              security_groups, trunk_id, num_ports,
-                              unbound=False):
-        subports_info = []
-
-        in_use_vlan_ids = self._get_in_use_vlan_ids_set(trunk_id)
-        port_rq = self._get_port_request(pod, project_id, subnets,
-                                         security_groups, unbound)
-        for _ in range(num_ports):
-            try:
-                vlan_id = seg_driver.allocate_segmentation_id(in_use_vlan_ids)
-            except kl_exc.SegmentationIdAllocationFailure:
-                LOG.warning("There are not enough VLAN IDs available to "
-                            "create a batch of %d subports.", num_ports)
-                break
-            in_use_vlan_ids.add(vlan_id)
-
-            subports_info.append({'segmentation_id': vlan_id,
-                                  'port_id': '',
-                                  'segmentation_type': 'vlan'})
-        return port_rq, subports_info
-
-    def _get_trunk_id(self, port):
-        try:
-            return port['trunk_details']['trunk_id']
-        except (KeyError, TypeError):
-            LOG.error("Neutron port is missing trunk details. "
-                      "Please ensure that the k8s node port is associated "
-                      "with a Neutron vlan trunk")
-            raise k_exc.K8sNodeTrunkPortFailure
-
-    def _add_subport(self, trunk_id, subport, requested_vlan_id=None):
-        """Adds a subport to the Neutron trunk
-
-        This method gets a VLAN ID allocated from the kuryr segmentation
-        driver. In an active/active HA deployment a VLAN ID conflict is
-        possible. In such a case the VLAN ID is requested again and the
-        subport addition is retried, up to DEFAULT_MAX_RETRY_COUNT times
-        in case of VLAN ID conflicts.
-        """
-        # TODO(vikasc): Better approach for retrying in case of
-        # vlan-id conflict.
-        os_net = clients.get_network_client()
-        retry_count = 1
-        while True:
-            if requested_vlan_id:
-                vlan_id = requested_vlan_id
-            else:
-                try:
-                    vlan_id = self._get_vlan_id(trunk_id)
-                except os_exc.SDKException:
-                    LOG.error("Getting VLAN ID for subport on "
-                              "trunk %s failed!!", trunk_id)
-                    raise
-
-            subport = [{'segmentation_id': vlan_id,
-                        'port_id': subport,
-                        'segmentation_type': 'vlan'}]
-            try:
-                os_net.add_trunk_subports(trunk_id, subport)
-            except os_exc.ConflictException:
-                if (retry_count < DEFAULT_MAX_RETRY_COUNT and
-                        not requested_vlan_id):
-                    LOG.error("VLAN ID already in use on trunk %s. 
" - "Retrying.", trunk_id) - retry_count += 1 - sleep(DEFAULT_RETRY_INTERVAL) - continue - else: - LOG.error("Failed to add subport %s to trunk %s due to " - "VLAN ID %d conflict.", subport, trunk_id, - vlan_id) - raise - except os_exc.SDKException: - self._release_vlan_id(vlan_id) - LOG.exception("Error happened during subport " - "addition to trunk %s", trunk_id) - raise - return vlan_id - - def _remove_subports(self, trunk_id, subports_id): - os_net = clients.get_network_client() - subports_body = [] - for subport_id in set(subports_id): - subports_body.append({'port_id': subport_id}) - try: - os_net.delete_trunk_subports(trunk_id, subports_body) - except os_exc.NotFoundException: - if len(subports_id) > 1: - LOG.debug('Not Found on subport deletion, most likely some ' - 'subports in the list got detached already.') - raise # We don't know if all ports are detached, so raise. - # If single requested port is detached already, we're cool, ignore. - except os_exc.SDKException: - LOG.exception("Error happened during subport removal from " - "trunk %s", trunk_id) - raise - - def _remove_subport(self, trunk_id, subport_id): - self._remove_subports(trunk_id, [subport_id]) - - def _get_vlan_id(self, trunk_id): - vlan_ids = self._get_in_use_vlan_ids_set(trunk_id) - return seg_driver.allocate_segmentation_id(vlan_ids) - - def _release_vlan_id(self, id): - return seg_driver.release_segmentation_id(id) - - def _get_in_use_vlan_ids_set(self, trunk_id): - vlan_ids = set() - os_net = clients.get_network_client() - trunk = os_net.get_trunk(trunk_id) - for port in trunk.sub_ports: - vlan_ids.add(port['segmentation_id']) - - return vlan_ids diff --git a/kuryr_kubernetes/controller/drivers/network_policy.py b/kuryr_kubernetes/controller/drivers/network_policy.py deleted file mode 100644 index f97d626c0..000000000 --- a/kuryr_kubernetes/controller/drivers/network_policy.py +++ /dev/null @@ -1,792 +0,0 @@ -# Copyright 2018 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import ipaddress -import netaddr - -from openstack import exceptions as os_exc -from oslo_log import log as logging - -from kuryr_kubernetes import clients -from kuryr_kubernetes import config -from kuryr_kubernetes import constants -from kuryr_kubernetes.controller.drivers import base -from kuryr_kubernetes.controller.drivers import utils as driver_utils -from kuryr_kubernetes import exceptions -from kuryr_kubernetes import utils - -CONF = config.CONF - -LOG = logging.getLogger(__name__) - - -class NetworkPolicyDriver(base.NetworkPolicyDriver): - """Provide security groups actions based on K8s Network Policies""" - - def __init__(self): - super().__init__() - self.os_net = clients.get_network_client() - self.kubernetes = clients.get_kubernetes_client() - self.nodes_subnets_driver = base.NodesSubnetsDriver.get_instance() - - def affected_pods(self, policy, selector=None): - if selector is not None: - pod_selector = selector - else: - pod_selector = policy['spec'].get('podSelector') - if pod_selector: - policy_namespace = policy['metadata']['namespace'] - pods = driver_utils.get_pods(pod_selector, policy_namespace) - return pods.get('items') - else: - # NOTE(ltomasbo): It affects all the pods on the namespace - return self.namespaced_pods(policy) - - def create_security_group(self, knp, project_id): - sg_name = driver_utils.get_resource_name(knp['metadata']['namespace'] + - '-' + - knp['metadata']['name'], - prefix='sg/') - desc = ("Kuryr-Kubernetes Network Policy %s SG" % - utils.get_res_unique_name(knp)) - try: - # Create initial security group - sg = self.os_net.create_security_group(name=sg_name, - project_id=project_id, - description=desc) - driver_utils.tag_neutron_resources([sg]) - # NOTE(dulek): Neutron populates every new SG with two rules - # allowing egress on IPv4 and IPv6. This collides with - # how network policies are supposed to work, because - # initially even egress traffic should be blocked. - # To work around this we will delete those two SG - # rules just after creation. - for sgr in sg.security_group_rules: - self.os_net.delete_security_group_rule(sgr['id']) - except (os_exc.SDKException, exceptions.ResourceNotReady) as exc: - np = utils.get_referenced_object(knp, 'NetworkPolicy') - if np: - self.kubernetes.add_event(np, 'FailedToAddSecurityGroup', - f'Adding new security group or ' - f'security group rules for ' - f'corresponding network policy has ' - f'failed: {exc}', 'Warning') - LOG.exception("Error creating security group for network policy " - " %s", knp['metadata']['name']) - raise - - return sg.id - - def delete_np_sg(self, sg_id): - try: - self.os_net.delete_security_group(sg_id) - except os_exc.ConflictException: - LOG.debug("Security Group %s still in use!", sg_id) - # raising ResourceNotReady to retry this action in case ports - # associated to affected pods are not updated on time, i.e., - # they are still using the security group to be removed - raise exceptions.ResourceNotReady(sg_id) - except os_exc.SDKException: - LOG.exception("Error deleting security group %s.", sg_id) - raise - - def ensure_network_policy(self, policy): - """Create security group rules out of network policies - - Triggered by events from network policies, this method ensures that - KuryrNetworkPolicy object is created with the security group rules - definitions required to represent the NetworkPolicy. 
-        """
-        LOG.debug("Creating network policy %s", policy['metadata']['name'])
-
-        i_rules, e_rules = self._get_security_group_rules_from_network_policy(
-            policy)
-
-        knp = self._get_knp_crd(policy)
-        if not knp:
-            try:
-                self._create_knp_crd(policy, i_rules, e_rules)
-            except exceptions.K8sNamespaceTerminating:
-                LOG.debug('Namespace %s is being terminated, ignoring '
-                          'NetworkPolicy %s in that namespace.',
-                          policy['metadata']['namespace'],
-                          policy['metadata']['name'])
-                return
-        else:
-            self._patch_knp_crd(policy, i_rules, e_rules, knp)
-
-    def namespaced_pods(self, policy):
-        pod_namespace = policy['metadata']['namespace']
-        pods = self.kubernetes.get('{}/namespaces/{}/pods'.format(
-            constants.K8S_API_BASE, pod_namespace))
-        return pods.get('items')
-
-    def _get_security_group_rules_from_network_policy(self, policy):
-        """Get security group rules required to represent an NP
-
-        This method creates the security group rule bodies coming out of a
-        network policy's parsing.
-        """
-        i_rules, e_rules = self._parse_network_policy_rules(policy)
-        # Add default rules to allow traffic from the host and svc subnet
-        i_rules += self._get_default_np_rules()
-        # Add rules allowing ingress from LBs
-        # FIXME(dulek): Rules added below cannot work around the Amphora
-        #               source-ip problem as Amphora does not use the LB VIP
-        #               for LB->members traffic, but that other IP attached
-        #               to the Amphora VM in the service subnet. It's
-        #               ridiculous.
-        i_rules += self._get_service_ingress_rules(policy)
-
-        return i_rules, e_rules
-
-    def _get_service_ingress_rules(self, policy):
-        """Get SG rules allowing traffic from Services in the namespace
-
-        This method returns ingress rules allowing traffic from all the
-        services' clusterIPs in the cluster. This is required for OVN LBs in
-        order to work around the fact that it changes the source-ip to the
-        LB IP in hairpin traffic. This shouldn't be a security problem as
-        this can only happen when the pod receiving the traffic is the one
-        that calls the service.
-
-        FIXME(dulek): Once OVN supports selecting a single, configurable
-                      source-IP for hairpin traffic, consider using it
-                      instead.
-        """
-        if CONF.octavia_defaults.enforce_sg_rules:
-            # When enforce_sg_rules is True, one of the default rules will
-            # open ingress from all the services subnets, so those rules
-            # would be redundant.
-            return []
-
-        ns = policy['metadata']['namespace']
-        rules = []
-        services = self.kubernetes.get(
-            f'{constants.K8S_API_NAMESPACES}/{ns}/services').get('items', [])
-        for svc in services:
-            if svc['metadata'].get('deletionTimestamp'):
-                # Ignore services being deleted
-                continue
-            ip = svc['spec'].get('clusterIP')
-            if not ip or ip == 'None':
-                # Ignore headless services
-                continue
-            rules.append(driver_utils.create_security_group_rule_body(
-                'ingress', cidr=ip,
-                description=f"Allow traffic from local namespace service "
-                            f"{svc['metadata']['name']}"))
-        return rules
-
-    def _get_default_np_rules(self):
-        """Add extra SG rules to allow traffic from svcs and the host.
- - This method adds the base security group rules for the NP security - group: - - Ensure traffic is allowed from the services subnet - - Ensure traffic is allowed from the host - """ - rules = [] - default_cidrs = [] - if CONF.octavia_defaults.enforce_sg_rules: - default_cidrs.append(utils.get_subnet_cidr( - CONF.neutron_defaults.service_subnet)) - worker_subnet_ids = self.nodes_subnets_driver.get_nodes_subnets() - default_cidrs.extend(utils.get_subnets_cidrs(worker_subnet_ids)) - - for cidr in default_cidrs: - ethertype = constants.IPv4 - if ipaddress.ip_network(cidr).version == constants.IP_VERSION_6: - ethertype = constants.IPv6 - rules.append({ - 'sgRule': { - 'ethertype': ethertype, - 'direction': 'ingress', - 'description': 'Kuryr-Kubernetes NetPolicy SG rule', - 'remote_ip_prefix': cidr, - }}) - - return rules - - def _get_pods(self, pod_selector, namespace=None, namespace_selector=None): - matching_pods = {"items": []} - if namespace_selector: - matching_namespaces = driver_utils.get_namespaces( - namespace_selector) - for ns in matching_namespaces.get('items'): - matching_pods = driver_utils.get_pods(pod_selector, - ns['metadata']['name']) - else: - matching_pods = driver_utils.get_pods(pod_selector, namespace) - return matching_pods.get('items') - - def _get_namespaces(self, namespace_selector, namespace=None): - matching_namespaces = [] - if not namespace_selector and namespace: - matching_namespaces.append(self.kubernetes.get( - '{}/namespaces/{}'.format(constants.K8S_API_BASE, namespace))) - else: - matching_namespaces.extend(driver_utils.get_namespaces( - namespace_selector).get('items')) - return matching_namespaces - - def _parse_selectors(self, rule_block, rule_direction, policy_namespace): - allowed_resources = [] - allowed_cidrs = None - selectors = False - for rule in rule_block.get(rule_direction, []): - namespace_selector = rule.get('namespaceSelector') - pod_selector = rule.get('podSelector') - if namespace_selector == {}: - selectors = True - if pod_selector: - # allow matching pods in all namespaces - allowed_resources.extend(self._get_pods( - pod_selector)) - else: - # allow from all the cluster, which means pod subnets and - # service subnet. 
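-                    # As an illustration (CIDRs are hypothetical), this
-                    # branch typically yields allowed_cidrs like
-                    # ['10.0.0.0/16', '10.1.0.0/16'] from the pod subnet
-                    # pool, with the service subnet appended right below.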
- allowed_cidrs = utils.get_subnetpool_cidrs( - CONF.namespace_subnet.pod_subnet_pool) - allowed_cidrs.append(utils.get_subnet_cidr( - CONF.neutron_defaults.service_subnet)) - elif namespace_selector: - selectors = True - if pod_selector: - # allow matching pods on matching namespaces - allowed_resources.extend(self._get_pods( - pod_selector, - namespace_selector=namespace_selector)) - else: - # allow from/to all on the matching namespaces - allowed_resources.extend(self._get_namespaces( - namespace_selector)) - else: - if pod_selector == {}: - # allow from/to all pods on the network policy - # namespace - selectors = True - allowed_resources.extend(self._get_namespaces( - None, - namespace=policy_namespace)) - elif pod_selector: - # allow matching pods on the network policy - # namespace - selectors = True - allowed_resources.extend(self._get_pods( - pod_selector, - namespace=policy_namespace)) - - return allowed_cidrs, selectors, allowed_resources - - def _create_sg_rules_with_container_ports( - self, container_ports, allowed_cidrs, resource, matched_pods, - crd_rules, direction, port, pod_selector=None, - policy_namespace=None): - cidr, ns = self._get_resource_details(resource) - for pod, container_port in container_ports: - pod_label = pod['metadata'].get('labels') - pod_ip = pod['status'].get('podIP') - pod_namespace = pod['metadata']['namespace'] - pod_info = {pod_ip: pod_namespace} - # NOTE(maysams) Avoid to take into account pods that are also - # matched by NetworkPolicySpec's podSelector. This way we do - # not allow egress traffic to the actual set of pods the NP - # is enforced on. - if (direction == 'egress' and - (driver_utils.match_selector(pod_selector, pod_label) and - policy_namespace == pod_namespace)): - continue - if container_port in matched_pods: - matched_pods[container_port].update(pod_info) - else: - matched_pods[container_port] = pod_info - if not allowed_cidrs and matched_pods and cidr: - for container_port, pods in matched_pods.items(): - sg_rule = driver_utils.create_security_group_rule_body( - direction, container_port, - # Pod's spec.containers[].port.protocol defaults to TCP - protocol=port.get('protocol', 'TCP'), - cidr=cidr, pods=pods) - if sg_rule not in crd_rules: - crd_rules.append(sg_rule) - if direction == 'egress': - self._create_svc_egress_sg_rule( - policy_namespace, crd_rules, - resource=resource, port=container_port, - # Pod's spec.containers[].port.protocol defaults to TCP - protocol=port.get('protocol', 'TCP')) - - def _create_sg_rule_body_on_text_port(self, direction, port, - resources, crd_rules, pod_selector, - policy_namespace, - allowed_cidrs=None): - """Create SG rules when named port is used in the NP rule - - In case of ingress, the pods selected by NetworkPolicySpec's - podSelector have its containers checked for ports with same name as - the named port. If true, rules are created for the resource matched - in the NP rule selector with that port. In case of egress, all the pods - selected by the NetworkPolicyEgressRule's selector have its containers - checked for containers ports with same name as the ones defined in - NP rule, and if true the rule is created. 
-
-        param direction: String with ingress or egress
-        param port: dict containing port and protocol
-        param resources: list of K8S resources (pod/namespace) or
-                         a dict with cidr
-        param crd_rules: list of parsed SG rules
-        param pod_selector: dict with NetworkPolicySpec's podSelector
-        param policy_namespace: string with policy namespace
-        param allowed_cidrs: None, or a list of cidrs the traffic should
-                             be allowed from/to.
-        """
-        matched_pods = {}
-        if direction == "ingress":
-            selected_pods = driver_utils.get_pods(
-                pod_selector, policy_namespace).get('items')
-            for selected_pod in selected_pods:
-                container_ports = driver_utils.get_ports(selected_pod, port)
-                for resource in resources:
-                    self._create_sg_rules_with_container_ports(
-                        container_ports, allowed_cidrs, resource, matched_pods,
-                        crd_rules, direction, port)
-        elif direction == "egress":
-            for resource in resources:
-                # NOTE(maysams) Skipping objects that refer to ipBlocks
-                # and consequently do not contain a spec field
-                if not resource.get('spec'):
-                    LOG.warning("IPBlock for egress with named ports is "
-                                "not supported.")
-                    continue
-                container_ports = driver_utils.get_ports(resource, port)
-                self._create_sg_rules_with_container_ports(
-                    container_ports, allowed_cidrs, resource, matched_pods,
-                    crd_rules, direction, port, pod_selector,
-                    policy_namespace)
-        if allowed_cidrs:
-            for container_port, pods in matched_pods.items():
-                for cidr in allowed_cidrs:
-                    sg_rule = driver_utils.create_security_group_rule_body(
-                        direction, container_port,
-                        # Pod's spec.containers[].port.protocol defaults to TCP
-                        protocol=port.get('protocol', 'TCP'),
-                        cidr=cidr,
-                        pods=pods)
-                    crd_rules.append(sg_rule)
-
-    def _create_sg_rule_on_number_port(self, allowed_resources,
-                                       direction, port, sg_rule_body_list,
-                                       policy_namespace):
-        for resource in allowed_resources:
-            cidr, ns = self._get_resource_details(resource)
-            # NOTE(maysams): Skipping resources that do not have
-            # an IP assigned. The security group rule creation
-            # will be triggered again after the resource is running.
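-            # For illustration (values are hypothetical), a numbered-port
-            # rule built here looks roughly like:
-            #   {'sgRule': {'direction': 'ingress', 'ethertype': 'IPv4',
-            #               'protocol': 'tcp', 'port_range_min': 8080,
-            #               'port_range_max': 8080,
-            #               'remote_ip_prefix': '10.2.1.5'},
-            #    'namespace': 'default'}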
-            if not cidr:
-                continue
-            sg_rule = (
-                driver_utils.create_security_group_rule_body(
-                    direction, port.get('port'),
-                    # NP's ports[].protocol defaults to TCP
-                    protocol=port.get('protocol', 'TCP'),
-                    cidr=cidr,
-                    namespace=ns))
-            sg_rule_body_list.append(sg_rule)
-            if direction == 'egress':
-                self._create_svc_egress_sg_rule(
-                    policy_namespace, sg_rule_body_list,
-                    resource=resource, port=port.get('port'),
-                    # NP's ports[].protocol defaults to TCP
-                    protocol=port.get('protocol', 'TCP'))
-
-    def _create_all_pods_sg_rules(self, port, direction,
-                                  sg_rule_body_list, pod_selector,
-                                  policy_namespace, allowed_cidrs=None):
-        if not isinstance(port.get('port'), int):
-            all_pods = driver_utils.get_namespaced_pods().get('items')
-            self._create_sg_rule_body_on_text_port(
-                direction, port, all_pods,
-                sg_rule_body_list, pod_selector, policy_namespace,
-                allowed_cidrs=allowed_cidrs)
-        elif allowed_cidrs:
-            for cidr in allowed_cidrs:
-                sg_rule = driver_utils.create_security_group_rule_body(
-                    direction, port.get('port'),
-                    protocol=port.get('protocol'),
-                    cidr=cidr)
-                sg_rule_body_list.append(sg_rule)
-        else:
-            for ethertype in (constants.IPv4, constants.IPv6):
-                sg_rule = (
-                    driver_utils.create_security_group_rule_body(
-                        direction, port.get('port'),
-                        ethertype=ethertype,
-                        # NP's ports[].protocol defaults to TCP
-                        protocol=port.get('protocol', 'TCP')))
-                sg_rule_body_list.append(sg_rule)
-
-    def _create_default_sg_rule(self, direction, sg_rule_body_list):
-        for ethertype in (constants.IPv4, constants.IPv6):
-            default_rule = {
-                'sgRule': {
-                    'ethertype': ethertype,
-                    'direction': direction,
-                    'description': 'Kuryr-Kubernetes NetPolicy SG rule',
-                }}
-            sg_rule_body_list.append(default_rule)
-
-    def _parse_sg_rules(self, sg_rule_body_list, direction, policy):
-        """Parse policy into security group rules.
-
-        This method inspects the policy object and creates the equivalent
-        security group rules, associating them with the referenced sg_id.
-        It returns the rules by adding them to sg_rule_body_list,
-        for the stated direction.
- - It accounts for special cases, such as: - - PolicyTypes stating only Egress: ensuring ingress is not restricted - - PolicyTypes not including Egress: ensuring egress is not restricted - - {} ingress/egress rules: applying default open for all the cluster - """ - _create_sg_rule_body = driver_utils.create_security_group_rule_body - rule_list = policy['spec'].get(direction) - - if not rule_list: - policy_types = policy['spec'].get('policyTypes') - if direction == 'ingress': - if len(policy_types) == 1 and policy_types[0] == 'Egress': - # NOTE(ltomasbo): add default rule to enable all ingress - # traffic as NP policy is not affecting ingress - LOG.debug('Applying default all open for ingress for ' - 'policy %s', utils.get_res_link(policy)) - self._create_default_sg_rule(direction, sg_rule_body_list) - elif direction == 'egress': - if policy_types and 'Egress' not in policy_types: - # NOTE(ltomasbo): add default rule to enable all egress - # traffic as NP policy is not affecting egress - LOG.debug('Applying default all open for egress for ' - 'policy %s', utils.get_res_link(policy)) - self._create_default_sg_rule(direction, sg_rule_body_list) - else: - LOG.warning('Not supported policyType at network policy %s', - utils.get_res_link(policy)) - return - - policy_namespace = policy['metadata']['namespace'] - pod_selector = policy['spec'].get('podSelector') - - rule_direction = 'from' - if direction == 'egress': - rule_direction = 'to' - - if rule_list[0] == {}: - LOG.debug('Applying default all open policy from %s', - utils.get_res_link(policy)) - for ethertype in (constants.IPv4, constants.IPv6): - rule = _create_sg_rule_body(direction, ethertype=ethertype) - sg_rule_body_list.append(rule) - - for rule_block in rule_list: - LOG.debug('Parsing %(dir)s Rule %(rule)s', {'dir': direction, - 'rule': rule_block}) - (allowed_cidrs, selectors, - allowed_resources) = self._parse_selectors(rule_block, - rule_direction, - policy_namespace) - - ipblock_list = [] - - if rule_direction in rule_block: - ipblock_list = [ipblock.get('ipBlock') for ipblock in - rule_block[rule_direction] if 'ipBlock' - in ipblock] - - for ipblock in ipblock_list: - if ipblock.get('except'): - for cidr_except in ipblock.get('except'): - cidr_list = netaddr.cidr_exclude( - ipblock.get('cidr'), cidr_except) - cidr_list = [{'cidr': str(cidr)} - for cidr in cidr_list] - allowed_resources.extend(cidr_list) - else: - allowed_resources.append(ipblock) - - if 'ports' in rule_block: - for port in rule_block['ports']: - if allowed_resources or allowed_cidrs or selectors: - if not isinstance(port.get('port'), int): - self._create_sg_rule_body_on_text_port( - direction, port, allowed_resources, - sg_rule_body_list, pod_selector, - policy_namespace) - else: - self._create_sg_rule_on_number_port( - allowed_resources, direction, port, - sg_rule_body_list, policy_namespace) - if allowed_cidrs: - self._create_all_pods_sg_rules( - port, direction, sg_rule_body_list, - pod_selector, policy_namespace, allowed_cidrs) - else: - self._create_all_pods_sg_rules( - port, direction, sg_rule_body_list, - pod_selector, policy_namespace) - elif allowed_resources or allowed_cidrs or selectors: - for resource in allowed_resources: - cidr, namespace = self._get_resource_details(resource) - # NOTE(maysams): Skipping resource that do not have - # an IP assigned. The security group rule creation - # will be triggered again after the resource is running. 
- if not cidr: - continue - rule = _create_sg_rule_body(direction, cidr=cidr, - namespace=namespace) - sg_rule_body_list.append(rule) - if direction == 'egress': - self._create_svc_egress_sg_rule( - policy_namespace, sg_rule_body_list, - resource=resource) - if allowed_cidrs: - for cidr in allowed_cidrs: - rule = _create_sg_rule_body(direction, cidr=cidr) - sg_rule_body_list.append(rule) - else: - LOG.debug('This network policy specifies no %(direction)s ' - '%(rule_direction)s and no ports: %(policy)s', - {'direction': direction, - 'rule_direction': rule_direction, - 'policy': utils.get_res_link(policy)}) - - def _create_svc_egress_sg_rule(self, policy_namespace, sg_rule_body_list, - resource=None, port=None, protocol=None): - # FIXME(dulek): We could probably filter by namespace here for pods - # and namespace resources? - services = driver_utils.get_services() - if not resource: - svc_subnet = utils.get_subnet_cidr( - CONF.neutron_defaults.service_subnet) - rule = driver_utils.create_security_group_rule_body( - 'egress', port, protocol=protocol, cidr=svc_subnet) - if rule not in sg_rule_body_list: - sg_rule_body_list.append(rule) - return - - for service in services.get('items'): - if service['metadata'].get('deletionTimestamp'): - # Ignore services being deleted - continue - - cluster_ip = service['spec'].get('clusterIP') - if not cluster_ip or cluster_ip == 'None': - # Headless services has 'None' as clusterIP, ignore. - continue - - svc_name = service['metadata']['name'] - svc_namespace = service['metadata']['namespace'] - if self._is_pod(resource): - pod_labels = resource['metadata'].get('labels') - svc_selector = service['spec'].get('selector') - if not svc_selector: - targets = driver_utils.get_endpoints_targets( - svc_name, svc_namespace) - pod_ip = resource['status'].get('podIP') - if pod_ip and pod_ip not in targets: - continue - elif pod_labels: - if not driver_utils.match_labels(svc_selector, pod_labels): - continue - elif resource.get('cidr'): - # NOTE(maysams) Accounts for traffic to pods under - # a service matching an IPBlock rule. - svc_selector = service['spec'].get('selector') - if not svc_selector: - # Retrieving targets of services on any Namespace - targets = driver_utils.get_endpoints_targets( - svc_name, svc_namespace) - if (not targets or - not self._targets_in_ip_block(targets, resource)): - continue - else: - if svc_namespace != policy_namespace: - continue - pods = driver_utils.get_pods({'selector': svc_selector}, - svc_namespace).get('items') - if not self._pods_in_ip_block(pods, resource): - continue - else: - ns_name = service['metadata']['namespace'] - if ns_name != resource['metadata']['name']: - continue - rule = driver_utils.create_security_group_rule_body( - 'egress', port, protocol=protocol, cidr=cluster_ip) - if rule not in sg_rule_body_list: - sg_rule_body_list.append(rule) - - def _pods_in_ip_block(self, pods, resource): - for pod in pods: - pod_ip = driver_utils.get_pod_ip(pod) - if pod_ip is None: - continue - if (ipaddress.ip_address(pod_ip) - in ipaddress.ip_network(resource.get('cidr'))): - return True - return False - - def _targets_in_ip_block(self, targets, resource): - for target in targets: - if (ipaddress.ip_address(target) - not in ipaddress.ip_network(resource.get('cidr'))): - return False - return True - - def _parse_network_policy_rules(self, policy): - """Create security group rule bodies out of network policies. 
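-
-        Illustrative usage (policy is a hypothetical NetworkPolicy dict):
-
-            i_rules, e_rules = self._parse_network_policy_rules(policy)
-            # Both are lists of {'sgRule': {...}} bodies destined for
-            # the KuryrNetworkPolicy spec.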
- - Whenever a notification from the handler 'on-present' method is - received, security group rules are created out of network policies' - ingress and egress ports blocks. - """ - LOG.debug('Parsing Network Policy %s' % policy['metadata']['name']) - ingress_sg_rule_body_list = [] - egress_sg_rule_body_list = [] - - self._parse_sg_rules(ingress_sg_rule_body_list, 'ingress', policy) - self._parse_sg_rules(egress_sg_rule_body_list, 'egress', policy) - - return ingress_sg_rule_body_list, egress_sg_rule_body_list - - def release_network_policy(self, policy): - return self._del_knp_crd(policy) - - def _get_knp_crd(self, policy): - netpolicy_crd_name = policy['metadata']['name'] - netpolicy_crd_namespace = policy['metadata']['namespace'] - try: - netpolicy_crd = self.kubernetes.get( - '{}/{}/kuryrnetworkpolicies/{}'.format( - constants.K8S_API_CRD_NAMESPACES, netpolicy_crd_namespace, - netpolicy_crd_name)) - except exceptions.K8sResourceNotFound: - return None - except exceptions.K8sClientException: - LOG.exception("Kubernetes Client Exception.") - raise - return netpolicy_crd - - def _create_knp_crd(self, policy, i_rules, e_rules): - networkpolicy_name = policy['metadata']['name'] - namespace = policy['metadata']['namespace'] - pod_selector = policy['spec'].get('podSelector') - policy_types = policy['spec'].get('policyTypes', []) - - owner_reference = {'apiVersion': policy['apiVersion'], - 'kind': policy['kind'], - 'name': policy['metadata']['name'], - 'uid': policy['metadata']['uid']} - - netpolicy_crd = { - 'apiVersion': 'openstack.org/v1', - 'kind': constants.K8S_OBJ_KURYRNETWORKPOLICY, - 'metadata': { - 'name': networkpolicy_name, - 'namespace': namespace, - 'annotations': { - 'networkPolicyLink': utils.get_res_link(policy) - }, - 'finalizers': [constants.NETWORKPOLICY_FINALIZER], - 'ownerReferences': [owner_reference] - }, - 'spec': { - 'ingressSgRules': i_rules, - 'egressSgRules': e_rules, - 'podSelector': pod_selector, - 'policyTypes': policy_types, - }, - 'status': { - 'securityGroupRules': [], - }, - } - - try: - LOG.debug("Creating KuryrNetworkPolicy CRD %s" % netpolicy_crd) - url = '{}/{}/kuryrnetworkpolicies'.format( - constants.K8S_API_CRD_NAMESPACES, - namespace) - netpolicy_crd = self.kubernetes.post(url, netpolicy_crd) - except exceptions.K8sNamespaceTerminating: - raise - except exceptions.K8sClientException as exc: - self.kubernetes.add_event(policy, 'FailedToCreateNetworkPolicyCRD', - f'Adding corresponding Kuryr Network ' - f'Policy CRD has failed: {exc}', - 'Warning') - LOG.exception("Kubernetes Client Exception creating " - "KuryrNetworkPolicy CRD.") - raise - return netpolicy_crd - - def _patch_knp_crd(self, policy, i_rules, e_rules, knp): - networkpolicy_name = policy['metadata']['name'] - namespace = policy['metadata']['namespace'] - pod_selector = policy['spec'].get('podSelector') - url = (f'{constants.K8S_API_CRD_NAMESPACES}/{namespace}' - f'/kuryrnetworkpolicies/{networkpolicy_name}') - - # FIXME(dulek): Rules should be hashable objects, not dict so that - # we could compare them easily here. 
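-        # One illustrative (hypothetical) workaround would be comparing
-        # canonical JSON forms, e.g. with oslo_serialization.jsonutils:
-        #   def _rule_key(rule):
-        #       return jsonutils.dumps(rule, sort_keys=True)
-        #   unchanged = (set(map(_rule_key, i_rules)) ==
-        #                set(map(_rule_key, knp['spec']['ingressSgRules'])))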
- data = { - 'ingressSgRules': i_rules, - 'egressSgRules': e_rules, - } - if knp['spec'].get('podSelector') != pod_selector: - data['podSelector'] = pod_selector - - self.kubernetes.patch_crd('spec', url, data) - - def _del_knp_crd(self, policy): - try: - ns = policy['metadata']['namespace'] - name = policy['metadata']['name'] - LOG.debug("Deleting KuryrNetworkPolicy CRD %s" % name) - self.kubernetes.delete('{}/{}/kuryrnetworkpolicies/{}'.format( - constants.K8S_API_CRD_NAMESPACES, ns, name)) - return True - except exceptions.K8sResourceNotFound: - LOG.debug("KuryrNetworkPolicy CRD Object not found: %s", name) - return False - except exceptions.K8sClientException as exc: - self.kubernetes.add_event(policy, 'FailedToDeleteNetworkPolicyCRD', - f'Deleting corresponding Kuryr Network ' - f'Policy CRD has failed: {exc}', - 'Warning') - LOG.exception("Kubernetes Client Exception deleting " - "KuryrNetworkPolicy CRD %s." % name) - raise - - def _get_resource_details(self, resource): - namespace = None - if self._is_pod(resource): - cidr = resource['status'].get('podIP') - namespace = resource['metadata']['namespace'] - elif resource.get('cidr'): - cidr = resource.get('cidr') - else: - cidr = driver_utils.get_namespace_subnet_cidr(resource) - namespace = resource['metadata']['name'] - return cidr, namespace - - def _is_pod(self, resource): - if resource.get('spec'): - return resource['spec'].get('containers') diff --git a/kuryr_kubernetes/controller/drivers/network_policy_security_groups.py b/kuryr_kubernetes/controller/drivers/network_policy_security_groups.py deleted file mode 100644 index f40d34c19..000000000 --- a/kuryr_kubernetes/controller/drivers/network_policy_security_groups.py +++ /dev/null @@ -1,437 +0,0 @@ -# Copyright 2018 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
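-
-# NOTE: This driver maps pods to security groups through the
-# KuryrNetworkPolicy (KNP) CRDs. A minimal sketch of that lookup,
-# using hypothetical helpers with the same semantics as the ones in
-# this module:
-#
-#     def pod_sgs(pod, knp_crds, default_sgs):
-#         sgs = [crd['status']['securityGroupId'] for crd in knp_crds
-#                if match_selector(crd['spec']['podSelector'],
-#                                  pod['metadata'].get('labels'))]
-#         # Pods selected by no NetworkPolicy remain fully accessible,
-#         # so they fall back to the default security groups.
-#         return sgs or list(default_sgs)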
- -from oslo_config import cfg -from oslo_log import log as logging - -from kuryr_kubernetes import clients -from kuryr_kubernetes import config -from kuryr_kubernetes import constants -from kuryr_kubernetes.controller.drivers import base -from kuryr_kubernetes.controller.drivers import utils as driver_utils -from kuryr_kubernetes import exceptions -from kuryr_kubernetes import utils - -LOG = logging.getLogger(__name__) - - -def _get_namespace_labels(namespace): - kubernetes = clients.get_kubernetes_client() - - try: - path = '{}/{}'.format(constants.K8S_API_NAMESPACES, namespace) - namespaces = kubernetes.get(path) - LOG.debug("Return Namespace: %s", namespaces) - except exceptions.K8sResourceNotFound: - LOG.exception("Namespace not found") - raise - except exceptions.K8sClientException: - LOG.exception("Kubernetes Client Exception") - raise - return namespaces['metadata'].get('labels') - - -def _create_sg_rules_with_container_ports(container_ports, matched): - """Checks if security group rules based on container ports will be updated - - param container_ports: List of tuples with pods and port values - param matched: If a sg rule was created for the NP rule - - return: True if a sg rule needs to be created, False otherwise. - """ - for pod, container_port in container_ports: - pod_ip = driver_utils.get_pod_ip(pod) - if not pod_ip: - LOG.debug("Skipping SG rule creation for pod %s due to " - "no IP assigned", pod['metadata']['name']) - continue - return matched - return False - - -def _create_sg_rule_on_text_port(direction, port, rule_selected_pods, matched, - crd): - spec_pod_selector = crd['spec'].get('podSelector') - policy_namespace = crd['metadata']['namespace'] - spec_pods = driver_utils.get_pods( - spec_pod_selector, policy_namespace).get('items') - if direction == 'ingress': - for spec_pod in spec_pods: - container_ports = driver_utils.get_ports(spec_pod, port) - matched = _create_sg_rules_with_container_ports( - container_ports, matched) - elif direction == 'egress': - for rule_selected_pod in rule_selected_pods: - pod_label = rule_selected_pod['metadata'].get('labels') - pod_ns = rule_selected_pod['metadata'].get('namespace') - # NOTE(maysams) Do not allow egress traffic to the actual - # set of pods the NP is enforced on. - if (driver_utils.match_selector(spec_pod_selector, pod_label) and - policy_namespace == pod_ns): - continue - container_ports = driver_utils.get_ports( - rule_selected_pod, port) - matched = _create_sg_rules_with_container_ports( - container_ports, matched) - return matched - - -def _create_sg_rules(crd, pod, pod_selector, rule_block, direction, matched): - pod_labels = pod['metadata'].get('labels') - pod_ip = driver_utils.get_pod_ip(pod) - if not pod_ip: - LOG.debug("Skipping SG rule creation for pod %s due to " - "no IP assigned", pod['metadata']['name']) - return None - - # NOTE (maysams) No need to differentiate between podSelector - # with empty value or with '{}', as they have same result in here. - if pod_selector: - if driver_utils.match_selector(pod_selector, pod_labels): - if 'ports' in rule_block: - for port in rule_block['ports']: - if type(port.get('port')) is not int: - matched = _create_sg_rule_on_text_port( - direction, port, [pod], matched, crd) - else: - matched = True - else: - matched = True - else: - # NOTE (maysams) When a policy with namespaceSelector and text port - # is applied the port on the pods needs to be retrieved. 
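-        # For illustration (values are hypothetical), such a rule block
-        # looks like {'from': [{'namespaceSelector': {...}}],
-        #             'ports': [{'port': 'metrics', 'protocol': 'TCP'}]},
-        # where 'metrics' names a container port instead of a number.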
- if 'ports' in rule_block: - for port in rule_block['ports']: - if type(port.get('port')) is not int: - matched = _create_sg_rule_on_text_port( - direction, port, [pod], matched, crd) - return matched - - -def _parse_selectors_on_pod(crd, pod, pod_selector, namespace_selector, - rule_block, direction, matched): - pod_namespace = pod['metadata']['namespace'] - pod_namespace_labels = _get_namespace_labels(pod_namespace) - policy_namespace = crd['metadata']['namespace'] - - if namespace_selector == {}: - matched = _create_sg_rules(crd, pod, pod_selector, rule_block, - direction, matched) - elif namespace_selector: - if (pod_namespace_labels and - driver_utils.match_selector(namespace_selector, - pod_namespace_labels)): - matched = _create_sg_rules(crd, pod, pod_selector, - rule_block, direction, matched) - else: - if pod_namespace == policy_namespace: - matched = _create_sg_rules(crd, pod, pod_selector, rule_block, - direction, matched) - return matched - - -def _parse_selectors_on_namespace(crd, direction, pod_selector, - ns_selector, rule_block, namespace, matched): - ns_name = namespace['metadata'].get('name') - ns_labels = namespace['metadata'].get('labels') - - if (ns_selector and ns_labels and - driver_utils.match_selector(ns_selector, ns_labels)): - if pod_selector: - pods = driver_utils.get_pods(pod_selector, ns_name).get('items') - if 'ports' in rule_block: - for port in rule_block['ports']: - if type(port.get('port')) is not int: - matched = ( - _create_sg_rule_on_text_port( - direction, port, pods, matched, crd)) - else: - for pod in pods: - pod_ip = driver_utils.get_pod_ip(pod) - if not pod_ip: - pod_name = pod['metadata']['name'] - LOG.debug("Skipping SG rule creation for pod " - "%s due to no IP assigned", pod_name) - continue - matched = True - else: - for pod in pods: - pod_ip = driver_utils.get_pod_ip(pod) - if not pod_ip: - pod_name = pod['metadata']['name'] - LOG.debug("Skipping SG rule creation for pod %s due" - " to no IP assigned", pod_name) - continue - matched = True - else: - ns_pods = driver_utils.get_pods(ns_selector)['items'] - if 'ports' in rule_block: - for port in rule_block['ports']: - if type(port.get('port')) is not int: - matched = ( - _create_sg_rule_on_text_port( - direction, port, ns_pods, matched, crd)) - else: - matched = True - else: - matched = True - return matched - - -def _parse_rules(direction, crd, policy, pod=None, namespace=None): - rule_direction = 'from' - if direction == 'egress': - rule_direction = 'to' - - matched = False - rule_list = policy.get(direction, []) - for rule_block in rule_list: - for rule in rule_block.get(rule_direction, []): - namespace_selector = rule.get('namespaceSelector') - pod_selector = rule.get('podSelector') - if pod: - matched = _parse_selectors_on_pod( - crd, pod, pod_selector, namespace_selector, - rule_block, direction, matched) - elif namespace: - matched = _parse_selectors_on_namespace( - crd, direction, pod_selector, namespace_selector, - rule_block, namespace, matched) - - # NOTE(maysams): Cover the case of a network policy that allows - # from everywhere on a named port, e.g., when there is no 'from' - # specified. 
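-        # Illustration (hypothetical): rule_block == {'ports':
-        # [{'port': 'dns', 'protocol': 'UDP'}]} carries no 'from'/'to',
-        # yet still needs rules for every pod exposing a port named 'dns'.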
- if pod and not matched: - for port in rule_block.get('ports', []): - if type(port.get('port')) is not int: - if (not rule_block.get(rule_direction, []) - or direction == "ingress"): - matched = _create_sg_rule_on_text_port( - direction, port, [pod], matched, crd) - return matched - - -def _parse_rules_on_delete_namespace(rule_list, direction, ns_name): - for rule in rule_list: - LOG.debug('Parsing %(dir)s Rule %(r)s', {'dir': direction, 'r': rule}) - rule_namespace = rule.get('namespace', None) - affectedPods = rule.get('affectedPods', []) - if rule_namespace and rule_namespace == ns_name: - return True - elif affectedPods: - for pod_info in affectedPods: - if pod_info['podNamespace'] == ns_name: - return True - return False - - -def _parse_rules_on_delete_pod(rule_list, direction, pod_ip): - for rule in rule_list: - LOG.debug('Parsing %(dir)s Rule %(r)s', {'dir': direction, 'r': rule}) - remote_ip_prefix = rule['sgRule'].get('remote_ip_prefix') - affectedPods = rule.get('affectedPods', []) - if remote_ip_prefix and remote_ip_prefix == pod_ip: - return True - elif affectedPods: - for pod_info in affectedPods: - if pod_info['podIP'] == pod_ip: - return True - return False - - -def _get_pod_sgs(pod): - sg_list = [] - - pod_labels = pod['metadata'].get('labels') - pod_namespace = pod['metadata']['namespace'] - - knp_crds = driver_utils.get_kuryrnetworkpolicy_crds( - namespace=pod_namespace) - for crd in knp_crds: - pod_selector = crd['spec'].get('podSelector') - if driver_utils.match_selector(pod_selector, pod_labels): - sg_id = crd['status'].get('securityGroupId') - if not sg_id: - # NOTE(dulek): We could just assume KNP handler will apply it, - # but it's possible that when it gets this pod it - # will have no IP yet and will be skipped. - LOG.warning('SG for NP %s not created yet, will retry.', - utils.get_res_unique_name(crd)) - raise exceptions.ResourceNotReady(pod) - LOG.debug("Appending %s", crd['status']['securityGroupId']) - sg_list.append(crd['status']['securityGroupId']) - - # NOTE(maysams) Pods that are not selected by any Networkpolicy - # are fully accessible. Thus, the default security group is associated. - if not sg_list: - sg_list = config.CONF.neutron_defaults.pod_security_groups - if not sg_list: - raise cfg.RequiredOptError('pod_security_groups', - cfg.OptGroup('neutron_defaults')) - - return sg_list[:] - - -class NetworkPolicySecurityGroupsDriver(base.PodSecurityGroupsDriver): - """Provides security groups for pods based on network policies""" - - def get_security_groups(self, pod, project_id): - return _get_pod_sgs(pod) - - def create_sg_rules(self, pod): - LOG.debug("Creating SG rules for pod: %s", pod['metadata']['name']) - crd_pod_selectors = [] - knp_crds = driver_utils.get_kuryrnetworkpolicy_crds() - nps = driver_utils.get_networkpolicies() - pairs = driver_utils.zip_knp_np(knp_crds, nps) - - for crd, policy in pairs: - crd_selector = crd['spec'].get('podSelector') - spec = policy.get('spec') - - i_matched = _parse_rules('ingress', crd, spec, pod=pod) - e_matched = _parse_rules('egress', crd, spec, pod=pod) - - if i_matched or e_matched: - try: - driver_utils.bump_networkpolicy(crd) - except exceptions.K8sResourceNotFound: - # The NP got deleted, ignore it. 
- continue - if i_matched: - crd_pod_selectors.append(crd_selector) - return crd_pod_selectors - - def delete_sg_rules(self, pod): - LOG.debug("Deleting SG rules for pod: %s", pod['metadata']['name']) - pod_ip = driver_utils.get_pod_ip(pod) - crd_pod_selectors = [] - if not pod_ip: - LOG.debug("Skipping SG rule deletion as pod %s has no IP assigned", - pod['metadata']['name']) - return crd_pod_selectors - knp_crds = driver_utils.get_kuryrnetworkpolicy_crds() - for crd in knp_crds: - crd_selector = crd['spec'].get('podSelector') - ingress_rule_list = crd['spec'].get('ingressSgRules') - egress_rule_list = crd['spec'].get('egressSgRules') - - i_matched = _parse_rules_on_delete_pod( - ingress_rule_list, "ingress", pod_ip) - e_matched = _parse_rules_on_delete_pod( - egress_rule_list, "egress", pod_ip) - - if i_matched or e_matched: - try: - driver_utils.bump_networkpolicy(crd) - except exceptions.K8sResourceNotFound: - # The NP got deleted, ignore it. - continue - if i_matched: - crd_pod_selectors.append(crd_selector) - return crd_pod_selectors - - def update_sg_rules(self, pod): - LOG.debug("Updating SG rules for pod: %s", pod['metadata']['name']) - # FIXME(dulek): No need to bump twice. - crd_pod_selectors = [] - crd_pod_selectors.extend(self.delete_sg_rules(pod)) - crd_pod_selectors.extend(self.create_sg_rules(pod)) - return crd_pod_selectors - - def delete_namespace_sg_rules(self, namespace): - ns_name = namespace['metadata']['name'] - LOG.debug("Deleting SG rules for namespace: %s", ns_name) - - crd_selectors = [] - knp_crds = driver_utils.get_kuryrnetworkpolicy_crds() - for crd in knp_crds: - crd_selector = crd['spec'].get('podSelector') - ingress_rule_list = crd['spec'].get('ingressSgRules') - egress_rule_list = crd['spec'].get('egressSgRules') - - i_matched = _parse_rules_on_delete_namespace( - ingress_rule_list, "ingress", ns_name) - e_matched = _parse_rules_on_delete_namespace( - egress_rule_list, "egress", ns_name) - - if i_matched or e_matched: - try: - driver_utils.bump_networkpolicy(crd) - except exceptions.K8sResourceNotFound: - # The NP got deleted, ignore it. - continue - if i_matched: - crd_selectors.append(crd_selector) - return crd_selectors - - def create_namespace_sg_rules(self, namespace): - ns_name = namespace['metadata']['name'] - LOG.debug("Creating SG rules for namespace: %s", ns_name) - crd_selectors = [] - knp_crds = driver_utils.get_kuryrnetworkpolicy_crds() - nps = driver_utils.get_networkpolicies() - pairs = driver_utils.zip_knp_np(knp_crds, nps) - for crd, policy in pairs: - crd_selector = crd['spec'].get('podSelector') - spec = policy.get('spec') - i_matched = _parse_rules('ingress', crd, spec, namespace=namespace) - e_matched = _parse_rules('egress', crd, spec, namespace=namespace) - - if i_matched or e_matched: - try: - driver_utils.bump_networkpolicy(crd) - except exceptions.K8sResourceNotFound: - # The NP got deleted, ignore it. 
- continue - if i_matched: - crd_selectors.append(crd_selector) - return crd_selectors - - def update_namespace_sg_rules(self, namespace): - LOG.debug("Updating SG rules for namespace: %s", - namespace['metadata']['name']) - crd_selectors = [] - crd_selectors.extend(self.delete_namespace_sg_rules(namespace)) - crd_selectors.extend(self.create_namespace_sg_rules(namespace)) - return crd_selectors - - -class NetworkPolicyServiceSecurityGroupsDriver( - base.ServiceSecurityGroupsDriver): - """Provides security groups for services based on network policies""" - - def get_security_groups(self, service, project_id): - sg_list = [] - svc_namespace = service['metadata']['namespace'] - svc_selector = service['spec'].get('selector') - - if svc_selector: - # get affected pods by svc selector - pods = driver_utils.get_pods({'selector': svc_selector}, - svc_namespace).get('items') - # NOTE(ltomasbo): We assume all the pods pointed by a service - # have the same labels, and the same policy will be applied to - # all of them. Hence only considering the security groups applied - # to the first one. - if pods: - return _get_pod_sgs(pods[0]) - else: - # NOTE(maysams): Network Policy is not enforced on Services - # without selectors. - sg_list = config.CONF.neutron_defaults.pod_security_groups - if not sg_list: - raise cfg.RequiredOptError('pod_security_groups', - cfg.OptGroup('neutron_defaults')) - return sg_list[:] diff --git a/kuryr_kubernetes/controller/drivers/neutron_vif.py b/kuryr_kubernetes/controller/drivers/neutron_vif.py deleted file mode 100644 index e6242b6a9..000000000 --- a/kuryr_kubernetes/controller/drivers/neutron_vif.py +++ /dev/null @@ -1,161 +0,0 @@ -# Copyright (c) 2016 Mirantis, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from kuryr.lib import constants as kl_const -from openstack import exceptions as os_exc -from oslo_config import cfg -from oslo_log import log as logging - -from kuryr_kubernetes import clients -from kuryr_kubernetes import config -from kuryr_kubernetes import constants -from kuryr_kubernetes.controller.drivers import base -from kuryr_kubernetes.controller.drivers import utils -from kuryr_kubernetes import exceptions as k_exc -from kuryr_kubernetes import os_vif_util as ovu - - -LOG = logging.getLogger(__name__) - -CONF = cfg.CONF - - -class NeutronPodVIFDriver(base.PodVIFDriver): - """Manages normal Neutron ports to provide VIFs for Kubernetes Pods.""" - - def __init__(self): - super(NeutronPodVIFDriver, self).__init__() - - self._tag_on_creation = utils.check_tag_on_creation() - if self._tag_on_creation: - LOG.info('Neutron supports tagging during bulk port creation.') - else: - LOG.warning('Neutron does not support tagging during bulk ' - 'port creation. 
Kuryr will tag resources after ' - 'port creation.') - - def request_vif(self, pod, project_id, subnets, security_groups): - os_net = clients.get_network_client() - - rq = self._get_port_request(pod, project_id, subnets, security_groups) - port = os_net.create_port(**rq) - - self._check_port_binding([port]) - if not self._tag_on_creation: - utils.tag_neutron_resources([port]) - return ovu.neutron_to_osvif_vif(port.binding_vif_type, port, subnets) - - def request_vifs(self, pod, project_id, subnets, security_groups, - num_ports, semaphore): - os_net = clients.get_network_client() - - rq = self._get_port_request(pod, project_id, subnets, security_groups, - unbound=True) - - bulk_port_rq = [rq] * num_ports - # restrict amount of create Ports in bulk that might be running - # in parallel. - with semaphore: - try: - ports = list(os_net.create_ports(bulk_port_rq)) - except os_exc.SDKException: - LOG.exception("Error creating bulk ports: %s", bulk_port_rq) - raise - - vif_plugin = ports[0].binding_vif_type - - # NOTE(ltomasbo): Due to the bug (1696051) on neutron bulk port - # creation request returning the port objects without binding - # information, an additional port show is performed to get the binding - # information - if vif_plugin == 'unbound': - port_info = os_net.get_port(ports[0].id) - vif_plugin = port_info.binding_vif_type - - self._check_port_binding(ports) - if not self._tag_on_creation: - utils.tag_neutron_resources(ports) - vifs = [] - for port in ports: - vif = ovu.neutron_to_osvif_vif(vif_plugin, port, subnets) - vifs.append(vif) - return vifs - - def release_vif(self, pod, vif, project_id=None): - clients.get_network_client().delete_port(vif.id) - - def activate_vif(self, vif, **kwargs): - if vif.active: - return - - os_net = clients.get_network_client() - try: - port = os_net.get_port(vif.id) - except os_exc.SDKException: - LOG.debug("Unable to obtain port information, retrying.") - raise k_exc.ResourceNotReady(vif) - - if port['status'] != kl_const.PORT_STATUS_ACTIVE: - raise k_exc.PortNotReady(vif.id, port['status']) - - vif.active = True - - def update_vif_sgs(self, pod, security_groups): - os_net = clients.get_network_client() - kp = utils.get_kuryrport(pod) - vifs = utils.get_vifs(kp) - if vifs: - # NOTE(ltomasbo): It just updates the default_vif security group - port_id = vifs[constants.DEFAULT_IFNAME].id - os_net.update_port(port_id, security_groups=list(security_groups)) - - def _get_port_request(self, pod, project_id, subnets, security_groups, - unbound=False): - port_req_body = {'project_id': project_id, - 'network_id': utils.get_network_id(subnets), - 'fixed_ips': ovu.osvif_to_neutron_fixed_ips(subnets), - 'device_owner': kl_const.DEVICE_OWNER, - 'admin_state_up': True, - 'binding_host_id': utils.get_host_id(pod)} - - # if unbound argument is set to true, it means the port requested - # should not be bound and not associated to the pod. 
Thus the port dict - # is filled with a generic name (constants.KURYR_PORT_NAME) if - # port_debug is enabled, and without device_id - if unbound and config.CONF.kubernetes.port_debug: - port_req_body['name'] = constants.KURYR_PORT_NAME - else: - # only set the name if port_debug is enabled - if config.CONF.kubernetes.port_debug: - port_req_body['name'] = utils.get_port_name(pod) - port_req_body['device_id'] = utils.get_device_id(pod) - - if security_groups: - port_req_body['security_groups'] = security_groups - - if self._tag_on_creation: - tags = CONF.neutron_defaults.resource_tags - if tags: - port_req_body['tags'] = tags - - return port_req_body - - def _check_port_binding(self, ports): - if ports[0].binding_vif_type == "binding_failed": - for port in ports: - clients.get_network_client().delete_port(port.id) - LOG.error("Binding failed error for ports: %s." - " Please check Neutron for errors.", ports) - raise k_exc.ResourceNotReady(ports) diff --git a/kuryr_kubernetes/controller/drivers/node_subnets.py b/kuryr_kubernetes/controller/drivers/node_subnets.py deleted file mode 100644 index baacdcdd1..000000000 --- a/kuryr_kubernetes/controller/drivers/node_subnets.py +++ /dev/null @@ -1,179 +0,0 @@ -# Copyright 2020 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from openstack import exceptions as os_exc -from oslo_concurrency import lockutils -from oslo_config import cfg -from oslo_log import log as logging - -from kuryr_kubernetes import clients -from kuryr_kubernetes import constants -from kuryr_kubernetes.controller.drivers import base -from kuryr_kubernetes import exceptions -from kuryr_kubernetes import utils - - -CONF = cfg.CONF -LOG = logging.getLogger(__name__) - - -class ConfigNodesSubnets(base.NodesSubnetsDriver): - """Provides list of nodes subnets from configuration.""" - - def get_nodes_subnets(self, raise_on_empty=False): - node_subnet_ids = CONF.pod_vif_nested.worker_nodes_subnets - if not node_subnet_ids: - if raise_on_empty: - raise cfg.RequiredOptError( - 'worker_nodes_subnets', cfg.OptGroup('pod_vif_nested')) - else: - return [] - - return node_subnet_ids - - def add_node(self, node): - return False - - def delete_node(self, node): - return False - - -class OpenShiftNodesSubnets(base.NodesSubnetsDriver): - """Provides list of nodes subnets based on OpenShift Machine objects.""" - - def __init__(self): - super().__init__() - self.subnets = set() - - def _get_subnet_from_machine(self, machine): - spec = machine['spec'].get('providerSpec', {}).get('value') - subnet_id = None - trunk = spec.get('trunk') - k8s = clients.get_kubernetes_client() - - if 'primarySubnet' in spec: - # NOTE(gryf) in old OpenShift versions, primarySubnet was used for - # selecting primary subnet from multiple networks. In the future - # this field will be deprecated. 
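-            # Illustration (hypothetical values) of the Machine fields
-            # consumed here:
-            #   machine['spec']['providerSpec']['value'] == {
-            #       'trunk': True,
-            #       'primarySubnet': 'worker-subnet',  # name or UUID
-            #   }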
- - os_net = clients.get_network_client() - try: - subnet = os_net.find_subnet(spec['primarySubnet']) - except os_exc.DuplicateResource: - LOG.error('Name "%s" defined in primarySubnet for Machine/' - 'MachineSet found in more than one subnets, which ' - 'may lead to issues. Please, use desired subnet id ' - 'instead.', spec['primarySubnet']) - k8s.add_event(machine, 'AmbiguousPrimarySubnet', - f'Name "{spec["primarySubnet"]}" defined in ' - f'primarySubnet for Machine/MachineSet found in ' - f'multiple subnets, which may lead to issues. ' - f'Please, use desired subnet id instead.', - 'Warning', 'kuryr-controller') - return None - except os_exc.SDKException as ex: - raise exceptions.ResourceNotReady(f'OpenStackSDK threw an ' - f'exception {ex}, retrying.') - - if not subnet: - LOG.error('Subnet name/id `%s` provided in MachineSet ' - 'primarySubnet field does not match any subnet. ' - 'Check the configuration.', spec['primarySubnet']) - k8s.add_event(machine, 'PrimarySubnetNotFound', - f'Name "{spec["primarySubnet"]}" defined in ' - f'primarySubnet for Machine/MachineSet does ' - f'not match any subnet. Check the configuration.' - 'Warning', 'kuryr-controller') - return None - - return subnet.id - - if trunk and 'networks' in spec and spec['networks']: - subnets = spec['networks'][0].get('subnets') - if not subnets: - LOG.warning('No `subnets` in Machine `providerSpec.values.' - 'networks`.') - else: - primary_subnet = subnets[0] - if primary_subnet.get('uuid'): - subnet_id = primary_subnet['uuid'] - else: - subnet_filter = primary_subnet['filter'] - subnet_id = utils.get_subnet_id(**subnet_filter) - - if not subnet_id and 'ports' in spec and spec['ports']: - for port in spec['ports']: - if port.get('trunk', trunk) and port.get('fixedIPs'): - for fip in port['fixedIPs']: - if fip.get('subnetID'): - subnet_id = fip['subnetID'] - break - - if not subnet_id: - LOG.warning('No `subnets` found in Machine `providerSpec`') - - return subnet_id - - def get_nodes_subnets(self, raise_on_empty=False): - with lockutils.lock('kuryr-machine-add'): - # We add any hardcoded ones from config anyway. - result = self.subnets - if CONF.pod_vif_nested.worker_nodes_subnets: - result = result.union( - set(CONF.pod_vif_nested.worker_nodes_subnets)) - if not result and raise_on_empty: - raise exceptions.ResourceNotReady( - 'OpenShift Machines does not exist or are not yet ' - 'handled. Cannot determine worker nodes subnets.') - - return list(result) - - def add_node(self, machine): - subnet_id = self._get_subnet_from_machine(machine) - if not subnet_id: - LOG.warning('Could not determine subnet of Machine %s', - machine['metadata']['name']) - return False - - with lockutils.lock('kuryr-machine-add'): - if subnet_id not in self.subnets: - LOG.info('Adding subnet %s to the worker nodes subnets as ' - 'machine %s runs in it.', subnet_id, - machine['metadata']['name']) - self.subnets.add(subnet_id) - return True - return False - - def delete_node(self, machine): - k8s = clients.get_kubernetes_client() - affected_subnet_id = self._get_subnet_from_machine(machine) - if not affected_subnet_id: - LOG.warning('Could not determine subnet of Machine %s', - machine['metadata']['name']) - return False - - machines = k8s.get(constants.OPENSHIFT_API_CRD_MACHINES) - for existing_machine in machines.get('items', []): - if affected_subnet_id == self._get_subnet_from_machine( - existing_machine): - return False - - # We know that subnet is no longer used, so we remove it. 
- LOG.info('Removing subnet %s from the worker nodes subnets', - affected_subnet_id) - with lockutils.lock('kuryr-machine-add'): - self.subnets.remove(affected_subnet_id) - - return True diff --git a/kuryr_kubernetes/controller/drivers/public_ip.py b/kuryr_kubernetes/controller/drivers/public_ip.py deleted file mode 100644 index f9d55ab90..000000000 --- a/kuryr_kubernetes/controller/drivers/public_ip.py +++ /dev/null @@ -1,178 +0,0 @@ -# Copyright (c) 2017 RedHat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -import abc - -from openstack import exceptions as os_exc -from oslo_log import log as logging - -from kuryr_kubernetes import clients -from kuryr_kubernetes.controller.drivers import utils - -LOG = logging.getLogger(__name__) - - -class BasePubIpDriver(object, metaclass=abc.ABCMeta): - """Base class for public IP functionality.""" - - @abc.abstractmethod - def is_ip_available(self, ip_addr, port_id_to_be_associated): - """check availability of ip address - - :param ip_address: - :param port_id_to_be_associated - :returns res_id in case ip is available returns resources id else None - - """ - raise NotImplementedError() - - @abc.abstractmethod - def allocate_ip(self, pub_net_id, project_id, pub_subnet_id=None, - description=None, port_id_to_be_associated=None): - """allocate ip address from public network id - - :param pub_net_id: public network id - :param project_id: - :param pub_subnet_id: public subnet id (Optional) - :param description: string describing request (Optional) - :param port_id_to_be_associated: (optional) - :returns res_id , ip_addr - :res_id - resource id - :ip_addr - ip aaddress - - - """ - raise NotImplementedError() - - @abc.abstractmethod - def free_ip(self, res_id): - """free ip by resource ID - - :param res_id: resource_id - :returns True/False - - """ - raise NotImplementedError() - - @abc.abstractmethod - def associate(self, res_id, vip_port_id): - """Associate VIP port id with resource_id - - :param res_id: id represents pub ip resource - :param vip_port_id: VIP port id - - """ - raise NotImplementedError() - - @abc.abstractmethod - def disassociate(self, res_id): - """Clear association between res_id to any vip port - - :param res_id: id represents pub ip resource - - """ - - -class FipPubIpDriver(BasePubIpDriver): - """Floating IP implementation for public IP capability .""" - - def is_ip_available(self, ip_addr, port_id_to_be_associated=None): - if ip_addr: - os_net = clients.get_network_client() - floating_ips_list = os_net.ips(floating_ip_address=ip_addr) - for entry in floating_ips_list: - if not entry: - continue - if (entry.floating_ip_address == ip_addr): - if not entry.port_id or ( - port_id_to_be_associated is not None - and entry.port_id == port_id_to_be_associated): - return entry.id - # floating IP not available - LOG.error("Floating IP=%s not available", ip_addr) - else: - LOG.error("Invalid parameter ip_addr=%s", ip_addr) - return None - - def allocate_ip(self, pub_net_id, project_id, pub_subnet_id=None, - 
description=None, port_id_to_be_associated=None): - os_net = clients.get_network_client() - - if port_id_to_be_associated is not None: - floating_ips_list = os_net.ips( - port_id=port_id_to_be_associated) - for entry in floating_ips_list: - if not entry: - continue - if (entry['floating_ip_address']): - LOG.debug('FIP %s already allocated to port %s', - entry['floating_ip_address'], - port_id_to_be_associated) - return entry['id'], entry['floating_ip_address'] - - try: - fip = os_net.create_ip(floating_network_id=pub_net_id, - project_id=project_id, - subnet_id=pub_subnet_id, - description=description) - except os_exc.SDKException: - LOG.exception("Failed to create floating IP - netid=%s ", - pub_net_id) - raise - utils.tag_neutron_resources([fip]) - return fip.id, fip.floating_ip_address - - def free_ip(self, res_id): - os_net = clients.get_network_client() - try: - os_net.delete_ip(res_id) - except os_exc.SDKException: - LOG.error("Failed to delete floating_ip_id =%s !", res_id) - return False - return True - - def _update(self, res_id, vip_port_id): - response = None - os_net = clients.get_network_client() - try: - response = os_net.update_ip(res_id, port_id=vip_port_id) - except os_exc.ConflictException: - LOG.warning("Conflict when assigning floating IP with id %s. " - "Checking if it's already assigned correctly.", res_id) - try: - fip = os_net.get_ip(res_id) - except os_exc.NotFoundException: - LOG.exception("Failed to get FIP %s - it doesn't exist.", - res_id) - raise - - if fip.port_id == vip_port_id: - LOG.debug('FIP %s already assigned to %s', res_id, - vip_port_id) - else: - LOG.exception('Failed to assign FIP %s to VIP port %s. It is ' - 'probably already bound', res_id, vip_port_id) - raise - except os_exc.SDKException: - # NOTE(gryf): the response will be None, since in case of - # exception, there will be no value assigned to response variable. - LOG.error("Failed to update_ip, floating_ip_id=%s," - "response=%s!", res_id, response) - raise - - def associate(self, res_id, vip_port_id): - self._update(res_id, vip_port_id) - - def disassociate(self, res_id): - self._update(res_id, None) diff --git a/kuryr_kubernetes/controller/drivers/utils.py b/kuryr_kubernetes/controller/drivers/utils.py deleted file mode 100644 index f2d335e5c..000000000 --- a/kuryr_kubernetes/controller/drivers/utils.py +++ /dev/null @@ -1,770 +0,0 @@ -# Copyright (c) 2018 Samsung Electronics Co.,Ltd -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
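-
-# NOTE: Several helpers below translate K8s label selectors into the
-# labelSelector query parameter of the K8s API. A minimal sketch of
-# the core idea (hypothetical standalone code, same semantics):
-#
-#     import urllib.parse
-#
-#     def selector_to_query(match_labels):
-#         # {'app': 'web', 'tier': 'db'} -> 'app=web,tier=db'
-#         return urllib.parse.urlencode(match_labels).replace('&', ',')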
- -import urllib -import uuid - -import eventlet -import netaddr -from openstack import exceptions as os_exc -from os_vif import objects -from oslo_config import cfg -from oslo_log import log -from oslo_serialization import jsonutils - -from kuryr_kubernetes import clients -from kuryr_kubernetes import constants -from kuryr_kubernetes import exceptions as k_exc -from kuryr_kubernetes import utils - - -OPERATORS_WITH_VALUES = [constants.K8S_OPERATOR_IN, - constants.K8S_OPERATOR_NOT_IN] - -LOG = log.getLogger(__name__) - -CONF = cfg.CONF - - -def get_network_id(subnets): - ids = list({net.id for net in subnets.values()}) - - if len(ids) != 1: - raise k_exc.IntegrityError( - "Subnet mapping %(subnets)s is not valid: " - "%(num_networks)s unique networks found" % - {'subnets': subnets, 'num_networks': len(ids)}) - - return ids[0] - - -def get_port_name(pod): - return get_resource_name(pod['metadata']['name'], - prefix=pod['metadata']['namespace'] + "/") - - -def get_device_id(pod): - return pod['metadata']['uid'] - - -def get_host_id(pod): - return pod['spec']['nodeName'] - - -def get_kuryrport(pod): - k8s = clients.get_kubernetes_client() - try: - return k8s.get(f'{constants.K8S_API_CRD_NAMESPACES}/' - f'{pod["metadata"]["namespace"]}/kuryrports/' - f'{pod["metadata"]["name"]}') - except k_exc.K8sResourceNotFound: - return None - - -def get_vifs(kp): - try: - return {k: objects.base.VersionedObject.obj_from_primitive(v['vif']) - for k, v in kp['status']['vifs'].items()} - except (KeyError, AttributeError, TypeError): - return {} - - -def is_pod_scheduled(pod): - try: - return bool(pod['spec']['nodeName']) - except KeyError: - return False - - -def get_pods(selector, namespace=None): - """Return a k8s object list with the pods matching the selector. - - It accepts an optional parameter to state the namespace where the pod - selector will be apply. If empty namespace is passed, then the pod - selector is applied in all namespaces. - - param selector: k8s selector of types matchLabels or matchExpressions - param namespace: namespace name where the selector will be applied. If - None, the pod selector is applied in all namespaces - return: k8s list object containing all matching pods - - """ - kubernetes = clients.get_kubernetes_client() - - svc_selector = selector.get('selector') - if svc_selector: - labels = replace_encoded_characters(svc_selector) - else: - labels = selector.get('matchLabels', None) - if labels: - # Removing pod-template-hash as pods will not have it and - # otherwise there will be no match - labels.pop('pod-template-hash', None) - labels = replace_encoded_characters(labels) - - exps = selector.get('matchExpressions', None) - if exps: - exps = ', '.join(format_expression(exp) for exp in exps) - if labels: - expressions = urllib.parse.quote("," + exps) - labels += expressions - else: - labels = urllib.parse.quote(exps) - - if namespace: - pods = kubernetes.get( - '{}/namespaces/{}/pods?labelSelector={}'.format( - constants.K8S_API_BASE, namespace, labels)) - else: - pods = kubernetes.get( - '{}/pods?labelSelector={}'.format(constants.K8S_API_BASE, labels)) - - return pods - - -def get_namespaces(selector): - """Return a k8s object list with the namespaces matching the selector. 
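-
-    Illustrative call (hypothetical labels):
-
-        get_namespaces({'matchLabels': {'env': 'prod'}})
-        # results in GET .../namespaces?labelSelector=env=prod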
- - param selector: k8s selector of types matchLabels or matchExpressions - return: k8s list object containing all matching namespaces - - """ - kubernetes = clients.get_kubernetes_client() - labels = selector.get('matchLabels', None) - if labels: - labels = replace_encoded_characters(labels) - - exps = selector.get('matchExpressions', None) - if exps: - exps = ', '.join(format_expression(exp) for exp in exps) - if labels: - expressions = urllib.parse.quote("," + exps) - labels += expressions - else: - labels = urllib.parse.quote(exps) - - namespaces = kubernetes.get( - '{}/namespaces?labelSelector={}'.format( - constants.K8S_API_BASE, labels)) - - return namespaces - - -def format_expression(expression): - key = expression['key'] - operator = expression['operator'].lower() - if operator in OPERATORS_WITH_VALUES: - values = expression['values'] - values = str(', '.join(values)) - values = "(%s)" % values - return "%s %s %s" % (key, operator, values) - else: - if operator == constants.K8S_OPERATOR_DOES_NOT_EXIST: - return "!%s" % key - else: - return key - - -def replace_encoded_characters(labels): - labels = urllib.parse.urlencode(labels) - # NOTE(ltomasbo): K8s API does not accept &, so we need to AND - # the matchLabels with ',' or '%2C' instead - labels = labels.replace('&', ',') - return labels - - -def create_security_group_rule(body, knp): - os_net = clients.get_network_client() - k8s = clients.get_kubernetes_client() - - try: - params = dict(body) - if 'ethertype' in params: - # NOTE(gryf): in openstacksdk, there is ether_type attribute in - # the security_group_rule object, in CRD we have 'ethertype' - # instead, just like it was returned by the neutron client. - params['ether_type'] = params['ethertype'] - del params['ethertype'] - sgr = os_net.create_security_group_rule(**params) - return sgr.id - except os_exc.ConflictException as ex: - if 'quota' in ex.details.lower(): - np = utils.get_referenced_object(knp, 'NetworkPolicy') - k8s.add_event(np, 'FailedToCreateSecurityGroupRule', - f'Creating security group rule for corresponding ' - f'Network Policy has failed: {ex}', - 'Warning') - LOG.error("Failed to create security group rule %s: %s", body, - ex.details) - raise - else: - LOG.debug("Failed to create already existing security group " - "rule %s", body) - # Get existent sg rule id from exception message - return str(ex).split()[-1][:-1] - except os_exc.SDKException as exc: - np = utils.get_referenced_object(knp, 'NetworkPolicy') - k8s.add_event(np, 'FailedToCreateSecurityGroupRule', - f'Creating security group rule for corresponding ' - f'Network Policy has failed: {exc}', - 'Warning') - LOG.debug("Error creating security group rule") - raise - - -def check_tag_on_creation(): - """Checks if Neutron supports tagging during bulk port creation. - - :param os_net: Network proxy object from Openstacksdk. 
- :return: Boolean - """ - os_net = clients.get_network_client() - extension = os_net.find_extension( - name_or_id='tag-ports-during-bulk-creation') - return bool(extension) - - -def delete_security_group_rule(security_group_rule_id, knp): - os_net = clients.get_network_client() - k8s = clients.get_kubernetes_client() - - try: - LOG.debug("Deleting sg rule with ID: %s", security_group_rule_id) - os_net.delete_security_group_rule(security_group_rule_id) - except os_exc.SDKException as exc: - np = utils.get_referenced_object(knp, 'NetworkPolicy') - k8s.add_event(np, 'FailedToDeleteSecurityGroupRule', - f'Deleting security group rule for corresponding ' - f'Network Policy has failed: {exc}', - 'Warning') - LOG.debug("Error deleting security group rule: %s", - security_group_rule_id) - raise - - -def patch_kuryrnetworkpolicy_crd(crd, i_rules, e_rules): - kubernetes = clients.get_kubernetes_client() - crd_name = crd['metadata']['name'] - LOG.debug('Patching KuryrNetworkPolicy CRD %s' % crd_name) - try: - spec = { - 'ingressSgRules': i_rules, - 'egressSgRules': e_rules, - } - - kubernetes.patch_crd('spec', utils.get_res_link(crd), spec) - except k_exc.K8sResourceNotFound: - LOG.debug('KuryrNetworkPolicy CRD not found %s', crd_name) - except k_exc.K8sClientException: - LOG.exception('Error updating KuryrNetworkPolicy CRD %s', crd_name) - raise - - -def create_security_group_rule_body( - direction, port_range_min=None, port_range_max=None, protocol=None, - ethertype='IPv4', cidr=None, - description="Kuryr-Kubernetes NetPolicy SG rule", namespace=None, - pods=None): - - if port_range_min and not port_range_max: - port_range_max = port_range_min - - if cidr and netaddr.IPNetwork(cidr).version == 6: - ethertype = 'IPv6' - - security_group_rule_body = { - 'sgRule': { - 'ethertype': ethertype, - 'description': description, - 'direction': direction, - } - } - if port_range_min and port_range_max: - security_group_rule_body['sgRule']['port_range_min'] = port_range_min - security_group_rule_body['sgRule']['port_range_max'] = port_range_max - if protocol: - security_group_rule_body['sgRule']['protocol'] = protocol.lower() - if cidr: - security_group_rule_body['sgRule']['remote_ip_prefix'] = cidr - if namespace: - security_group_rule_body['namespace'] = namespace - if pods: - security_group_rule_body['affectedPods'] = [ - {'podIP': ip, 'podNamespace': ns} for ip, ns in pods.items() if ip] - LOG.debug("Creating sg rule body %s", security_group_rule_body) - return security_group_rule_body - - -def get_pod_ip(pod): - try: - kp = get_kuryrport(pod) - vif = [x['vif'] for x in kp['status']['vifs'].values() - if x['default']][0] - except (KeyError, TypeError, IndexError): - return None - return (vif['versioned_object.data']['network'] - ['versioned_object.data']['subnets'] - ['versioned_object.data']['objects'][0] - ['versioned_object.data']['ips'] - ['versioned_object.data']['objects'][0] - ['versioned_object.data']['address']) - - -def get_annotations(resource, annotation): - try: - annotations = resource['metadata']['annotations'] - return annotations[annotation] - except KeyError: - return None - - -def get_annotated_labels(resource, annotation_labels): - labels_annotation = get_annotations(resource, annotation_labels) - if labels_annotation: - return jsonutils.loads(labels_annotation) - return None - - -def get_kuryrnetworkpolicy_crds(namespace=None): - - try: - if namespace: - knp_path = '{}/{}/kuryrnetworkpolicies'.format( - constants.K8S_API_CRD_NAMESPACES, namespace) - else: - knp_path = 
constants.K8S_API_CRD_KURYRNETWORKPOLICIES
-        knps = get_k8s_resources(knp_path)
-        LOG.debug("Returning KuryrNetworkPolicies %s", knps)
-    except k_exc.K8sClientException:
-        LOG.exception("Exception while fetching KuryrNetworkPolicies. "
-                      "Retrying.")
-        raise k_exc.ResourceNotReady(knp_path)
-    return knps
-
-
-def get_kuryrloadbalancer_crds(namespace=None):
-    if namespace:
-        klb_path = '{}/{}/kuryrloadbalancers'.format(
-            constants.K8S_API_CRD_NAMESPACES, namespace)
-    else:
-        klb_path = constants.K8S_API_CRD_KURYRLOADBALANCERS
-    klbs = get_k8s_resources(klb_path)
-    return klbs
-
-
-def get_k8s_resources(resource_path):
-    kubernetes = clients.get_kubernetes_client()
-    k8s_resource = {}
-    try:
-        k8s_resource = kubernetes.get(resource_path)
-    except k_exc.K8sResourceNotFound:
-        LOG.exception('Kubernetes CRD not found')
-        return []
-    return k8s_resource.get('items', [])
-
-
-def get_k8s_resource(resource_path):
-    kubernetes = clients.get_kubernetes_client()
-    k8s_resource = {}
-    try:
-        k8s_resource = kubernetes.get(resource_path)
-    except k_exc.K8sResourceNotFound:
-        LOG.debug('Kubernetes CRD not found %s', resource_path)
-        return k8s_resource
-    return k8s_resource
-
-
-def get_networkpolicies(namespace=None):
-    # FIXME(dulek): This is awful, shouldn't we have list method on
-    # k8s_client?
-    kubernetes = clients.get_kubernetes_client()
-
-    try:
-        if namespace:
-            np_path = '{}/{}/networkpolicies'.format(
-                constants.K8S_API_NETWORKING_NAMESPACES, namespace)
-        else:
-            np_path = constants.K8S_API_POLICIES
-        nps = kubernetes.get(np_path)
-    except k_exc.K8sResourceNotFound:
-        LOG.exception("NetworkPolicy or namespace %s not found", namespace)
-        raise
-    except k_exc.K8sClientException:
-        LOG.exception("Exception when listing NetworkPolicies.")
-        raise
-    return nps.get('items', [])
-
-
-def zip_resources(xs, ys):
-    """Returns tuples of resources matched by namespace and name.
-
-    :param xs: List of objects x, first level of iteration.
-    :param ys: List of objects y.
-    :return: List of tuples of matching (x, y)
-    """
-    pairs = []
-    for x in xs:
-        for y in ys:
-            if utils.get_res_unique_name(x) == utils.get_res_unique_name(y):
-                pairs.append((x, y))
-                break
-    return pairs
-
-
-def zip_knp_np(knps, nps):
-    """Returns tuples of matching KuryrNetworkPolicy and NetworkPolicy objs.
- - :param knps: List of KuryrNetworkPolicy objects - :param nps: List of NetworkPolicy objects - :return: List of tuples of matching (knp, np) - """ - return zip_resources(knps, nps) - - -def match_expressions(expressions, labels): - for exp in expressions: - exp_op = exp['operator'].lower() - if labels: - if exp_op in OPERATORS_WITH_VALUES: - exp_values = exp['values'] - label_value = labels.get(str(exp['key']), None) - if exp_op == constants.K8S_OPERATOR_IN: - if label_value is None or label_value not in exp_values: - return False - elif exp_op == constants.K8S_OPERATOR_NOT_IN: - if label_value in exp_values: - return False - else: - if exp_op == constants.K8S_OPERATOR_EXISTS: - exists = labels.get(str(exp['key']), None) - if exists is None: - return False - elif exp_op == constants.K8S_OPERATOR_DOES_NOT_EXIST: - exists = labels.get(str(exp['key']), None) - if exists is not None: - return False - else: - if exp_op in (constants.K8S_OPERATOR_IN, - constants.K8S_OPERATOR_EXISTS): - return False - return True - - -def match_labels(crd_labels, labels): - for crd_key, crd_value in crd_labels.items(): - label_value = labels.get(crd_key, None) - if not label_value or crd_value != label_value: - return False - return True - - -def match_selector(selector, labels): - if selector is None: - return True - if labels is None: - labels = {} - crd_labels = selector.get('matchLabels', None) - crd_expressions = selector.get('matchExpressions', None) - - match_exp = match_lb = True - if crd_expressions: - match_exp = match_expressions(crd_expressions, labels) - if crd_labels: - match_lb = match_labels(crd_labels, labels) - return match_exp and match_lb - - -def get_namespace_subnet_cidr(namespace): - kubernetes = clients.get_kubernetes_client() - try: - net_crd_path = (f"{constants.K8S_API_CRD_NAMESPACES}/" - f"{namespace['metadata']['name']}/kuryrnetworks/" - f"{namespace['metadata']['name']}") - net_crd = kubernetes.get(net_crd_path) - except k_exc.K8sResourceNotFound: - LOG.warning('Namespace %s not yet ready', - namespace['metadata']['name']) - return None - except k_exc.K8sClientException: - LOG.exception("Kubernetes Client Exception.") - raise - try: - subnet_cidr = net_crd['status']['subnetCIDR'] - except KeyError: - LOG.debug('Namespace not yet ready %s', - namespace['metadata']['name']) - return None - return subnet_cidr - - -def tag_neutron_resources(resources, exceptions=False): - """Set tags to the provided resources. - - param resources: list of openstacksdk objects to tag. - param exceptions: if true, SDKException will not be ignored - """ - tags = CONF.neutron_defaults.resource_tags - if not tags: - return - - os_net = clients.get_network_client() - for res in resources: - try: - os_net.set_tags(res, tags=tags) - except os_exc.SDKException: - LOG.warning("Failed to tag %s with %s. Ignoring, but this is " - "still unexpected.", res, tags, exc_info=True) - if exceptions: - raise - - -def get_services(namespace=None): - kubernetes = clients.get_kubernetes_client() - try: - if namespace: - services = kubernetes.get( - '{}/namespaces/{}/services'.format(constants.K8S_API_BASE, - namespace)) - else: - services = kubernetes.get( - '{}/services'.format(constants.K8S_API_BASE)) - except k_exc.K8sClientException: - LOG.exception('Exception when getting K8s services.') - raise - return services - - -def service_matches_affected_pods(service, pod_selectors): - """Returns if the service is affected by the pod selectors - - Checks if the service selector matches the labelSelectors of - NetworkPolicies. 
- - param service: k8s service - param pod_selectors: a list of kubernetes labelSelectors - return: True if the service is selected by any of the labelSelectors - and False otherwise. - """ - svc_selector = service['spec'].get('selector') - if not svc_selector: - return False - for selector in pod_selectors: - if match_selector(selector, svc_selector): - return True - return False - - -def get_namespaced_pods(namespace=None): - kubernetes = clients.get_kubernetes_client() - if namespace: - namespace = namespace['metadata']['name'] - pods = kubernetes.get( - '{}/namespaces/{}/pods'.format( - constants.K8S_API_BASE, namespace)) - else: - pods = kubernetes.get( - '{}/pods'.format( - constants.K8S_API_BASE)) - return pods - - -def get_container_ports(containers, np_port_name, pod): - matched_ports = [] - if utils.is_host_network(pod): - return matched_ports - for container in containers: - for container_port in container.get('ports', []): - if container_port.get('name') == np_port_name: - container_port = container_port.get('containerPort') - if container_port not in matched_ports: - matched_ports.append((pod, container_port)) - return matched_ports - - -def get_ports(resource, port): - """Returns values of ports that have a given port name - - Retrieves the values of ports, defined in the containers - associated to the resource, that has its name matching a - given port. - - param resource: k8s Pod or Namespace - param port: a dict containing a port and protocol - return: A list of tuples of port values and associated pods - """ - containers = resource['spec'].get('containers') - ports = [] - np_port = port.get('port') - if containers: - ports.extend(get_container_ports(containers, np_port, resource)) - else: - pods = get_namespaced_pods(resource).get('items') - for pod in pods: - containers = pod['spec']['containers'] - ports.extend(get_container_ports( - containers, np_port, pod)) - return ports - - -def get_namespace(namespace_name): - kubernetes = clients.get_kubernetes_client() - try: - return kubernetes.get( - '{}/namespaces/{}'.format( - constants.K8S_API_BASE, namespace_name)) - except k_exc.K8sResourceNotFound: - LOG.debug("Namespace not found: %s", - namespace_name) - return None - - -def get_endpoints_targets(name, namespace): - kubernetes = clients.get_kubernetes_client() - target_ips = [] - try: - klb_crd = kubernetes.get( - f'{constants.K8S_API_CRD_NAMESPACES}/{namespace}/' - f'kuryrloadbalancers/{name}') - except k_exc.K8sResourceNotFound: - LOG.debug("KuryrLoadBalancer %s not found on Namespace %s.", - name, namespace) - return target_ips - except k_exc.K8sClientException: - LOG.exception('Exception when getting K8s Endpoints.') - raise - - for ep_slice in klb_crd['spec'].get('endpointSlices', []): - for endpoint in ep_slice.get('endpoints', []): - target_ips.extend(endpoint.get('addresses', [])) - return target_ips - - -def bump_networkpolicy(knp): - kubernetes = clients.get_kubernetes_client() - - try: - kubernetes.annotate( - knp['metadata']['annotations']['networkPolicyLink'], - {constants.K8S_ANNOTATION_POLICY: str(uuid.uuid4())}) - except k_exc.K8sResourceNotFound: - raise - except k_exc.K8sClientException: - LOG.exception("Failed to annotate network policy %s to force its " - "recalculation.", utils.get_res_unique_name(knp)) - raise - - -def bump_networkpolicies(namespace=None): - k8s = clients.get_kubernetes_client() - nps = get_networkpolicies(namespace) - for np in nps: - try: - k8s.annotate(utils.get_res_link(np), - {constants.K8S_ANNOTATION_POLICY: 
str(uuid.uuid4())})
-        except k_exc.K8sResourceNotFound:
-            # Ignore if NP got deleted.
-            pass
-        except k_exc.K8sClientException:
-            LOG.warning("Failed to annotate network policy %s to force its "
-                        "recalculation.", utils.get_res_unique_name(np))
-            continue
-
-
-def is_network_policy_enabled():
-    enabled_handlers = CONF.kubernetes.enabled_handlers
-    svc_sg_driver = CONF.kubernetes.service_security_groups_driver
-    return 'policy' in enabled_handlers and svc_sg_driver == 'policy'
-
-
-def delete_port(leftover_port):
-    os_net = clients.get_network_client()
-
-    try:
-        # NOTE(gryf): it is unlikely that we get an exception like
-        # PortNotFound here, since openstacksdk neither raises an
-        # exception when a port does not exist nor returns any
-        # information.
-        os_net.delete_port(leftover_port.id)
-        return True
-    except os_exc.SDKException as e:
-        if "currently a subport for trunk" in str(e):
-            if leftover_port.status == "DOWN":
-                LOG.warning("Port %s is in DOWN status but still "
-                            "associated to a trunk. This should "
-                            "not happen. Trying to delete it from "
-                            "the trunk.", leftover_port.id)
-
-                # Get the trunk_id from the error message
-                trunk_id = (
-                    str(e).split('trunk')[1].split('.')[0].strip())
-                try:
-                    os_net.delete_trunk_subports(
-                        trunk_id, [{'port_id': leftover_port.id}])
-                except os_exc.ResourceNotFound:
-                    LOG.debug(
-                        "Port %s already removed from trunk %s",
-                        leftover_port.id, trunk_id)
-                try:
-                    os_net.delete_port(leftover_port.id)
-                    return True
-                except os_exc.SDKException:
-                    LOG.exception("Unexpected error deleting leftover "
-                                  "port %s. Skipping it and continuing "
-                                  "with the rest.", leftover_port.id)
-        else:
-            LOG.exception("Unexpected error deleting leftover "
-                          "port %s. Skipping it and continuing "
-                          "with the rest.", leftover_port.id)
-        return False
-
-
-def get_resource_name(name, uid='', prefix='', suffix=''):
-    """Get an OpenStack resource name out of a Kubernetes resource
-
-    OpenStack resource names are limited to 255 characters, while Kubernetes
-    allows resource names of up to 253 characters, so combining a k8s name
-    with a prefix, UID or suffix risks exceeding that limit. This function
-    favors the UID, prefix and suffix over the name of the k8s resource,
-    which gets truncated if needed.
-
-    https://kubernetes.io/docs/concepts/overview/working-with-objects/names/
-    """
-    if uid:
-        uid += '/'
-
-    length = len(f'{prefix}{uid}{name}{suffix}')
-
-    if length > 255:
-        name = name[:254-(length-254)]
-
-    return f'{prefix}{uid}{name}{suffix}'
-
-
-def delete_ports(leftover_port_list):
-    pool = eventlet.GreenPool(constants.LEFTOVER_RM_POOL_SIZE)
-    return all([i for i in pool.imap(delete_port, leftover_port_list)])
-
-
-def delete_neutron_port(port):
-    os_net = clients.get_network_client()
-    try:
-        os_net.delete_port(port)
-    except Exception as ex:
-        # NOTE(gryf): Catching all the exceptions here is intentional, since
-        # this function is intended to be run in a greenthread. The caller
-        # needs to examine the return value and decide which exceptions can
-        # be safely skipped and which need to be handled/raised.
-        return ex
-    return None
diff --git a/kuryr_kubernetes/controller/drivers/vif_pool.py b/kuryr_kubernetes/controller/drivers/vif_pool.py
deleted file mode 100644
index 566adf4ba..000000000
--- a/kuryr_kubernetes/controller/drivers/vif_pool.py
+++ /dev/null
@@ -1,1289 +0,0 @@
-# Copyright (c) 2017 Red Hat, Inc.
-# All Rights Reserved.
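Before the deleted vif_pool.py continues below, a small worked example of the truncation rule in get_resource_name() above; all values are hypothetical:

# Hypothetical values; mirrors the arithmetic of get_resource_name().
prefix = 'ns/'
uid = 36 * 'f' + '/'                 # as if a UID were passed ('/' appended)
name = 253 * 'x'                     # the longest name Kubernetes allows
suffix = ''

length = len(f'{prefix}{uid}{name}{suffix}')   # 293 in this case
if length > 255:
    name = name[:254 - (length - 254)]         # keeps 215 chars of the name

assert len(f'{prefix}{uid}{name}{suffix}') == 255  # within the 255 limit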
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import abc -import collections -import os -import threading - -import eventlet -from kuryr.lib._i18n import _ -from kuryr.lib import constants as kl_const -from openstack import exceptions as os_exc -from oslo_cache import core as cache -from oslo_concurrency import lockutils -from oslo_config import cfg as oslo_cfg -from oslo_log import log as logging -from oslo_log import versionutils - -from kuryr_kubernetes import clients -from kuryr_kubernetes import config -from kuryr_kubernetes import constants -from kuryr_kubernetes.controller.drivers import base -from kuryr_kubernetes.controller.drivers import utils as c_utils -from kuryr_kubernetes.controller.managers import pool -from kuryr_kubernetes import exceptions -from kuryr_kubernetes import os_vif_util as ovu -from kuryr_kubernetes import utils - -LOG = logging.getLogger(__name__) - -# Moved out from neutron_default group -vif_pool_driver_opts = [ - oslo_cfg.IntOpt('ports_pool_max', - help=_("Set a maximum amount of ports per pool. " - "0 to disable"), - default=0), - oslo_cfg.IntOpt('ports_pool_min', - help=_("Set a target minimum size of the pool of ports"), - default=5), - oslo_cfg.IntOpt('ports_pool_batch', - help=_("Number of ports to be created in a bulk request"), - default=10), - oslo_cfg.IntOpt('ports_pool_update_frequency', - help=_("Minimum interval (in seconds) " - "between pool updates"), - default=20), - oslo_cfg.DictOpt('pools_vif_drivers', - help=_("Dict with the pool driver and pod driver to be " - "used. If not set, it will take them from the " - "kubernetes driver options for pool and pod " - "drivers respectively"), - default={}, deprecated_for_removal=True, - deprecated_since="Stein", - deprecated_reason=_( - "Mapping from pool->vif does not allow different " - "vifs to use the same pool driver. " - "Use vif_pool_mapping instead.")), - oslo_cfg.DictOpt('vif_pool_mapping', - help=_("Dict with the pod driver and the corresponding " - "pool driver to be used. 
If not set, it will take " - "them from the kubernetes driver options for pool " - "and pod drivers respectively"), - default={}), -] - -oslo_cfg.CONF.register_opts(vif_pool_driver_opts, "vif_pool") - -node_vif_driver_caching_opts = [ - oslo_cfg.BoolOpt('caching', default=True, - help=_('Enable caching of vifs.')), - oslo_cfg.IntOpt('cache_time', default=3600, - help=_('TTL, in seconds, for cached vifs')), -] - -oslo_cfg.CONF.register_opts(node_vif_driver_caching_opts, - "node_driver_caching") - -cache.configure(oslo_cfg.CONF) -node_driver_cache_region = cache.create_region() -MEMOIZE = cache.get_memoization_decorator( - oslo_cfg.CONF, node_driver_cache_region, "node_driver_caching") - -cache.configure_cache_region(oslo_cfg.CONF, node_driver_cache_region) - -VIF_TYPE_TO_DRIVER_MAPPING = { - 'VIFOpenVSwitch': 'neutron-vif', - 'VIFBridge': 'neutron-vif', - 'VIFVlanNested': 'nested-vlan', - 'VIFMacvlanNested': 'nested-macvlan', - 'VIFDPDKNested': 'nested-dpdk', - 'VIFVHostUser': 'neutron-vif', -} - -NODE_PORTS_CLEAN_FREQUENCY = 600 # seconds -POPULATE_POOL_TIMEOUT = 420 # seconds -BULK_PORTS_CREATION_REQUESTS = 20 - - -class NoopVIFPool(base.VIFPoolDriver): - """No pool VIFs for Kubernetes Pods""" - - def set_vif_driver(self, driver): - self._drv_vif = driver - - def request_vif(self, pod, project_id, subnets, security_groups): - return self._drv_vif.request_vif(pod, project_id, subnets, - security_groups) - - def release_vif(self, pod, vif, *argv): - self._drv_vif.release_vif(pod, vif, *argv) - - def activate_vif(self, vif, **kwargs): - self._drv_vif.activate_vif(vif, **kwargs) - - def update_vif_sgs(self, pod, sgs): - self._drv_vif.update_vif_sgs(pod, sgs) - - def remove_sg_from_pools(self, sg_id, net_id): - pass - - def sync_pools(self): - pass - - -class BaseVIFPool(base.VIFPoolDriver, metaclass=abc.ABCMeta): - """Skeletal pool driver. - - In order to handle the pools of ports, a few dicts are used: - _available_ports_pool is a dictionary with the ready to use Neutron ports - information. The keys are the 'pool_key' and the values the 'port_id's. - _existing_vifs is a dictionary containing the port vif objects. The keys - are the 'port_id' and the values are the vif objects. - _recyclable_ports is a dictionary with the Neutron ports to be - recycled. The keys are the 'port_id' and their values are the 'pool_key'. - _populate_pool_lock is a dict with the pool_key as key and a lock as value. - Also, there is a _lock to control access to _populate_pool_lock dict. - - The following driver configuration options exist: - - ports_pool_max: it specifies how many ports can be kept at each pool. - If the pool already reached the specified size, the ports to be recycled - are deleted instead. If set to 0, the limit is disabled and ports are - always recycled. - - ports_pool_min: minimum desired number of ready to use ports at populated - pools. Should be smaller than ports_pool_max (if enabled). - - ports_pool_batch: target number of ports to be created in bulk requests - when populating pools. - - ports_pool_update_frequency: interval in seconds between ports pool - updates for recycling ports. - Also, it has a Semaphore _create_ports_semaphore to restrict the number of - bulk Ports creation calls running in parallel. 
- """ - - def __init__(self): - # Note(ltomasbo) Execute the port recycling periodic actions in a - # background thread - self._recovered_pools = False - eventlet.spawn(self._return_ports_to_pool) - eventlet.spawn(self._cleanup_removed_nodes) - - def set_vif_driver(self, driver): - self._drv_vif = driver - - def activate_vif(self, vif, **kwargs): - self._drv_vif.activate_vif(vif, **kwargs) - - def update_vif_sgs(self, pod, sgs): - self._drv_vif.update_vif_sgs(pod, sgs) - - def _get_pool_size(self, pool_key): - pool = self._available_ports_pools.get(pool_key, {}) - pool_members = [] - for port_list in pool.values(): - pool_members.extend(port_list) - return len(pool_members) - - def _get_host_addr(self, pod): - return pod['status']['hostIP'] - - def _get_pool_key(self, host, project_id, net_id=None, subnets=None): - if not net_id and subnets: - net_obj = list(subnets.values())[0] - net_id = net_obj.id - pool_key = (host, project_id, net_id) - return pool_key - - def _get_pool_key_net(self, pool_key): - return pool_key[2] - - def request_vif(self, pod, project_id, subnets, security_groups): - if not self._recovered_pools: - LOG.debug("Kuryr-controller not yet ready to handle new pods.") - raise exceptions.ResourceNotReady(pod) - try: - host_addr = self._get_host_addr(pod) - except KeyError: - return None - - pool_key = self._get_pool_key(host_addr, project_id, None, subnets) - - # NOTE(maysams): It's possible that more recent Pods will retrieve - # the Ports from the pool that older Pods were waiting for. In case - # this happens, the event will be retried. - try: - return self._get_port_from_pool(pool_key, pod, subnets, - tuple(sorted(security_groups))) - except exceptions.ResourceNotReady: - LOG.debug("Ports pool does not have available ports: %s", pool_key) - if self._populate_pool(pool_key, pod, subnets, - tuple(sorted(security_groups))): - return self._get_port_from_pool( - pool_key, pod, subnets, tuple(sorted(security_groups))) - raise - - def _set_port_debug(self, port_id, pod): - """_set_port_debug sets name to the port to simplify debugging""" - raise NotImplementedError() - - def _get_populate_pool_lock(self, pool_key): - with self._lock: - return self._populate_pool_lock[pool_key] - - def _get_port_from_pool(self, pool_key, pod, subnets, security_groups): - try: - pool_ports = self._available_ports_pools[pool_key] - except (KeyError, AttributeError): - raise exceptions.ResourceNotReady(pod) - - try: - port_id = pool_ports[security_groups].pop() - except (KeyError, IndexError): - # Get another port from the pool and update the SG to the - # appropriate one. It uses a port from the group that was updated - # longer ago - these will be at the front of the OrderedDict. - for sg_group, ports in pool_ports.items(): - try: - port_id = pool_ports[sg_group].pop() - break - except (IndexError, KeyError): - continue - else: - # pool is empty, no port to reuse - raise exceptions.ResourceNotReady(pod) - os_net = clients.get_network_client() - os_net.update_port(port_id, security_groups=list(security_groups)) - if config.CONF.kubernetes.port_debug: - self._set_port_debug(port_id, pod) - eventlet.spawn(self._populate_pool, pool_key, pod, subnets, - security_groups) - # Add protection from port_id not in existing_vifs - try: - port = self._existing_vifs[port_id] - except KeyError: - LOG.debug('Missing port on existing_vifs, this should not happen.' 
- ' Retrying.') - raise exceptions.ResourceNotReady(pod) - return port - - def _populate_pool(self, pool_key, pod, subnets, security_groups): - # REVISIT(ltomasbo): Drop the subnets parameter and get the information - # from the pool_key, which will be required when multi-network is - # supported - kubernetes = clients.get_kubernetes_client() - - if not self._recovered_pools: - LOG.debug("Kuryr-controller not yet ready to populate pools.") - return False - ports_pool_min = oslo_cfg.CONF.vif_pool.ports_pool_min - lock = self._get_populate_pool_lock(pool_key) - # NOTE(maysams): Only allow one request vifs per pool and times out - # if takes 420 sec. - if lock.acquire(timeout=POPULATE_POOL_TIMEOUT): - pool_size = self._get_pool_size(pool_key) - try: - if pool_size < ports_pool_min: - num_ports = max(oslo_cfg.CONF.vif_pool.ports_pool_batch, - ports_pool_min - pool_size) - try: - vifs = self._drv_vif.request_vifs( - pod=pod, - project_id=pool_key[1], - subnets=subnets, - security_groups=security_groups, - num_ports=num_ports, - semaphore=self._create_ports_semaphore) - except os_exc.SDKException as exc: - kubernetes.add_event( - pod, 'FailToPopulateVIFPool', - f'There was an error during populating VIF pool ' - f'for pod: {exc.message}', type_='Warning') - raise - - for vif in vifs: - self._existing_vifs[vif.id] = vif - self._available_ports_pools[pool_key].setdefault( - security_groups, []).append(vif.id) - if vifs: - # Mark it as updated most recently. - self._available_ports_pools[pool_key].move_to_end( - security_groups) - finally: - lock.release() - else: - return False - return True - - def release_vif(self, pod, vif, project_id, host_addr=None): - if not self._recovered_pools: - LOG.debug("Kuryr-controller not yet ready to remove pods.") - raise exceptions.ResourceNotReady(pod) - if not host_addr: - host_addr = self._get_host_addr(pod) - - pool_key = self._get_pool_key(host_addr, project_id, vif.network.id, - None) - - try: - if not self._existing_vifs.get(vif.id): - self._existing_vifs[vif.id] = vif - self._recyclable_ports[vif.id] = pool_key - except AttributeError: - LOG.debug("Kuryr-controller is not ready to handle the pools yet.") - raise exceptions.ResourceNotReady(pod) - - def _return_ports_to_pool(self): - raise NotImplementedError() - - def _recover_precreated_ports(self): - raise NotImplementedError() - - def _get_in_use_ports_info(self): - kubernetes = clients.get_kubernetes_client() - in_use_ports = [] - networks = {} - kuryr_ports = kubernetes.get(constants.K8S_API_CRD_KURYRPORTS) - for kp in kuryr_ports['items']: - vifs = c_utils.get_vifs(kp) - for data in vifs.values(): - in_use_ports.append(data.id) - networks[data.network.id] = data.network - return in_use_ports, networks - - def list_pools(self): - return self._available_ports_pools - - def show_pool(self, pool_key): - return self._available_ports_pools.get(pool_key) - - def delete_network_pools(self, net_id): - raise NotImplementedError() - - def remove_sg_from_pools(self, sg_id, net_id): - os_net = clients.get_network_client() - for pool_key, pool_ports in list(self._available_ports_pools.items()): - if self._get_pool_key_net(pool_key) != net_id: - continue - for sg_key, ports in list(pool_ports.items()): - if sg_id not in sg_key: - continue - # remove the pool associated to that SG - try: - del self._available_ports_pools[pool_key][sg_key] - except KeyError: - LOG.debug("SG already removed from the pool. 
Ports " - "already re-used, no need to change their " - "associated SGs.") - continue - for port_id in ports: - # remove all SGs from the port to be reused - os_net.update_port(port_id, security_groups=None) - # add the port to the default pool - self._available_ports_pools[pool_key].setdefault( - (), []).append(port_id) - # NOTE(ltomasbo): as this ports were not created for this - # pool, ensuring they are used first, marking them as the - # most outdated - self._available_ports_pools[pool_key].move_to_end( - (), last=False) - - def _create_healthcheck_file(self): - # Note(ltomasbo): Create a health check file when the pre-created - # ports are loaded into their corresponding pools. This file is used - # by the readiness probe when the controller is deployed in - # containerized mode. This way the controller pod will not be ready - # until all the pre-created ports have been loaded - try: - with open('/tmp/pools_loaded', 'a'): - LOG.debug("Health check file created for readiness probe") - except IOError: - LOG.exception("I/O error creating the health check file.") - - @lockutils.synchronized('return_to_pool_baremetal') - @lockutils.synchronized('return_to_pool_nested') - def sync_pools(self): - # NOTE(ltomasbo): Ensure readiness probe is not set to true until the - # pools sync is completed in case of controller restart - try: - os.remove('/tmp/pools_loaded') - except OSError: - pass - - self._available_ports_pools = collections.defaultdict( - collections.OrderedDict) - self._existing_vifs = collections.defaultdict() - self._recyclable_ports = collections.defaultdict() - self._lock = threading.Lock() - self._populate_pool_lock = collections.defaultdict(threading.Lock) - semaphore = eventlet.semaphore.Semaphore(BULK_PORTS_CREATION_REQUESTS) - self._create_ports_semaphore = semaphore - - def _get_trunks_info(self): - """Returns information about trunks and their subports. - - This method searches for parent ports and subports among the active - neutron ports. - To find the parent ports it filters the ones that have trunk_details, - i.e., the ones that are the parent port of a trunk. - To find the subports to recover, it filters out the ports that are - already in used by running kubernetes pods. It also filters out the - ports whose device_owner is not related to subports, i.e., the ports - that are not attached to trunks, such as active ports allocated to - running VMs. - At the same time it collects information about ports subnets to - minimize the number of interaction with Neutron API. - - It returns three dictionaries with the needed information about the - parent ports, subports and subnets - - :return: 3 dicts with the trunk details (Key: trunk_id; Value: dict - containing ip and subports), subport details (Key: port_id; Value: - port_object), and subnet details (Key: subnet_id; Value: subnet dict) - """ - # REVISIT(ltomasbo): there is no need to recover the subports - # belonging to trunk ports whose parent port is DOWN as that means no - # pods can be scheduled there. We may need to update this if we allow - # lively extending the kubernetes cluster with VMs that already have - # precreated subports. For instance by shutting down and up a - # kubernetes Worker VM with subports already attached, and the - # controller is restarted in between. 
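# (Illustrative sketch, not part of the original file: based on the
# assignments below, the three dicts returned by _get_trunks_info() have
# roughly these shapes.)
#
#   parent_ports = {trunk_id: {'ip': '<parent port fixed IP>',
#                              'subports': [{'port_id': '...',
#                                            'segmentation_id': 100}, ...]}}
#   subports     = {port_id: <openstacksdk Port object>}
#   subnets      = {subnet_id: {subnet_id: <result of utils.get_subnet()>}}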
- os_net = clients.get_network_client() - parent_ports = {} - subports = {} - subnets = {} - - attrs = {'status': 'ACTIVE'} - tags = config.CONF.neutron_defaults.resource_tags - if tags: - attrs['tags'] = tags - - all_active_ports = os_net.ports(**attrs) - in_use_ports, in_use_networks = self._get_in_use_ports_info() - - for port in all_active_ports: - # Parent port - # NOTE(dulek): We do not filter by worker_nodes_subnets here - # meaning that we might include some unrelated trunks, - # but the consequence is only memory usage. - if port.trunk_details and port.fixed_ips: - parent_ports[port.trunk_details['trunk_id']] = { - 'ip': port.fixed_ips[0]['ip_address'], - 'subports': port.trunk_details['sub_ports']} - else: - # Filter to only get subports that are not in use - if (port.id not in in_use_ports and - port.device_owner in ['trunk:subport', - kl_const.DEVICE_OWNER]): - subports[port.id] = port - # NOTE(ltomasbo): _get_subnet can be costly as it - # needs to call neutron to get network and subnet - # information. This ensures it is only called once - # per subnet in use - subnet_id = port.fixed_ips[0]['subnet_id'] - if not subnets.get(subnet_id): - # NOTE(maysams): Avoid calling Neutron by - # getting the Network and Subnet info from - # Network defined on an existing KuryrPort CR. - # This assumes only one Subnet exists per Network. - network = in_use_networks.get(port.network_id) - if network: - subnets[subnet_id] = {subnet_id: network} - else: - subnets[subnet_id] = { - subnet_id: utils.get_subnet(subnet_id)} - return parent_ports, subports, subnets - - def _cleanup_leftover_ports(self): - os_net = clients.get_network_client() - existing_ports = os_net.ports(device_owner=kl_const.DEVICE_OWNER, - status='DOWN') - - tags = config.CONF.neutron_defaults.resource_tags - if tags: - nets = os_net.networks(tags=tags) - nets_ids = [n.id for n in nets] - for port in existing_ports: - net_id = port.network_id - if net_id in nets_ids: - if port.binding_host_id: - if set(tags).difference(set(port.tags)): - # delete the port if it has binding details, it - # belongs to the deployment subnet and it does not - # have the right tags - try: - os_net.delete_port(port.id) - except os_exc.SDKException: - LOG.debug("Problem deleting leftover port %s. " - "Skipping.", port.id) - else: - # delete port if they have no binding but belong to the - # deployment networks, regardless of their tagging - try: - os_net.delete_port(port.id) - except os_exc.SDKException: - LOG.debug("Problem deleting leftover port %s. " - "Skipping.", port.id) - continue - else: - c_utils.delete_ports([p for p in existing_ports - if not p.binding_host_id]) - - def _cleanup_removed_nodes(self): - """Remove ports associated to removed nodes.""" - previous_ports_to_remove = [] - while True: - # NOTE(ltomasbo): Nodes are not expected to be removed - # frequently, so there is no need to execute this frequently - # either - eventlet.sleep(NODE_PORTS_CLEAN_FREQUENCY) - try: - self._trigger_removed_nodes_ports_cleanup( - previous_ports_to_remove) - except Exception: - LOG.exception('Error while removing the ports associated to ' - 'deleted nodes. It will be retried in %s ' - 'seconds', NODE_PORTS_CLEAN_FREQUENCY) - - def _trigger_removed_nodes_ports_cleanup(self, previous_ports_to_remove): - """Remove ports associated to removed nodes. - - There are two types of ports pool, one for neutron and one for nested. - For the nested, the ports lost their device_owner after being detached, - i.e., after the node they belong to got removed. 
This means we cannot - find them unless they have been tagged. - - For the neutron ones, we rely on them having the kuryr device owner - and not having binding information, thus ensuring they are not - attached to any node. However, to avoid the case where those ports - are being created at the same time of the cleanup process, we don't - delete them unless we have seen them for 2 iterations. - """ - if not self._recovered_pools: - LOG.debug("Kuryr-controller not yet ready to perform nodes" - " cleanup.") - return - os_net = clients.get_network_client() - tags = config.CONF.neutron_defaults.resource_tags - if tags: - subnetpool_id = config.CONF.namespace_subnet.pod_subnet_pool - if subnetpool_id: - subnets = os_net.subnets(tags=tags, - subnetpool_id=subnetpool_id) - subnets_ids = [s.id for s in subnets] - else: - subnets_ids = [config.CONF.neutron_defaults.pod_subnet] - - # NOTE(ltomasbo): Detached subports gets their device_owner unset - detached_subports = os_net.ports(status='DOWN', tags=tags) - for subport in detached_subports: - # FIXME(ltomasbo): Looking for trunk:subport is only needed - # due to a bug in neutron that does not reset the - # device_owner after the port is detached from the trunk - if subport.device_owner not in ['', 'trunk:subport']: - continue - if subport.id not in previous_ports_to_remove: - # FIXME(ltomasbo): Until the above problem is there, - # we need to add protection for recently created ports - # that are still being attached - previous_ports_to_remove.append(subport.id) - continue - # check if port belonged to kuryr and it was a subport - # FIXME(ltomasbo): Assuming single stack - if len(subport.fixed_ips) != 1: - # This should never happen as there is no option to create - # ports without IPs in Neutron, yet we hit it. So adding - # protection from it - continue - if subport.fixed_ips[0].get('subnet_id') not in subnets_ids: - continue - try: - del self._existing_vifs[subport.id] - except KeyError: - LOG.debug('Port %s is not in the ports list.', subport.id) - port_deleted = c_utils.delete_port(subport) - if port_deleted: - previous_ports_to_remove.remove(subport.id) - - # normal ports, or subports not yet attached - existing_ports = os_net.ports( - device_owner=kl_const.DEVICE_OWNER, - status='DOWN', - tags=tags) - else: - # normal ports, or subports not yet attached - existing_ports = os_net.ports( - device_owner=kl_const.DEVICE_OWNER, - status='DOWN') - - for port in existing_ports: - # NOTE(ltomasbo): It may be that the port got just created and it - # is still being attached and/or being tagged. - if port.id not in previous_ports_to_remove: - previous_ports_to_remove.append(port.id) - continue - - if not port.binding_host_id: - try: - del self._existing_vifs[port.id] - except KeyError: - LOG.debug('Port %s is not in the ports list.', port.id) - try: - os_net.delete_port(port.id) - except os_exc.SDKException: - LOG.debug("Problem deleting leftover port %s. " - "Skipping.", port.id) - else: - previous_ports_to_remove.remove(port.id) - - -class NeutronVIFPool(BaseVIFPool): - """Manages VIFs for Bare Metal Kubernetes Pods.""" - - def _get_host_addr(self, pod): - return pod['spec']['nodeName'] - - def _set_port_debug(self, port_id, pod): - os_net = clients.get_network_client() - os_net.update_port(port_id, name=c_utils.get_port_name(pod), - device_id=pod['metadata']['uid']) - - def _return_ports_to_pool(self): - """Recycle ports to be reused by future pods. 
- - For each port in the recyclable_ports dict it reapplies - security group if they have been changed and it changes the port - name to available_port if the port_debug option is enabled. - Then the port_id is included in the dict with the available_ports. - - If a maximum number of ports per pool is set, the port will be - deleted if the maximum has been already reached. - """ - while True: - eventlet.sleep(oslo_cfg.CONF.vif_pool.ports_pool_update_frequency) - try: - self._trigger_return_to_pool() - except Exception: - LOG.exception( - 'Error while returning ports to pool. ' - 'It will be retried in %s seconds', - oslo_cfg.CONF.vif_pool.ports_pool_update_frequency) - - @lockutils.synchronized('return_to_pool_baremetal') - def _trigger_return_to_pool(self): - if not self._recovered_pools: - LOG.debug("Kuryr-controller not yet ready to return ports to " - "pools.") - return - os_net = clients.get_network_client() - sg_current = {} - if not config.CONF.kubernetes.port_debug: - attrs = {'device_owner': kl_const.DEVICE_OWNER} - tags = config.CONF.neutron_defaults.resource_tags - if tags: - attrs['tags'] = tags - - for port in os_net.ports(**attrs): - if port.id in self._recyclable_ports: - sg_current[port.id] = tuple(sorted( - port.security_group_ids)) - - for port_id, pool_key in list(self._recyclable_ports.items()): - if (not oslo_cfg.CONF.vif_pool.ports_pool_max or - self._get_pool_size(pool_key) < - oslo_cfg.CONF.vif_pool.ports_pool_max): - port_name = (constants.KURYR_PORT_NAME - if config.CONF.kubernetes.port_debug - else '') - if config.CONF.kubernetes.port_debug: - try: - os_net.update_port(port_id, name=port_name, - device_id='') - except os_exc.SDKException: - LOG.warning("Error changing name for port %s to be " - "reused, put back on the cleanable " - "pool.", port_id) - continue - sg = sg_current.get(port_id) - self._available_ports_pools[pool_key].setdefault( - sg, []).append(port_id) - # Move it to the end of ports to update the SG. - self._available_ports_pools[pool_key].move_to_end(sg) - else: - try: - del self._existing_vifs[port_id] - os_net.delete_port(port_id) - except KeyError: - LOG.debug('Port %s is not in the ports list.', port_id) - try: - del self._recyclable_ports[port_id] - except KeyError: - LOG.debug('Port already recycled: %s', port_id) - - def sync_pools(self): - super(NeutronVIFPool, self).sync_pools() - # NOTE(ltomasbo): Ensure previously created ports are recovered into - # their respective pools - self._cleanup_leftover_ports() - self._recover_precreated_ports() - self._recovered_pools = True - - def _recover_precreated_ports(self): - os_net = clients.get_network_client() - attrs = {'device_owner': kl_const.DEVICE_OWNER} - tags = config.CONF.neutron_defaults.resource_tags - if tags: - attrs['tags'] = tags - - if config.CONF.kubernetes.port_debug: - attrs['name'] = constants.KURYR_PORT_NAME - available_ports = os_net.ports(**attrs) - else: - kuryr_ports = os_net.ports(**attrs) - in_use_ports, _ = self._get_in_use_ports_info() - available_ports = [port for port in kuryr_ports - if port.id not in in_use_ports] - - _, available_subports, _ = self._get_trunks_info() - for port in available_ports: - # NOTE(ltomasbo): ensure subports are not considered for - # recovering in the case of multi pools - if available_subports.get(port.id): - continue - if not port.binding_vif_type or not port.binding_host_id: - # NOTE(ltomasbo): kuryr-controller is running without the - # rights to get the needed information to recover the ports. 
- # Thus, removing the port instead - os_net = clients.get_network_client() - os_net.delete_port(port.id) - continue - subnet_id = port.fixed_ips[0]['subnet_id'] - subnet = { - subnet_id: utils.get_subnet(subnet_id)} - vif = ovu.neutron_to_osvif_vif(port.binding_vif_type, port, subnet) - net_obj = subnet[subnet_id] - pool_key = self._get_pool_key(port.binding_host_id, - port.project_id, - net_obj.id, None) - - self._existing_vifs[port.id] = vif - self._available_ports_pools[pool_key].setdefault( - tuple(sorted(port.security_group_ids)), []).append(port.id) - - LOG.info("PORTS POOL: pools updated with pre-created ports") - self._create_healthcheck_file() - - def delete_network_pools(self, net_id): - if not self._recovered_pools: - LOG.debug("Kuryr-controller not yet ready to delete network " - "pools.") - raise exceptions.ResourceNotReady(net_id) - - epool = eventlet.GreenPool(constants.LEFTOVER_RM_POOL_SIZE) - - # NOTE(ltomasbo): Note the pods should already be deleted, but their - # associated ports may not have been recycled yet, therefore not being - # on the available_ports_pools dict. The next call forces it to be on - # that dict before cleaning it up - self._trigger_return_to_pool() - for pool_key, ports in list(self._available_ports_pools.items()): - if self._get_pool_key_net(pool_key) != net_id: - continue - ports_id = [] - for sg_ports in ports.values(): - ports_id.extend(sg_ports) - for port_id in ports_id: - try: - del self._existing_vifs[port_id] - except KeyError: - LOG.debug('Port %s is not in the ports list.', port_id) - # NOTE(gryf): openstack client doesn't return information, if - # the port deos not exists - - # Delete ports concurrently - for result in epool.imap(c_utils.delete_neutron_port, ports_id): - if result: - LOG.error('During Neutron port deletion an error occured: ' - '%s', result) - raise result - - del self._available_ports_pools[pool_key] - with self._lock: - try: - del self._populate_pool_lock[pool_key] - except KeyError: - pass - - -class NestedVIFPool(BaseVIFPool): - """Manages VIFs for nested Kubernetes Pods. - - In order to handle the pools of ports for nested Pods, an extra dict is - used: - _known_trunk_ids is a dictionary that keeps the trunk port ids associated - to each pool_key to skip calls to neutron to get the trunk information. - """ - _known_trunk_ids = collections.defaultdict(str) - - def __init__(self): - super(NestedVIFPool, self).__init__() - # Start the pool manager so that pools can be populated/freed on - # demand - if config.CONF.kubernetes.enable_manager: - self._pool_manager = pool.PoolManager() - - def set_vif_driver(self, driver): - self._drv_vif = driver - - def release_vif(self, pod, vif, project_id): - if not self._recovered_pools: - LOG.debug("Kuryr-controller not yet ready to remove pods.") - raise exceptions.ResourceNotReady(pod) - try: - host_addr = self._get_host_addr(pod) - except KeyError: - name = pod['metadata']['name'] - LOG.warning("Pod %s does not have status.hostIP field set when " - "getting deleted. This is unusual. 
Trying to " - "determine the IP by calling Neutron.", - name) - - parent_id = utils.get_parent_port_id(vif) - if not parent_id: - LOG.warning("Port %s not found, ignoring its release request.", - vif.id) - return - - host_addr = utils.get_parent_port_ip(parent_id) - LOG.debug("Determined hostIP for pod %s is %s", name, host_addr) - - super(NestedVIFPool, self).release_vif( - pod, vif, project_id, host_addr=host_addr) - - def _set_port_debug(self, port_id, pod): - os_net = clients.get_network_client() - os_net.update_port(port_id, name=c_utils.get_port_name(pod)) - - def _return_ports_to_pool(self): - """Recycle ports to be reused by future pods. - - For each port in the recyclable_ports dict it reapplies - security group if they have been changed and it changes the port - name to available_port if the port_debug option is enabled. - Then the port_id is included in the dict with the available_ports. - - If a maximum number of ports per pool is set, the port will be - deleted if the maximum has been already reached. - """ - while True: - eventlet.sleep(oslo_cfg.CONF.vif_pool.ports_pool_update_frequency) - try: - self._trigger_return_to_pool() - except Exception: - LOG.exception( - 'Error while returning ports to pool. ' - 'It will be retried in %s seconds', - oslo_cfg.CONF.vif_pool.ports_pool_update_frequency) - - @lockutils.synchronized('return_to_pool_nested') - def _trigger_return_to_pool(self): - if not self._recovered_pools: - LOG.debug("Kuryr-controller not yet ready to return ports to " - "pools.") - return - os_net = clients.get_network_client() - sg_current = {} - if not config.CONF.kubernetes.port_debug: - attrs = {'device_owner': ['trunk:subport', kl_const.DEVICE_OWNER]} - tags = config.CONF.neutron_defaults.resource_tags - if tags: - attrs['tags'] = tags - kuryr_subports = os_net.ports(**attrs) - for subport in kuryr_subports: - if subport.id in self._recyclable_ports: - sg_current[subport.id] = tuple(sorted( - subport.security_group_ids)) - - for port_id, pool_key in list(self._recyclable_ports.items()): - if (not oslo_cfg.CONF.vif_pool.ports_pool_max or - self._get_pool_size(pool_key) < - oslo_cfg.CONF.vif_pool.ports_pool_max): - port_name = (constants.KURYR_PORT_NAME - if config.CONF.kubernetes.port_debug - else '') - if config.CONF.kubernetes.port_debug: - try: - os_net.update_port(port_id, name=port_name) - except os_exc.SDKException: - LOG.warning("Error changing name for port %s to be " - "reused, put back on the cleanable " - "pool.", port_id) - continue - sg = sg_current.get(port_id) - self._available_ports_pools[pool_key].setdefault( - sg, []).append(port_id) - # Move it to the end of ports to update the SG. 
- self._available_ports_pools[pool_key].move_to_end(sg) - else: - trunk_id = self._get_trunk_id(pool_key) - try: - self._drv_vif._remove_subport(trunk_id, port_id) - self._drv_vif._release_vlan_id( - self._existing_vifs[port_id].vlan_id) - del self._existing_vifs[port_id] - os_net.delete_port(port_id) - except KeyError: - LOG.debug('Port %s is not in the ports list.', port_id) - except (os_exc.SDKException, os_exc.HttpException): - LOG.warning('Error removing the subport %s', port_id) - continue - try: - del self._recyclable_ports[port_id] - except KeyError: - LOG.debug('Port already recycled: %s', port_id) - - def _get_trunk_id(self, pool_key): - trunk_id = self._known_trunk_ids.get(pool_key, None) - if not trunk_id: - p_port = self._drv_vif._get_parent_port_by_host_ip(pool_key[0]) - trunk_id = self._drv_vif._get_trunk_id(p_port) - self._known_trunk_ids[pool_key] = trunk_id - return trunk_id - - def sync_pools(self): - super(NestedVIFPool, self).sync_pools() - # NOTE(ltomasbo): Ensure previously created ports are recovered into - # their respective pools - self._recover_precreated_ports() - self._recovered_pools = True - eventlet.spawn(self._cleanup_leftover_ports) - - def _recover_precreated_ports(self): - self._precreated_ports(action='recover') - LOG.info("PORTS POOL: pools updated with pre-created ports") - self._create_healthcheck_file() - - def _remove_precreated_ports(self, trunk_ips=None): - self._precreated_ports(action='free', trunk_ips=trunk_ips) - - def _precreated_ports(self, action, trunk_ips=None): - """Removes or recovers pre-created subports at given pools - - This function handles the pre-created ports based on the given action: - - If action is `free` it will remove all the subport from the given - trunk ports, or from all the trunk ports if no trunk_ips are passed. - - If action is `recover` it will discover the existing subports in the - given trunk ports (or in all of them if none are passed) and will add - them (and the needed information) to the respective pools. - """ - os_net = clients.get_network_client() - # Note(ltomasbo): ML2/OVS changes the device_owner to trunk:subport - # when a port is attached to a trunk. However, that is not the case - # for other ML2 drivers, such as ODL. 
So we also need to look for - # compute:kuryr - - parent_ports, available_subports, subnets = self._get_trunks_info() - - if not available_subports: - return - - # FIXME(ltomasbo): Workaround for ports already detached from trunks - # whose status is ACTIVE - trunks_subports = [subport_id['port_id'] - for p_port in parent_ports.values() - for subport_id in p_port['subports']] - port_ids_to_delete = [p_id for p_id in available_subports - if p_id not in trunks_subports] - for port_id in port_ids_to_delete: - LOG.debug("Deleting port with wrong status: %s", port_id) - try: - os_net.delete_port(port_id) - except os_exc.SDKException: - LOG.exception('Error removing the port %s', port_id) - - for trunk_id, parent_port in parent_ports.items(): - host_addr = parent_port.get('ip') - if trunk_ips and host_addr not in trunk_ips: - continue - - for subport in parent_port.get('subports'): - kuryr_subport = available_subports.get(subport['port_id']) - if not kuryr_subport: - continue - - subnet_id = kuryr_subport.fixed_ips[0]['subnet_id'] - subnet = subnets[subnet_id] - net_obj = subnet[subnet_id] - pool_key = self._get_pool_key(host_addr, - kuryr_subport.project_id, - net_obj.id, None) - - if action == 'recover': - vif = ovu.neutron_to_osvif_vif_nested_vlan( - kuryr_subport, subnet, subport['segmentation_id']) - - self._existing_vifs[kuryr_subport.id] = vif - self._available_ports_pools[pool_key].setdefault( - tuple(sorted(kuryr_subport.security_group_ids)), - []).append(kuryr_subport.id) - - elif action == 'free': - try: - self._drv_vif._remove_subport(trunk_id, - kuryr_subport.id) - os_net.delete_port(kuryr_subport.id) - self._drv_vif._release_vlan_id( - subport['segmentation_id']) - del self._existing_vifs[kuryr_subport.id] - self._available_ports_pools[pool_key][ - tuple(sorted(kuryr_subport.security_group_ids - ))].remove(kuryr_subport.id) - except KeyError: - LOG.debug('Port %s is not in the ports list.', - kuryr_subport.id) - except (os_exc.SDKException, os_exc.HttpException): - LOG.warning('Error removing the subport %s', - kuryr_subport.id) - except ValueError: - LOG.debug('Port %s is not in the available ports ' - 'pool.', kuryr_subport.id) - - @lockutils.synchronized('return_to_pool_nested') - def populate_pool(self, trunk_ip, project_id, subnets, security_groups): - if not self._recovered_pools: - LOG.debug("Kuryr-controller not yet ready to populate pools.") - raise exceptions.ResourceNotReady(trunk_ip) - - pool_key = self._get_pool_key(trunk_ip, project_id, None, subnets) - lock = self._get_populate_pool_lock(pool_key) - - if lock.acquire(timeout=POPULATE_POOL_TIMEOUT): - try: - pools = self._available_ports_pools.get(pool_key) - if not pools: - # NOTE(ltomasbo): If the amount of nodes is large the - # repopulation actions may take too long. Using half of the - # batch to prevent the problem - num_ports = int(max(oslo_cfg.CONF.vif_pool - .ports_pool_batch/2, - oslo_cfg.CONF.vif_pool.ports_pool_min)) - self.force_populate_pool(trunk_ip, project_id, subnets, - security_groups, num_ports) - finally: - lock.release() - else: - LOG.debug("Kuryr-controller timed out waiting for it turn to " - "populate pool, retrying.") - raise exceptions.ResourceNotReady(trunk_ip) - - def force_populate_pool(self, trunk_ip, project_id, subnets, - security_groups, num_ports=None): - """Create a given amount of subports at a given trunk port. 
- - This function creates a given amount of subports and attaches them to - the specified trunk, adding them to the related subports pool - regardless of the amount of subports already available in the pool. - """ - if not num_ports: - num_ports = oslo_cfg.CONF.vif_pool.ports_pool_batch - vifs = self._drv_vif.request_vifs( - pod=[], - project_id=project_id, - subnets=subnets, - security_groups=security_groups, - num_ports=num_ports, - trunk_ip=trunk_ip, - semaphore=self._create_ports_semaphore) - - pool_key = self._get_pool_key(trunk_ip, project_id, None, subnets) - for vif in vifs: - self._existing_vifs[vif.id] = vif - self._available_ports_pools[pool_key].setdefault( - tuple(sorted(security_groups)), []).append(vif.id) - - def free_pool(self, trunk_ips=None): - """Removes subports from the pool and deletes neutron port resource. - - This function empties the pool of available subports and removes the - neutron port resources of the specified trunk port (or all of them if - no trunk is specified). - """ - self._remove_precreated_ports(trunk_ips) - - def delete_network_pools(self, net_id): - if not self._recovered_pools: - LOG.debug("Kuryr-controller not yet ready to delete network " - "pools.") - raise exceptions.ResourceNotReady(net_id) - - epool = eventlet.GreenPool(constants.LEFTOVER_RM_POOL_SIZE) - ports_to_remove = [] - - # NOTE(ltomasbo): Note the pods should already be deleted, but their - # associated ports may not have been recycled yet, therefore not being - # on the available_ports_pools dict. The next call forces it to be on - # that dict before cleaning it up - self._trigger_return_to_pool() - for pool_key, ports in list(self._available_ports_pools.items()): - if self._get_pool_key_net(pool_key) != net_id: - continue - trunk_id = self._get_trunk_id(pool_key) - ports_id = [p_id for sg_ports in ports.values() - for p_id in sg_ports] - try: - self._drv_vif._remove_subports(trunk_id, ports_id) - except os_exc.NotFoundException: - # We don't know which subport was already removed, but we'll - # attempt a manual detach on DELETE error, so just continue. - pass - except (os_exc.SDKException, os_exc.HttpException): - LOG.exception('Error removing subports from trunk: %s', - trunk_id) - raise exceptions.ResourceNotReady(net_id) - - for port_id in ports_id: - try: - self._drv_vif._release_vlan_id( - self._existing_vifs[port_id].vlan_id) - del self._existing_vifs[port_id] - except KeyError: - LOG.debug('Port %s is not in the ports list.', port_id) - ports_to_remove.append(port_id) - - del self._available_ports_pools[pool_key] - with self._lock: - try: - del self._populate_pool_lock[pool_key] - except KeyError: - pass - - # Parallelize Ports deletion. At this point the Ports - # should have been detatched from Trunk and if not operation - # will be retried - for result in epool.imap(c_utils.delete_neutron_port, ports_to_remove): - if result: - LOG.error('During Neutron port deletion an error occured: %s', - result) - raise exceptions.ResourceNotReady(net_id) - - -class MultiVIFPool(base.VIFPoolDriver): - """Manages pools with different VIF types. - - It manages hybrid deployments containing both Bare Metal and Nested - Kubernetes Pods. To do that it creates a pool per node with a different - pool driver depending on the vif driver that the node is using. - - It assumes a label pod_vif is added to each node to inform about the - driver set for that node. 
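# A sketch of the parallelized deletion pattern delete_network_pools()
# uses above: an eventlet GreenPool maps a deletion helper over the
# port IDs and any non-None result aborts so the operation can be
# retried. delete_port() below is a stand-in for the real Neutron call.
import eventlet

def delete_port(port_id):
    # Would call os_net.delete_port(port_id) and return an error
    # message on failure, None on success.
    return None

epool = eventlet.GreenPool(10)
for result in epool.imap(delete_port, ['port-1', 'port-2']):
    if result:
        raise RuntimeError('port deletion failed: %s' % result)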
If no label is added, it assumes the default pod - vif: the one specified at kuryr.conf - """ - - def set_vif_driver(self): - self._vif_drvs = {} - vif_pool_mapping = self._get_vif_pool_mapping() - - if not vif_pool_mapping: - pod_vif = oslo_cfg.CONF.kubernetes.pod_vif_driver - drv_vif = base.PodVIFDriver.get_instance() - drv_pool = base.VIFPoolDriver.get_instance() - drv_pool.set_vif_driver(drv_vif) - self._vif_drvs[pod_vif] = drv_pool - return - for pod_driver, pool_driver in vif_pool_mapping.items(): - if not utils.check_suitable_multi_pool_driver_opt(pool_driver, - pod_driver): - LOG.error("The pool(%s) and pod(%s) driver selected are not " - "compatible.", pool_driver, pod_driver) - raise exceptions.MultiPodDriverPoolConfigurationNotSupported() - drv_vif = base.PodVIFDriver.get_instance( - specific_driver=pod_driver) - drv_pool = base.VIFPoolDriver.get_instance( - specific_driver=pool_driver, scope='for:{}'.format(pod_driver)) - drv_pool.set_vif_driver(drv_vif) - self._vif_drvs[pod_driver] = drv_pool - - def request_vif(self, pod, project_id, subnets, security_groups): - pod_info = "%s/%s" % (pod['metadata']['namespace'], - pod['metadata']['name']) - try: - pod_vif_type = self._get_pod_vif_type(pod) - except KeyError: - # NOTE(maysams): No nodeName set. Event should be skipped - LOG.warning("Pod %s has no .spec.nodeName set. This is unexpected " - "as it's supposed to be scheduled. Ignoring event.", - pod_info) - return None - return self._vif_drvs[pod_vif_type].request_vif( - pod, project_id, subnets, security_groups) - - def release_vif(self, pod, vif, *argv): - vif_drv_alias = self._get_vif_drv_alias(vif) - self._vif_drvs[vif_drv_alias].release_vif(pod, vif, *argv) - - def activate_vif(self, vif, **kwargs): - vif_drv_alias = self._get_vif_drv_alias(vif) - self._vif_drvs[vif_drv_alias].activate_vif(vif, **kwargs) - - def update_vif_sgs(self, pod, sgs): - pod_vif_type = self._get_pod_vif_type(pod) - self._vif_drvs[pod_vif_type].update_vif_sgs(pod, sgs) - - def remove_sg_from_pools(self, sg_id, net_id): - for vif_drv in self._vif_drvs.values(): - if str(vif_drv) == 'NoopVIFPool': - continue - vif_drv.remove_sg_from_pools(sg_id, net_id) - - def delete_network_pools(self, net_id): - for vif_drv in self._vif_drvs.values(): - if str(vif_drv) == 'NoopVIFPool': - continue - vif_drv.delete_network_pools(net_id) - - def sync_pools(self): - for vif_drv in self._vif_drvs.values(): - vif_drv.sync_pools() - - def _get_pod_vif_type(self, pod): - node_name = pod['spec']['nodeName'] - return self._get_node_vif_driver(node_name) - - @MEMOIZE - def _get_node_vif_driver(self, node_name): - kubernetes = clients.get_kubernetes_client() - node_info = kubernetes.get( - constants.K8S_API_BASE + '/nodes/' + node_name) - - labels = node_info['metadata'].get('labels', None) - if labels: - pod_vif = labels.get('pod_vif', - oslo_cfg.CONF.kubernetes.pod_vif_driver) - return pod_vif - return oslo_cfg.CONF.kubernetes.pod_vif_driver - - def _get_vif_drv_alias(self, vif): - vif_type_name = type(vif).__name__ - return VIF_TYPE_TO_DRIVER_MAPPING[vif_type_name] - - def _get_vif_pool_mapping(self): - vif_pool_mapping = oslo_cfg.CONF.vif_pool.vif_pool_mapping - - if not vif_pool_mapping: - pools_vif_drivers = oslo_cfg.CONF.vif_pool.pools_vif_drivers - - if pools_vif_drivers: - msg = ("Config option vif_pool.pools_vif_drivers is " - "deprecated in favour of vif_pool.vif_pool_mapping, " - "and will be removed in a future release") - versionutils.report_deprecated_feature(LOG, msg) - - for pool_driver, pod_driver in 
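# A standalone sketch of the per-node dispatch described above: read
# the 'pod_vif' label from the node and fall back to the configured
# default driver. The node dict shape follows the Kubernetes API; the
# default value is illustrative.
DEFAULT_POD_VIF = 'neutron-vif'  # stands in for kubernetes.pod_vif_driver

def get_node_vif_driver(node):
    labels = node['metadata'].get('labels') or {}
    return labels.get('pod_vif', DEFAULT_POD_VIF)

node = {'metadata': {'labels': {'pod_vif': 'nested-vlan'}}}
assert get_node_vif_driver(node) == 'nested-vlan'
assert get_node_vif_driver({'metadata': {}}) == DEFAULT_POD_VIF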
pools_vif_drivers.items(): - vif_pool_mapping[pod_driver] = pool_driver - - return vif_pool_mapping - - def populate_pool(self, node_ip, project_id, subnets, sg_id): - for vif_drv in self._vif_drvs.values(): - if str(vif_drv) == 'NestedVIFPool': - vif_drv.populate_pool(node_ip, project_id, subnets, sg_id) diff --git a/kuryr_kubernetes/controller/handlers/__init__.py b/kuryr_kubernetes/controller/handlers/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/kuryr_kubernetes/controller/handlers/kuryrnetwork.py b/kuryr_kubernetes/controller/handlers/kuryrnetwork.py deleted file mode 100644 index d55884c9e..000000000 --- a/kuryr_kubernetes/controller/handlers/kuryrnetwork.py +++ /dev/null @@ -1,213 +0,0 @@ -# Copyright 2020 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from openstack import exceptions as os_exc -from oslo_config import cfg as oslo_cfg -from oslo_log import log as logging - -from kuryr_kubernetes import clients -from kuryr_kubernetes import constants -from kuryr_kubernetes.controller.drivers import base as drivers -from kuryr_kubernetes.controller.drivers import utils as driver_utils -from kuryr_kubernetes import exceptions as k_exc -from kuryr_kubernetes.handlers import k8s_base -from kuryr_kubernetes import utils - -LOG = logging.getLogger(__name__) - - -class KuryrNetworkHandler(k8s_base.ResourceEventHandler): - """Controller side of KuryrNetwork process for Kubernetes pods. - - `KuryrNetworkHandler` runs on the Kuryr-Kubernetes controller and is - responsible for creating the OpenStack resources associated to the - newly created namespaces, and update the KuryrNetwork CRD status with - them. 
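# A sketch of the deprecation shim above: the old pools_vif_drivers
# option mapped pool driver -> pod driver, while vif_pool_mapping is
# the inverse, so the loop flips keys and values. Driver names are
# illustrative.
pools_vif_drivers = {'nested': 'nested-vlan'}   # deprecated shape
vif_pool_mapping = {}
for pool_driver, pod_driver in pools_vif_drivers.items():
    vif_pool_mapping[pod_driver] = pool_driver

assert vif_pool_mapping == {'nested-vlan': 'nested'}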
- """ - OBJECT_KIND = constants.K8S_OBJ_KURYRNETWORK - OBJECT_WATCH_PATH = constants.K8S_API_CRD_KURYRNETWORKS - - def __init__(self): - super(KuryrNetworkHandler, self).__init__() - self._drv_project = drivers.NamespaceProjectDriver.get_instance() - self._drv_subnets = drivers.PodSubnetsDriver.get_instance() - self._drv_sg = drivers.PodSecurityGroupsDriver.get_instance() - self._drv_vif_pool = drivers.VIFPoolDriver.get_instance( - specific_driver='multi_pool') - self._drv_vif_pool.set_vif_driver() - if driver_utils.is_network_policy_enabled(): - self._drv_lbaas = drivers.LBaaSDriver.get_instance() - self._drv_svc_sg = ( - drivers.ServiceSecurityGroupsDriver.get_instance()) - self.k8s = clients.get_kubernetes_client() - - def on_present(self, kuryrnet_crd, *args, **kwargs): - ns_name = kuryrnet_crd['spec']['nsName'] - project_id = kuryrnet_crd['spec']['projectId'] - kns_status = kuryrnet_crd.get('status', {}) - namespace = driver_utils.get_namespace(ns_name) - - crd_creation = False - net_id = kns_status.get('netId') - if not net_id: - try: - net_id = self._drv_subnets.create_network(namespace, - project_id) - except os_exc.SDKException as ex: - self.k8s.add_event(kuryrnet_crd, 'CreateNetworkFailed', - f'Error during creating Neutron network: ' - f'{ex.details}', 'Warning') - raise - status = {'netId': net_id} - self._patch_kuryrnetwork_crd(kuryrnet_crd, status) - self.k8s.add_event(kuryrnet_crd, 'CreateNetworkSucceed', - f'Neutron network {net_id} for namespace') - crd_creation = True - subnet_id = kns_status.get('subnetId') - if not subnet_id or crd_creation: - try: - subnet_id, subnet_cidr = self._drv_subnets.create_subnet( - namespace, project_id, net_id) - except os_exc.ConflictException as ex: - self.k8s.add_event(kuryrnet_crd, 'CreateSubnetFailed', - f'Error during creating Neutron subnet ' - f'for network {net_id}: {ex.details}', - 'Warning') - raise - status = {'subnetId': subnet_id, 'subnetCIDR': subnet_cidr} - self._patch_kuryrnetwork_crd(kuryrnet_crd, status) - self.k8s.add_event(kuryrnet_crd, 'CreateSubnetSucceed', - f'Neutron subnet {subnet_id} for network ' - f'{net_id}') - crd_creation = True - if not kns_status.get('routerId') or crd_creation: - try: - router_id = self._drv_subnets.add_subnet_to_router(subnet_id) - except os_exc.SDKException as ex: - self.k8s.add_event(kuryrnet_crd, 'AddingSubnetToRouterFailed', - f'Error adding Neutron subnet {subnet_id} ' - f'to router: {ex.details}', - 'Warning') - raise - status = {'routerId': router_id, 'populated': False} - self._patch_kuryrnetwork_crd(kuryrnet_crd, status) - self.k8s.add_event(kuryrnet_crd, 'AddingSubnetToRouterSucceed', - f'Neutron subnet {subnet_id} added to router ' - f'{router_id}') - crd_creation = True - - # check labels to create sg rules - ns_labels = kns_status.get('nsLabels', {}) - if (crd_creation or - ns_labels != kuryrnet_crd['spec']['nsLabels']): - # update SG and svc SGs - crd_selectors = self._drv_sg.update_namespace_sg_rules(namespace) - if (driver_utils.is_network_policy_enabled() and crd_selectors and - oslo_cfg.CONF.octavia_defaults.enforce_sg_rules): - services = driver_utils.get_services() - self._update_services(services, crd_selectors, project_id) - # update status - status = {'nsLabels': kuryrnet_crd['spec']['nsLabels']} - self._patch_kuryrnetwork_crd(kuryrnet_crd, status, labels=True) - self.k8s.add_event(kuryrnet_crd, 'SGUpdateTriggered', - 'Neutron security groups update has been ' - 'triggered') - - def on_finalize(self, kuryrnet_crd, *args, **kwargs): - LOG.debug("Deleting kuryrnetwork 
CRD resources: %s", kuryrnet_crd) - - net_id = kuryrnet_crd.get('status', {}).get('netId') - if net_id: - self._drv_vif_pool.delete_network_pools(net_id) - try: - self._drv_subnets.delete_namespace_subnet(kuryrnet_crd) - except k_exc.ResourceNotReady: - LOG.debug("Subnet is not ready to be removed.") - # TODO(ltomasbo): Once KuryrPort CRDs is supported, we should - # execute a delete network ports method here to remove the - # ports associated to the namespace/subnet, ensuring next - # retry will be successful - raise - else: - # NOTE(gryf): It may happen, that even if KuryrNetworkCRD was not - # updated (when controller crash in between), but the network and - # possibly subnet is there, so it needs to be searched manually. - ns = self.k8s.get(f'{constants.K8S_API_NAMESPACES}/' - f'{kuryrnet_crd["spec"]["nsName"]}') - ns_name = ns['metadata']['name'] - ns_uid = ns['metadata']['uid'] - net_name = driver_utils.get_resource_name(ns_name) - old_net_name = driver_utils.get_resource_name(ns_name, - prefix='ns/', - suffix='-net') - # TODO(gryf): remove old_net_name support in next release. - os_net = clients.get_network_client() - networks = os_net.networks(name=(net_name, old_net_name)) - for net in networks: - if ns_uid == net.description: - LOG.warning('Found Neutron network associated with ' - 'namespace `%s`, while it is not registered ' - 'on KuryrNetwork CRD. Trying to remove.', - ns_name) - self._drv_vif_pool.delete_network_pools(net.id) - - try: - os_net.delete_network(net) - except os_exc.ConflictException: - LOG.warning("One or more ports in use on the network " - "%s. Retrying.", net.id) - raise k_exc.ResourceNotReady(net.id) - - namespace = { - 'metadata': {'name': kuryrnet_crd['spec']['nsName']}} - crd_selectors = self._drv_sg.delete_namespace_sg_rules(namespace) - - if (driver_utils.is_network_policy_enabled() and crd_selectors and - oslo_cfg.CONF.octavia_defaults.enforce_sg_rules): - project_id = kuryrnet_crd['spec']['projectId'] - services = driver_utils.get_services() - self._update_services(services, crd_selectors, project_id) - - LOG.debug('Removing finalizer for KuryrNetwork CRD %s', kuryrnet_crd) - try: - self.k8s.remove_finalizer(kuryrnet_crd, - constants.KURYRNETWORK_FINALIZER) - except k_exc.K8sClientException: - LOG.exception('Error removing KuryrNetwork CRD finalizer for %s', - kuryrnet_crd) - raise - - def _update_services(self, services, crd_selectors, project_id): - for service in services.get('items'): - if not driver_utils.service_matches_affected_pods( - service, crd_selectors): - continue - sgs = self._drv_svc_sg.get_security_groups(service, - project_id) - self._drv_lbaas.update_lbaas_sg(service, sgs) - - def _patch_kuryrnetwork_crd(self, kuryrnet_crd, status, labels=False): - LOG.debug('Patching KuryrNetwork CRD %s', kuryrnet_crd) - try: - if labels: - self.k8s.patch_crd('status', - utils.get_res_link(kuryrnet_crd), status) - else: - self.k8s.patch('status', utils.get_res_link(kuryrnet_crd), - status) - except k_exc.K8sResourceNotFound: - LOG.debug('KuryrNetwork CRD not found %s', kuryrnet_crd) - except k_exc.K8sClientException: - LOG.exception('Error updating kuryrNetwork CRD %s', kuryrnet_crd) - raise diff --git a/kuryr_kubernetes/controller/handlers/kuryrnetwork_population.py b/kuryr_kubernetes/controller/handlers/kuryrnetwork_population.py deleted file mode 100644 index 248452b55..000000000 --- a/kuryr_kubernetes/controller/handlers/kuryrnetwork_population.py +++ /dev/null @@ -1,97 +0,0 @@ -# Copyright 2020 Red Hat, Inc. 
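# A simplified sketch of the orphaned-network lookup in on_finalize()
# above: candidate networks are fetched by name (current and legacy
# naming) and only those whose Neutron description records the
# namespace UID are treated as Kuryr-owned and removed. The objects are
# bare stand-ins for openstacksdk network objects.
class Net:
    def __init__(self, id_, description):
        self.id = id_
        self.description = description

def find_orphaned(networks, ns_uid):
    return [net for net in networks if net.description == ns_uid]

nets = [Net('net-1', 'uid-a'), Net('net-2', 'uid-b')]
assert [n.id for n in find_orphaned(nets, 'uid-a')] == ['net-1']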
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from oslo_log import log as logging - -from kuryr_kubernetes import clients -from kuryr_kubernetes import constants -from kuryr_kubernetes.controller.drivers import base as drivers -from kuryr_kubernetes import exceptions -from kuryr_kubernetes.handlers import k8s_base -from kuryr_kubernetes import utils - -LOG = logging.getLogger(__name__) - - -class KuryrNetworkPopulationHandler(k8s_base.ResourceEventHandler): - """Controller side of KuryrNetwork process for Kubernetes pods. - - `KuryrNetworkPopulationHandler` runs on the Kuryr-Kubernetes controller - and is responsible for populating pools for newly created namespaces. - """ - OBJECT_KIND = constants.K8S_OBJ_KURYRNETWORK - OBJECT_WATCH_PATH = constants.K8S_API_CRD_KURYRNETWORKS - - def __init__(self): - super(KuryrNetworkPopulationHandler, self).__init__() - self._drv_subnets = drivers.PodSubnetsDriver.get_instance() - self._drv_vif_pool = drivers.VIFPoolDriver.get_instance( - specific_driver='multi_pool') - self._drv_vif_pool.set_vif_driver() - self._drv_nodes_subnets = drivers.NodesSubnetsDriver.get_instance() - - def on_present(self, kuryrnet_crd, *args, **kwargs): - subnet_id = kuryrnet_crd.get('status', {}).get('subnetId') - if not subnet_id: - LOG.debug("No Subnet present for KuryrNetwork %s", - kuryrnet_crd['metadata']['name']) - return - - if kuryrnet_crd['status'].get('populated'): - LOG.debug("Subnet %s already populated for Namespace %s", - subnet_id, kuryrnet_crd['metadata']['name']) - return - - namespace = kuryrnet_crd['spec'].get('nsName') - project_id = kuryrnet_crd['spec'].get('projectId') - # NOTE(ltomasbo): using namespace name instead of object as it is not - # required - subnets = self._drv_subnets.get_namespace_subnet(namespace, subnet_id) - - node_subnets = self._drv_nodes_subnets.get_nodes_subnets( - raise_on_empty=True) - nodes = utils.get_nodes_ips(node_subnets) - # NOTE(ltomasbo): Patching the kuryrnet_crd here instead of after - # populate_pool method to ensure initial repopulation is not happening - # twice upon unexpected problems, such as neutron failing to - # transition the ports to ACTIVE or being too slow replying. - # In such case, even though the repopulation actions got triggered, - # the pools will not get the ports loaded (as they are not ACTIVE) - # and new population actions may be triggered if the controller was - # restarted before performing the populated=true patching. - self._patch_kuryrnetwork_crd(kuryrnet_crd, populated=True) - # TODO(ltomasbo): Skip the master node where pods are not usually - # allocated. 
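# A sketch of the ordering chosen above: the CRD is marked populated
# *before* the pools are filled and the flag is rolled back on failure,
# so a crash between the two steps cannot trigger a duplicate
# repopulation. Names are stand-ins.
def populate(crd_status, populate_pool):
    crd_status['populated'] = True
    try:
        populate_pool()
    except Exception:
        crd_status['populated'] = False
        raise

def fail():
    raise RuntimeError('ports did not transition to ACTIVE in time')

status = {}
try:
    populate(status, fail)
except RuntimeError:
    pass
assert status == {'populated': False}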
- for node_ip in nodes: - LOG.debug("Populating subnet pool %s at node %s", subnet_id, - node_ip) - try: - self._drv_vif_pool.populate_pool(node_ip, project_id, subnets, - []) - except exceptions.ResourceNotReady: - # Ensure the repopulation is retriggered if the system was not - # yet ready to perform the repopulation actions - self._patch_kuryrnetwork_crd(kuryrnet_crd, populated=False) - raise - - def _patch_kuryrnetwork_crd(self, kns_crd, populated=True): - kubernetes = clients.get_kubernetes_client() - crd_name = kns_crd['metadata']['name'] - LOG.debug('Patching KuryrNetwork CRD %s' % crd_name) - try: - kubernetes.patch_crd('status', utils.get_res_link(kns_crd), - {'populated': populated}) - except exceptions.K8sClientException: - LOG.exception('Error updating KuryrNetwork CRD %s', crd_name) - raise diff --git a/kuryr_kubernetes/controller/handlers/kuryrnetworkpolicy.py b/kuryr_kubernetes/controller/handlers/kuryrnetworkpolicy.py deleted file mode 100644 index ce7ae274d..000000000 --- a/kuryr_kubernetes/controller/handlers/kuryrnetworkpolicy.py +++ /dev/null @@ -1,319 +0,0 @@ -# Copyright 2019 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from openstack import exceptions as os_exc -from oslo_config import cfg -from oslo_log import log as logging - -from kuryr_kubernetes import clients -from kuryr_kubernetes import constants -from kuryr_kubernetes.controller.drivers import base as drivers -from kuryr_kubernetes.controller.drivers import utils as driver_utils -from kuryr_kubernetes import exceptions -from kuryr_kubernetes.handlers import k8s_base -from kuryr_kubernetes import utils - -LOG = logging.getLogger(__name__) -CONF = cfg.CONF - - -class KuryrNetworkPolicyHandler(k8s_base.ResourceEventHandler): - """Controller side of KuryrNetworkPolicy process for Kubernetes pods. - - `KuryrNetworkPolicyHandler` runs on the kuryr-controller and is - responsible for creating and deleting SG and SG rules for `NetworkPolicy`. - The `KuryrNetworkPolicy` objects are created by `NetworkPolicyHandler`. 
- """ - OBJECT_KIND = constants.K8S_OBJ_KURYRNETWORKPOLICY - OBJECT_WATCH_PATH = constants.K8S_API_CRD_KURYRNETWORKPOLICIES - - def __init__(self): - super(KuryrNetworkPolicyHandler, self).__init__() - self.os_net = clients.get_network_client() - self.k8s = clients.get_kubernetes_client() - self._drv_project = drivers.NetworkPolicyProjectDriver.get_instance() - self._drv_policy = drivers.NetworkPolicyDriver.get_instance() - self._drv_vif_pool = drivers.VIFPoolDriver.get_instance( - specific_driver='multi_pool') - self._drv_vif_pool.set_vif_driver() - self._drv_pod_sg = drivers.PodSecurityGroupsDriver.get_instance() - self._drv_svc_sg = drivers.ServiceSecurityGroupsDriver.get_instance() - self._drv_lbaas = drivers.LBaaSDriver.get_instance() - - def _patch_kuryrnetworkpolicy_crd(self, knp, field, data, - action='replace'): - name = knp['metadata']['name'] - LOG.debug('Patching KuryrNetwork CRD %s', name) - try: - status = self.k8s.patch_crd(field, utils.get_res_link(knp), - data, action=action) - except exceptions.K8sResourceNotFound: - LOG.debug('KuryrNetworkPolicy CRD not found %s', name) - return None - except exceptions.K8sClientException as exc: - np = utils.get_referenced_object(knp, 'NetworkPolicy') - self.k8s.add_event(np, 'FailedToPatchKuryrNetworkPolicy', - f'Failed to update KuryrNetworkPolicy CRD: ' - f'{exc}', 'Warning') - LOG.exception('Error updating KuryrNetworkPolicy CRD %s', name) - raise - - knp['status'] = status - return knp - - def _get_networkpolicy(self, link): - return self.k8s.get(link) - - def _compare_sgs(self, a, b): - checked_props = ('direction', 'ethertype', 'port_range_max', - 'port_range_min', 'protocol', 'remote_ip_prefix') - - for k in checked_props: - if a.get(k) != b.get(k): - return False - return True - - def _find_sgs(self, a, rules): - for r in rules: - if self._compare_sgs(r, a): - return True - - return False - - def on_present(self, knp, *args, **kwargs): - uniq_name = utils.get_res_unique_name(knp) - LOG.debug('on_present() for NP %s', uniq_name) - project_id = self._drv_project.get_project(knp) - if not knp['status'].get('securityGroupId'): - LOG.debug('Creating SG for NP %s', uniq_name) - # TODO(dulek): Do this right, why do we have a project driver per - # resource?! This one expects policy, not knp, but it - # ignores it anyway! - sg_id = self._drv_policy.create_security_group(knp, project_id) - knp = self._patch_kuryrnetworkpolicy_crd( - knp, 'status', {'securityGroupId': sg_id}) - LOG.debug('Created SG %s for NP %s', sg_id, uniq_name) - else: - # TODO(dulek): Check if it really exists, recreate if not. - sg_id = knp['status'].get('securityGroupId') - - # First update SG rules as we want to apply updated ones - current = knp['status']['securityGroupRules'] - required = knp['spec']['ingressSgRules'] + knp['spec']['egressSgRules'] - required = [r['sgRule'] for r in required] - - # FIXME(dulek): This *might* be prone to race conditions if failure - # happens between SG rule is created/deleted and status - # is annotated. We don't however need to revert on failed - # K8s operations - creation, deletion of SG rules and - # attaching or detaching SG from ports are idempotent - # so we can repeat them. What worries me is losing track - # of an update due to restart. The only way to do it - # would be to periodically check if what's in `status` - # is the reality in OpenStack API. That should be just - # two Neutron API calls + possible resync. 
- to_add = [] - to_remove = [] - for r in required: - if not self._find_sgs(r, current): - to_add.append(r) - - for i, c in enumerate(current): - if not self._find_sgs(c, required): - to_remove.append((i, c['id'])) - - LOG.debug('SGs to add for NP %s: %s', uniq_name, to_add) - - for sg_rule in to_add: - LOG.debug('Adding SG rule %s for NP %s', sg_rule, uniq_name) - sg_rule['security_group_id'] = sg_id - sgr_id = driver_utils.create_security_group_rule(sg_rule, knp) - sg_rule['id'] = sgr_id - knp = self._patch_kuryrnetworkpolicy_crd( - knp, 'status', {'securityGroupRules/-': sg_rule}, 'add') - - # We need to remove starting from the last one in order to maintain - # indexes. Please note this will start to fail miserably if we start - # to change status from multiple places. - to_remove.reverse() - - LOG.debug('SGs to remove for NP %s: %s', uniq_name, - [x[1] for x in to_remove]) - - for i, sg_rule_id in to_remove: - LOG.debug('Removing SG rule %s as it is no longer part of NP %s', - sg_rule_id, uniq_name) - driver_utils.delete_security_group_rule(sg_rule_id, knp) - knp = self._patch_kuryrnetworkpolicy_crd( - knp, 'status/securityGroupRules', i, 'remove') - - pods_to_update = [] - - previous_sel = knp['status'].get('podSelector', None) - current_sel = knp['spec']['podSelector'] - if previous_sel is None: - # Fresh NetworkPolicy that was never applied. - pods_to_update.extend(self._drv_policy.namespaced_pods(knp)) - elif previous_sel != current_sel or previous_sel == {}: - pods_to_update.extend( - self._drv_policy.affected_pods(knp, previous_sel)) - - matched_pods = self._drv_policy.affected_pods(knp) - pods_to_update.extend(matched_pods) - - for pod in pods_to_update: - if (utils.is_host_network(pod) or - not driver_utils.is_pod_scheduled(pod)): - continue - pod_sgs = self._drv_pod_sg.get_security_groups(pod, project_id) - try: - self._drv_vif_pool.update_vif_sgs(pod, pod_sgs) - except os_exc.NotFoundException: - # Pod got deleted in the meanwhile, should be safe to ignore. - pass - - # FIXME(dulek): We should not need this one day. - policy = self._get_networkpolicy(knp['metadata']['annotations'] - ['networkPolicyLink']) - if (pods_to_update and CONF.octavia_defaults.enforce_sg_rules and - not self._is_egress_only_policy(policy)): - # NOTE(ltomasbo): only need to change services if the pods that - # they point to are updated - services = driver_utils.get_services(knp['metadata']['namespace']) - for service in services.get('items', []): - # TODO(ltomasbo): Skip other services that are not affected - # by the policy - # NOTE(maysams): Network Policy is not enforced on Services - # without selectors for Amphora Octavia provider. - # NOTE(dulek): Skip services being deleted. - if (not service['spec'].get('selector') or - service['metadata'].get('deletionTimestamp') or not - self._is_service_affected(service, pods_to_update)): - continue - sgs = self._drv_svc_sg.get_security_groups(service, project_id) - try: - self._drv_lbaas.update_lbaas_sg(service, sgs) - except exceptions.ResourceNotReady: - # We can ignore LB that's being created - its SGs will get - # handled when members will be getting created. 
- pass - - self._patch_kuryrnetworkpolicy_crd(knp, 'status', - {'podSelector': current_sel}) - - def _is_service_affected(self, service, affected_pods): - svc_namespace = service['metadata']['namespace'] - svc_selector = service['spec'].get('selector') - svc_pods = driver_utils.get_pods({'selector': svc_selector}, - svc_namespace).get('items') - return any(pod in svc_pods for pod in affected_pods) - - def _is_egress_only_policy(self, policy): - policy_types = policy['spec'].get('policyTypes', []) - return (policy_types == ['Egress'] or - (policy['spec'].get('egress') and - not policy['spec'].get('ingress'))) - - def _get_policy_net_id(self, knp): - policy_ns = knp['metadata']['namespace'] - - try: - path = (f'{constants.K8S_API_CRD_NAMESPACES}/{policy_ns}/' - f'kuryrnetworks/{policy_ns}') - net_crd = self.k8s.get(path) - except exceptions.K8sClientException: - LOG.exception("Kubernetes Client Exception.") - raise - return net_crd['status']['netId'] - - def on_finalize(self, knp, *args, **kwargs): - LOG.debug("Finalizing KuryrNetworkPolicy %s", knp) - project_id = self._drv_project.get_project(knp) - pods_to_update = self._drv_policy.affected_pods(knp) - crd_sg = knp['status'].get('securityGroupId') - try: - policy = self._get_networkpolicy(knp['metadata']['annotations'] - ['networkPolicyLink']) - except exceptions.K8sResourceNotFound: - # NP is already gone, let's just try to clean up. - policy = None - - if crd_sg: - for pod in pods_to_update: - if (utils.is_host_network(pod) - or not driver_utils.is_pod_scheduled(pod)): - continue - pod_sgs = self._drv_pod_sg.get_security_groups(pod, project_id) - if crd_sg in pod_sgs: - pod_sgs.remove(crd_sg) - if not pod_sgs: - pod_sgs = CONF.neutron_defaults.pod_security_groups - if not pod_sgs: - raise cfg.RequiredOptError( - 'pod_security_groups', - cfg.OptGroup('neutron_defaults')) - try: - self._drv_vif_pool.update_vif_sgs(pod, pod_sgs) - except os_exc.NotFoundException: - # Pod got deleted in the meanwhile, safe to ignore. - pass - - # ensure ports at the pool don't have the NP sg associated - try: - net_id = self._get_policy_net_id(knp) - self._drv_vif_pool.remove_sg_from_pools(crd_sg, net_id) - except exceptions.K8sResourceNotFound: - # Probably the network got removed already, we can ignore it. - pass - - try: - self._drv_policy.delete_np_sg(crd_sg) - except os_exc.SDKException as exc: - np = utils.get_referenced_object(knp, 'NetworkPolicy') - if np: - self.k8s.add_event(np, 'FailedToRemoveSecurityGroup', - f'Deleting security group for ' - f'corresponding Network Policy has ' - f'failed: {exc}', 'Warning') - raise - - if (CONF.octavia_defaults.enforce_sg_rules and policy and - not self._is_egress_only_policy(policy)): - services = driver_utils.get_services( - knp['metadata']['namespace']) - for svc in services.get('items'): - if (not svc['spec'].get('selector') or not - self._is_service_affected(svc, pods_to_update)): - continue - - sgs = self._drv_svc_sg.get_security_groups(svc, project_id) - - if crd_sg in sgs: - # Remove our crd_sg out of service groups since we - # don't have it anymore - sgs.remove(crd_sg) - - try: - self._drv_lbaas.update_lbaas_sg(svc, sgs) - except exceptions.ResourceNotReady: - # We can ignore LB that's being created - its SGs will - # get handled when members will be getting created. 
- pass - - LOG.debug("Removing finalizers from KuryrNetworkPolicy and " - "NetworkPolicy.") - if policy: - self.k8s.remove_finalizer(policy, - constants.NETWORKPOLICY_FINALIZER) - self.k8s.remove_finalizer(knp, constants.NETWORKPOLICY_FINALIZER) diff --git a/kuryr_kubernetes/controller/handlers/kuryrport.py b/kuryr_kubernetes/controller/handlers/kuryrport.py deleted file mode 100644 index a6275c089..000000000 --- a/kuryr_kubernetes/controller/handlers/kuryrport.py +++ /dev/null @@ -1,403 +0,0 @@ -# Copyright 2020 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import datetime - -from openstack import exceptions as os_exc -from os_vif import objects -from oslo_config import cfg as oslo_cfg -from oslo_log import log as logging - -from kuryr_kubernetes import clients -from kuryr_kubernetes import constants -from kuryr_kubernetes.controller.drivers import base as drivers -from kuryr_kubernetes.controller.drivers import nested_vlan_vif -from kuryr_kubernetes.controller.drivers import utils as driver_utils -from kuryr_kubernetes.controller.managers import prometheus_exporter as exp -from kuryr_kubernetes import exceptions as k_exc -from kuryr_kubernetes.handlers import k8s_base -from kuryr_kubernetes import utils - - -LOG = logging.getLogger(__name__) -KURYRPORT_URI = constants.K8S_API_CRD_NAMESPACES + '/{ns}/kuryrports/{crd}' -ACTIVE_TIMEOUT = nested_vlan_vif.ACTIVE_TIMEOUT - - -class KuryrPortHandler(k8s_base.ResourceEventHandler): - """Controller side of KuryrPort process for Kubernetes pods. - - `KuryrPortHandler` runs on the Kuryr-Kubernetes controller and is - responsible for creating/removing the OpenStack resources associated to - the newly created pods, namely ports and update the KuryrPort CRD data. - """ - OBJECT_KIND = constants.K8S_OBJ_KURYRPORT - OBJECT_WATCH_PATH = constants.K8S_API_CRD_KURYRPORTS - - def __init__(self): - super(KuryrPortHandler, self).__init__() - self._drv_project = drivers.PodProjectDriver.get_instance() - self._drv_subnets = drivers.PodSubnetsDriver.get_instance() - self._drv_sg = drivers.PodSecurityGroupsDriver.get_instance() - # REVISIT(ltomasbo): The VIF Handler should not be aware of the pool - # directly. Due to the lack of a mechanism to load and set the - # VIFHandler driver, for now it is aware of the pool driver, but this - # will be reverted as soon as a mechanism is in place. - self._drv_vif_pool = drivers.VIFPoolDriver.get_instance( - specific_driver='multi_pool') - self._drv_vif_pool.set_vif_driver() - self._drv_multi_vif = drivers.MultiVIFDriver.get_enabled_drivers() - if driver_utils.is_network_policy_enabled(): - self._drv_lbaas = drivers.LBaaSDriver.get_instance() - self._drv_svc_sg = (drivers.ServiceSecurityGroupsDriver - .get_instance()) - self.k8s = clients.get_kubernetes_client() - - def on_present(self, kuryrport_crd, *args, **kwargs): - if not kuryrport_crd['status']['vifs']: - # Get vifs - if not self.get_vifs(kuryrport_crd): - # Ignore this event, according to one of the cases logged in - # get_vifs method. 
- return - - retry_info = kwargs.get('retry_info') - - vifs = {ifname: {'default': data['default'], - 'vif': objects.base.VersionedObject - .obj_from_primitive(data['vif'])} - for ifname, data in kuryrport_crd['status']['vifs'].items()} - - if all([v['vif'].active for v in vifs.values()]): - return - - changed = False - pod = self._get_pod(kuryrport_crd) - - try: - for ifname, data in vifs.items(): - if not data['vif'].active: - try: - self._drv_vif_pool.activate_vif(data['vif'], pod=pod, - retry_info=retry_info) - changed = True - except k_exc.ResourceNotReady: - if retry_info and retry_info.get('elapsed', - 0) > ACTIVE_TIMEOUT: - self.k8s.add_event(pod, 'ActivatePortFailed', - 'Activating Neutron port has ' - 'timed out', 'Warning') - raise - except os_exc.ResourceNotFound: - self.k8s.add_event(pod, 'ActivatePortFailed', - 'Activating Neutron port has ' - 'failed, possibly deleted', - 'Warning') - LOG.debug("Port not found, possibly already deleted. " - "No need to activate it") - finally: - if changed: - project_id = self._drv_project.get_project(pod) - - try: - kp_name = kuryrport_crd['metadata']['name'] - self._update_kuryrport_crd(kuryrport_crd, vifs) - self.k8s.add_event(pod, 'KuryrPortUpdatedWithActiveVIFs', - f'KuryrPort CRD: {kp_name} updated with' - f' active VIFs') - except k_exc.K8sResourceNotFound as ex: - LOG.exception("Failed to update KuryrPort CRD: %s", ex) - security_groups = self._drv_sg.get_security_groups( - pod, project_id) - for ifname, data in vifs.items(): - self._drv_vif_pool.release_vif(pod, data['vif'], - project_id, - security_groups) - self.k8s.add_event(pod, 'UpdateKuryrPortCRDFailed', - f'Marking ports as ACTIVE in the ' - f'KuryrPort failed: {ex}', 'Warning') - except k_exc.K8sClientException: - raise k_exc.ResourceNotReady(pod['metadata']['name']) - try: - self._record_pod_creation_metric(pod) - except Exception: - LOG.debug("Failed to record metric for pod %s", - pod['metadata']['name']) - - if driver_utils.is_network_policy_enabled(): - crd_pod_selectors = self._drv_sg.create_sg_rules(pod) - if oslo_cfg.CONF.octavia_defaults.enforce_sg_rules: - services = driver_utils.get_services() - self._update_services(services, crd_pod_selectors, - project_id) - - def _mock_cleanup_pod(self, kuryrport_crd): - """Mock Pod that doesn't exist anymore for cleanup purposes""" - pod = { - 'apiVersion': 'v1', - 'kind': 'Pod', - 'metadata': kuryrport_crd['metadata'].copy(), - } - # No need to try to delete the finalizer from the pod later, as - # pod's gone. - del pod['metadata']['finalizers'] - - main_vif = objects.base.VersionedObject.obj_from_primitive( - kuryrport_crd['status']['vifs'][constants.DEFAULT_IFNAME] - ['vif']) - - # Let's try to get the node's address using nodeName saved in CRD - host_ip = None - try: - node = self.k8s.get(f"{constants.K8S_API_BASE}/nodes/" - f"{kuryrport_crd['spec']['podNodeName']}") - for address in node['status']['addresses']: - if address['type'] == constants.K8S_NODE_ADDRESS_INTERNAL: - host_ip = address['address'] - break - except k_exc.K8sClientException: - LOG.warning('Could not find node %s when cleaning up port.', - kuryrport_crd['spec']['podNodeName']) - - if not host_ip: - # We can still try to use OpenStack API if this is nested VIF - port_id = utils.get_parent_port_id(main_vif) - if port_id: - host_ip = utils.get_parent_port_ip(port_id) - - if host_ip: - pod['status'] = {'hostIP': host_ip} - - # If we failed to find host_ip we still allow cleanup to follow, we - # catch all exceptions from release_vif anyway. 
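# A sketch of the node-address lookup _mock_cleanup_pod() performs
# above: take the first InternalIP from the node's status.addresses.
# The dict shape follows the Kubernetes API; the literal 'InternalIP'
# stands in for constants.K8S_NODE_ADDRESS_INTERNAL.
def internal_ip(node):
    for address in node['status']['addresses']:
        if address['type'] == 'InternalIP':
            return address['address']
    return None

node = {'status': {'addresses': [
    {'type': 'Hostname', 'address': 'worker-0'},
    {'type': 'InternalIP', 'address': '192.0.2.10'},
]}}
assert internal_ip(node) == '192.0.2.10'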
- return pod - - def on_finalize(self, kuryrport_crd, *args, **kwargs): - name = kuryrport_crd['metadata']['name'] - namespace = kuryrport_crd['metadata']['namespace'] - cleanup = False # If we're doing a cleanup, raise no error. - try: - pod = self.k8s.get(f"{constants.K8S_API_NAMESPACES}" - f"/{namespace}/pods/{name}") - if pod['metadata']['uid'] != kuryrport_crd['spec']['podUid']: - # Seems like this is KuryrPort created for an old Pod instance, - # with the same name. Cleaning it up instead of regular delete. - raise k_exc.K8sResourceNotFound( - 'Pod %s' % pod['metadata']['uid']) - except k_exc.K8sResourceNotFound: - LOG.warning('Pod for KuryrPort %s was forcibly deleted. ' - 'Attempting a cleanup before releasing KuryrPort.', - utils.get_res_unique_name(kuryrport_crd)) - self.k8s.add_event(kuryrport_crd, 'MissingPod', - 'Pod does not exist anymore, attempting to ' - 'cleanup orphaned KuryrPort', 'Warning') - if kuryrport_crd['status']['vifs']: - pod = self._mock_cleanup_pod(kuryrport_crd) - cleanup = True # Make sure we don't raise on release_vif() - else: - # Remove the finalizer, most likely ports never got created. - self.k8s.remove_finalizer(kuryrport_crd, - constants.KURYRPORT_FINALIZER) - return - - if ('deletionTimestamp' not in pod['metadata'] and - not utils.is_pod_completed(pod)): - # NOTE(gryf): Ignore deleting KuryrPort, since most likely it was - # removed manually, while we need vifs for corresponding pod - # object which apparently is still running. - LOG.warning('Manually triggered KuryrPort %s removal. This ' - 'action should be avoided, since KuryrPort CRDs are ' - 'internal to Kuryr.', name) - self.k8s.add_event(pod, 'NoKuryrPort', 'KuryrPort was not found, ' - 'most probably it was manually removed.', - 'Warning') - return - - project_id = self._drv_project.get_project(pod) - try: - crd_pod_selectors = self._drv_sg.delete_sg_rules(pod) - except k_exc.ResourceNotReady: - # NOTE(ltomasbo): If the pod is being deleted before - # kuryr-controller annotated any information about the port - # associated, there is no need for deleting sg rules associated to - # it. So this exception could be safely ignored for the current - # sg drivers. Only the NP driver associates rules to the pods ips, - # and that waits for annotations to start. - # - # NOTE(gryf): perhaps we don't need to handle this case, since - # during CRD creation all the things, including security groups - # rules would be created too. - LOG.debug("Skipping SG rules deletion associated to the pod %s", - pod) - self.k8s.add_event(pod, 'SkipingSGDeletion', 'Skipping SG rules ' - 'deletion') - crd_pod_selectors = [] - - for data in kuryrport_crd['status']['vifs'].values(): - vif = objects.base.VersionedObject.obj_from_primitive(data['vif']) - try: - self._drv_vif_pool.release_vif(pod, vif, project_id) - except Exception: - if not cleanup: - raise - LOG.warning('Error when cleaning up VIF %s, ignoring.', - utils.get_res_unique_name(kuryrport_crd)) - if (driver_utils.is_network_policy_enabled() and crd_pod_selectors and - oslo_cfg.CONF.octavia_defaults.enforce_sg_rules): - services = driver_utils.get_services() - self._update_services(services, crd_pod_selectors, project_id) - - # Remove finalizer out of pod. 
- self.k8s.remove_finalizer(pod, constants.POD_FINALIZER) - - # Finally, remove finalizer from KuryrPort CRD - self.k8s.remove_finalizer(kuryrport_crd, constants.KURYRPORT_FINALIZER) - - def get_vifs(self, kuryrport_crd): - try: - pod = self.k8s.get(f"{constants.K8S_API_NAMESPACES}" - f"/{kuryrport_crd['metadata']['namespace']}" - f"/pods" - f"/{kuryrport_crd['metadata']['name']}") - if pod['metadata']['uid'] != kuryrport_crd['spec']['podUid']: - # Seems like this is KuryrPort created for an old Pod, deleting - # it anyway. - raise k_exc.K8sResourceNotFound( - 'Pod %s' % pod['metadata']['uid']) - except k_exc.K8sResourceNotFound: - LOG.warning('Pod for KuryrPort %s was forcibly deleted. Deleting' - 'the KuryrPort too to attempt resource cleanup.', - utils.get_res_unique_name(kuryrport_crd)) - self.k8s.add_event(kuryrport_crd, 'MissingPod', - 'Pod does not exist anymore, attempting to ' - 'delete orphaned KuryrPort', 'Warning') - try: - self.k8s.delete(utils.get_res_link(kuryrport_crd)) - except k_exc.K8sResourceNotFound: - pass - except k_exc.K8sClientException: - LOG.exception('Error when trying to delete KuryrPort %s. Will ' - 'retry later.', - utils.get_res_unique_name(kuryrport_crd)) - return False - - project_id = self._drv_project.get_project(pod) - security_groups = self._drv_sg.get_security_groups(pod, project_id) - try: - subnets = self._drv_subnets.get_subnets(pod, project_id) - except (os_exc.ResourceNotFound, k_exc.K8sResourceNotFound): - LOG.warning("Subnet does not exists. If namespace driver is " - "used, probably the namespace for the pod is " - "already deleted. So this pod does not need to " - "get a port as it will be deleted too. If the " - "default subnet driver is used, then you must " - "select an existing subnet to be used by Kuryr.") - self.k8s.add_event(pod, 'NoPodSubnetFound', 'Pod subnet not ' - 'found. Namespace for this pod was probably ' - 'deleted. For default subnet driver it must ' - 'be existing subnet configured for Kuryr', - 'Warning') - return False - - # Request the default interface of pod - try: - main_vif = self._drv_vif_pool.request_vif(pod, project_id, - subnets, - security_groups) - except os_exc.ResourceNotFound: - # NOTE(gryf): It might happen, that between getting security - # groups above and requesting VIF, network policy is deleted, - # hence we will get 404 from OpenStackSDK. Let's retry, to refresh - # information regarding SG. - LOG.warning("SG not found during VIF requesting. 
Retrying.") - raise k_exc.ResourceNotReady(pod['metadata']['name']) - - pod_name = pod['metadata']['name'] - if not main_vif: - LOG.warning("Ignoring event due to pod %s not being " - "scheduled yet.", pod_name) - self.k8s.add_event(pod, 'KuryrIgnoringPodEvent', - f'Ignoring event: Pod not scheduled ' - f'{pod_name}') - return False - - vifs = {constants.DEFAULT_IFNAME: {'default': True, 'vif': main_vif}} - - # Request the additional interfaces from multiple drivers - index = 0 - for driver in self._drv_multi_vif: - additional_vifs = driver.request_additional_vifs(pod, project_id, - security_groups) - for index, vif in enumerate(additional_vifs, start=index+1): - ifname = (oslo_cfg.CONF.kubernetes.additional_ifname_prefix + - str(index)) - vifs[ifname] = {'default': False, 'vif': vif} - - try: - self._update_kuryrport_crd(kuryrport_crd, vifs) - self.k8s.add_event(pod, 'KuryrPortUpdatedWithVIFs', - f'KuryrPort CRD: {pod_name} updated with VIFs') - except k_exc.K8sClientException as ex: - LOG.exception("Kubernetes Client Exception creating " - "KuryrPort CRD: %s", ex) - for ifname, data in vifs.items(): - self._drv_vif_pool.release_vif(pod, data['vif'], project_id) - self.k8s.add_event(pod, 'ExceptionOnKPUpdate', f'There was k8s ' - f'client exception on updating corresponding ' - f'KuryrPort CRD: {ex}', 'Warning') - return True - - def _update_kuryrport_crd(self, kuryrport_crd, vifs): - LOG.debug('Updating CRD %s', kuryrport_crd["metadata"]["name"]) - vif_dict = {} - for ifname, data in vifs.items(): - data['vif'].obj_reset_changes(recursive=True) - vif_dict[ifname] = {'default': data['default'], - 'vif': data['vif'].obj_to_primitive()} - - self.k8s.patch_crd('status', utils.get_res_link(kuryrport_crd), - {'vifs': vif_dict}) - - def _update_services(self, services, crd_pod_selectors, project_id): - for service in services.get('items'): - if not driver_utils.service_matches_affected_pods( - service, crd_pod_selectors): - continue - sgs = self._drv_svc_sg.get_security_groups(service, - project_id) - self._drv_lbaas.update_lbaas_sg(service, sgs) - - def _record_pod_creation_metric(self, pod): - exporter = exp.ControllerPrometheusExporter.get_instance() - for condition in pod['status'].get('conditions'): - if condition['type'] == 'PodScheduled' and condition['status']: - f_str = "%Y-%m-%dT%H:%M:%SZ" - time_obj = datetime.datetime.strptime( - condition['lastTransitionTime'], f_str) - pod_creation_time = datetime.datetime.now() - time_obj - pod_creation_sec = (pod_creation_time).total_seconds() - exporter.record_pod_creation_metric(pod_creation_sec) - - def _get_pod(self, kuryrport_crd): - try: - name = kuryrport_crd['metadata']['name'] - namespace = kuryrport_crd['metadata']['namespace'] - return self.k8s.get(f"{constants.K8S_API_NAMESPACES}" - f"/{namespace}/pods/{name}") - except k_exc.K8sResourceNotFound as ex: - self.k8s.add_event(kuryrport_crd, 'KuryrFailedGettingPod' - f'Failed to get corresponding pod: {ex}', - 'Warning') - LOG.exception("Failed to get pod: %s", ex) - raise diff --git a/kuryr_kubernetes/controller/handlers/lbaas.py b/kuryr_kubernetes/controller/handlers/lbaas.py deleted file mode 100644 index e0a4e5795..000000000 --- a/kuryr_kubernetes/controller/handlers/lbaas.py +++ /dev/null @@ -1,447 +0,0 @@ -# Copyright (c) 2016 Mirantis, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import netaddr - -from kuryr.lib._i18n import _ -from oslo_log import log as logging - -from kuryr_kubernetes import clients -from kuryr_kubernetes import config -from kuryr_kubernetes import constants as k_const -from kuryr_kubernetes.controller.drivers import base as drv_base -from kuryr_kubernetes.controller.drivers import utils as driver_utils -from kuryr_kubernetes import exceptions as k_exc -from kuryr_kubernetes.handlers import k8s_base -from kuryr_kubernetes import utils - -LOG = logging.getLogger(__name__) -CONF = config.CONF - -SUPPORTED_SERVICE_TYPES = ('ClusterIP', 'LoadBalancer') - - -class ServiceHandler(k8s_base.ResourceEventHandler): - """ServiceHandler handles K8s Service events. - - ServiceHandler handles K8s Service events and updates related Endpoints - with LBaaSServiceSpec when necessary. - """ - - OBJECT_KIND = k_const.K8S_OBJ_SERVICE - OBJECT_WATCH_PATH = "%s/%s" % (k_const.K8S_API_BASE, "services") - - def __init__(self): - super(ServiceHandler, self).__init__() - self._drv_project = drv_base.ServiceProjectDriver.get_instance() - self._drv_subnets = drv_base.ServiceSubnetsDriver.get_instance() - self._drv_sg = drv_base.ServiceSecurityGroupsDriver.get_instance() - self._drv_lbaas = drv_base.LBaaSDriver.get_instance() - self.k8s = clients.get_kubernetes_client() - - self._lb_provider = None - if self._drv_lbaas.providers_supported(): - self._lb_provider = 'amphora' - config_provider = CONF.kubernetes.endpoints_driver_octavia_provider - if config_provider != 'default': - self._lb_provider = config_provider - - def _bump_network_policies(self, svc): - if driver_utils.is_network_policy_enabled(): - driver_utils.bump_networkpolicies(svc['metadata']['namespace']) - - def on_present(self, service, *args, **kwargs): - reason = self._should_ignore(service) - if reason: - reason %= utils.get_res_unique_name(service) - LOG.debug(reason) - self.k8s.add_event(service, 'KuryrServiceSkipped', reason) - return - - loadbalancer_crd = self.k8s.get_loadbalancer_crd(service) - try: - if not self._patch_service_finalizer(service): - return - except k_exc.K8sClientException as ex: - msg = (f'K8s API error when adding finalizer to Service ' - f'{utils.get_res_unique_name(service)}') - LOG.exception(msg) - self.k8s.add_event(service, 'KuryrAddServiceFinalizerError', - f'{msg}: {ex}', 'Warning') - raise - - if loadbalancer_crd is None: - try: - # Bump all the NPs in the namespace to force SG rules - # recalculation. 
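# A sketch of the provider resolution in ServiceHandler.__init__()
# above, assuming the config override only applies when the Octavia
# deployment advertises provider support (the indentation is ambiguous
# in the flattened diff). Values are illustrative.
def resolve_provider(providers_supported, config_provider):
    provider = None
    if providers_supported:
        provider = 'amphora'
        if config_provider != 'default':
            provider = config_provider
    return provider

assert resolve_provider(True, 'default') == 'amphora'
assert resolve_provider(True, 'ovn') == 'ovn'
assert resolve_provider(False, 'ovn') is None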
- self._bump_network_policies(service) - self.create_crd_spec(service) - except k_exc.K8sNamespaceTerminating: - LOG.debug('Namespace %s is being terminated, ignoring ' - 'Service %s in that namespace.', - service['metadata']['namespace'], - service['metadata']['name']) - return - elif self._has_lbaas_spec_changes(service, loadbalancer_crd): - self._update_crd_spec(loadbalancer_crd, service) - - def _is_supported_type(self, service): - spec = service['spec'] - return spec.get('type') in SUPPORTED_SERVICE_TYPES - - def _has_spec_annotation(self, service): - return (k_const.K8S_ANNOTATION_LBAAS_SPEC in - service['metadata'].get('annotations', {})) - - def _get_service_ip(self, service): - if self._is_supported_type(service): - return self._strip_funny_ip(service['spec'].get('clusterIP')) - return None - - def _should_ignore(self, service): - if not self._has_clusterip(service): - return 'Skipping headless Service %s.' - if not self._is_supported_type(service): - return 'Skipping service %s of unsupported type.' - if self._has_spec_annotation(service): - return ('Skipping annotated service %s, waiting for it to be ' - 'converted to KuryrLoadBalancer object and annotation ' - 'removed.') - if utils.is_kubernetes_default_resource(service): - # Avoid to handle default Kubernetes service as requires https. - return 'Skipping default service %s.' - return None - - def _patch_service_finalizer(self, service): - return self.k8s.add_finalizer(service, k_const.SERVICE_FINALIZER) - - def on_finalize(self, service, *args, **kwargs): - klb_crd_path = utils.get_klb_crd_path(service) - # Bump all the NPs in the namespace to force SG rules - # recalculation. - self._bump_network_policies(service) - try: - self.k8s.delete(klb_crd_path) - except k_exc.K8sResourceNotFound: - self.k8s.remove_finalizer(service, k_const.SERVICE_FINALIZER) - - def _has_clusterip(self, service): - # ignore headless service, clusterIP is None - return service['spec'].get('clusterIP') != 'None' - - def _get_subnet_id(self, service, project_id, ip): - subnets_mapping = self._drv_subnets.get_subnets(service, project_id) - subnet_ids = { - subnet_id - for subnet_id, network in subnets_mapping.items() - for subnet in network.subnets.objects - if ip in subnet.cidr} - - if len(subnet_ids) != 1: - raise k_exc.IntegrityError(_( - "Found %(num)s subnets for service %(link)s IP %(ip)s") % { - 'link': utils.get_res_link(service), - 'ip': ip, - 'num': len(subnet_ids)}) - - return subnet_ids.pop() - - def create_crd_spec(self, service): - svc_name = service['metadata']['name'] - svc_namespace = service['metadata']['namespace'] - kubernetes = clients.get_kubernetes_client() - spec = self._build_kuryrloadbalancer_spec(service) - - owner_reference = { - 'apiVersion': service['apiVersion'], - 'kind': service['kind'], - 'name': service['metadata']['name'], - 'uid': service['metadata']['uid'], - } - - loadbalancer_crd = { - 'apiVersion': 'openstack.org/v1', - 'kind': 'KuryrLoadBalancer', - 'metadata': { - 'name': svc_name, - 'finalizers': [k_const.KURYRLB_FINALIZER], - 'ownerReferences': [owner_reference], - }, - 'spec': spec, - 'status': {}, - } - - try: - kubernetes.post('{}/{}/kuryrloadbalancers'.format( - k_const.K8S_API_CRD_NAMESPACES, svc_namespace), - loadbalancer_crd) - except k_exc.K8sConflict: - raise k_exc.ResourceNotReady(svc_name) - except k_exc.K8sNamespaceTerminating: - raise - except k_exc.K8sClientException as e: - LOG.exception("Exception when creating KuryrLoadBalancer CRD.") - self.k8s.add_event( - service, 'CreateKLBFailed', - 
'Error when creating KuryrLoadBalancer object: %s' % e, - 'Warning') - raise - - def _update_crd_spec(self, loadbalancer_crd, service): - svc_name = service['metadata']['name'] - kubernetes = clients.get_kubernetes_client() - spec = self._build_kuryrloadbalancer_spec(service) - LOG.debug('Patching KuryrLoadBalancer CRD %s', loadbalancer_crd) - try: - kubernetes.patch_crd('spec', utils.get_res_link(loadbalancer_crd), - spec) - except k_exc.K8sResourceNotFound: - LOG.debug('KuryrLoadBalancer CRD not found %s', loadbalancer_crd) - except k_exc.K8sConflict: - raise k_exc.ResourceNotReady(svc_name) - except k_exc.K8sClientException as e: - LOG.exception('Error updating KuryrNetwork CRD %s', - loadbalancer_crd) - self.k8s.add_event( - service, 'UpdateKLBFailed', - 'Error when updating KuryrLoadBalancer object: %s' % e, - 'Warning') - raise - - def _get_data_timeout_annotation(self, service): - default_timeout_cli = CONF.octavia_defaults.timeout_client_data - default_timeout_mem = CONF.octavia_defaults.timeout_member_data - try: - annotations = service['metadata']['annotations'] - except KeyError: - return default_timeout_cli, default_timeout_mem - try: - timeout_cli = annotations[k_const.K8S_ANNOTATION_CLIENT_TIMEOUT] - data_timeout_cli = int(timeout_cli) - except KeyError: - data_timeout_cli = default_timeout_cli - try: - timeout_mem = annotations[k_const.K8S_ANNOTATION_MEMBER_TIMEOUT] - data_timeout_mem = int(timeout_mem) - except KeyError: - data_timeout_mem = default_timeout_mem - return data_timeout_cli, data_timeout_mem - - def _build_kuryrloadbalancer_spec(self, service): - svc_ip = self._get_service_ip(service) - spec_lb_ip = service['spec'].get('loadBalancerIP') - ports = service['spec'].get('ports') - for port in ports: - if type(port['targetPort']) == int: - port['targetPort'] = str(port['targetPort']) - project_id = self._drv_project.get_project(service) - sg_ids = self._drv_sg.get_security_groups(service, project_id) - subnet_id = self._get_subnet_id(service, project_id, svc_ip) - spec_type = service['spec'].get('type') - spec = { - 'ip': svc_ip, - 'ports': ports, - 'project_id': project_id, - 'security_groups_ids': sg_ids, - 'subnet_id': subnet_id, - 'type': spec_type, - } - - if self._lb_provider: - spec['provider'] = self._lb_provider - - if spec_lb_ip is not None: - spec['lb_ip'] = self._strip_funny_ip(spec_lb_ip) - timeout_cli, timeout_mem = self._get_data_timeout_annotation(service) - spec['timeout_client_data'] = timeout_cli - spec['timeout_member_data'] = timeout_mem - return spec - - def _has_lbaas_spec_changes(self, service, loadbalancer_crd): - return (self._has_ip_changes(service, loadbalancer_crd) or - utils.has_port_changes(service, loadbalancer_crd) or - self._has_timeout_changes(service, loadbalancer_crd) or - self._has_provider_changes(loadbalancer_crd)) - - def _has_provider_changes(self, loadbalancer_crd): - return (self._lb_provider and - loadbalancer_crd['spec'].get('provider') != self._lb_provider) - - def _has_ip_changes(self, service, loadbalancer_crd): - link = utils.get_res_link(service) - svc_ip = self._get_service_ip(service) - - if loadbalancer_crd['spec'].get('ip') is None: - if svc_ip is None: - return False - return True - - elif str(loadbalancer_crd['spec'].get('ip')) != svc_ip: - LOG.debug("LBaaS spec IP %(spec_ip)s != %(svc_ip)s for %(link)s" - % {'spec_ip': loadbalancer_crd['spec']['ip'], - 'svc_ip': svc_ip, - 'link': link}) - return True - - return False - - def _has_timeout_changes(self, service, loadbalancer_crd): - link = 
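# A sketch of the timeout-annotation handling in
# _get_data_timeout_annotation() above: fall back to the configured
# defaults whenever an annotation is missing. The annotation keys and
# default values below are hypothetical placeholders, not the actual
# Kuryr constants.
CLIENT_TIMEOUT_KEY = 'example.org/timeout-client-data'  # hypothetical
MEMBER_TIMEOUT_KEY = 'example.org/timeout-member-data'  # hypothetical

def data_timeouts(service, default_cli=50000, default_mem=50000):
    annotations = service.get('metadata', {}).get('annotations', {})
    try:
        cli = int(annotations[CLIENT_TIMEOUT_KEY])
    except KeyError:
        cli = default_cli
    try:
        mem = int(annotations[MEMBER_TIMEOUT_KEY])
    except KeyError:
        mem = default_mem
    return cli, mem

svc = {'metadata': {'annotations': {CLIENT_TIMEOUT_KEY: '90000'}}}
assert data_timeouts(svc) == (90000, 50000)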
utils.get_res_link(service) - cli_timeout, mem_timeout = self._get_data_timeout_annotation(service) - - for spec_value, current_value in [(loadbalancer_crd['spec'].get( - 'timeout_client_data'), cli_timeout), (loadbalancer_crd[ - 'spec'].get('timeout_member_data'), mem_timeout)]: - if not spec_value and not current_value: - continue - elif spec_value != current_value: - LOG.debug("LBaaS spec listener timeout {} != {} for {}".format( - spec_value, current_value, link)) - return True - - return False - - def _strip_funny_ip(self, ip): - return str(netaddr.IPAddress(ip, flags=netaddr.core.ZEROFILL)) - - -class EndpointsHandler(k8s_base.ResourceEventHandler): - """EndpointsHandler handles K8s Endpoints events. - - EndpointsHandler handles K8s Endpoints events and tracks changes in - LBaaSServiceSpec to update Neutron LBaaS accordingly and to reflect its' - actual state in LBaaSState. - """ - - OBJECT_KIND = k_const.K8S_OBJ_ENDPOINTS - OBJECT_WATCH_PATH = "%s/%s" % (k_const.K8S_API_BASE, "endpoints") - - def __init__(self): - super(EndpointsHandler, self).__init__() - self.k8s = clients.get_kubernetes_client() - - def on_present(self, endpoints, *args, **kwargs): - ep_name = endpoints['metadata']['name'] - ep_namespace = endpoints['metadata']['namespace'] - - loadbalancer_crd = self.k8s.get_loadbalancer_crd(endpoints) - - if (not (self._has_pods(endpoints) or (loadbalancer_crd and - loadbalancer_crd.get('status'))) - or k_const.K8S_ANNOTATION_HEADLESS_SERVICE - in endpoints['metadata'].get('labels', []) or - utils.is_kubernetes_default_resource(endpoints)): - LOG.debug("Ignoring Kubernetes endpoints %s", - endpoints['metadata']['name']) - return - - if loadbalancer_crd is None: - raise k_exc.KuryrLoadBalancerNotCreated(endpoints) - else: - try: - self._update_crd_spec(loadbalancer_crd, endpoints) - except k_exc.K8sNamespaceTerminating: - LOG.debug('Namespace %s is being terminated, ignoring ' - 'Endpoints %s in that namespace.', - ep_namespace, ep_name) - - def on_deleted(self, endpoints, *args, **kwargs): - self._remove_endpoints(endpoints) - - def _has_pods(self, endpoints): - ep_subsets = endpoints.get('subsets', []) - if not ep_subsets: - return False - return any(True - for subset in ep_subsets - if subset.get('addresses', [])) - - def _convert_subsets_to_endpointslice(self, endpoints_obj): - endpointslices = [] - endpoints = [] - subsets = endpoints_obj.get('subsets', []) - for subset in subsets: - addresses = subset.get('addresses', []) - ports = subset.get('ports', []) - for address in addresses: - ip = address.get('ip') - targetRef = address.get('targetRef') - endpoint = { - 'addresses': [ip], - 'conditions': { - 'ready': True - }, - } - if targetRef: - endpoint['targetRef'] = targetRef - endpoints.append(endpoint) - endpointslices.append({ - 'endpoints': endpoints, - 'ports': ports, - }) - - return endpointslices - - def _add_event(self, endpoints, reason, message, type_=None): - """_add_event adds an event for the corresponding Service.""" - try: - service = self.k8s.get(utils.get_service_link(endpoints)) - except k_exc.K8sClientException: - LOG.debug('Error when fetching Service to add an event %s, ' - 'ignoring', utils.get_res_unique_name(endpoints)) - return - kwargs = {'type_': type_} if type_ else {} - self.k8s.add_event(service, reason, message, **kwargs) - - def _update_crd_spec(self, loadbalancer_crd, endpoints): - # TODO(maysams): Remove the conversion once we start handling - # EndpointSlices. 
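The TODO above concerns the interim mapping from legacy Endpoints subsets to EndpointSlice-shaped dicts stored in the KuryrLoadBalancer spec. A standalone sketch of that mapping follows; sample data and names are illustrative, not from a live cluster, and unlike the original helper the endpoints list here is reset per subset, so endpoints from one subset cannot leak into the next slice:

```python
# Illustrative-only conversion from Endpoints 'subsets' to
# EndpointSlice-like dicts, mirroring _convert_subsets_to_endpointslice.
def convert_subsets_to_endpointslices(subsets):
    slices = []
    for subset in subsets:
        endpoints = []  # reset per subset
        for address in subset.get('addresses', []):
            endpoint = {'addresses': [address.get('ip')],
                        'conditions': {'ready': True}}
            if address.get('targetRef'):
                endpoint['targetRef'] = address['targetRef']
            endpoints.append(endpoint)
        slices.append({'endpoints': endpoints,
                       'ports': subset.get('ports', [])})
    return slices


subsets = [{'addresses': [{'ip': '10.0.0.5',
                           'targetRef': {'kind': 'Pod', 'name': 'web-1',
                                         'namespace': 'default'}}],
            'ports': [{'name': 'http', 'port': 8080, 'protocol': 'TCP'}]}]
print(convert_subsets_to_endpointslices(subsets))
# -> one slice with a single ready endpoint for 10.0.0.5 on port 8080
```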
- epslices = self._convert_subsets_to_endpointslice(endpoints) - try: - self.k8s.patch_crd('spec', utils.get_res_link(loadbalancer_crd), - {'endpointSlices': epslices}) - except k_exc.K8sResourceNotFound: - LOG.debug('KuryrLoadBalancer CRD not found %s', loadbalancer_crd) - except k_exc.K8sConflict: - raise k_exc.ResourceNotReady(loadbalancer_crd) - except k_exc.K8sClientException as e: - LOG.exception('Error updating KuryrLoadBalancer CRD %s', - loadbalancer_crd) - self._add_event( - endpoints, 'UpdateKLBFailed', - 'Error when updating KuryrLoadBalancer object: %s' % e, - 'Warning') - raise - - return True - - def _remove_endpoints(self, endpoints): - lb_name = utils.get_res_unique_name(endpoints) - try: - self.k8s.patch_crd('spec', utils.get_klb_crd_path(endpoints), - 'endpointSlices', action='remove') - except k_exc.K8sResourceNotFound: - LOG.debug('KuryrLoadBalancer CRD not found %s', lb_name) - except k_exc.K8sUnprocessableEntity: - # This happens when endpointSlices doesn't exist on the KLB, - # safe to ignore, the resource is already in the state we want. - pass - except k_exc.K8sClientException as e: - LOG.exception('Error updating KuryrLoadBalancer CRD %s', lb_name) - self._add_event( - endpoints, 'UpdateKLBFailed', - 'Error when updating KuryrLoadBalancer object: %s' % e, - 'Warning') - raise diff --git a/kuryr_kubernetes/controller/handlers/loadbalancer.py b/kuryr_kubernetes/controller/handlers/loadbalancer.py deleted file mode 100755 index 04dd00ed8..000000000 --- a/kuryr_kubernetes/controller/handlers/loadbalancer.py +++ /dev/null @@ -1,916 +0,0 @@ -# Copyright (c) 2020 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import time - -from oslo_log import log as logging - -from kuryr_kubernetes import clients -from kuryr_kubernetes import config -from kuryr_kubernetes import constants as k_const -from kuryr_kubernetes.controller.drivers import base as drv_base -from kuryr_kubernetes.controller.drivers import utils as driver_utils -from kuryr_kubernetes import exceptions as k_exc -from kuryr_kubernetes.handlers import k8s_base -from kuryr_kubernetes import utils - -LOG = logging.getLogger(__name__) -CONF = config.CONF - -OCTAVIA_DEFAULT_PROVIDERS = ['octavia', 'amphora'] -CRD_RECONCILIATION_FREQUENCY = 600 # seconds - - -class KuryrLoadBalancerHandler(k8s_base.ResourceEventHandler): - """KuryrLoadBalancerHandler handles KuryrLoadBalancer CRD events. - - It tracks changes in the KuryrLoadBalancer spec and drives the Octavia - load balancer to match it, reflecting the load balancer's actual state - in the CRD status.
- """ - - OBJECT_KIND = k_const.K8S_OBJ_KURYRLOADBALANCER - OBJECT_WATCH_PATH = k_const.K8S_API_CRD_KURYRLOADBALANCERS - - def __init__(self): - super(KuryrLoadBalancerHandler, self).__init__() - self._drv_lbaas = drv_base.LBaaSDriver.get_instance() - self._drv_pod_project = drv_base.PodProjectDriver.get_instance() - self._drv_pod_subnets = drv_base.PodSubnetsDriver.get_instance() - self._drv_service_pub_ip = drv_base.ServicePubIpDriver.get_instance() - self._drv_svc_project = drv_base.ServiceProjectDriver.get_instance() - self._drv_sg = drv_base.ServiceSecurityGroupsDriver.get_instance() - self._drv_nodes_subnets = drv_base.NodesSubnetsDriver.get_instance() - self.k8s = clients.get_kubernetes_client() - - def _get_nodes_subnets(self): - return utils.get_subnets_id_cidrs( - self._drv_nodes_subnets.get_nodes_subnets()) - - def _add_event(self, klb, reason, message, type_=None): - """_add_event adds an event for the corresponding Service.""" - klb_meta = klb['metadata'] - for ref in klb_meta.get('ownerReferences', []): - # "mock" a Service based on ownerReference to it. - if ref['kind'] == 'Service' and ref['name'] == klb_meta['name']: - service = { - 'apiVersion': ref['apiVersion'], - 'kind': ref['kind'], - 'metadata': { - 'name': ref['name'], - 'uid': ref['uid'], - 'namespace': klb_meta['namespace'], # ref shares ns - }, - } - break - else: - # No reference, just fetch the service from the API. - try: - service = self.k8s.get( - f"{k_const.K8S_API_NAMESPACES}/{klb_meta['namespace']}" - f"/services/{klb_meta['name']}") - except k_exc.K8sClientException: - LOG.debug('Error when fetching Service to add an event %s, ' - 'ignoring', utils.get_res_unique_name(klb)) - return - kwargs = {'type_': type_} if type_ else {} - self.k8s.add_event(service, reason, message, **kwargs) - - def on_present(self, loadbalancer_crd, *args, **kwargs): - if loadbalancer_crd.get('status', None) is None: - try: - self.k8s.patch_crd('status', - utils.get_res_link(loadbalancer_crd), {}) - except k_exc.K8sResourceNotFound: - LOG.debug('KuryrLoadbalancer CRD not found %s', - utils.get_res_unique_name(loadbalancer_crd)) - return - - reason = self._should_ignore(loadbalancer_crd) - if reason: - reason %= utils.get_res_unique_name(loadbalancer_crd) - LOG.debug(reason) - self._add_event(loadbalancer_crd, 'KuryrServiceSkipped', reason) - return - - crd_lb = loadbalancer_crd['status'].get('loadbalancer') - if crd_lb: - lb_provider = crd_lb.get('provider') - spec_lb_provider = loadbalancer_crd['spec'].get('provider') - # amphora to ovn upgrade - if not lb_provider or lb_provider in OCTAVIA_DEFAULT_PROVIDERS: - if (spec_lb_provider and - spec_lb_provider not in OCTAVIA_DEFAULT_PROVIDERS): - self._add_event(loadbalancer_crd, 'KuryrUpdateProvider', - 'Deleting Amphora load balancer to ' - 'recreate it with OVN provider') - self._ensure_release_lbaas(loadbalancer_crd) - - # ovn to amphora downgrade - elif lb_provider and lb_provider not in OCTAVIA_DEFAULT_PROVIDERS: - if (not spec_lb_provider or - spec_lb_provider in OCTAVIA_DEFAULT_PROVIDERS): - self._add_event(loadbalancer_crd, 'KuryrUpdateProvider', - 'Deleting OVN load balancer to ' - 'recreate it with Amphora provider') - self._ensure_release_lbaas(loadbalancer_crd) - - if not crd_lb: - self._add_event(loadbalancer_crd, 'KuryrEnsureLB', - 'Provisioning a load balancer') - try: - changed = self._sync_lbaas_members(loadbalancer_crd) - except Exception as e: - self._add_event( - loadbalancer_crd, 'KuryrEnsureLBError', - f'Error when provisioning load balancer: {e}', 'Warning') - 
raise - - if changed: - self._add_event(loadbalancer_crd, 'KuryrEnsuredLB', - 'Load balancer provisioned') - # Note(yboaron) For LoadBalancer services, we should allocate FIP, - # associate it to LB VIP and update K8S service status - lb_ip = loadbalancer_crd['spec'].get('lb_ip') - pub_info = loadbalancer_crd['status'].get( - 'service_pub_ip_info') - if pub_info is None and loadbalancer_crd['spec'].get('type'): - service_pub_ip_info = ( - self._drv_service_pub_ip.acquire_service_pub_ip_info( - loadbalancer_crd['spec']['type'], - lb_ip, - loadbalancer_crd['spec']['project_id'], - loadbalancer_crd['status']['loadbalancer'][ - 'port_id'])) - if service_pub_ip_info: - self._add_event( - loadbalancer_crd, 'KuryrEnsureFIP', - 'Associating floating IP to the load balancer') - self._drv_service_pub_ip.associate_pub_ip( - service_pub_ip_info, loadbalancer_crd['status'][ - 'loadbalancer']['port_id']) - loadbalancer_crd['status'][ - 'service_pub_ip_info'] = service_pub_ip_info - self._update_lb_status(loadbalancer_crd) - self._patch_status(loadbalancer_crd) - - def reconcile(self): - loadbalancer_crds = [] - try: - loadbalancer_crds = driver_utils.get_kuryrloadbalancer_crds() - except k_exc.K8sClientException: - LOG.warning("Error retrieving KuryrLoadBalancer CRDs") - try: - self._trigger_reconciliation(loadbalancer_crds) - except Exception: - LOG.exception('Error while running load balancer reconciliation.') - - def _trigger_reconciliation(self, loadbalancer_crds): - LOG.debug("Reconciling the KuryrLoadBalancer CRDs") - lbaas = clients.get_loadbalancer_client() - resources_fn = {'loadbalancer': lbaas.load_balancers, - 'listener': lbaas.listeners, - 'pool': lbaas.pools, 'member': lbaas.members} - resources = {'loadbalancer': [], 'listener': [], 'pool': [], - 'member': []} - - for klb in loadbalancer_crds: - if klb['metadata'].get('deletionTimestamp'): - continue - - selflink = utils.get_res_link(klb) - lb_id = klb.get('status', {}).get('loadbalancer', {}).get('id') - - if lb_id: - resources['loadbalancer'].append({'id': lb_id, - 'selflink': selflink, - 'klb': klb}) - - for lbl in klb.get('status', {}).get('listeners', []): - resources['listener'].append({'id': lbl['id'], - 'selflink': selflink, - 'lklb': klb}) - for pl in klb.get('status', {}).get('pools', []): - resources['pool'].append({'id': pl['id'], - 'selflink': selflink, - 'pklb': klb}) - for lbm in klb.get('status', {}).get('members', []): - resources['member'].append({'id': lbm['id'], - 'selflink': selflink, - 'mklb': klb, - 'pool_id': lbm['pool_id']}) - - resources_already_triggered = [] - # Reconcile load balancers first, then listeners, pools - # and members. - resource_types = ('loadbalancer', 'listener', 'pool', 'member') - for resource_type in resource_types: - filters = {} - self._drv_lbaas.add_tags(resource_type, filters) - os_list = resources_fn[resource_type] - if resource_type == 'member': - pool_ids = [cr_member['pool_id'] for cr_member in - resources[resource_type]] - pool_ids = list(set(pool_ids)) - os_resources_id = [] - for pl_id in pool_ids: - os_resources = os_list(pl_id, **filters) - os_resources_id.extend([rsrc['id'] for rsrc in - os_resources]) - else: - os_resources = os_list(**filters) - os_resources_id = [rsrc['id'] for rsrc in os_resources] - for data in resources[resource_type]: - if data['selflink'] in resources_already_triggered: - continue - if data['id'] not in os_resources_id: - resources_already_triggered.append(data['selflink']) - LOG.debug("Reconciling KuryrLoadBalancer CRD: %s", - data['selflink']) -
self._reconcile_lb(data) - - def _reconcile_lb(self, data): - kubernetes = clients.get_kubernetes_client() - try: - if data.get('klb'): - self._add_event(data['klb'], 'LoadBalancerMissing', - 'Load balancer for the Service does not ' - 'exist anymore. Recreating it.', 'Warning') - if data.get('lklb'): - self._add_event(data['lklb'], 'LoadBalancerListenerMissing', - 'Load Balancer listener does not exist ' - 'anymore. Recreating it.', 'Warning') - if data.get('pklb'): - self._add_event(data['pklb'], 'LoadBalancerPoolMissing', - 'Load Balancer pool does not exist anymore. ' - 'Recreating it.', 'Warning') - if data.get('mklb'): - self._add_event(data['mklb'], 'LoadBalancerMemberMissing', - 'Load Balancer member does not exist anymore. ' - 'Recreating it.', 'Warning') - - kubernetes.patch_crd('status', data['selflink'], {}) - except k_exc.K8sResourceNotFound: - LOG.debug('Unable to reconcile the KuryrLoadBalancer CRD %s', - data['selflink']) - except k_exc.K8sClientException: - LOG.warning('Unable to patch the KuryrLoadBalancer CRD %s', - data['selflink']) - - def _should_ignore(self, loadbalancer_crd): - if not (self._has_endpoints(loadbalancer_crd) or - loadbalancer_crd.get('status')): - return 'Skipping Service %s without Endpoints' - elif not loadbalancer_crd['spec'].get('ip'): - return 'Skipping Service %s without IP set yet' - return False - - def _has_endpoints(self, loadbalancer_crd): - ep_slices = loadbalancer_crd['spec'].get('endpointSlices', []) - if not ep_slices: - return False - return True - - def on_finalize(self, loadbalancer_crd, *args, **kwargs): - LOG.debug("Deleting the loadbalancer CRD") - - if loadbalancer_crd['status'] != {}: - self._add_event(loadbalancer_crd, 'KuryrReleaseLB', - 'Releasing the load balancer') - try: - # NOTE(ivc): deleting pool deletes its members - self._drv_lbaas.release_loadbalancer( - loadbalancer_crd['status'].get('loadbalancer')) - except Exception as e: - # FIXME(dulek): It seems like if the loadbalancer gets stuck - # in PENDING_DELETE we'll just silently time out - # waiting for it to be deleted. Is that expected? - self._add_event( - loadbalancer_crd, 'KuryrReleaseLBError', - f'Error when releasing load balancer: {e}', 'Warning') - raise - - try: - pub_info = loadbalancer_crd['status']['service_pub_ip_info'] - except KeyError: - pub_info = None - - if pub_info: - self._add_event( - loadbalancer_crd, 'KuryrReleaseFIP', - 'Dissociating floating IP from the load balancer') - self._drv_service_pub_ip.release_pub_ip( - loadbalancer_crd['status']['service_pub_ip_info']) - - LOG.debug('Removing finalizer from KuryrLoadBalancer CRD %s', - loadbalancer_crd) - try: - self.k8s.remove_finalizer(loadbalancer_crd, - k_const.KURYRLB_FINALIZER) - except k_exc.K8sClientException as e: - msg = (f'K8s API error when removing finalizer from ' - f'KuryrLoadBalancer of Service ' - f'{utils.get_res_unique_name(loadbalancer_crd)}') - LOG.exception(msg) - self._add_event(loadbalancer_crd, 'KuryrRemoveLBFinalizerError', - f'{msg}: {e}', 'Warning') - raise - - namespace = loadbalancer_crd['metadata']['namespace'] - name = loadbalancer_crd['metadata']['name'] - try: - service = self.k8s.get(f"{k_const.K8S_API_NAMESPACES}/{namespace}" - f"/services/{name}") - except k_exc.K8sResourceNotFound: - LOG.warning('Service %s not found.
This is unexpected.', - utils.get_res_unique_name(loadbalancer_crd)) - return - - LOG.debug('Removing finalizer from Service %s', - utils.get_res_unique_name(service)) - try: - self.k8s.remove_finalizer(service, k_const.SERVICE_FINALIZER) - except k_exc.K8sClientException as e: - msg = (f'K8s API error when removing finalizer from Service ' - f'{utils.get_res_unique_name(service)}') - LOG.exception(msg) - self._add_event( - loadbalancer_crd, 'KuryrRemoveServiceFinalizerError', - f'{msg}: {e}', 'Warning') - raise - - def _patch_status(self, loadbalancer_crd): - try: - self.k8s.patch_crd('status', utils.get_res_link(loadbalancer_crd), - loadbalancer_crd['status']) - except k_exc.K8sResourceNotFound: - LOG.debug('KuryrLoadBalancer CRD not found %s', loadbalancer_crd) - return False - except k_exc.K8sUnprocessableEntity: - LOG.warning('KuryrLoadBalancer %s modified, retrying later.', - utils.get_res_unique_name(loadbalancer_crd)) - return False - except k_exc.K8sClientException as e: - msg = (f'K8s API error when updating status of ' - f'{utils.get_res_unique_name(loadbalancer_crd)} Service ' - f'load balancer') - LOG.exception(msg) - self._add_event(loadbalancer_crd, 'KuryrUpdateLBStatusError', - f'{msg}: {e}', 'Warning') - raise - return True - - def _sync_lbaas_members(self, loadbalancer_crd): - changed = False - - if self._remove_unused_members(loadbalancer_crd): - changed = True - - if self._sync_lbaas_pools(loadbalancer_crd): - changed = True - - if (self._has_endpoints(loadbalancer_crd) and - self._add_new_members(loadbalancer_crd)): - changed = True - - return changed - - def _sync_lbaas_sgs(self, klb_crd): - lb = klb_crd['status'].get('loadbalancer') - svc_name = klb_crd['metadata']['name'] - svc_namespace = klb_crd['metadata']['namespace'] - try: - service = self.k8s.get( - f'{k_const.K8S_API_NAMESPACES}/{svc_namespace}/' - f'services/{svc_name}') - except k_exc.K8sResourceNotFound: - LOG.debug('Service %s not found.', svc_name) - return - except k_exc.K8sClientException: - LOG.exception('Error retrieving Service %s.', svc_name) - raise - - project_id = self._drv_svc_project.get_project(service) - lb_sgs = self._drv_sg.get_security_groups(service, project_id) - lb['security_groups'] = lb_sgs - - try: - self.k8s.patch_crd('status/loadbalancer', - utils.get_res_link(klb_crd), - {'security_groups': lb_sgs}) - except k_exc.K8sResourceNotFound: - LOG.debug('KuryrLoadBalancer %s not found', svc_name) - return None - except k_exc.K8sUnprocessableEntity: - LOG.debug('KuryrLoadBalancer entity not processable ' - 'due to missing loadbalancer field.') - return None - except k_exc.K8sClientException as e: - msg = (f'K8s API error when updating SGs status of ' - f'{utils.get_res_unique_name(klb_crd)} Service load ' - f'balancer') - LOG.exception(msg) - self._add_event(klb_crd, 'KuryrUpdateLBStatusError', - f'{msg}: {e}', 'Warning') - raise - return klb_crd - - def _add_new_members(self, loadbalancer_crd): - changed = False - - if loadbalancer_crd['status'].get('loadbalancer'): - loadbalancer_crd = self._sync_lbaas_sgs(loadbalancer_crd) - if not loadbalancer_crd: - return changed - - lsnr_by_id = {l['id']: l for l in loadbalancer_crd['status'].get( - 'listeners', [])} - pool_by_lsnr_port = {(lsnr_by_id[p['listener_id']]['protocol'], - lsnr_by_id[p['listener_id']]['port']): p - for p in loadbalancer_crd['status'].get( - 'pools', [])} - - # NOTE(yboaron): Since LBaaSv2 doesn't support UDP load balancing, - # the LBaaS driver will return 'None' in case of UDP port - # listener creation. 
- # we should consider the case in which - # 'pool_by_lsnr_port[p.protocol, p.port]' is missing - pool_by_tgt_name = {} - for p in loadbalancer_crd['spec'].get('ports', []): - try: - pool_by_tgt_name[p['name']] = pool_by_lsnr_port[p['protocol'], - p['port']] - except KeyError: - continue - - current_targets = [(str(m['ip']), m['port'], m['pool_id']) - for m in loadbalancer_crd['status'].get( - 'members', [])] - - for ep_slice in loadbalancer_crd['spec']['endpointSlices']: - ep_slices_ports = ep_slice.get('ports', []) - for endpoint in ep_slice.get('endpoints', []): - try: - target_ip = endpoint['addresses'][0] - target_ref = endpoint.get('targetRef') - target_namespace = None - if target_ref: - target_namespace = target_ref['namespace'] - # Avoid to point to a Pod on hostNetwork - # that isn't the one to be added as Member. - if not target_ref and utils.get_subnet_by_ip( - self._get_nodes_subnets(), - target_ip): - target_pod = {} - else: - target_pod = utils.get_pod_by_ip( - target_ip, target_namespace) - except KeyError: - continue - if not pool_by_tgt_name: - continue - for ep_slice_port in ep_slices_ports: - target_port = ep_slice_port['port'] - port_name = ep_slice_port.get('name') - try: - pool = pool_by_tgt_name[port_name] - except KeyError: - LOG.debug("No pool found for port: %r", port_name) - continue - - if (target_ip, target_port, pool['id']) in current_targets: - continue - - member_subnet_id = self._get_subnet_by_octavia_mode( - target_pod, target_ip, loadbalancer_crd) - - if not member_subnet_id: - msg = ( - f'Unable to determine ID of the subnet of member ' - f'{target_ip} for service ' - f'{utils.get_res_unique_name(loadbalancer_crd)}. ' - f'Skipping its creation') - self._add_event(loadbalancer_crd, 'KuryrSkipMember', - msg, 'Warning') - LOG.warning(msg) - continue - - target_name, target_namespace = self._get_target_info( - target_ref, loadbalancer_crd) - - first_member_of_the_pool = True - for member in loadbalancer_crd['status'].get( - 'members', []): - if pool['id'] == member['pool_id']: - first_member_of_the_pool = False - break - if first_member_of_the_pool: - listener_port = lsnr_by_id[pool['listener_id']][ - 'port'] - else: - listener_port = None - loadbalancer = loadbalancer_crd['status']['loadbalancer'] - member = self._drv_lbaas.ensure_member( - loadbalancer=loadbalancer, - pool=pool, - subnet_id=member_subnet_id, - ip=target_ip, - port=target_port, - target_ref_namespace=target_namespace, - target_ref_name=target_name, - listener_port=listener_port) - if not member: - continue - members = loadbalancer_crd['status'].get('members', []) - if members: - loadbalancer_crd['status'].get('members', []).append( - member) - else: - loadbalancer_crd['status']['members'] = [] - loadbalancer_crd['status'].get('members', []).append( - member) - if not self._patch_status(loadbalancer_crd): - return False - changed = True - return changed - - def _get_target_info(self, target_ref, loadbalancer_crd): - if target_ref: - target_namespace = target_ref['namespace'] - target_name = target_ref['name'] - else: - target_namespace = loadbalancer_crd['metadata']['namespace'] - target_name = loadbalancer_crd['metadata']['name'] - return target_name, target_namespace - - def _get_subnet_by_octavia_mode(self, target_pod, target_ip, lb_crd): - # TODO(apuimedo): Do not pass subnet_id at all when in - # L3 mode once old neutron-lbaasv2 is not supported, as - # octavia does not require it - subnet_id = None - if (CONF.octavia_defaults.member_mode == - k_const.OCTAVIA_L2_MEMBER_MODE): - if 
target_pod: - subnet_id = self._get_pod_subnet(target_pod, target_ip) - else: - subnet = utils.get_subnet_by_ip( - self._get_nodes_subnets(), target_ip) - if subnet: - subnet_id = subnet[0] - else: - # We use the service subnet id so that the connectivity - # from VIP to pods happens in layer 3 mode, i.e., - # routed. - subnet_id = lb_crd['status']['loadbalancer']['subnet_id'] - return subnet_id - - def _get_pod_subnet(self, pod, ip): - project_id = self._drv_pod_project.get_project(pod) - - subnet_ids = [] - if not utils.is_host_network(pod): - subnets_map = self._drv_pod_subnets.get_subnets(pod, project_id) - subnet_ids = [subnet_id - for subnet_id, network in subnets_map.items() - for subnet in network.subnets.objects - if ip in subnet.cidr] - if subnet_ids: - return subnet_ids[0] - else: - # NOTE(ltomasbo): We are assuming that if IP is not on the - # pod subnet it's because the member is using hostNetworking. In - # this case we look for the IP in worker_nodes_subnets. - subnet = utils.get_subnet_by_ip(self._get_nodes_subnets(), ip) - if subnet: - return subnet[0] - else: - # This shouldn't ever happen but let's return just the first - # worker_nodes_subnet id. - return self._get_nodes_subnets()[0][0] - - def _get_port_in_pool(self, pool, loadbalancer_crd): - - for l in loadbalancer_crd['status']['listeners']: - if l['id'] != pool['listener_id']: - continue - for port in loadbalancer_crd['spec'].get('ports', []): - if l.get('port') == port.get( - 'port') and l.get('protocol') == port.get('protocol'): - return port - return None - - def _remove_unused_members(self, loadbalancer_crd): - lb_crd_name = loadbalancer_crd['metadata']['name'] - spec_ports = {} - pools = loadbalancer_crd['status'].get('pools', []) - for pool in pools: - port = self._get_port_in_pool(pool, loadbalancer_crd) - if port: - if not port.get('name'): - port['name'] = None - spec_ports[port['name']] = pool['id'] - - ep_slices = loadbalancer_crd['spec'].get('endpointSlices', []) - current_targets = [utils.get_current_endpoints_target( - ep, p, spec_ports, lb_crd_name) - for ep_slice in ep_slices - for ep in ep_slice['endpoints'] - for p in ep_slice['ports'] - if p.get('name') in spec_ports] - - removed_ids = set() - for member in loadbalancer_crd['status'].get('members', []): - member_name = member.get('name', '') - try: - # NOTE: The member name is compose of: - # NAMESPACE_NAME/POD_NAME:PROTOCOL_PORT - pod_name = member_name.split('/')[1].split(':')[0] - except AttributeError: - pod_name = "" - - if ((str(member['ip']), pod_name, member['port'], member[ - 'pool_id']) in current_targets): - continue - - self._drv_lbaas.release_member(loadbalancer_crd['status'][ - 'loadbalancer'], member) - removed_ids.add(member['id']) - - if removed_ids: - members = [m for m in loadbalancer_crd['status'].get('members', []) - if m['id'] not in removed_ids] - loadbalancer_crd['status']['members'] = members - - if not self._patch_status(loadbalancer_crd): - return False - return bool(removed_ids) - - def _sync_lbaas_pools(self, loadbalancer_crd): - changed = False - - if self._remove_unused_pools(loadbalancer_crd): - changed = True - - if self._sync_lbaas_listeners(loadbalancer_crd): - changed = True - - if self._add_new_pools(loadbalancer_crd): - changed = True - - return changed - - def _add_new_pools(self, loadbalancer_crd): - changed = False - - current_listeners_ids = {pool['listener_id'] - for pool in loadbalancer_crd['status'].get( - 'pools', [])} - for listener in loadbalancer_crd['status'].get('listeners', []): - if 
listener['id'] in current_listeners_ids: - continue - pool = self._drv_lbaas.ensure_pool(loadbalancer_crd['status'][ - 'loadbalancer'], listener) - if not pool: - continue - pools = loadbalancer_crd['status'].get('pools', []) - if pools: - loadbalancer_crd['status'].get('pools', []).append( - pool) - else: - loadbalancer_crd['status']['pools'] = [] - loadbalancer_crd['status'].get('pools', []).append( - pool) - - if not self._patch_status(loadbalancer_crd): - return False - changed = True - return changed - - def _is_pool_in_spec(self, pool, loadbalancer_crd): - # NOTE(yboaron): in order to check if a specific pool is in lbaas_spec - # we should: - # 1. get the listener that pool is attached to - # 2. check if listener's attributes appear in lbaas_spec. - for l in loadbalancer_crd['status']['listeners']: - if l['id'] != pool['listener_id']: - continue - for port in loadbalancer_crd['spec'].get('ports', []): - if l['port'] == port['port'] and l['protocol'] == port[ - 'protocol']: - return True - return False - - def _remove_unused_pools(self, loadbalancer_crd): - removed_ids = set() - - for pool in loadbalancer_crd['status'].get('pools', []): - if self._is_pool_in_spec(pool, loadbalancer_crd): - continue - self._drv_lbaas.release_pool(loadbalancer_crd['status'][ - 'loadbalancer'], pool) - removed_ids.add(pool['id']) - if removed_ids: - loadbalancer_crd['status']['pools'] = [p for p in loadbalancer_crd[ - 'status'].get('pools', []) if p['id'] not in removed_ids] - loadbalancer_crd['status']['members'] = [m for m in - loadbalancer_crd[ - 'status'].get( - 'members', []) - if m['pool_id'] not in - removed_ids] - - if not self._patch_status(loadbalancer_crd): - return False - return bool(removed_ids) - - def _sync_lbaas_listeners(self, loadbalancer_crd): - changed = False - - if self._remove_unused_listeners(loadbalancer_crd): - changed = True - - if self._sync_lbaas_loadbalancer(loadbalancer_crd): - changed = True - - if self._add_new_listeners(loadbalancer_crd): - changed = True - - return changed - - def _add_new_listeners(self, loadbalancer_crd): - changed = False - lb_crd_spec_ports = loadbalancer_crd['spec'].get('ports') - spec_t_cli = loadbalancer_crd['spec'].get('timeout_client_data', 0) - spec_t_mb = loadbalancer_crd['spec'].get('timeout_member_data', 0) - if not lb_crd_spec_ports: - return changed - lbaas_spec_ports = sorted(lb_crd_spec_ports, - key=lambda x: x['protocol']) - for port_spec in lbaas_spec_ports: - protocol = port_spec['protocol'] - port = port_spec['port'] - - listener = [] - for l in loadbalancer_crd['status'].get('listeners', []): - timeout_cli = l.get('timeout_client_data', 0) - timeout_mb = l.get('timeout_member_data', 0) - if l['port'] == port and l['protocol'] == protocol: - if timeout_cli == spec_t_cli and timeout_mb == spec_t_mb: - listener.append(l) - - if listener: - continue - # FIXME (maysams): Due to a bug in Octavia, which does - # not allows listeners with same port but different - # protocols to co-exist, we need to skip the creation of - # listeners that have the same port as an existing one. - listener = [l for l in loadbalancer_crd['status'].get( - 'listeners', []) if l['port'] == port] - - if listener and not self._drv_lbaas.double_listeners_supported(): - msg = ( - f'Octavia does not support multiple listeners listening ' - f'on the same port. 
Skipping creation of listener ' - f'{protocol}:{port} because {listener[0]["protocol"]}:' - f'{listener[0]["port"]} already exists for Service ' - f'{utils.get_res_unique_name(loadbalancer_crd)}') - self._add_event(loadbalancer_crd, 'KuryrSkipListener', msg, - 'Warning') - LOG.warning(msg) - continue - if protocol == "SCTP" and not self._drv_lbaas.sctp_supported(): - msg = ( - f'Skipping listener {protocol}:{port} creation as Octavia ' - f'does not support {protocol} protocol.') - self._add_event(loadbalancer_crd, 'KuryrSkipListener', msg, - 'Warning') - LOG.warning(msg) - continue - listener = self._drv_lbaas.ensure_listener( - loadbalancer=loadbalancer_crd['status'].get('loadbalancer'), - protocol=protocol, - port=port, - service_type=loadbalancer_crd['spec'].get('type'), - timeout_client_data=spec_t_cli, - timeout_member_data=spec_t_mb) - - if listener is not None: - listeners = loadbalancer_crd['status'].get('listeners', []) - if listeners: - for pre_listener in listeners: - if pre_listener['id'] == listener['id']: - listeners.remove(pre_listener) - listeners.append(listener) - else: - loadbalancer_crd['status']['listeners'] = [] - loadbalancer_crd['status'].get('listeners', []).append( - listener) - - if not self._patch_status(loadbalancer_crd): - return False - changed = True - return changed - - def _remove_unused_listeners(self, loadbalancer_crd): - current_listeners = {p['listener_id'] for p in loadbalancer_crd[ - 'status'].get('pools', [])} - removed_ids = set() - for listener in loadbalancer_crd['status'].get('listeners', []): - if listener['id'] in current_listeners: - continue - self._drv_lbaas.release_listener(loadbalancer_crd['status'][ - 'loadbalancer'], listener) - removed_ids.add(listener['id']) - if removed_ids: - loadbalancer_crd['status']['listeners'] = [ - l for l in loadbalancer_crd['status'].get('listeners', - []) if l['id'] - not in removed_ids] - - if not self._patch_status(loadbalancer_crd): - return False - return bool(removed_ids) - - def _update_lb_status(self, lb_crd): - lb_crd_status = lb_crd['status'] - lb_ip_address = lb_crd_status['service_pub_ip_info']['ip_addr'] - name = lb_crd['metadata']['name'] - ns = lb_crd['metadata']['namespace'] - status_data = {"loadBalancer": { - "ingress": [{"ip": lb_ip_address.format()}]}} - try: - self.k8s.patch("status", f"{k_const.K8S_API_NAMESPACES}" - f"/{ns}/services/{name}/status", - status_data) - except k_exc.K8sConflict: - raise k_exc.ResourceNotReady(name) - except k_exc.K8sClientException as e: - msg = (f'K8s API error when updating external FIP data of Service ' - f'{utils.get_res_unique_name(lb_crd)}') - LOG.exception(msg) - self._add_event(lb_crd, 'KuryrUpdateServiceStatusError', - f'{msg}: {e}', 'Warning') - raise - - def _sync_lbaas_loadbalancer(self, loadbalancer_crd): - lb = loadbalancer_crd['status'].get('loadbalancer') - - if lb and lb['ip'] != loadbalancer_crd['spec'].get('ip'): - # If loadbalancerIP was associated to the LBaaS VIP, disassociate it.
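The branch that follows tears the load balancer down whenever the VIP recorded in the CRD status no longer matches the spec IP. A minimal predicate capturing that trigger condition; the data shapes are illustrative:

```python
# Illustrative-only check mirroring the VIP-change condition above.
def lb_needs_recreate(crd):
    lb = crd.get('status', {}).get('loadbalancer')
    return bool(lb) and lb.get('ip') != crd.get('spec', {}).get('ip')


crd = {'spec': {'ip': '10.0.0.10'},
       'status': {'loadbalancer': {'ip': '10.0.0.99'}}}
assert lb_needs_recreate(crd)  # VIP drifted, LB must be released/rebuilt
```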
- - try: - pub_info = loadbalancer_crd['status']['service_pub_ip_info'] - except KeyError: - pub_info = None - - if pub_info: - self._drv_service_pub_ip.disassociate_pub_ip( - loadbalancer_crd['status']['service_pub_ip_info']) - self._drv_service_pub_ip.release_pub_ip( - loadbalancer_crd['status']['service_pub_ip_info']) - - self._drv_lbaas.release_loadbalancer( - loadbalancer=lb) - - lb = {} - loadbalancer_crd['status'] = {} - - if not lb: - if loadbalancer_crd['spec'].get('ip'): - lb_name = self._drv_lbaas.get_service_loadbalancer_name( - loadbalancer_crd['metadata']['namespace'], - loadbalancer_crd['metadata']['name']) - lb = self._drv_lbaas.ensure_loadbalancer( - name=lb_name, - project_id=loadbalancer_crd['spec'].get('project_id'), - subnet_id=loadbalancer_crd['spec'].get('subnet_id'), - ip=loadbalancer_crd['spec'].get('ip'), - security_groups_ids=loadbalancer_crd['spec'].get( - 'security_groups_ids'), - service_type=loadbalancer_crd['spec'].get('type'), - provider=loadbalancer_crd['spec'].get('provider')) - loadbalancer_crd['status']['loadbalancer'] = lb - - return self._patch_status(loadbalancer_crd) - return False - - def _ensure_release_lbaas(self, loadbalancer_crd): - self._drv_lbaas.release_loadbalancer( - loadbalancer_crd['status'].get('loadbalancer')) - utils.clean_lb_crd_status( - utils.get_res_unique_name(loadbalancer_crd)) - # NOTE(ltomasbo): give some extra time to ensure the Load - # Balancer VIP is also released - time.sleep(1) diff --git a/kuryr_kubernetes/controller/handlers/machine.py b/kuryr_kubernetes/controller/handlers/machine.py deleted file mode 100644 index 1c4ffa9d1..000000000 --- a/kuryr_kubernetes/controller/handlers/machine.py +++ /dev/null @@ -1,67 +0,0 @@ -# Copyright 2020 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import uuid - -from oslo_log import log as logging - -from kuryr_kubernetes import clients -from kuryr_kubernetes import constants -from kuryr_kubernetes.controller.drivers import base as drivers -from kuryr_kubernetes import exceptions -from kuryr_kubernetes.handlers import k8s_base - -LOG = logging.getLogger(__name__) - - -class MachineHandler(k8s_base.ResourceEventHandler): - """MachineHandler gathers info about OpenShift nodes needed by Kuryr. - - At the moment that's the subnets of all the worker nodes. - """ - OBJECT_KIND = constants.OPENSHIFT_OBJ_MACHINE - OBJECT_WATCH_PATH = constants.OPENSHIFT_API_CRD_MACHINES - - def __init__(self): - super(MachineHandler, self).__init__() - self.node_subnets_driver = drivers.NodesSubnetsDriver.get_instance() - - def _bump_nps(self): - """Bump NetworkPolicy objects to have the SG rules recalculated.""" - k8s = clients.get_kubernetes_client() - # NOTE(dulek): Listing KuryrNetworkPolicies instead of NetworkPolicies, - # as we only care about NPs already handled. 
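The UUID annotation written below is a generic "bump" trick: touching an annotation changes the object's resourceVersion, so the watch emits a MODIFIED event and the policy is re-handled. A minimal sketch of the pattern, with the client call and the annotation key as stand-ins for the real ones:

```python
import uuid


# 'annotate' and the key are hypothetical stand-ins for the K8s client
# call and constants.K8S_ANNOTATION_POLICY used in the handler above.
def bump(annotate, resource_link, key='kuryr-policy-bump'):
    # A fresh UUID guarantees the annotation value actually changes.
    annotate(resource_link, {key: str(uuid.uuid4())})


bump(lambda link, ann: print(link, ann), 'networkpolicies/np-1')
```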
- knps = k8s.get(constants.K8S_API_CRD_KURYRNETWORKPOLICIES) - for knp in knps.get('items', []): - try: - k8s.annotate( - knp['metadata']['annotations']['networkPolicyLink'], - {constants.K8S_ANNOTATION_POLICY: str(uuid.uuid4())}) - except exceptions.K8sResourceNotFound: - # Had to be deleted in the meanwhile. - pass - - def on_present(self, machine, *args, **kwargs): - effect = self.node_subnets_driver.add_node(machine) - if effect: - # If the change was meaningful we need to make sure all the NPs - # are recalculated to get the new SG rules added. - self._bump_nps() - - def on_deleted(self, machine, *args, **kwargs): - effect = self.node_subnets_driver.delete_node(machine) - if effect: - # If the change was meaningful we need to make sure all the NPs - # are recalculated to get the old SG rule deleted. - self._bump_nps() diff --git a/kuryr_kubernetes/controller/handlers/namespace.py b/kuryr_kubernetes/controller/handlers/namespace.py deleted file mode 100644 index 455cda5f7..000000000 --- a/kuryr_kubernetes/controller/handlers/namespace.py +++ /dev/null @@ -1,147 +0,0 @@ -# Copyright 2018 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from oslo_log import log as logging - -from kuryr_kubernetes import clients -from kuryr_kubernetes import constants -from kuryr_kubernetes.controller.drivers import base as drivers -from kuryr_kubernetes import exceptions -from kuryr_kubernetes.handlers import k8s_base -from kuryr_kubernetes import utils - - -LOG = logging.getLogger(__name__) - - -class NamespaceHandler(k8s_base.ResourceEventHandler): - OBJECT_KIND = constants.K8S_OBJ_NAMESPACE - OBJECT_WATCH_PATH = constants.K8S_API_NAMESPACES - - def __init__(self): - super(NamespaceHandler, self).__init__() - self._drv_project = drivers.NamespaceProjectDriver.get_instance() - - def on_present(self, namespace, *args, **kwargs): - ns_labels = namespace['metadata'].get('labels', {}) - ns_name = namespace['metadata']['name'] - - kns_crd = self._get_kns_crd(ns_name) - if kns_crd: - LOG.debug("Previous CRD existing at the new namespace.") - self._update_labels(kns_crd, ns_labels) - return - - if not self._handle_namespace(ns_name): - LOG.debug("Namespace %s has no Pods that should be handled. " - "Skipping event.", ns_name) - return - - try: - self._add_kuryrnetwork_crd(namespace, ns_labels) - except exceptions.K8sClientException: - LOG.exception("Kuryrnetwork CRD creation failed.") - raise exceptions.ResourceNotReady(namespace) - - def _handle_namespace(self, namespace): - """Evaluate if the Namespace should be handled - - Fetches all the Pods in the Namespace and check - if there is any Pod in that Namespace on Pods Network. - - :param namespace: Namespace name - :returns: True if the Namespace resources should be - created, False if otherwise. 
- """ - kubernetes = clients.get_kubernetes_client() - pods = kubernetes.get( - '{}/namespaces/{}/pods'.format( - constants.K8S_API_BASE, namespace)) - return any(not utils.is_host_network(pod) - for pod in pods.get('items', [])) - - def _update_labels(self, kns_crd, ns_labels): - kns_status = kns_crd.get('status') - if kns_status: - kns_crd_labels = kns_crd['status'].get('nsLabels', {}) - if kns_crd_labels == ns_labels: - # Labels are already up to date, nothing to do - return - - kubernetes = clients.get_kubernetes_client() - LOG.debug('Patching KuryrNetwork CRD %s', kns_crd) - try: - kubernetes.patch_crd('spec', utils.get_res_link(kns_crd), - {'nsLabels': ns_labels}) - except exceptions.K8sResourceNotFound: - LOG.debug('KuryrNetwork CRD not found %s', kns_crd) - except exceptions.K8sClientException: - LOG.exception('Error updating kuryrnetwork CRD %s', kns_crd) - raise - - def _get_kns_crd(self, namespace): - k8s = clients.get_kubernetes_client() - try: - kuryrnetwork_crd = k8s.get('{}/{}/kuryrnetworks/{}'.format( - constants.K8S_API_CRD_NAMESPACES, namespace, - namespace)) - except exceptions.K8sResourceNotFound: - return None - except exceptions.K8sClientException: - LOG.exception("Kubernetes Client Exception.") - raise - return kuryrnetwork_crd - - def _add_kuryrnetwork_crd(self, namespace, ns_labels): - ns_name = namespace['metadata']['name'] - project_id = self._drv_project.get_project(namespace) - kubernetes = clients.get_kubernetes_client() - - kns_crd = { - 'apiVersion': 'openstack.org/v1', - 'kind': 'KuryrNetwork', - 'metadata': { - 'name': ns_name, - 'finalizers': [constants.KURYRNETWORK_FINALIZER], - }, - 'spec': { - 'nsName': ns_name, - 'projectId': project_id, - 'nsLabels': ns_labels, - } - } - try: - kubernetes.post('{}/{}/kuryrnetworks'.format( - constants.K8S_API_CRD_NAMESPACES, ns_name), kns_crd) - except exceptions.K8sClientException: - LOG.exception("Kubernetes Client Exception creating kuryrnetwork " - "CRD.") - raise - - def is_ready(self, quota): - if not (utils.has_kuryr_crd(constants.K8S_API_CRD_KURYRNETWORKS) and - self._check_quota(quota)): - LOG.error('Marking NamespaceHandler as not ready.') - return False - return True - - def _check_quota(self, quota): - resources = ('subnets', 'networks', 'security_groups') - - for resource in resources: - resource_quota = quota[resource] - if utils.has_limit(resource_quota): - if not utils.is_available(resource, resource_quota): - return False - return True diff --git a/kuryr_kubernetes/controller/handlers/pipeline.py b/kuryr_kubernetes/controller/handlers/pipeline.py deleted file mode 100644 index e9cebf9b0..000000000 --- a/kuryr_kubernetes/controller/handlers/pipeline.py +++ /dev/null @@ -1,72 +0,0 @@ -# Copyright (c) 2016 Mirantis, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from requests import exceptions as requests_exc - -from keystoneauth1 import exceptions as key_exc - -from kuryr_kubernetes import exceptions -from kuryr_kubernetes.handlers import asynchronous as h_async -from kuryr_kubernetes.handlers import dispatch as h_dis -from kuryr_kubernetes.handlers import k8s_base as h_k8s -from kuryr_kubernetes.handlers import logging as h_log -from kuryr_kubernetes.handlers import retry as h_retry - - -class ControllerPipeline(h_dis.EventPipeline): - """Serves as an entry point for controller Kubernetes events. - - `ControllerPipeline` is an entry point handler for the Kuryr-Kubernetes - controller. `ControllerPipeline` allows registering - :class:`kuryr_kubernetes.handlers.k8s_base.ResourceEventHandler`s and - ensures the proper handler is called for each event that is passed to the - `ControllerPipeline`. Also it ensures the following behavior: - - - multiple `ResourceEventHandler`s can be registered for the same - resource type (`OBJECT_KIND`) - - - failing handlers (i.e. ones that raise `Exception`s) are retried - until either the handler succeeds or a finite amount of time passes, - in which case the most recent exception is logged - - - in case there are multiple handlers registered for the same resource - type, all such handlers are considered independent (i.e. if one - handler fails, other handlers will still be called regardless; and the - order in which such handlers are called is not determined) - - - events for different Kubernetes objects can be handled concurrently - - - events for the same Kubernetes object are handled sequentially in - the order of arrival - """ - - def __init__(self, thread_group): - self._tg = thread_group - super(ControllerPipeline, self).__init__() - - def _wrap_consumer(self, consumer): - # TODO(ivc): tune retry interval/timeout - return h_log.LogExceptions( - h_retry.Retry( - consumer, - exceptions=(exceptions.ResourceNotReady, - key_exc.connection.ConnectFailure, - requests_exc.ConnectionError)), - ignore_exceptions=(exceptions.KuryrLoadBalancerNotCreated,)) - - def _wrap_dispatcher(self, dispatcher): - return h_log.LogExceptions(h_async.Async(dispatcher, self._tg, - h_k8s.object_uid, - h_k8s.object_info)) diff --git a/kuryr_kubernetes/controller/handlers/pod_label.py b/kuryr_kubernetes/controller/handlers/pod_label.py deleted file mode 100644 index b2d012e8c..000000000 --- a/kuryr_kubernetes/controller/handlers/pod_label.py +++ /dev/null @@ -1,124 +0,0 @@ -# Copyright 2018 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
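The _wrap_consumer chain above composes a retry layer inside a logging layer around each registered handler. A simplified, self-contained sketch of that composition; exception types, backoff, and attempt count are stand-ins for the real Retry semantics:

```python
import time


def with_retry(handler, retryable=(RuntimeError,), attempts=3, delay=1):
    # Retry the handler on designated exceptions, re-raising the last one.
    def wrapped(event):
        for attempt in range(attempts):
            try:
                return handler(event)
            except retryable:
                if attempt == attempts - 1:
                    raise
                time.sleep(delay)
    return wrapped


def with_logging(handler):
    # Log and re-raise anything that escapes the retry layer.
    def wrapped(event):
        try:
            return handler(event)
        except Exception:
            print('handler failed for %r' % (event,))  # LOG.exception(...)
            raise
    return wrapped


pipeline_consumer = with_logging(with_retry(lambda ev: None))
pipeline_consumer({'kind': 'Pod'})
```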
- -from oslo_config import cfg as oslo_cfg -from oslo_log import log as logging -from oslo_serialization import jsonutils - -from kuryr_kubernetes import clients -from kuryr_kubernetes import constants -from kuryr_kubernetes.controller.drivers import base as drivers -from kuryr_kubernetes.controller.drivers import utils as driver_utils -from kuryr_kubernetes import exceptions as k_exc -from kuryr_kubernetes.handlers import k8s_base -from kuryr_kubernetes import utils - -LOG = logging.getLogger(__name__) - - -class PodLabelHandler(k8s_base.ResourceEventHandler): - """Controller side of Pod Label process for Kubernetes pods. - - `PodLabelHandler` runs on the Kuryr-Kubernetes controller and is - responsible for triggering the vif port updates upon pod labels changes. - """ - - OBJECT_KIND = constants.K8S_OBJ_POD - OBJECT_WATCH_PATH = "%s/%s" % (constants.K8S_API_BASE, "pods") - - def __init__(self): - super(PodLabelHandler, self).__init__() - self._drv_project = drivers.PodProjectDriver.get_instance() - self._drv_sg = drivers.PodSecurityGroupsDriver.get_instance() - self._drv_svc_sg = drivers.ServiceSecurityGroupsDriver.get_instance() - self._drv_vif_pool = drivers.VIFPoolDriver.get_instance( - specific_driver='multi_pool') - self._drv_vif_pool.set_vif_driver() - self._drv_lbaas = drivers.LBaaSDriver.get_instance() - - def on_present(self, pod, *args, **kwargs): - if utils.is_host_network(pod) or not self._has_vifs(pod): - # NOTE(ltomasbo): The event will be retried once the vif handler - # annotates the pod with the pod state. - return - - current_pod_info = (pod['metadata'].get('labels'), - pod['status'].get('podIP')) - previous_pod_info = self._get_pod_info(pod) - LOG.debug("Got previous pod info from annotation: %r", - previous_pod_info) - - if current_pod_info == previous_pod_info: - return - - # FIXME(dulek): We should be able to just do create if only podIP - # changed, right? 
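The comparison above hinges on the stored labels annotation being byte-stable. A tiny illustration of why the labels dict is serialized with sort_keys=True before being compared; plain json stands in for oslo_serialization.jsonutils:

```python
import json

# Key order in Python dicts is insertion order, so without sort_keys the
# same labels could serialize to different strings and look "changed".
labels = {'app': 'web', 'tier': 'frontend'}
stored = json.dumps(labels, sort_keys=True)
assert json.loads(stored) == labels
assert json.dumps({'tier': 'frontend', 'app': 'web'},
                  sort_keys=True) == stored
```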
- crd_pod_selectors = self._drv_sg.update_sg_rules(pod) - - project_id = self._drv_project.get_project(pod) - security_groups = self._drv_sg.get_security_groups(pod, project_id) - self._drv_vif_pool.update_vif_sgs(pod, security_groups) - try: - self._set_pod_info(pod, current_pod_info) - except k_exc.K8sResourceNotFound: - LOG.debug("Pod already deleted, no need to retry.") - return - - if oslo_cfg.CONF.octavia_defaults.enforce_sg_rules: - services = driver_utils.get_services() - self._update_services(services, crd_pod_selectors, project_id) - - def _get_pod_info(self, pod): - try: - annotations = pod['metadata']['annotations'] - pod_labels_annotation = annotations[constants.K8S_ANNOTATION_LABEL] - pod_ip_annotation = annotations[constants.K8S_ANNOTATION_IP] - except KeyError: - return None, None - pod_labels = jsonutils.loads(pod_labels_annotation) - return pod_labels, pod_ip_annotation - - def _set_pod_info(self, pod, info): - if not info[0]: - LOG.debug("Removing info annotations: %r", info) - annotation = None, info[1] - else: - annotation = jsonutils.dumps(info[0], sort_keys=True), info[1] - LOG.debug("Setting info annotations: %r", annotation) - - k8s = clients.get_kubernetes_client() - k8s.annotate(utils.get_res_link(pod), - { - constants.K8S_ANNOTATION_LABEL: annotation[0], - constants.K8S_ANNOTATION_IP: annotation[1] - }, - resource_version=pod['metadata']['resourceVersion']) - - def _has_vifs(self, pod): - try: - kp = driver_utils.get_kuryrport(pod) - cr_vifs = driver_utils.get_vifs(kp) - vifs = cr_vifs['status']['vifs'] - LOG.debug("Pod have associated KuryrPort with vifs: %s", vifs) - except KeyError: - return False - return True - - def _update_services(self, services, crd_pod_selectors, project_id): - for service in services.get('items'): - if not driver_utils.service_matches_affected_pods( - service, crd_pod_selectors): - continue - sgs = self._drv_svc_sg.get_security_groups(service, project_id) - self._drv_lbaas.update_lbaas_sg(service, sgs) diff --git a/kuryr_kubernetes/controller/handlers/policy.py b/kuryr_kubernetes/controller/handlers/policy.py deleted file mode 100644 index d26ab3ff2..000000000 --- a/kuryr_kubernetes/controller/handlers/policy.py +++ /dev/null @@ -1,61 +0,0 @@ -# Copyright 2018 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
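_update_services above fans a pod-label change out to every Service whose selector matches the affected pods, recomputing each Service's security groups. A minimal sketch with all collaborators passed in as stand-ins:

```python
# All callables are hypothetical stand-ins: matches() plays the role of
# driver_utils.service_matches_affected_pods(), get_sgs() the SG driver
# and update_sg() the LBaaS driver call.
def update_services(services, affected_selectors, matches, get_sgs,
                    update_sg):
    for service in services:
        if matches(service, affected_selectors):
            update_sg(service, get_sgs(service))


update_services(
    services=[{'metadata': {'name': 'web'}}],
    affected_selectors=[{'app': 'web'}],
    matches=lambda svc, sels: True,
    get_sgs=lambda svc: ['sg-uuid-1'],
    update_sg=lambda svc, sgs: print(svc['metadata']['name'], sgs))
```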
- -from oslo_log import log as logging - -from kuryr_kubernetes import clients -from kuryr_kubernetes import constants as k_const -from kuryr_kubernetes.controller.drivers import base as drivers -from kuryr_kubernetes.handlers import k8s_base -from kuryr_kubernetes import utils - -LOG = logging.getLogger(__name__) - - -class NetworkPolicyHandler(k8s_base.ResourceEventHandler): - """NetworkPolicyHandler handles k8s Network Policies events""" - - OBJECT_KIND = k_const.K8S_OBJ_POLICY - OBJECT_WATCH_PATH = k_const.K8S_API_POLICIES - - def __init__(self): - super(NetworkPolicyHandler, self).__init__() - self._drv_policy = drivers.NetworkPolicyDriver.get_instance() - self.k8s = clients.get_kubernetes_client() - - def on_present(self, policy, *args, **kwargs): - LOG.debug("Created or updated: %s", policy) - - self._drv_policy.ensure_network_policy(policy) - - # Put finalizer in if it's not there already. - self.k8s.add_finalizer(policy, k_const.NETWORKPOLICY_FINALIZER) - - def on_finalize(self, policy, *args, **kwargs): - LOG.debug("Finalizing policy %s", policy) - if not self._drv_policy.release_network_policy(policy): - # KNP was not found, so we need to finalize on our own. - self.k8s.remove_finalizer(policy, k_const.NETWORKPOLICY_FINALIZER) - - def is_ready(self, quota): - if not (utils.has_kuryr_crd(k_const.K8S_API_CRD_KURYRNETWORKPOLICIES) - and self._check_quota(quota)): - LOG.error("Marking NetworkPolicyHandler as not ready.") - return False - return True - - def _check_quota(self, quota): - if utils.has_limit(quota.security_groups): - return utils.is_available('security_groups', quota.security_groups) - return True diff --git a/kuryr_kubernetes/controller/handlers/vif.py b/kuryr_kubernetes/controller/handlers/vif.py deleted file mode 100644 index 3f5705cb4..000000000 --- a/kuryr_kubernetes/controller/handlers/vif.py +++ /dev/null @@ -1,217 +0,0 @@ -# Copyright (c) 2016 Mirantis, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -import uuid - -from oslo_config import cfg -from oslo_log import log as logging - -from kuryr_kubernetes import clients -from kuryr_kubernetes import constants -from kuryr_kubernetes.controller.drivers import utils as driver_utils -from kuryr_kubernetes import exceptions as k_exc -from kuryr_kubernetes.handlers import k8s_base -from kuryr_kubernetes import utils - -CONF = cfg.CONF -LOG = logging.getLogger(__name__) -KURYRPORT_URI = constants.K8S_API_CRD_NAMESPACES + '/{ns}/kuryrports/{crd}' - - -class VIFHandler(k8s_base.ResourceEventHandler): - """Controller side of VIF binding process for Kubernetes pods. - - `VIFHandler` runs on the Kuryr-Kubernetes controller and together with - the CNI driver (that runs on 'kubelet' nodes) is responsible for providing - networking to Kubernetes pods. `VIFHandler` relies on a set of drivers - (which are responsible for managing Neutron resources) to define the VIF - objects and pass them to the CNI driver in form of the Kubernetes pod - annotation. 
- """ - - OBJECT_KIND = constants.K8S_OBJ_POD - OBJECT_WATCH_PATH = "%s/%s" % (constants.K8S_API_BASE, "pods") - - def __init__(self): - super(VIFHandler).__init__() - self.k8s = clients.get_kubernetes_client() - - def on_present(self, pod, *args, **kwargs): - if utils.is_host_network(pod): - return - - pod_name = pod['metadata']['name'] - if utils.is_pod_completed(pod): - LOG.debug("Pod %s has completed execution, " - "removing the vifs", pod_name) - self.on_finalize(pod) - return - - if not self._is_pod_scheduled(pod): - # REVISIT(ivc): consider an additional configurable check that - # would allow skipping pods to enable heterogeneous environments - # where certain pods/namespaces/nodes can be managed by other - # networking solutions/CNI drivers. - return - - namespace = pod['metadata']['namespace'] - kuryrnetwork_path = '{}/{}/kuryrnetworks/{}'.format( - constants.K8S_API_CRD_NAMESPACES, namespace, - namespace) - kuryrnetwork = driver_utils.get_k8s_resource(kuryrnetwork_path) - kuryrnetwork_status = kuryrnetwork.get('status', {}) - if (CONF.kubernetes.pod_subnets_driver == 'namespace' and - (not kuryrnetwork or not kuryrnetwork_status.get('routerId'))): - namespace_path = '{}/{}'.format( - constants.K8S_API_NAMESPACES, namespace) - LOG.debug("Triggering Namespace Handling %s", namespace_path) - try: - self.k8s.annotate(namespace_path, - {'KuryrTrigger': str(uuid.uuid4())}) - except k_exc.K8sResourceNotFound: - LOG.warning('Ignoring Pod handling, no Namespace %s.', - namespace) - return - raise k_exc.ResourceNotReady(pod) - - # NOTE(gryf): Set the finalizer as soon, as we have pod created. On - # subsequent updates of the pod, add_finalizer will ignore this if - # finalizer exist. - try: - if not self.k8s.add_finalizer(pod, constants.POD_FINALIZER): - # NOTE(gryf) It might happen that pod will be deleted even - # before we got here. - return - except k_exc.K8sClientException as ex: - self.k8s.add_event(pod, 'FailedToAddFinalizerToPod', - f'Adding finalizer to pod has failed: {ex}', - 'Warning') - LOG.exception("Failed to add finalizer to pod object: %s", ex) - raise - - kp = driver_utils.get_kuryrport(pod) - LOG.debug("Got KuryrPort: %r", kp) - if not kp: - try: - self._add_kuryrport_crd(pod) - except k_exc.K8sNamespaceTerminating: - # The underlying namespace is being terminated, we can - # ignore this and let `on_finalize` handle this now. - LOG.debug('Namespace %s is being terminated, ignoring Pod ' - '%s in that namespace.', - pod['metadata']['namespace'], pod_name) - return - except k_exc.K8sClientException as ex: - self.k8s.add_event(pod, 'FailedToCreateKuryrPortCRD', - f'Creating corresponding KuryrPort CRD has ' - f'failed: {ex}', 'Warning') - LOG.exception("Kubernetes Client Exception creating " - "KuryrPort CRD: %s", ex) - raise k_exc.ResourceNotReady(pod) - - def on_finalize(self, pod, *args, **kwargs): - - try: - kp = self.k8s.get(KURYRPORT_URI.format( - ns=pod["metadata"]["namespace"], crd=pod["metadata"]["name"])) - except k_exc.K8sResourceNotFound: - try: - self.k8s.remove_finalizer(pod, constants.POD_FINALIZER) - except k_exc.K8sClientException as ex: - self.k8s.add_event(pod, 'FailedRemovingFinalizerFromPod', - f'Removing finalizer from pod has failed: ' - f'{ex}', 'Warning') - LOG.exception('Failed to remove finalizer from pod: %s', ex) - raise - return - - if 'deletionTimestamp' in kp['metadata']: - # NOTE(gryf): Seems like KP was manually removed. By using - # annotations, force an emition of event to trigger on_finalize - # method on the KuryrPort. 
- try:
- self.k8s.annotate(utils.get_res_link(kp),
- {'KuryrTrigger': str(uuid.uuid4())})
- except k_exc.K8sResourceNotFound:
- self.k8s.remove_finalizer(pod, constants.POD_FINALIZER)
- except k_exc.K8sClientException as ex:
- self.k8s.add_event(pod, 'FailedRemovingPodFinalizer',
- f'Failed removing finalizer from pod: {ex}',
- 'Warning')
- raise k_exc.ResourceNotReady(pod['metadata']['name'])
- else:
- try:
- self.k8s.delete(KURYRPORT_URI
- .format(ns=pod["metadata"]["namespace"],
- crd=pod["metadata"]["name"]))
- except k_exc.K8sResourceNotFound:
- self.k8s.remove_finalizer(pod, constants.POD_FINALIZER)
-
- except k_exc.K8sClientException as ex:
- self.k8s.add_event(pod, 'FailedRemovingKuryrPortCRD',
- f'Failed removing corresponding KuryrPort '
- f'CRD: {ex}', 'Warning')
- LOG.exception("Could not remove KuryrPort CRD for pod %s.",
- pod['metadata']['name'])
- raise k_exc.ResourceNotReady(pod['metadata']['name'])
-
- def is_ready(self, quota):
- if (utils.has_limit(quota.ports) and
- not utils.is_available('ports', quota.ports)):
- LOG.error('Marking VIFHandler as not ready.')
- return False
- return True
-
- @staticmethod
- def _is_pod_scheduled(pod):
- """Checks if the Pod is in PENDING status and has a node assigned."""
- try:
- return (pod['spec']['nodeName'] and
- pod['status']['phase'] == constants.K8S_POD_STATUS_PENDING)
- except KeyError:
- return False
-
- def _add_kuryrport_crd(self, pod):
- LOG.debug('Adding CRD %s', pod["metadata"]["name"])
-
- vifs = {}
-
- owner_reference = {'apiVersion': pod['apiVersion'],
- 'kind': pod['kind'],
- 'name': pod['metadata']['name'],
- 'uid': pod['metadata']['uid']}
-
- kuryr_port = {
- 'apiVersion': constants.K8S_API_CRD_VERSION,
- 'kind': constants.K8S_OBJ_KURYRPORT,
- 'metadata': {
- 'name': pod['metadata']['name'],
- 'finalizers': [constants.KURYRPORT_FINALIZER],
- 'labels': {
- constants.KURYRPORT_LABEL: pod['spec']['nodeName']
- },
- 'ownerReferences': [owner_reference]
- },
- 'spec': {
- 'podUid': pod['metadata']['uid'],
- 'podNodeName': pod['spec']['nodeName'],
- 'podStatic': utils.is_pod_static(pod)
- },
- 'status': {
- 'vifs': vifs
- }
- }
-
- self.k8s.post(KURYRPORT_URI.format(ns=pod["metadata"]["namespace"],
- crd=''), kuryr_port)
diff --git a/kuryr_kubernetes/controller/managers/__init__.py b/kuryr_kubernetes/controller/managers/__init__.py
deleted file mode 100644
index e69de29bb..000000000
diff --git a/kuryr_kubernetes/controller/managers/health.py b/kuryr_kubernetes/controller/managers/health.py
deleted file mode 100644
index e8cb79ea3..000000000
--- a/kuryr_kubernetes/controller/managers/health.py
+++ /dev/null
@@ -1,114 +0,0 @@
-# Copyright 2018 Maysa de Macedo Souza.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
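
The HealthServer defined below exposes plain-HTTP readiness and liveness endpoints for Kubernetes probes. A minimal client sketch exercising them, assuming the default port 8082 from `health_server_opts` below and the /ready and /alive routes registered by BaseHealthServer later in this diff:

import requests


def check_controller_health(host='localhost', port=8082):
    # /ready returns 200 only when the Kubernetes API, Keystone and
    # Neutron are reachable and every registered handler reports ready;
    # /alive fails if any handler has recorded a fatal exception.
    for probe in ('ready', 'alive'):
        resp = requests.get(f'http://{host}:{port}/{probe}', timeout=5)
        print(f'/{probe}: {resp.status_code} {resp.text.strip()}')
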
- -from http import client as httplib -import os - -from oslo_config import cfg -from oslo_log import log as logging - -from kuryr.lib._i18n import _ -from kuryr.lib import config as kuryr_config -from kuryr.lib import utils -from kuryr_kubernetes import clients -from kuryr_kubernetes import config -from kuryr_kubernetes.handlers import health as h_health -from kuryr_kubernetes import health as base_server - -LOG = logging.getLogger(__name__) -CONF = cfg.CONF - -health_server_opts = [ - cfg.IntOpt('port', - help=_('port for Health HTTP Server.'), - default=8082), -] - -CONF.register_opts(health_server_opts, "health_server") - - -class HealthServer(base_server.BaseHealthServer): - """Proxy server used by readiness and liveness probes to manage health checks. - - Allows to verify connectivity with Kubernetes API, Keystone and Neutron. - If pool ports functionality is enabled it is verified whether - the precreated ports are loaded into the pools. Also, checks handlers - states. - """ - - def __init__(self): - super().__init__('controller-health', CONF.health_server.port) - self._registry = h_health.HealthRegister.get_instance().registry - - def _components_ready(self): - os_net = clients.get_network_client() - project_id = config.CONF.neutron_defaults.project - quota = os_net.get_quota(quota=project_id, details=True) - - for component in self._registry: - if not component.is_ready(quota): - LOG.debug('Controller component not ready: %s.' % component) - return False - return True - - def readiness_status(self): - if CONF.kubernetes.vif_pool_driver != 'noop': - if not os.path.exists('/tmp/pools_loaded'): - error_message = 'Ports not loaded into the pools.' - LOG.error(error_message) - return error_message, httplib.NOT_FOUND, {} - - k8s_conn = self.verify_k8s_connection() - if not k8s_conn: - error_message = 'Error when processing k8s healthz request.' - LOG.error(error_message) - return error_message, httplib.INTERNAL_SERVER_ERROR, {} - try: - self.verify_keystone_connection() - except Exception as ex: - error_message = ('Error when creating a Keystone session and ' - 'getting a token: %s.' % ex) - LOG.exception(error_message) - return error_message, httplib.INTERNAL_SERVER_ERROR, {} - - try: - if not self._components_ready(): - return '', httplib.INTERNAL_SERVER_ERROR, {} - except Exception as ex: - error_message = ('Error when processing neutron request %s' % ex) - LOG.exception(error_message) - return error_message, httplib.INTERNAL_SERVER_ERROR, {} - - return 'ok', httplib.OK, {} - - def liveness_status(self): - for component in self._registry: - if not component.is_alive(): - exc = component.get_last_exception() - if not exc: - msg = f'Component {component.__class__.__name__} is dead.' - LOG.error(msg) - else: - msg = (f'Component {component.__class__.__name__} is dead.' - f' Last caught exception below') - LOG.exception(msg, exc_info=exc) - return msg, httplib.INTERNAL_SERVER_ERROR, {} - return 'ok', httplib.OK, {} - - def verify_keystone_connection(self): - # Obtain a new token to ensure connectivity with keystone - conf_group = kuryr_config.neutron_group.name - auth_plugin = utils.get_auth_plugin(conf_group) - sess = utils.get_keystone_session(conf_group, auth_plugin) - sess.get_token() diff --git a/kuryr_kubernetes/controller/managers/pool.py b/kuryr_kubernetes/controller/managers/pool.py deleted file mode 100644 index 8ea64e1cf..000000000 --- a/kuryr_kubernetes/controller/managers/pool.py +++ /dev/null @@ -1,255 +0,0 @@ -# Copyright 2017 Red Hat, Inc. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from http import server -import os -import socketserver -import threading - - -from openstack import exceptions as os_exc -from oslo_config import cfg as oslo_cfg -from oslo_log import log as logging -from oslo_serialization import jsonutils - -from kuryr.lib._i18n import _ - -from kuryr_kubernetes import constants -from kuryr_kubernetes.controller.drivers import base as drivers - -LOG = logging.getLogger(__name__) - -pool_manager_opts = [ - oslo_cfg.StrOpt('sock_file', - help=_("Absolute path to socket file that " - "will be used for communication with " - "the Pool Manager daemon"), - default='/run/kuryr/kuryr_manage.sock'), -] - -oslo_cfg.CONF.register_opts(pool_manager_opts, "pool_manager") - - -class UnixDomainHttpServer(socketserver.ThreadingUnixStreamServer): - pass - - -class RequestHandler(server.BaseHTTPRequestHandler): - protocol = "HTTP/1.0" - - def do_POST(self): - content_length = int(self.headers.get('Content-Length', 0)) - - body = self.rfile.read(content_length) - params = dict(jsonutils.loads(body)) - - if self.path.endswith(constants.VIF_POOL_POPULATE): - trunk_ips = params.get('trunks', None) - num_ports = params.get('num_ports', 1) - if trunk_ips: - try: - self._create_subports(num_ports, trunk_ips) - except Exception: - response = ('Error while populating pool {0} with {1} ' - 'ports.'.format(trunk_ips, num_ports)) - else: - response = ('Ports pool at {0} was populated with {1} ' - 'ports.'.format(trunk_ips, num_ports)) - - self.send_header('Content-Length', len(response)) - self.end_headers() - self.wfile.write(response.encode()) - else: - response = 'Trunk port IP(s) missing.' - self.send_header('Content-Length', len(response)) - self.end_headers() - self.wfile.write(response.encode()) - - elif self.path.endswith(constants.VIF_POOL_FREE): - trunk_ips = params.get('trunks', None) - if not trunk_ips: - pool = "all" - else: - pool = trunk_ips - - try: - self._delete_subports(trunk_ips) - except Exception: - response = 'Error freeing ports pool: {0}.'.format(pool) - else: - response = 'Ports pool belonging to {0} was freed.'.format( - pool) - - self.send_header('Content-Length', len(response)) - self.end_headers() - self.wfile.write(response.encode()) - - else: - response = 'Method not allowed.' - self.send_header('Content-Length', len(response)) - self.end_headers() - self.wfile.write(response.encode()) - - def do_GET(self): - content_length = int(self.headers.get('Content-Length', 0)) - - body = self.rfile.read(content_length) - params = dict(jsonutils.loads(body)) - - if self.path.endswith(constants.VIF_POOL_LIST): - try: - pools_info = self._list_pools() - except Exception: - response = 'Error listing the pools.' 
- else: - response = 'Pools:\n{0}'.format(pools_info) - - self.send_header('Content-Length', len(response)) - self.end_headers() - self.wfile.write(response.encode()) - - elif self.path.endswith(constants.VIF_POOL_SHOW): - raw_key = params.get('pool_key', None) - if len(raw_key) != 3: - response = ('Invalid pool key. Proper format is:\n' - '[trunk_ip, project_id, [security_groups]]\n') - else: - pool_key = (raw_key[0], raw_key[1], tuple(sorted(raw_key[2]))) - - try: - pool_info = self._show_pool(pool_key) - except Exception: - response = 'Error showing pool: {0}.'.format(pool_key) - else: - response = 'Pool {0} ports are:\n{1}'.format(pool_key, - pool_info) - - self.send_header('Content-Length', len(response)) - self.end_headers() - self.wfile.write(response.encode()) - - else: - response = 'Method not allowed.' - self.send_header('Content-Length', len(response)) - self.end_headers() - self.wfile.write(response.encode()) - - def _create_subports(self, num_ports, trunk_ips): - try: - drv_project = drivers.PodProjectDriver.get_instance() - drv_subnets = drivers.PodSubnetsDriver.get_instance() - drv_sg = drivers.PodSecurityGroupsDriver.get_instance() - drv_vif = drivers.PodVIFDriver.get_instance() - drv_vif_pool = drivers.VIFPoolDriver.get_instance() - drv_vif_pool.set_vif_driver(drv_vif) - project_id = drv_project.get_project({}) - security_groups = drv_sg.get_security_groups({}, project_id) - subnets = drv_subnets.get_subnets([], project_id) - except TypeError: - LOG.error("Invalid driver type") - raise - - for trunk_ip in trunk_ips: - try: - drv_vif_pool.force_populate_pool( - trunk_ip, project_id, subnets, security_groups, num_ports) - except os_exc.ConflictException: - LOG.error("VLAN Id conflict (already in use) at trunk %s", - trunk_ip) - raise - except os_exc.SDKException: - LOG.exception("Error happened during subports addition at " - "trunk: %s", trunk_ip) - raise - - def _delete_subports(self, trunk_ips): - try: - drv_vif = drivers.PodVIFDriver.get_instance() - drv_vif_pool = drivers.VIFPoolDriver.get_instance() - drv_vif_pool.set_vif_driver(drv_vif) - - drv_vif_pool.free_pool(trunk_ips) - except TypeError: - LOG.error("Invalid driver type") - raise - - def _list_pools(self): - try: - drv_vif = drivers.PodVIFDriver.get_instance() - drv_vif_pool = drivers.VIFPoolDriver.get_instance() - drv_vif_pool.set_vif_driver(drv_vif) - - available_pools = drv_vif_pool.list_pools() - except TypeError: - LOG.error("Invalid driver type") - raise - - pools_info = "" - for pool_key, pool_items in available_pools.items(): - pools_info += (jsonutils.dumps(pool_key) + " has " - + str(len(pool_items)) + " ports\n") - if pools_info: - return pools_info - return "There are no pools" - - def _show_pool(self, pool_key): - try: - drv_vif = drivers.PodVIFDriver.get_instance() - drv_vif_pool = drivers.VIFPoolDriver.get_instance() - drv_vif_pool.set_vif_driver(drv_vif) - - pool = drv_vif_pool.show_pool(pool_key) - except TypeError: - LOG.error("Invalid driver type") - raise - - if pool: - pool_info = "" - for pool_id in pool: - pool_info += str(pool_id) + "\n" - return pool_info - else: - return "Empty pool" - - -class PoolManager(object): - """Manages the ports pool enabling population and free actions. - - `PoolManager` runs on the Kuryr-kubernetes controller and allows to - populate specific pools with a given amount of ports. In addition, it also - allows to remove all the (unused) ports in the given pool(s), or from all - of the pool if none of them is specified. 
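-
- For example, POSTing the JSON body {"trunks": ["10.0.0.6"],
- "num_ports": 10} to the VIF_POOL_POPULATE path on that socket (e.g.
- with `curl --unix-socket /run/kuryr/kuryr_manage.sock`) pre-creates
- ten subports on the trunk that owns IP 10.0.0.6; the IP here is
- purely illustrative.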
- """ - - def __init__(self): - pool_manager = threading.Thread(target=self._start_kuryr_manage_daemon) - pool_manager.setDaemon(True) - pool_manager.start() - - def _start_kuryr_manage_daemon(self): - LOG.info("Pool manager started") - server_address = oslo_cfg.CONF.pool_manager.sock_file - try: - os.unlink(server_address) - except OSError: - if os.path.exists(server_address): - raise - try: - httpd = UnixDomainHttpServer(server_address, RequestHandler) - httpd.serve_forever() - except KeyboardInterrupt: - pass - except Exception: - LOG.exception('Failed to start Pool Manager.') - httpd.socket.close() diff --git a/kuryr_kubernetes/controller/managers/prometheus_exporter.py b/kuryr_kubernetes/controller/managers/prometheus_exporter.py deleted file mode 100644 index 32a4a3bdd..000000000 --- a/kuryr_kubernetes/controller/managers/prometheus_exporter.py +++ /dev/null @@ -1,192 +0,0 @@ -# Copyright 2020 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import flask -import netaddr -import prometheus_client -from prometheus_client.exposition import generate_latest - -from oslo_config import cfg -from oslo_log import log as logging - -from kuryr_kubernetes import clients -from kuryr_kubernetes import config -from kuryr_kubernetes import utils - -LOG = logging.getLogger(__name__) -CONF = cfg.CONF -RESOURCES = ('ports', 'subnets', 'networks', 'security_groups', - 'security_group_rules') -_NO_QUOTA = 0 -_INF = float("inf") -_NO_LIMIT = -1 - - -class ControllerPrometheusExporter(object): - """Provides metrics to Prometheus""" - - instance = None - - def __init__(self): - self.application = flask.Flask('prometheus-exporter') - self.ctx = None - self.application.add_url_rule( - '/metrics', methods=['GET'], view_func=self.metrics) - self.headers = {'Connection': 'close'} - self._os_net = clients.get_network_client() - self._os_lb = clients.get_loadbalancer_client() - self._project_id = config.CONF.neutron_defaults.project - self._create_metrics() - - def metrics(self): - """Provides the registered metrics""" - self._record_quota_free_count_metric() - self._record_ports_quota_per_subnet_metric() - self._record_lbs_metrics() - - collected_metric = generate_latest(self.registry) - return flask.Response(collected_metric, mimetype='text/plain') - - def record_pod_creation_metric(self, duration): - """Records pod creation duration to the registry""" - self.pod_creation_latency.observe(duration) - - def record_lb_failure(self): - """Increase failure count for Load Balancer readiness""" - self.load_balancer_readiness.inc() - - def record_port_failure(self): - """Increase failure count to Port readiness""" - self.port_readiness.inc() - - @classmethod - def get_instance(cls): - if not ControllerPrometheusExporter.instance: - ControllerPrometheusExporter.instance = cls() - return ControllerPrometheusExporter.instance - - def run(self): - # Disable obtrusive werkzeug logs. 
- logging.getLogger('werkzeug').setLevel(logging.WARNING) - - address = '::' - try: - LOG.info('Starting Prometheus exporter') - self.application.run( - address, CONF.prometheus_exporter.controller_exporter_port) - except Exception: - LOG.exception('Failed to start Prometheus exporter') - raise - - def _record_quota_free_count_metric(self): - """Records Network resources availability to the registry""" - quota = self._os_net.get_quota(quota=self._project_id, details=True) - for resource in RESOURCES: - resource_quota = quota[resource] - labels = {'resource': resource} - quota_limit = resource_quota['limit'] - if quota_limit == _NO_LIMIT: - self.quota_free_count.labels(**labels).set(quota_limit) - continue - quota_used = resource_quota['used'] - availability = quota_limit - quota_used - if availability >= _NO_QUOTA: - self.quota_free_count.labels(**labels).set(availability) - - def _record_ports_quota_per_subnet_metric(self): - """Records the ports quota per subnet to the registry""" - subnets = self._os_net.subnets(project_id=self._project_id) - namespace_prefix = 'ns/' - for subnet in subnets: - if namespace_prefix not in subnet.name: - continue - total_num_addresses = 0 - ports_availability = 0 - for allocation in subnet.allocation_pools: - total_num_addresses += netaddr.IPRange( - netaddr.IPAddress(allocation['start']), - netaddr.IPAddress(allocation['end'])).size - ports_count = len(list(self._os_net.ports( - fixed_ips=[f'subnet_id={subnet.id}'], - project_id=self._project_id))) - # NOTE(maysams): As the allocation pools range does not take - # into account the Gateway IP, that port IP shouldn't - # be include when counting the used ports. - ports_count = ports_count - 1 - labels = {'subnet_id': subnet.id, 'subnet_name': subnet.name} - ports_availability = total_num_addresses-ports_count - self.port_quota_per_subnet.labels(**labels).set(ports_availability) - - def _record_lbs_metrics(self): - """Records the number of members available per LB and the LB state""" - critical_lbs = [ - ('dns-default', 'openshift-dns'), - ('kubernetes', 'default')] - for name, namespace in critical_lbs: - klb = utils.get_kuryrloadbalancer(name, namespace) - lb = klb.get('status', {}).get('loadbalancer', {}) - lb_id = lb.get('id') - if not lb_id: - continue - lb = self._os_lb.find_load_balancer(lb_id) - labels = {'lb_name': namespace + '/' + name} - if not lb: - self.lbs_state.labels(**labels).state('DELETED') - continue - self.lbs_state.labels(**labels).state(lb.provisioning_status) - pools = self._os_lb.pools(loadbalancer_id=lb.id) - for pool in pools: - labels = {'lb_name': lb.name, 'lb_pool_name': pool.name} - self.lbs_members_count.labels(**labels).set(len(pool.members)) - - def _create_metrics(self): - """Creates a registry and records metrics""" - self.registry = prometheus_client.CollectorRegistry() - self.quota_free_count = prometheus_client.Gauge( - 'kuryr_quota_free_count', 'Amount of quota available' - ' for the network resource', labelnames={'resource'}, - registry=self.registry) - - self.port_quota_per_subnet = prometheus_client.Gauge( - 'kuryr_port_quota_per_subnet', 'Amount of ports available' - ' on Subnet', labelnames={'subnet_id', 'subnet_name'}, - registry=self.registry) - - self.lbs_members_count = prometheus_client.Gauge( - 'kuryr_critical_lb_members_count', 'Amount of members per ' - 'critical Load Balancer pool', - labelnames={'lb_name', 'lb_pool_name'}, - registry=self.registry) - - self.lbs_state = prometheus_client.Enum( - 'kuryr_critical_lb_state', 'Critical Load Balancer State', 
- labelnames={'lb_name'}, - states=['ERROR', 'ACTIVE', 'DELETED', 'PENDING_CREATE', - 'PENDING_UPDATE', 'PENDING_DELETE'], - registry=self.registry) - - buckets = (10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 110, 120, _INF) - self.pod_creation_latency = prometheus_client.Histogram( - 'kuryr_pod_creation_latency', 'Time taken for a pod to have' - ' Kuryr annotations set', buckets=buckets, registry=self.registry) - - self.load_balancer_readiness = prometheus_client.Counter( - 'kuryr_load_balancer_readiness', 'This counter is increased when ' - 'Kuryr notices that an Octavia load balancer is stuck in an ' - 'unexpected state', registry=self.registry) - - self.port_readiness = prometheus_client.Counter( - 'kuryr_port_readiness', 'This counter is increased when Kuryr ' - 'times out waiting for Neutron to move port to ACTIVE', - registry=self.registry) diff --git a/kuryr_kubernetes/controller/service.py b/kuryr_kubernetes/controller/service.py deleted file mode 100644 index d062e148b..000000000 --- a/kuryr_kubernetes/controller/service.py +++ /dev/null @@ -1,188 +0,0 @@ -# Copyright (c) 2016 Mirantis, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -import functools -import sys - -import urllib3 - -import os_vif -from oslo_config import cfg -from oslo_log import log as logging -from oslo_service import periodic_task -from oslo_service import service -from stevedore.named import NamedExtensionManager - -from kuryr_kubernetes import clients -from kuryr_kubernetes import config -from kuryr_kubernetes.controller.drivers import base as drivers -from kuryr_kubernetes.controller.handlers import pipeline as h_pipeline -from kuryr_kubernetes.controller.managers import health -from kuryr_kubernetes.controller.managers import prometheus_exporter as exp -from kuryr_kubernetes import objects -from kuryr_kubernetes import utils -from kuryr_kubernetes import watcher - - -CONF = cfg.CONF -LOG = logging.getLogger(__name__) - - -def _handler_not_found(names): - LOG.exception('Handlers "%s" were not found.', names) - LOG.critical('Handlers "%s" were not found.', names) - raise SystemExit() - - -def _handler_not_loaded(manager, entrypoint, exception): - LOG.exception('Exception when loading handlers %s.', entrypoint) - LOG.critical('Handlers entrypoint "%s" failed to load due to %s.', - entrypoint, exception) - raise SystemExit() - - -def _load_kuryr_ctrlr_handlers(): - configured_handlers = CONF.kubernetes.enabled_handlers - LOG.info('Configured handlers: %s', configured_handlers) - handlers = NamedExtensionManager( - 'kuryr_kubernetes.controller.handlers', - configured_handlers, - invoke_on_load=True, - on_missing_entrypoints_callback=_handler_not_found, - on_load_failure_callback=_handler_not_loaded) - LOG.info('Loaded handlers: %s', handlers.names()) - ctrlr_handlers = [] - for handler in handlers.extensions: - ctrlr_handlers.append(handler.obj) - return ctrlr_handlers - - -class KuryrK8sServiceMeta(type(service.Service), - type(periodic_task.PeriodicTasks)): - pass - - -class 
KuryrK8sService(service.Service, periodic_task.PeriodicTasks, - metaclass=KuryrK8sServiceMeta): - """Kuryr-Kubernetes controller Service.""" - - def __init__(self): - super(KuryrK8sService, self).__init__() - periodic_task.PeriodicTasks.__init__(self, CONF) - - objects.register_locally_defined_vifs() - pipeline = h_pipeline.ControllerPipeline(self.tg) - self.watcher = watcher.Watcher(pipeline, self.tg) - self.health_manager = health.HealthServer() - self.exporter = exp.ControllerPrometheusExporter.get_instance() - self.current_leader = None - self.node_name = utils.get_node_name() - - self.handlers = _load_kuryr_ctrlr_handlers() - for handler in self.handlers: - self.watcher.add(handler.get_watch_path()) - pipeline.register(handler) - self.pool_driver = drivers.VIFPoolDriver.get_instance( - specific_driver='multi_pool') - self.pool_driver.set_vif_driver() - - def is_leader(self): - return self.current_leader == self.node_name - - def start(self): - LOG.info("Service '%s' starting", self.__class__.__name__) - super(KuryrK8sService, self).start() - - if not CONF.kubernetes.controller_ha: - LOG.info('Running in non-HA mode, starting watcher immediately.') - self.watcher.start() - self.pool_driver.sync_pools() - else: - LOG.info('Running in HA mode, watcher will be started later.') - f = functools.partial(self.run_periodic_tasks, None) - self.tg.add_timer(1, f) - - self.tg.add_thread(self.exporter.run) - self.tg.add_thread(self.health_manager.run) - LOG.info("Service '%s' started", self.__class__.__name__) - - @periodic_task.periodic_task(spacing=5, run_immediately=True) - def monitor_leader(self, context): - if not CONF.kubernetes.controller_ha: - return - leader = utils.get_leader_name() - if leader is None: - # Error when fetching current leader. We're paranoid, so just to - # make sure we won't break anything we'll try to step down. - self.on_revoke_leader() - elif leader != self.current_leader and leader == self.node_name: - # I'm becoming the leader. - self.on_become_leader() - elif leader != self.current_leader and self.is_leader(): - # I'm revoked from being the leader. - self.on_revoke_leader() - elif leader == self.current_leader and self.is_leader(): - # I continue to be the leader - self.on_continue_leader() - - self.current_leader = leader - - def on_become_leader(self): - LOG.info('Controller %s becomes the leader, starting watcher.', - self.node_name) - self.watcher.start() - self.pool_driver.sync_pools() - - def on_revoke_leader(self): - LOG.info('Controller %s stops being the leader, stopping watcher.', - self.node_name) - if self.watcher.is_running(): - self.watcher.stop() - - def on_continue_leader(self): - # Just make sure my watcher is running. - if not self.watcher.is_running(): - LOG.warning('Controller %s is the leader, but has watcher ' - 'stopped. 
Restarting it.', self.node_name) - self.watcher.start() - - def wait(self): - super(KuryrK8sService, self).wait() - LOG.info("Service '%s' stopped", self.__class__.__name__) - - def stop(self, graceful=False): - LOG.info("Service '%s' stopping", self.__class__.__name__) - self.watcher.stop() - super(KuryrK8sService, self).stop(graceful) - - @periodic_task.periodic_task(spacing=600, run_immediately=False) - def reconcile_loadbalancers(self, context): - LOG.debug("Checking for Kubernetes resources reconciliations") - for handler in self.handlers: - handler.reconcile() - - @periodic_task.periodic_task(spacing=90, run_immediately=False) - def cleanup_dead_resources(self, context): - utils.cleanup_dead_ports() - utils.cleanup_dead_networks() - - -def start(): - urllib3.disable_warnings() - config.init(sys.argv[1:]) - config.setup_logging() - clients.setup_clients() - os_vif.initialize() - kuryrk8s_launcher = service.launch(config.CONF, KuryrK8sService()) - kuryrk8s_launcher.wait() diff --git a/kuryr_kubernetes/exceptions.py b/kuryr_kubernetes/exceptions.py deleted file mode 100644 index 0da90b495..000000000 --- a/kuryr_kubernetes/exceptions.py +++ /dev/null @@ -1,197 +0,0 @@ -# Copyright (c) 2016 Mirantis, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from kuryr_kubernetes import utils - - -class K8sClientException(Exception): - pass - - -class IntegrityError(RuntimeError): - pass - - -class InvalidKuryrConfiguration(RuntimeError): - pass - - -class ResourceNotReady(Exception): - def __init__(self, resource): - msg = resource - if type(resource) == dict: - if resource.get('metadata', {}).get('name', None): - res_name = utils.get_res_unique_name(resource) - kind = resource.get('kind') - if kind: - msg = f'{kind} {res_name}' - else: - msg = res_name - self.message = "Resource not ready: %r" % msg - super(ResourceNotReady, self).__init__(self.message) - - -class KuryrLoadBalancerNotCreated(Exception): - def __init__(self, res): - name = utils.get_res_unique_name(res) - super().__init__( - 'KuryrLoadBalancer not created yet for the Service %s' % name) - - -class LoadBalancerNotReady(ResourceNotReady): - def __init__(self, loadbalancer_id, status): - super().__init__( - 'Loadbalancer %s is stuck in %s status for several minutes. This ' - 'is unexpected and indicates problem with OpenStack Octavia. ' - 'Please contact your OpenStack administrator.' % ( - loadbalancer_id, status)) - - -class PortNotReady(ResourceNotReady): - def __init__(self, port_id, status): - super().__init__( - 'Port %s is stuck in %s status for several minutes. This ' - 'is unexpected and indicates problem with OpenStack Neutron. ' - 'Please contact your OpenStack administrator.' 
% (port_id, status))
-
-
-class K8sResourceNotFound(K8sClientException):
- def __init__(self, resource):
- super(K8sResourceNotFound, self).__init__("Resource not "
- "found: %r" % resource)
-
-
-class K8sConflict(K8sClientException):
- def __init__(self, message):
- super(K8sConflict, self).__init__("Conflict: %r" % message)
-
-
-class K8sForbidden(K8sClientException):
- def __init__(self, message):
- super(K8sForbidden, self).__init__("Forbidden: %r" % message)
-
-
-class K8sNamespaceTerminating(K8sForbidden):
- # This is raised when K8s complains about an operation failing because
- # the namespace is being terminated.
- def __init__(self, message):
- super(K8sNamespaceTerminating, self).__init__(
- "Namespace already terminated: %r" % message)
-
-
-class K8sUnprocessableEntity(K8sClientException):
- def __init__(self, message):
- super(K8sUnprocessableEntity, self).__init__(
- "Unprocessable: %r" % message)
-
-
-class K8sFieldValueForbidden(K8sUnprocessableEntity):
- pass
-
-
-class InvalidKuryrNetworkAnnotation(Exception):
- pass
-
-
-class CNIError(Exception):
- pass
-
-
-def format_msg(exception):
- return "%s: %s" % (exception.__class__.__name__, exception)
-
-
-class K8sNodeTrunkPortFailure(Exception):
- """Exception representing an error related to the K8s node trunk port
-
- This exception is thrown when a Neutron port is not associated with a
- Neutron vlan trunk.
- """
-
-
-class AllowedAddressAlreadyPresent(Exception):
- """Exception indicating an 'allowed address pair' already present on a port
-
- This exception is raised when an attempt is made to add an 'allowed
- address pair' that is already set on a port. Such a condition likely
- indicates a bad program state or a programming bug.
- """
-
-
-class MultiPodDriverPoolConfigurationNotSupported(Exception):
- """Exception indicating a wrong configuration of the multi pod driver pool
-
- This exception is raised when the multi pod driver pool is not properly
- configured. This could be due to three different reasons:
- 1. One of the pool drivers is not supported
- 2. One of the pod drivers is not supported
- 3. One of the pod drivers is not supported by its selected pool driver
- """
-
-
-class CNITimeout(Exception):
- """Exception grouping the various timeouts happening in the CNI"""
-
-
-class CNIKuryrPortTimeout(CNITimeout):
- """Exception raised on timeout waiting for a KuryrPort to be created"""
- def __init__(self, name):
- super().__init__(
- f'Timed out waiting for KuryrPort to be created for pod {name}. '
- f'kuryr-controller is responsible for that, check logs there.')
-
-
-class CNINeutronPortActivationTimeout(CNITimeout):
- """Exception raised on timeout waiting for Neutron ports to become ACTIVE"""
- def __init__(self, name, vifs):
- inactive = ', '.join(vif.id for vif in vifs.values() if not vif.active)
- super().__init__(
- f'Timed out waiting for Neutron port(s) {inactive} to be marked '
- f'as ACTIVE after being bound to a Pod {name}. Most likely this '
- f'indicates an issue with OpenStack Neutron. You can also check '
- f'logs of kuryr-controller to confirm.')
-
-
-class CNIBindingFailure(Exception):
- """Exception indicating a binding/unbinding VIF failure in CNI"""
- def __init__(self, message):
- super(CNIBindingFailure, self).__init__(message)
-
-
-class CNIPodUidMismatch(Exception):
- """Exception raised on a mismatch of the CNI request's pod UID and KuryrPort"""
- def __init__(self, name, expected, observed):
- super().__init__(
- f'uid {observed} of the pod {name} does not match the uid '
- f'{expected} requested by the CNI. 
Dropping CNI request to prevent'
- f' race conditions.')
-
-
-class CNIPodGone(Exception):
- """Exception raised when a Pod got deleted while processing a CNI request"""
- def __init__(self, name):
- super().__init__(
- f'Pod {name} got deleted while processing the CNI ADD request.')
-
-
-class UnreachableOctavia(Exception):
- """Exception indicating that the Octavia API cannot be reached
-
- This exception is raised when Kuryr cannot reach Octavia, i.e. when the
- Octavia API call returns 'None' in the version field, and we need to
- properly log a message informing the user.
- """
- def __init__(self, message):
- super(UnreachableOctavia, self).__init__(message)
diff --git a/kuryr_kubernetes/handlers/__init__.py b/kuryr_kubernetes/handlers/__init__.py
deleted file mode 100644
index e69de29bb..000000000
diff --git a/kuryr_kubernetes/handlers/asynchronous.py b/kuryr_kubernetes/handlers/asynchronous.py
deleted file mode 100755
index 6204974b2..000000000
--- a/kuryr_kubernetes/handlers/asynchronous.py
+++ /dev/null
@@ -1,123 +0,0 @@
-# Copyright (c) 2016 Mirantis, Inc.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import itertools
-import queue as py_queue
-import time
-
-from oslo_concurrency import lockutils
-from oslo_log import log as logging
-
-
-from kuryr_kubernetes.handlers import base
-
-LOG = logging.getLogger(__name__)
-
-DEFAULT_QUEUE_DEPTH = 100
-DEFAULT_GRACE_PERIOD = 5
-STALE_PERIOD = 0.5
-
-
-class Async(base.EventHandler):
- """Handles events asynchronously.
-
- `Async` can be used to decorate another `handler` to be run asynchronously
- using the specified `thread_group`. `Async` distinguishes *related* and
- *unrelated* events (based on the result of the `group_by(event)` function)
- and handles *unrelated* events concurrently while *related* events are
- handled serially and in the same order they arrived at `Async`.
- """
-
- def __init__(self, handler, thread_group, group_by, info_func,
- queue_depth=DEFAULT_QUEUE_DEPTH,
- grace_period=DEFAULT_GRACE_PERIOD):
- self._handler = handler
- self._thread_group = thread_group
- self._group_by = group_by
- self._info_func = info_func
- self._queue_depth = queue_depth
- self._grace_period = grace_period
- self._queues = {}
-
- def __call__(self, event, *args, **kwargs):
- group = self._group_by(event)
- with lockutils.lock(group):
- try:
- queue = self._queues[group]
- # NOTE(dulek): We don't want to risk injecting an outdated
- # state if events for that resource are in queue.
- if kwargs.get('injected', False): - return - except KeyError: - queue = py_queue.Queue(self._queue_depth) - self._queues[group] = queue - info = self._info_func(event) - thread = self._thread_group.add_thread(self._run, group, queue, - info) - thread.link(self._done, group, info) - queue.put((event, args, kwargs)) - - def _run(self, group, queue, info): - LOG.trace("Asynchronous handler started processing %s (%s)", group, - info) - for _ in itertools.count(): - # NOTE(ivc): this is a mock-friendly replacement for 'while True' - # to allow more controlled environment for unit-tests (e.g. to - # avoid tests getting stuck in infinite loops) - try: - event, args, kwargs = queue.get(timeout=self._grace_period) - except py_queue.Empty: - break - # FIXME(ivc): temporary workaround to skip stale events - # If K8s updates resource while the handler is processing it, - # when the handler finishes its work it can fail to update an - # annotation due to the 'resourceVersion' conflict. K8sClient - # was updated to allow *new* annotations to be set ignoring - # 'resourceVersion', but it leads to another problem as the - # Handler will receive old events (i.e. before annotation is set) - # and will start processing the event 'from scratch'. - # It has negative effect on handlers' performance (VIFHandler - # creates ports only to later delete them and LBaaS handler also - # produces some excess requests to Neutron, although with lesser - # impact). - # Possible solutions (can be combined): - # - use K8s ThirdPartyResources to store data/annotations instead - # of native K8s resources (assuming Kuryr-K8s will own those - # resources and no one else would update them) - # - use the resulting 'resourceVersion' received from K8sClient's - # 'annotate' to provide feedback to Async to skip all events - # until that version - # - stick to the 'get-or-create' behaviour in handlers and - # also introduce cache for long operations - time.sleep(STALE_PERIOD) - while not queue.empty(): - event, args, kwargs = queue.get() - if queue.empty(): - time.sleep(STALE_PERIOD) - self._handler(event, *args, **kwargs) - - def _done(self, thread, group, info): - LOG.trace("Asynchronous handler stopped processing group %s (%s)", - group, info) - queue = self._queues.pop(group) - - if not queue.empty(): - LOG.critical( - "Asynchronous handler thread terminated abnormally; %(count)s " - "events dropped for %(group)s (%(info)s)", - {'count': queue.qsize(), 'group': group, 'info': info}) - - if not self._queues: - LOG.trace("Asynchronous handler is idle") diff --git a/kuryr_kubernetes/handlers/base.py b/kuryr_kubernetes/handlers/base.py deleted file mode 100644 index 438625d47..000000000 --- a/kuryr_kubernetes/handlers/base.py +++ /dev/null @@ -1,28 +0,0 @@ -# Copyright (c) 2016 Mirantis, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
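
The stale-event workaround in Async._run above boils down to: sleep briefly, then drain the queue and hand only the newest event to the wrapped handler. A self-contained sketch of that pattern, under the simplifying assumption of a single plain queue and no thread group:

import queue
import time

STALE_PERIOD = 0.5


def handle_latest(event_queue, handler, grace_period=5):
    # Block for the first event; queue.Empty propagates if nothing
    # arrives within grace_period, mirroring how Async._run exits.
    event = event_queue.get(timeout=grace_period)
    time.sleep(STALE_PERIOD)  # let quick follow-up events accumulate
    while not event_queue.empty():
        event = event_queue.get()  # drop everything but the newest
        if event_queue.empty():
            time.sleep(STALE_PERIOD)  # one more grace period, as above
    handler(event)
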
- -import abc - - -class EventHandler(object, metaclass=abc.ABCMeta): - """Base class for event handlers.""" - - @abc.abstractmethod - def __call__(self, event, *args, **kwargs): - """Handle the event.""" - raise NotImplementedError() - - def __str__(self): - return self.__class__.__name__ diff --git a/kuryr_kubernetes/handlers/dispatch.py b/kuryr_kubernetes/handlers/dispatch.py deleted file mode 100644 index 56585bfcd..000000000 --- a/kuryr_kubernetes/handlers/dispatch.py +++ /dev/null @@ -1,126 +0,0 @@ -# Copyright (c) 2016 Mirantis, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import abc - -from oslo_log import log as logging - -from kuryr_kubernetes.handlers import base as h_base - -LOG = logging.getLogger(__name__) - - -class Dispatcher(h_base.EventHandler): - """Dispatches events to registered handlers. - - Dispatcher serves as both multiplexer and filter for dispatching events - to multiple registered handlers based on the event content and - predicates provided during the handler registration. - """ - - def __init__(self): - self._registry = {} - - def register(self, key_fn, key, handler): - """Adds handler to the registry. - - `key_fn` and `key` constitute the `key_fn(event) == key` predicate - that determines if the `handler` should be called for a given `event`. - - :param key_fn: function that will be called for each event to - determine the event `key` - :param key: value to match against the result of `key_fn` function - that determines if the `handler` should be called for an - event - :param handler: `callable` object that would be called if the - conditions specified by `key_fn` and `key` are met - """ - key_group = self._registry.setdefault(key_fn, {}) - handlers = key_group.setdefault(key, []) - handlers.append(handler) - - def __call__(self, event, *args, **kwargs): - handlers = set() - - for key_fn, key_group in self._registry.items(): - key = key_fn(event) - handlers.update(key_group.get(key, ())) - - obj = event.get('object', {}) - obj_meta = obj.get('metadata', {}) - - LOG.trace("%d handler(s) available for event %s %s:%s/%s (uid: %s)", - len(handlers), event.get('type'), obj.get('kind'), - obj_meta.get('namespace'), obj_meta.get('name'), - obj_meta.get('uid')) - - for handler in handlers: - handler(event, *args, **kwargs) - - -class EventConsumer(h_base.EventHandler, metaclass=abc.ABCMeta): - """Consumes events matching specified predicates. - - EventConsumer is an interface for all event handlers that are to be - registered by the `EventPipeline`. - """ - - def __init__(self): - super(EventConsumer, self).__init__() - - @property - @abc.abstractmethod - def consumes(self): - """Predicates determining events supported by this handler. - - :return: `dict` object containing {key_fn: key} predicates to be - used by `Dispatcher.register` - """ - raise NotImplementedError() - - -class EventPipeline(h_base.EventHandler, metaclass=abc.ABCMeta): - """Serves as an entry-point for event handling. 
- - Implementing subclasses should override `_wrap_dispatcher` and/or - `_wrap_consumer` methods to sanitize the consumers passed to `register` - (i.e. to satisfy the `Watcher` requirement that the event handler does - not raise exceptions) and to add features like asynchronous event - processing or retry-on-failure functionality. - """ - - def __init__(self): - self._dispatcher = Dispatcher() - self._handler = self._wrap_dispatcher(self._dispatcher) - - def register(self, consumer): - """Adds handler to the registry. - - :param consumer: `EventConsumer`-type object - """ - handler = self._wrap_consumer(consumer) - for key_fn, key in consumer.consumes.items(): - self._dispatcher.register(key_fn, key, handler) - - def __call__(self, event, *args, **kwargs): - self._handler(event, *args, **kwargs) - - @abc.abstractmethod - def _wrap_dispatcher(self, dispatcher): - raise NotImplementedError() - - @abc.abstractmethod - def _wrap_consumer(self, consumer): - raise NotImplementedError() diff --git a/kuryr_kubernetes/handlers/health.py b/kuryr_kubernetes/handlers/health.py deleted file mode 100644 index 2fe30cbd9..000000000 --- a/kuryr_kubernetes/handlers/health.py +++ /dev/null @@ -1,57 +0,0 @@ -# Copyright 2018 Maysa de Macedo Souza. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -class HealthRegister(object): - instance = None - - def __init__(self): - self.registry = [] - - def register(self, elem): - self.registry.append(elem) - - @classmethod - def get_instance(cls): - if not HealthRegister.instance: - HealthRegister.instance = cls() - return HealthRegister.instance - - -class HealthHandler(object): - """Base class for health handlers.""" - def __init__(self): - super(HealthHandler, self).__init__() - self._alive = True - self._ready = True - self._manager = HealthRegister.get_instance() - self._manager.register(self) - self._last_exception = None - - def set_liveness(self, alive, exc=None): - if exc: - self._last_exception = exc - self._alive = alive - - def set_readiness(self, ready): - self._ready = ready - - def is_alive(self): - return self._alive - - def is_ready(self, *args): - return self._ready - - def get_last_exception(self): - return self._last_exception diff --git a/kuryr_kubernetes/handlers/k8s_base.py b/kuryr_kubernetes/handlers/k8s_base.py deleted file mode 100755 index 78e479c2a..000000000 --- a/kuryr_kubernetes/handlers/k8s_base.py +++ /dev/null @@ -1,121 +0,0 @@ -# Copyright (c) 2016 Mirantis, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
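
To make the Dispatcher predicate mechanics above concrete, a small hedged usage sketch; the handler and the event are hypothetical, and in Kuryr itself registration happens through EventPipeline.register using each consumer's `consumes` mapping:

from kuryr_kubernetes.handlers import dispatch


def object_kind(event):
    return event.get('object', {}).get('kind')


def log_pod_event(event, *args, **kwargs):
    print('Pod event:', event['type'], event['object']['metadata']['name'])


dispatcher = dispatch.Dispatcher()
# log_pod_event is called only when object_kind(event) == 'Pod'.
dispatcher.register(object_kind, 'Pod', log_pod_event)
dispatcher({'type': 'ADDED',
            'object': {'kind': 'Pod', 'metadata': {'name': 'web-0'}}})
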
- -from kuryr_kubernetes.handlers import dispatch -from kuryr_kubernetes.handlers import health - - -def object_kind(event): - try: - return event['object']['kind'] - except KeyError: - return None - - -def object_uid(event): - try: - return event['object']['metadata']['uid'] - except KeyError: - return None - - -def object_info(event): - try: - resource = event['object'] - try: - return "%(kind)s %(namespace)s/%(name)s" % resource['metadata'] - except KeyError: - return "%(kind)s: %(name)s" % resource['metadata'] - except KeyError: - return None - - -class ResourceEventHandler(dispatch.EventConsumer, health.HealthHandler): - """Base class for K8s event handlers. - - Implementing classes should override both `OBJECT_KIND` and - 'OBJECT_WATCH_PATH' attributes. - The `OBJECT_KIND` should be set to a valid Kubernetes object type - name (e.g. 'Pod' or 'Namespace'; see [1] for more details). - - The `OBJECT_WATCH_PATH` should point to object's watched path, - (e.g. for the 'Pod' case the OBJECT_WATCH_PATH should be '/api/v1/pods'). - - Implementing classes are expected to override any or all of the - `on_added`, `on_present`, `on_modified`, `on_deleted` methods that would - be called depending on the type of the event (with K8s object as a single - argument). - - [1] https://github.com/kubernetes/kubernetes/blob/release-1.4/docs/devel\ - /api-conventions.md#types-kinds - """ - - OBJECT_KIND = None - OBJECT_WATCH_PATH = None - - def __init__(self): - super(ResourceEventHandler, self).__init__() - - def get_watch_path(self): - return self.OBJECT_WATCH_PATH - - @property - def consumes(self): - return {object_kind: self.OBJECT_KIND} - - def _check_finalize(self, obj): - deletion_timestamp = None - try: - deletion_timestamp = obj['metadata']['deletionTimestamp'] - except (KeyError, TypeError): - pass - - return deletion_timestamp - - def __call__(self, event, *args, **kwargs): - event_type = event.get('type') - obj = event.get('object') - if 'MODIFIED' == event_type: - if self._check_finalize(obj): - self.on_finalize(obj, *args, **kwargs) - return - self.on_modified(obj, *args, **kwargs) - self.on_present(obj, *args, **kwargs) - elif 'ADDED' == event_type: - if self._check_finalize(obj): - self.on_finalize(obj, *args, **kwargs) - return - self.on_added(obj, *args, **kwargs) - self.on_present(obj, *args, **kwargs) - elif 'DELETED' == event_type: - self.on_deleted(obj, *args, **kwargs) - - def on_added(self, obj, *args, **kwargs): - pass - - def on_present(self, obj, *args, **kwargs): - pass - - def on_modified(self, obj, *args, **kwargs): - pass - - def on_deleted(self, obj, *args, **kwargs): - pass - - def on_finalize(self, obj, *args, **kwargs): - pass - - def reconcile(self): - pass diff --git a/kuryr_kubernetes/handlers/logging.py b/kuryr_kubernetes/handlers/logging.py deleted file mode 100644 index 4c11c9524..000000000 --- a/kuryr_kubernetes/handlers/logging.py +++ /dev/null @@ -1,49 +0,0 @@ -# Copyright (c) 2016 Mirantis, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
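
A minimal, hypothetical subclass illustrating the ResourceEventHandler contract above: set the two class attributes and override whichever on_* callbacks matter, and __call__ routes watch events accordingly, diverting to on_finalize once a deletionTimestamp shows up:

from kuryr_kubernetes.handlers import k8s_base


class ConfigMapLogger(k8s_base.ResourceEventHandler):
    """Toy handler, not part of Kuryr; it merely logs ConfigMap events."""

    OBJECT_KIND = 'ConfigMap'
    OBJECT_WATCH_PATH = '/api/v1/configmaps'

    def on_present(self, obj, *args, **kwargs):
        print('Present:', obj['metadata']['name'])

    def on_finalize(self, obj, *args, **kwargs):
        print('Being deleted:', obj['metadata']['name'])
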
- -from oslo_log import log as logging - -from kuryr_kubernetes.handlers import base - -LOG = logging.getLogger(__name__) - - -class LogExceptions(base.EventHandler): - """Suppresses exceptions and sends them to log. - - LogExceptions wraps `handler` passed as an initialization parameter by - suppressing `exceptions` it raises and sending them to logging facility - instead. - """ - - def __init__(self, handler, exceptions=Exception, ignore_exceptions=None): - self._handler = handler - self._exceptions = exceptions - self._ignore_exceptions = ignore_exceptions or () - - def __call__(self, event, *args, **kwargs): - try: - self._handler(event, *args, **kwargs) - except self._ignore_exceptions: - pass - except self._exceptions as ex: - # If exception comes from OpenStack SDK and contains - # 'request_id' then print this 'request_id' along the Exception. - # This 'request_id' can be then used to search the OpenStack - # service logs. - req_id = '' - if hasattr(ex, 'request_id'): - req_id = f' [{ex.request_id}]' - LOG.exception("Failed to handle event%s: %s", req_id, event) diff --git a/kuryr_kubernetes/handlers/retry.py b/kuryr_kubernetes/handlers/retry.py deleted file mode 100644 index 33ea37bc4..000000000 --- a/kuryr_kubernetes/handlers/retry.py +++ /dev/null @@ -1,137 +0,0 @@ -# Copyright (c) 2016 Mirantis, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import itertools -import time - -import requests - -from openstack import exceptions as os_exc -from oslo_log import log as logging -from oslo_utils import excutils - -from kuryr_kubernetes import clients -from kuryr_kubernetes.controller.managers import prometheus_exporter -from kuryr_kubernetes import exceptions -from kuryr_kubernetes.handlers import base -from kuryr_kubernetes import utils - -LOG = logging.getLogger(__name__) - - -class Retry(base.EventHandler): - """Retries handler on failure. - - `Retry` can be used to decorate another `handler` to be retried whenever - it raises any of the specified `exceptions`. If the `handler` does not - succeed within the time limit specified by `timeout`, `Retry` will - raise the exception risen by `handler`. `Retry` does not interrupt the - `handler`, so the actual time spent within a single call to `Retry` may - exceed the `timeout` depending on responsiveness of the `handler`. - - `handler` is retried for the same `event` (expected backoff E(c) = - interval * 2 ** c / 2). 
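-
- For example, with interval = 1 second the expected sleep before retry
- attempt c is about 2 ** c / 2 seconds: roughly 1 second after the
- first failure, 2 seconds after the second, 4 seconds after the third,
- and so on, until the overall `timeout` deadline is exceeded.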
- """ - - def __init__(self, handler, exceptions=Exception, - timeout=utils.DEFAULT_TIMEOUT, - interval=utils.DEFAULT_INTERVAL): - self._handler = handler - self._exceptions = exceptions - self._timeout = timeout - self._interval = interval - self._k8s = clients.get_kubernetes_client() - - def __call__(self, event, *args, **kwargs): - start_time = time.time() - deadline = time.time() + self._timeout - for attempt in itertools.count(1): - if event.get('type') in ['MODIFIED', 'ADDED']: - obj = event.get('object') - if obj: - try: - obj_link = utils.get_res_link(obj) - except KeyError: - LOG.debug("Unknown object, skipping: %s", obj) - else: - try: - self._k8s.get(obj_link) - except exceptions.K8sResourceNotFound: - LOG.debug("There is no need to process the " - "retry as the object %s has already " - "been deleted.", obj_link) - return - except (exceptions.K8sClientException, - requests.ConnectionError): - LOG.debug("Kubernetes client error getting the " - "object. Continuing with handler " - "execution.") - try: - info = { - 'elapsed': time.time() - start_time - } - self._handler(event, *args, retry_info=info, **kwargs) - break - except (exceptions.LoadBalancerNotReady, - exceptions.PortNotReady) as exc: - cls_map = {'LoadBalancerNotReady': 'record_lb_failure', - 'PortNotReady': 'record_port_failure'} - with excutils.save_and_reraise_exception() as ex: - if self._sleep(deadline, attempt, ex.value): - ex.reraise = False - else: - exporter = (prometheus_exporter - .ControllerPrometheusExporter - .get_instance()) - method = getattr(exporter, cls_map[type(exc).__name__]) - method() - except exceptions.KuryrLoadBalancerNotCreated: - with excutils.save_and_reraise_exception() as ex: - if self._sleep(deadline, attempt, ex.value): - ex.reraise = False - except os_exc.ConflictException: - with excutils.save_and_reraise_exception() as ex: - error_type = clients.get_neutron_error_type(ex.value) - if error_type == 'OverQuota': - if self._sleep(deadline, attempt, ex.value): - ex.reraise = False - except self._exceptions: - with excutils.save_and_reraise_exception() as ex: - if self._sleep(deadline, attempt, ex.value): - ex.reraise = False - else: - LOG.exception('Report handler unhealthy %s', - self._handler) - self._handler.set_liveness(alive=False, exc=ex.value) - except Exception as ex: - LOG.exception('Report handler unhealthy %s', self._handler) - self._handler.set_liveness(alive=False, exc=ex) - raise - - def _sleep(self, deadline, attempt, exception): - LOG.debug("Handler %s failed (attempt %s; %s)", - self._handler, attempt, exceptions.format_msg(exception)) - interval = utils.exponential_sleep(deadline, attempt, - self._interval) - if not interval: - LOG.debug("Handler %s failed (attempt %s; %s), " - "timeout exceeded (%s seconds)", - self._handler, attempt, exceptions.format_msg(exception), - self._timeout) - return 0 - - LOG.debug("Resumed after %s seconds. Retry handler %s", interval, - self._handler) - return interval diff --git a/kuryr_kubernetes/health.py b/kuryr_kubernetes/health.py deleted file mode 100644 index d45d80017..000000000 --- a/kuryr_kubernetes/health.py +++ /dev/null @@ -1,76 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import abc - -from flask import Flask -from oslo_config import cfg -from oslo_log import log as logging - -from kuryr_kubernetes import clients - -LOG = logging.getLogger(__name__) -CONF = cfg.CONF - - -class BaseHealthServer(abc.ABC): - """Base class of server used to provide readiness and liveness probes.""" - - def __init__(self, app_name, port): - self.app_name = app_name - self.port = port - self.ctx = None - self.application = Flask(app_name) - self.application.add_url_rule( - '/ready', methods=['GET'], view_func=self.readiness_status) - self.application.add_url_rule( - '/alive', methods=['GET'], view_func=self.liveness_status) - - def apply_conn_close(response): - response.headers['Connection'] = 'close' - return response - - self.application.after_request(apply_conn_close) - - @abc.abstractmethod - def readiness_status(self): - raise NotImplementedError() - - @abc.abstractmethod - def liveness_status(self): - raise NotImplementedError() - - def run(self): - # Disable obtrusive werkzeug logs. - logging.getLogger('werkzeug').setLevel(logging.WARNING) - - address = '::' - LOG.info('Starting %s health check server on %s:%d.', self.app_name, - address, self.port) - try: - self.application.run(address, self.port) - except Exception: - LOG.exception('Failed to start %s health check server.', - self.app_name) - raise - - def verify_k8s_connection(self): - k8s = clients.get_kubernetes_client() - try: - k8s.get('/healthz', json=False, headers={'Connection': 'close'}) - except Exception as e: - # Not LOG.exception to make sure long message from K8s API is not - # repeated. - LOG.error('Exception when trying to reach Kubernetes API: %s.', e) - return False - - return True diff --git a/kuryr_kubernetes/k8s_client.py b/kuryr_kubernetes/k8s_client.py deleted file mode 100644 index 25d4d8602..000000000 --- a/kuryr_kubernetes/k8s_client.py +++ /dev/null @@ -1,466 +0,0 @@ -# Copyright (c) 2016 Mirantis, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
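
For a concrete picture of how BaseHealthServer above is meant to be specialized, a minimal hypothetical subclass; the real controller-side implementation is the HealthServer in kuryr_kubernetes/controller/managers/health.py earlier in this diff, and the port chosen here is arbitrary:

from http import client as httplib

from kuryr_kubernetes import health


class MinimalHealthServer(health.BaseHealthServer):
    """Bare-bones probe server: ready only if the K8s API answers /healthz."""

    def __init__(self):
        # Assumes clients.setup_clients() already ran, as it does during
        # normal controller startup.
        super().__init__('minimal-health', 8090)

    def readiness_status(self):
        if not self.verify_k8s_connection():
            return 'K8s API unreachable.', httplib.INTERNAL_SERVER_ERROR, {}
        return 'ok', httplib.OK, {}

    def liveness_status(self):
        return 'ok', httplib.OK, {}
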
-import contextlib
-import datetime
-import functools
-import itertools
-import os
-import ssl
-import time
-from urllib import parse
-import urllib3
-
-from oslo_log import log as logging
-from oslo_serialization import jsonutils
-import requests
-from requests import adapters
-
-from kuryr.lib._i18n import _
-from kuryr_kubernetes import config
-from kuryr_kubernetes import constants
-from kuryr_kubernetes import exceptions as exc
-from kuryr_kubernetes import utils
-
-CONF = config.CONF
-LOG = logging.getLogger(__name__)
-
-
-class K8sClient(object):
-    # REVISIT(ivc): replace with python-k8sclient if it could be extended
-    # with 'WATCH' support
-
-    def __init__(self, base_url):
-        self._base_url = base_url
-        cert_file = config.CONF.kubernetes.ssl_client_crt_file
-        key_file = config.CONF.kubernetes.ssl_client_key_file
-        ca_crt_file = config.CONF.kubernetes.ssl_ca_crt_file
-        self.verify_server = config.CONF.kubernetes.ssl_verify_server_crt
-        token_file = config.CONF.kubernetes.token_file
-        self.token = None
-        self.cert = (None, None)
-        self.are_events_enabled = config.CONF.kubernetes.use_events
-
-        # Use larger connection pools, as we're running with a maximum of
-        # 1000 green threads.
-        self.session = requests.Session()
-        prefix = '%s://' % parse.urlparse(base_url).scheme
-        self.session.mount(prefix, adapters.HTTPAdapter(pool_maxsize=1000))
-        if token_file:
-            if os.path.exists(token_file):
-                with open(token_file, 'r') as f:
-                    self.token = f.readline().rstrip('\n')
-            else:
-                raise RuntimeError(
-                    _("Unable to find token_file : %s") % token_file)
-        else:
-            if cert_file and not os.path.exists(cert_file):
-                raise RuntimeError(
-                    _("Unable to find ssl cert_file : %s") % cert_file)
-            if key_file and not os.path.exists(key_file):
-                raise RuntimeError(
-                    _("Unable to find ssl key_file : %s") % key_file)
-            self.cert = (cert_file, key_file)
-        if self.verify_server:
-            if not ca_crt_file:
-                raise RuntimeError(
-                    _("ssl_ca_crt_file cannot be None"))
-            elif not os.path.exists(ca_crt_file):
-                raise RuntimeError(
-                    _("Unable to find ca cert_file : %s") % ca_crt_file)
-            else:
-                self.verify_server = ca_crt_file
-
-        # Let's setup defaults for our Session.
-        self.session.cert = self.cert
-        self.session.verify = self.verify_server
-        if self.token:
-            self.session.headers['Authorization'] = f'Bearer {self.token}'
-        # NOTE(dulek): Seems like this is the only way to set it globally.
-        self.session.request = functools.partial(
-            self.session.request, timeout=(
-                CONF.kubernetes.watch_connection_timeout,
-                CONF.kubernetes.watch_read_timeout))
-
-    def _raise_from_response(self, response):
-        if response.status_code == requests.codes.not_found:
-            raise exc.K8sResourceNotFound(response.text)
-        if response.status_code == requests.codes.conflict:
-            raise exc.K8sConflict(response.text)
-        if response.status_code == requests.codes.forbidden:
-            if 'because it is being terminated' in response.json()['message']:
-                raise exc.K8sNamespaceTerminating(response.text)
-            raise exc.K8sForbidden(response.text)
-        if response.status_code == requests.codes.unprocessable_entity:
-            # NOTE(gryf): on the k8s API, code 422 is also Forbidden, but
-            # specialized as FieldValueForbidden. Perhaps there are other
-            # usages for throwing unprocessable entity errors in different
-            # cases.
- if ('FieldValueForbidden' in response.text and - 'Forbidden' in response.json()['message']): - raise exc.K8sFieldValueForbidden(response.text) - raise exc.K8sUnprocessableEntity(response.text) - if not response.ok: - raise exc.K8sClientException(response.text) - - def get(self, path, json=True, headers=None): - LOG.debug("Get %(path)s", {'path': path}) - url = self._base_url + path - response = self.session.get(url, headers=headers) - self._raise_from_response(response) - - if json: - result = response.json() - kind = result['kind'] - - api_version = result.get('apiVersion') - if not api_version: - api_version = utils.get_api_ver(path) - - # Strip List from e.g. PodList. For some reason `.items` of a list - # returned from API doesn't have `kind` set. - # NOTE(gryf): Also, for the sake of calculating selfLink - # equivalent, we need to have both: kind and apiVersion, while the - # latter is not present on items list for core resources, while - # for custom resources there are both kind and apiVersion.. - if kind.endswith('List'): - kind = kind[:-4] - - # NOTE(gryf): In case we get null/None for items from the API, - # we need to convert it to the empty list, otherwise it might - # be propagated to the consumers of this method and sent back - # to the Kubernetes as is, and fail as a result. - if result['items'] is None: - result['items'] = [] - - for item in result['items']: - if not item.get('kind'): - item['kind'] = kind - if not item.get('apiVersion'): - item['apiVersion'] = api_version - - if not result.get('apiVersion'): - result['apiVersion'] = api_version - else: - result = response.text - - return result - - def _get_url_and_header(self, path, content_type): - url = self._base_url + path - header = {'Content-Type': content_type, - 'Accept': 'application/json'} - - return url, header - - def patch(self, field, path, data): - LOG.debug("Patch %(path)s: %(data)s", {'path': path, 'data': data}) - content_type = 'application/merge-patch+json' - url, header = self._get_url_and_header(path, content_type) - response = self.session.patch(url, json={field: data}, headers=header) - self._raise_from_response(response) - return response.json().get('status') - - def patch_crd(self, field, path, data, action='replace'): - content_type = 'application/json-patch+json' - url, header = self._get_url_and_header(path, content_type) - - if action == 'remove': - data = [{'op': action, - 'path': f'/{field}/{data}'}] - else: - if data: - data = [{'op': action, - 'path': f'/{field}/{crd_field}', - 'value': value} - for crd_field, value in data.items()] - else: - data = [{'op': action, - 'path': f'/{field}', - 'value': data}] - - LOG.debug("Patch %(path)s: %(data)s", { - 'path': path, 'data': data}) - - response = self.session.patch(url, data=jsonutils.dumps(data), - headers=header) - self._raise_from_response(response) - return response.json().get('status') - - def post(self, path, body): - LOG.debug("Post %(path)s: %(body)s", {'path': path, 'body': body}) - url = self._base_url + path - header = {'Content-Type': 'application/json'} - - response = self.session.post(url, json=body, headers=header) - self._raise_from_response(response) - return response.json() - - def delete(self, path): - LOG.debug("Delete %(path)s", {'path': path}) - url = self._base_url + path - header = {'Content-Type': 'application/json'} - - response = self.session.delete(url, headers=header) - self._raise_from_response(response) - return response.json() - - # TODO(dulek): add_finalizer() and remove_finalizer() have some code - # 
duplication, but I don't see a nice way to avoid it.
-    def add_finalizer(self, obj, finalizer):
-        if finalizer in obj['metadata'].get('finalizers', []):
-            return True
-
-        path = utils.get_res_link(obj)
-        LOG.debug(f"Add finalizer {finalizer} to {path}")
-        url, headers = self._get_url_and_header(
-            path, 'application/merge-patch+json')
-
-        for i in range(3):  # Let's make sure it's not an infinite loop
-            finalizers = obj['metadata'].get('finalizers', []).copy()
-            finalizers.append(finalizer)
-
-            data = {
-                'metadata': {
-                    'finalizers': finalizers,
-                    'resourceVersion': obj['metadata']['resourceVersion'],
-                },
-            }
-
-            response = self.session.patch(url, json=data, headers=headers)
-
-            if response.ok:
-                return True
-
-            try:
-                self._raise_from_response(response)
-            except (exc.K8sFieldValueForbidden, exc.K8sResourceNotFound):
-                # Object is being deleted or is already gone. Return.
-                return False
-            except exc.K8sConflict:
-                try:
-                    obj = self.get(path)
-                except exc.K8sResourceNotFound:
-                    # Object got removed before finalizer was set
-                    return False
-                if finalizer in obj['metadata'].get('finalizers', []):
-                    # Finalizer is there, return.
-                    return True
-
-        # If after 3 iterations there's still a conflict, just raise.
-        self._raise_from_response(response)
-
-    def remove_finalizer(self, obj, finalizer):
-        path = utils.get_res_link(obj)
-        LOG.debug(f"Remove finalizer {finalizer} from {path}")
-        url, headers = self._get_url_and_header(
-            path, 'application/merge-patch+json')
-
-        for i in range(3):  # Let's make sure it's not an infinite loop
-            finalizers = obj['metadata'].get('finalizers', []).copy()
-            try:
-                finalizers.remove(finalizer)
-            except ValueError:
-                # Finalizer is not there, return.
-                return True
-
-            data = {
-                'metadata': {
-                    'finalizers': finalizers,
-                    'resourceVersion': obj['metadata']['resourceVersion'],
-                },
-            }
-
-            response = self.session.patch(url, json=data, headers=headers)
-
-            if response.ok:
-                return True
-
-            try:
-                try:
-                    self._raise_from_response(response)
-                except exc.K8sConflict:
-                    obj = self.get(path)
-            except (exc.K8sFieldValueForbidden, exc.K8sResourceNotFound):
-                # Object is being deleted or gone already, stop.
-                return False
-
-        # If after 3 iterations there's still a conflict, just raise.
-        self._raise_from_response(response)
-
-    def get_loadbalancer_crd(self, obj):
-        name = obj['metadata']['name']
-        namespace = obj['metadata']['namespace']
-
-        try:
-            crd = self.get('{}/{}/kuryrloadbalancers/{}'.format(
-                constants.K8S_API_CRD_NAMESPACES, namespace,
-                name))
-        except exc.K8sResourceNotFound:
-            return None
-        except exc.K8sClientException:
-            LOG.exception("Kubernetes Client Exception.")
-            raise
-        return crd
-
-    def annotate(self, path, annotations, resource_version=None):
-        """Pushes a resource annotation to the K8s API resource
-
-        The annotate operation is made with a PATCH HTTP request of kind:
-        application/merge-patch+json as described in:
-
-        https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#patch-operations # noqa
-        """
-        LOG.debug("Annotate %(path)s: %(names)s", {
-            'path': path, 'names': list(annotations)})
-
-        content_type = 'application/merge-patch+json'
-        url, header = self._get_url_and_header(path, content_type)
-
-        while True:
-            metadata = {"annotations": annotations}
-            if resource_version:
-                metadata['resourceVersion'] = resource_version
-            data = jsonutils.dumps({"metadata": metadata}, sort_keys=True)
-            response = self.session.patch(url, data=data, headers=header)
-            if response.ok:
-                return response.json()['metadata'].get('annotations', {})
-            if response.status_code == requests.codes.conflict:
-                resource = self.get(path)
-                new_version = resource['metadata']['resourceVersion']
-                retrieved_annotations = resource['metadata'].get(
-                    'annotations', {})
-
-                for k, v in annotations.items():
-                    if v != retrieved_annotations.get(k):
-                        break
-                else:
-                    LOG.debug("Annotations for %(path)s already present: "
-                              "%(names)s", {'path': path,
-                                            'names': retrieved_annotations})
-                    return retrieved_annotations
-                # Retry patching with updated resourceVersion
-                resource_version = new_version
-                continue
-
-            LOG.error("Exception response, headers: %(headers)s, "
-                      "content: %(content)s, text: %(text)s"
-                      % {'headers': response.headers,
-                         'content': response.content, 'text': response.text})
-
-            self._raise_from_response(response)
-
-    def watch(self, path):
-        url = self._base_url + path
-        resource_version = None
-
-        attempt = 0
-        while True:
-            try:
-                params = {'watch': 'true'}
-                if resource_version:
-                    params['resourceVersion'] = resource_version
-                with contextlib.closing(
-                        self.session.get(
-                            url, params=params, stream=True)) as response:
-                    if not response.ok:
-                        raise exc.K8sClientException(response.text)
-                    attempt = 0
-                    for line in response.iter_lines():
-                        line = line.decode('utf-8').strip()
-                        if line:
-                            line_dict = jsonutils.loads(line)
-                            yield line_dict
-                            # Saving the resourceVersion in case of a restart.
-                            # At this point it's safely passed to handler.
-                            m = line_dict.get('object', {}).get('metadata', {})
-                            resource_version = m.get('resourceVersion', None)
-            except (requests.ReadTimeout, requests.ConnectionError,
-                    ssl.SSLError, requests.exceptions.ChunkedEncodingError,
-                    urllib3.exceptions.SSLError):
-                t = utils.exponential_backoff(attempt)
-                log = LOG.debug
-                if attempt > 0:
-                    # Only make it a warning if it's happening again, no need
-                    # to inform about all the read timeouts.
-                    log = LOG.warning
-                log('Connection error when watching %s. Retrying in %ds with '
-                    'resourceVersion=%s', path, t,
-                    params.get('resourceVersion'))
-                time.sleep(t)
-                attempt += 1
-
-    def add_event(self, resource, reason, message, type_='Normal',
-                  component='kuryr-controller'):
-        """Create an Event object for the provided resource."""
-        if not self.are_events_enabled:
-            return {}
-
-        if not resource:
-            return {}
-
-        involved_object = {'apiVersion': resource['apiVersion'],
-                           'kind': resource['kind'],
-                           'name': resource['metadata']['name'],
-                           'namespace': resource['metadata']['namespace'],
-                           'uid': resource['metadata']['uid']}
-
-        # This is needed for the Event date, otherwise LAST SEEN/Age will be
-        # empty and misleading.
-        now = datetime.datetime.utcnow().replace(tzinfo=datetime.timezone.utc)
-        date_time = now.strftime("%Y-%m-%dT%H:%M:%SZ")
-
-        name = ".".join((resource['metadata']['name'],
-                         self._get_hex_timestamp(now)))
-
-        event = {'kind': 'Event',
-                 'apiVersion': 'v1',
-                 'firstTimestamp': date_time,
-                 'metadata': {'name': name},
-                 'reason': reason,
-                 'message': message,
-                 'type': type_,
-                 'involvedObject': involved_object,
-                 'source': {'component': component,
-                            'host': utils.get_nodename()}}
-
-        try:
-            return self.post(f'{constants.K8S_API_BASE}/namespaces/'
-                             f'{resource["metadata"]["namespace"]}/events',
-                             event)
-        except exc.K8sNamespaceTerminating:
-            # We can't create events in a Namespace that is being terminated,
-            # there's no workaround, no need to log it, just ignore it.
-            return {}
-        except exc.K8sClientException:
-            LOG.warning(f'There was a non-critical error while creating an '
-                        f'Event for resource: "{resource}", with reason: '
-                        f'"{reason}", message: "{message}" and type: '
-                        f'"{type_}"')
-            return {}
-
-    def _get_hex_timestamp(self, datetimeobj):
-        """Get hex representation for timestamp.
-
-        In Kubernetes, an Event name is constructed from the name of the
-        bound object and a timestamp in hexadecimal representation.
-        Note that a Python timestamp is represented as a floating point
-        number:
-        1631622163.8534190654754638671875
-        while those which originate from K8s, converted to int, look like:
-        1631622163915909162
-        so, to get a similar integer, we need to multiply the float by
-        100000000 to keep the precision, cast it to integer to drop the
-        fractional part, and finally convert it to its hex representation.
-        """
-        timestamp = datetime.datetime.timestamp(datetimeobj)
-        return format(int(timestamp * 100000000), 'x')
diff --git a/kuryr_kubernetes/linux_net_utils.py b/kuryr_kubernetes/linux_net_utils.py
deleted file mode 100644
index 424e9bc56..000000000
--- a/kuryr_kubernetes/linux_net_utils.py
+++ /dev/null
@@ -1,57 +0,0 @@
-# Derived from nova/network/linux_net.py
-#
-# Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
-# Copyright 2010 United States Government as represented by the
-# Administrator of the National Aeronautics and Space Administration.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
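The helpers in this file wire a container VIF into Open vSwitch by shelling out to ovs-vsctl. For reference, a runnable sketch of the argument list _create_ovs_vif_cmd() below produces (the port, bridge, MAC and UUID values here are made up for illustration):

    # Illustrative values only; the helper below fills these in from the VIF.
    bridge, dev = 'br-int', 'tap01ab23cd'
    iface_id = '07cfe856-11cc-43d9-9200-ff4dc02d3620'
    mac = 'fa:16:3e:80:d4:21'
    instance_id = 'b6e8fb2b-de59-4673-923a-fc19cf168f3a'

    cmd = ['--', '--if-exists', 'del-port', dev, '--',
           'add-port', bridge, dev,
           '--', 'set', 'Interface', dev,
           'external-ids:iface-id=%s' % iface_id,
           'external-ids:iface-status=active',
           'external-ids:attached-mac=%s' % mac,
           'external-ids:vm-uuid=%s' % instance_id]
    print(' '.join(['ovs-vsctl'] + cmd))

The leading '--if-exists del-port' makes the call idempotent: re-plugging the same device replaces the port instead of failing.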
- -""" Implements linux net utils""" - -from oslo_concurrency import processutils -from oslo_log import log as logging - -LOG = logging.getLogger(__name__) - - -def _ovs_vsctl(args, timeout=None): - full_args = ['ovs-vsctl'] - if timeout is not None: - full_args += ['--timeout=%s' % timeout] - full_args += args - try: - return processutils.execute(*full_args, run_as_root=True) - except Exception as e: - LOG.error("Unable to execute %(cmd)s. Exception: %(exception)s", - {'cmd': full_args, 'exception': e}) - raise - - -def _create_ovs_vif_cmd(bridge, dev, iface_id, mac, instance_id): - cmd = ['--', '--if-exists', 'del-port', dev, '--', - 'add-port', bridge, dev, - '--', 'set', 'Interface', dev, - 'external-ids:iface-id=%s' % iface_id, - 'external-ids:iface-status=active', - 'external-ids:attached-mac=%s' % mac, - 'external-ids:vm-uuid=%s' % instance_id] - return cmd - - -def create_ovs_vif_port(bridge, dev, iface_id, mac, instance_id): - _ovs_vsctl(_create_ovs_vif_cmd(bridge, dev, iface_id, mac, instance_id)) - - -def delete_ovs_vif_port(bridge, dev): - _ovs_vsctl(['--', '--if-exists', 'del-port', bridge, dev]) diff --git a/kuryr_kubernetes/objects/__init__.py b/kuryr_kubernetes/objects/__init__.py deleted file mode 100644 index 6d97e444d..000000000 --- a/kuryr_kubernetes/objects/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -def register_locally_defined_vifs(): - __import__('kuryr_kubernetes.objects.vif') diff --git a/kuryr_kubernetes/objects/base.py b/kuryr_kubernetes/objects/base.py deleted file mode 100644 index 75498d05e..000000000 --- a/kuryr_kubernetes/objects/base.py +++ /dev/null @@ -1,30 +0,0 @@ -# Copyright (c) 2016 Mirantis, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import abc - -from oslo_versionedobjects import base as obj_base - - -class KuryrK8sObjectBase(obj_base.VersionedObject, - obj_base.ComparableVersionedObject, - metaclass=abc.ABCMeta): - - OBJ_PROJECT_NAMESPACE = 'kuryr_kubernetes' - - def __init__(self, context=None, **kwargs): - super(KuryrK8sObjectBase, self).__init__(context, **kwargs) - self.obj_set_defaults() - self.obj_reset_changes() diff --git a/kuryr_kubernetes/objects/fields.py b/kuryr_kubernetes/objects/fields.py deleted file mode 100644 index df19295b3..000000000 --- a/kuryr_kubernetes/objects/fields.py +++ /dev/null @@ -1,26 +0,0 @@ -# Copyright (c) 2016 Mirantis, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from os_vif.objects import vif -from oslo_versionedobjects import fields as obj_fields - - -class ListOfUUIDField(obj_fields.AutoTypedField): - AUTO_TYPE = obj_fields.List(obj_fields.UUID()) - - -class DictOfVIFsField(obj_fields.AutoTypedField): - AUTO_TYPE = obj_fields.Dict(obj_fields.Object(vif.VIFBase.__name__, - subclasses=True)) diff --git a/kuryr_kubernetes/objects/lbaas.py b/kuryr_kubernetes/objects/lbaas.py deleted file mode 100644 index 51f87633d..000000000 --- a/kuryr_kubernetes/objects/lbaas.py +++ /dev/null @@ -1,164 +0,0 @@ -# Copyright (c) 2016 Mirantis, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_versionedobjects import base as obj_base -from oslo_versionedobjects import fields as obj_fields - -from kuryr_kubernetes.objects import base as k_obj -from kuryr_kubernetes.objects import fields as k_fields - - -@obj_base.VersionedObjectRegistry.register -class LBaaSLoadBalancer(k_obj.KuryrK8sObjectBase): - # Version 1.0: Initial version - # Version 1.1: Added provider field and security_groups field. - # Version 1.2: Added support for security_groups=None - # Version 1.3: Added support for provider=None - # Version 1.4: Added support for security_groups=[] - VERSION = '1.4' - - fields = { - 'id': obj_fields.UUIDField(), - 'project_id': obj_fields.StringField(), - 'name': obj_fields.StringField(), - 'ip': obj_fields.IPAddressField(), - 'subnet_id': obj_fields.UUIDField(), - 'port_id': obj_fields.UUIDField(), - 'provider': obj_fields.StringField(nullable=True, - default=None), - 'security_groups': k_fields.ListOfUUIDField(nullable=True, - default=[]), - } - - -@obj_base.VersionedObjectRegistry.register -class LBaaSListener(k_obj.KuryrK8sObjectBase): - VERSION = '1.0' - - fields = { - 'id': obj_fields.UUIDField(), - 'project_id': obj_fields.StringField(), - 'name': obj_fields.StringField(), - 'loadbalancer_id': obj_fields.UUIDField(), - 'protocol': obj_fields.StringField(), - 'port': obj_fields.IntegerField(), - } - - -@obj_base.VersionedObjectRegistry.register -class LBaaSPool(k_obj.KuryrK8sObjectBase): - # Version 1.0: Initial version - # Version 1.1: Added support for pool attached directly to loadbalancer. 
- VERSION = '1.1' - - fields = { - 'id': obj_fields.UUIDField(), - 'project_id': obj_fields.StringField(), - 'name': obj_fields.StringField(), - 'loadbalancer_id': obj_fields.UUIDField(), - 'listener_id': obj_fields.UUIDField(nullable=True), - 'protocol': obj_fields.StringField(), - } - - -@obj_base.VersionedObjectRegistry.register -class LBaaSMember(k_obj.KuryrK8sObjectBase): - VERSION = '1.0' - - fields = { - 'id': obj_fields.UUIDField(), - 'project_id': obj_fields.StringField(), - 'name': obj_fields.StringField(), - 'pool_id': obj_fields.UUIDField(), - 'subnet_id': obj_fields.UUIDField(), - 'ip': obj_fields.IPAddressField(), - 'port': obj_fields.IntegerField(), - } - - -@obj_base.VersionedObjectRegistry.register -class LBaaSPubIp(k_obj.KuryrK8sObjectBase): - VERSION = '1.0' - - fields = { - 'ip_id': obj_fields.UUIDField(), - 'ip_addr': obj_fields.IPAddressField(), - 'alloc_method': obj_fields.StringField(), - } - - -@obj_base.VersionedObjectRegistry.register -class LBaaSState(k_obj.KuryrK8sObjectBase): - VERSION = '1.0' - - fields = { - 'loadbalancer': obj_fields.ObjectField(LBaaSLoadBalancer.__name__, - nullable=True, - default=None), - 'listeners': obj_fields.ListOfObjectsField(LBaaSListener.__name__, - default=[]), - 'pools': obj_fields.ListOfObjectsField(LBaaSPool.__name__, - default=[]), - 'members': obj_fields.ListOfObjectsField(LBaaSMember.__name__, - default=[]), - 'service_pub_ip_info': obj_fields.ObjectField(LBaaSPubIp.__name__, - nullable=True, - default=None), - } - - -@obj_base.VersionedObjectRegistry.register -class LBaaSPortSpec(k_obj.KuryrK8sObjectBase): - VERSION = '1.1' - # Version 1.0: Initial version - # Version 1.1: Added targetPort field. - - fields = { - 'name': obj_fields.StringField(nullable=True), - 'protocol': obj_fields.StringField(), - 'port': obj_fields.IntegerField(), - 'targetPort': obj_fields.StringField(), - } - - -@obj_base.VersionedObjectRegistry.register -class LBaaSServiceSpec(k_obj.KuryrK8sObjectBase): - VERSION = '1.0' - - fields = { - 'ip': obj_fields.IPAddressField(nullable=True, default=None), - 'ports': obj_fields.ListOfObjectsField(LBaaSPortSpec.__name__, - default=[]), - 'project_id': obj_fields.StringField(nullable=True, default=None), - 'subnet_id': obj_fields.UUIDField(nullable=True, default=None), - 'security_groups_ids': k_fields.ListOfUUIDField(default=[]), - 'type': obj_fields.StringField(nullable=True, default=None), - 'lb_ip': obj_fields.IPAddressField(nullable=True, default=None), - } - - -def flatten_object(ovo_primitive): - if type(ovo_primitive) is dict: - d = {} - for k, v in ovo_primitive['versioned_object.data'].items(): - d[k] = flatten_object(v) - return d - elif type(ovo_primitive) is list: - ls = [] - for v in ovo_primitive: - ls.append(flatten_object(v)) - return ls - else: - return ovo_primitive diff --git a/kuryr_kubernetes/objects/vif.py b/kuryr_kubernetes/objects/vif.py deleted file mode 100644 index 92b9a9ea2..000000000 --- a/kuryr_kubernetes/objects/vif.py +++ /dev/null @@ -1,82 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_versionedobjects import base as obj_base -from oslo_versionedobjects import fields as obj_fields - -from os_vif.objects import vif as obj_osvif - -from kuryr_kubernetes import constants -from kuryr_kubernetes.objects import base -from kuryr_kubernetes.objects import fields - - -@obj_base.VersionedObjectRegistry.register -class PodState(base.KuryrK8sObjectBase): - VERSION = '1.0' - - # FIXME(dulek): I know it's an ugly hack, but turns out you cannot - # serialize-deserialize objects containing objects from - # different namespaces, so we need 'os_vif' namespace here. - OBJ_PROJECT_NAMESPACE = 'os_vif' - - fields = { - 'default_vif': obj_fields.ObjectField(obj_osvif.VIFBase.__name__, - subclasses=True, nullable=False), - 'additional_vifs': fields.DictOfVIFsField(default={}), - } - - @property - def vifs(self): - d = { - constants.DEFAULT_IFNAME: self.default_vif, - } - d.update(self.additional_vifs) - return d - - -@obj_base.VersionedObjectRegistry.register -class VIFVlanNested(obj_osvif.VIFBase): - # This is OVO based vlan vif. - - VERSION = '1.0' - - fields = { - # Name of the device to create - 'vif_name': obj_fields.StringField(), - # vlan ID allocated to this vif - 'vlan_id': obj_fields.IntegerField() - } - - -@obj_base.VersionedObjectRegistry.register -class VIFMacvlanNested(obj_osvif.VIFBase): - # This is OVO based macvlan vif. - - VERSION = '1.0' - - fields = { - # Name of the device to create - 'vif_name': obj_fields.StringField(), - } - - -@obj_base.VersionedObjectRegistry.register -class VIFDPDKNested(obj_osvif.VIFNestedDPDK): - # This is OVO based DPDK Nested vif. - - VERSION = '1.0' - - fields = { - # name of the VIF - 'vif_name': obj_fields.StringField(), - } diff --git a/kuryr_kubernetes/opts.py b/kuryr_kubernetes/opts.py deleted file mode 100644 index 1b02ea39a..000000000 --- a/kuryr_kubernetes/opts.py +++ /dev/null @@ -1,61 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
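The module below feeds the oslo.config sample-configuration generator. A quick way to see which option groups it exposes (a sketch; it assumes kuryr-kubernetes and its dependencies are importable in the current environment):

    from kuryr_kubernetes import opts

    # Each entry is a (group_name, [Opt, ...]) tuple; None means [DEFAULT].
    for group, options in opts.list_kuryr_opts():
        print(group, [o.name for o in options][:3], '...')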
-import copy - -from oslo_log import _options - -from kuryr.lib import opts as lib_opts -from kuryr_kubernetes.cni import health as cni_health -from kuryr_kubernetes import config -from kuryr_kubernetes.controller.drivers import namespace_subnet -from kuryr_kubernetes.controller.drivers import vif_pool -from kuryr_kubernetes.controller.managers import health -from kuryr_kubernetes.controller.managers import pool -from kuryr_kubernetes import utils - -_kuryr_k8s_opts = [ - ('kubernetes', config.k8s_opts), - ('kuryr-kubernetes', config.kuryr_k8s_opts), - ('neutron_defaults', config.neutron_defaults), - ('pod_vif_nested', config.nested_vif_driver_opts), - ('vif_pool', vif_pool.vif_pool_driver_opts), - ('octavia_defaults', config.octavia_defaults), - ('cache_defaults', config.cache_defaults), - ('subnet_caching', utils.subnet_caching_opts), - ('node_driver_caching', vif_pool.node_vif_driver_caching_opts), - ('pool_manager', pool.pool_manager_opts), - ('cni_daemon', config.daemon_opts), - ('health_server', health.health_server_opts), - ('cni_health_server', cni_health.cni_health_server_opts), - ('namespace_subnet', namespace_subnet.namespace_subnet_driver_opts), -] - - -def list_kuryr_opts(): - """Return a list of oslo_config options available in Kuryr service. - - Each element of the list is a tuple. The first element is the name of the - group under which the list of elements in the second element will be - registered. A group name of None corresponds to the [DEFAULT] group in - config files. - - This function is also discoverable via the 'kuryr' entry point under - the 'oslo_config.opts' namespace. - - The purpose of this is to allow tools like the Oslo sample config file - generator to discover the options exposed to users by Kuryr. - - :returns: a list of (group_name, opts) tuples - """ - - return ([(k, copy.deepcopy(o)) for k, o in _kuryr_k8s_opts] + - lib_opts.list_kuryr_opts() + _options.list_opts()) diff --git a/kuryr_kubernetes/os_vif_plug_noop.py b/kuryr_kubernetes/os_vif_plug_noop.py deleted file mode 100644 index 4690ef0db..000000000 --- a/kuryr_kubernetes/os_vif_plug_noop.py +++ /dev/null @@ -1,44 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
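os-vif loads plugins such as the NoOpPlugin below through the 'os_vif' entry-point namespace; the exact setup.cfg wiring is not part of this hunk, so treat the entry-point name 'noop' as an assumption matching the plugin_name the plugin reports. Listing what is installed:

    from stevedore import extension

    # Enumerate os-vif plugins registered via entry points.
    mgr = extension.ExtensionManager(namespace='os_vif',
                                     invoke_on_load=False)
    print(sorted(mgr.names()))  # e.g. ['noop', 'ovs', ...], install-dependent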
-
-from os_vif import objects
-from os_vif.plugin import PluginBase
-
-from kuryr_kubernetes.objects import vif as k_vif
-
-
-class NoOpPlugin(PluginBase):
-    """No Op Plugin to be used with VIF types that don't need plugging"""
-
-    def describe(self):
-        return objects.host_info.HostPluginInfo(
-            plugin_name='noop',
-            vif_info=[
-                objects.host_info.HostVIFInfo(
-                    vif_object_name=k_vif.VIFVlanNested.__name__,
-                    min_version="1.0",
-                    max_version="1.0"),
-                objects.host_info.HostVIFInfo(
-                    vif_object_name=k_vif.VIFMacvlanNested.__name__,
-                    min_version="1.0",
-                    max_version="1.0"),
-                objects.host_info.HostVIFInfo(
-                    vif_object_name=k_vif.VIFDPDKNested.__name__,
-                    min_version="1.0",
-                    max_version="1.0"),
-            ])
-
-    def plug(self, vif, instance_info):
-        pass
-
-    def unplug(self, vif, instance_info):
-        pass
diff --git a/kuryr_kubernetes/os_vif_util.py b/kuryr_kubernetes/os_vif_util.py
deleted file mode 100644
index fa2f616ab..000000000
--- a/kuryr_kubernetes/os_vif_util.py
+++ /dev/null
@@ -1,432 +0,0 @@
-# Copyright (c) 2016 Mirantis, Inc.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-
-import os
-
-from kuryr.lib._i18n import _
-from kuryr.lib.binding.drivers import utils as kl_utils
-from kuryr.lib import constants as kl_const
-from os_vif.objects import fixed_ip as osv_fixed_ip
-from os_vif.objects import network as osv_network
-from os_vif.objects import route as osv_route
-from os_vif.objects import subnet as osv_subnet
-from os_vif.objects import vif as osv_vif
-from oslo_config import cfg as oslo_cfg
-from oslo_log import log as logging
-from stevedore import driver as stv_driver
-from vif_plug_ovs import constants as osv_const
-
-from kuryr_kubernetes import config
-from kuryr_kubernetes import constants as const
-from kuryr_kubernetes import exceptions as k_exc
-from kuryr_kubernetes.objects import vif as k_vif
-from kuryr_kubernetes import utils
-
-
-LOG = logging.getLogger(__name__)
-
-# REVISIT(ivc): consider making this module part of kuryr-lib
-_VIF_TRANSLATOR_NAMESPACE = "kuryr_kubernetes.vif_translators"
-_VIF_MANAGERS = {}
-
-
-def neutron_to_osvif_network(os_network):
-    """Converts Neutron network to os-vif Network.
-
-    :param os_network: openstack.network.v2.network.Network object.
-    :return: an os-vif Network object
-    """
-
-    obj = osv_network.Network(id=os_network.id)
-
-    if os_network.name is not None:
-        obj.label = os_network.name
-
-    if os_network.mtu is not None:
-        obj.mtu = os_network.mtu
-
-    # Vlan information will be used later in Sriov binding driver
-    if os_network.provider_network_type == 'vlan':
-        obj.should_provide_vlan = True
-        obj.vlan = os_network.provider_segmentation_id
-
-    return obj
-
-
-def neutron_to_osvif_subnet(os_subnet):
-    """Converts Neutron subnet to os-vif Subnet.
-
-    :param os_subnet: openstack.network.v2.subnet.Subnet object
-    :return: an os-vif Subnet object
-    """
-
-    obj = osv_subnet.Subnet(
-        cidr=os_subnet.cidr,
-        dns=os_subnet.dns_nameservers,
-        routes=_neutron_to_osvif_routes(os_subnet.host_routes))
-
-    if os_subnet.gateway_ip is not None:
-        obj.gateway = os_subnet.gateway_ip
-
-    return obj
-
-
-def _neutron_to_osvif_routes(neutron_routes):
-    """Converts Neutron host_routes to os-vif RouteList.
-
-    :param neutron_routes: list of routes as returned by neutron client's
-                           'show_subnet' in 'host_routes' attribute
-    :return: an os-vif RouteList object
-    """
-
-    # NOTE(gryf): Nested attributes for OpenStackSDK objects are simple types,
-    # like dicts and lists, that's why neutron_routes is a list of dicts.
-    obj_list = [osv_route.Route(cidr=route['destination'],
-                                gateway=route['nexthop'])
-                for route in neutron_routes]
-
-    return osv_route.RouteList(objects=obj_list)
-
-
-def _make_vif_subnet(subnets, subnet_id):
-    """Makes a copy of an os-vif Subnet from subnets mapping.
-
-    :param subnets: subnet mapping as returned by PodSubnetsDriver.get_subnets
-    :param subnet_id: ID of the subnet to extract from 'subnets' mapping
-    :return: a copy of an os-vif Subnet object matching 'subnet_id'
-    """
-
-    network = subnets[subnet_id]
-
-    if len(network.subnets.objects) != 1:
-        raise k_exc.IntegrityError(_(
-            "Network object for subnet %(subnet_id)s is invalid, "
-            "must contain a single subnet, but %(num_subnets)s found") % {
-                'subnet_id': subnet_id,
-                'num_subnets': len(network.subnets.objects)})
-
-    subnet = network.subnets.objects[0].obj_clone()
-    subnet.ips = osv_fixed_ip.FixedIPList(objects=[])
-    return subnet
-
-
-def _make_vif_subnets(neutron_port, subnets):
-    """Gets a list of os-vif Subnet objects for port.
-
-    :param neutron_port: dict containing port information as returned by
-                         neutron client's 'show_port' or
-                         openstack.network.v2.port.Port object
-    :param subnets: subnet mapping as returned by PodSubnetsDriver.get_subnets
-    :return: list of os-vif Subnet object
-    """
-
-    vif_subnets = {}
-    try:
-        fixed_ips = neutron_port.get('fixed_ips', [])
-        port_id = neutron_port.get('id')
-    except TypeError:
-        fixed_ips = neutron_port.fixed_ips
-        port_id = neutron_port.id
-
-    for neutron_fixed_ip in fixed_ips:
-        subnet_id = neutron_fixed_ip['subnet_id']
-        ip_address = neutron_fixed_ip['ip_address']
-
-        if subnet_id not in subnets:
-            continue
-
-        try:
-            subnet = vif_subnets[subnet_id]
-        except KeyError:
-            subnet = _make_vif_subnet(subnets, subnet_id)
-            vif_subnets[subnet_id] = subnet
-
-        subnet.ips.objects.append(osv_fixed_ip.FixedIP(address=ip_address))
-
-    if not vif_subnets:
-        raise k_exc.IntegrityError(_(
-            "No valid subnets found for port %(port_id)s") % {
-                'port_id': port_id})
-
-    return list(vif_subnets.values())
-
-
-def _make_vif_network(neutron_port, subnets):
-    """Get an os-vif Network object for port.
-
-    :param neutron_port: dict containing port information as returned by
-                         neutron client's 'show_port', or
-                         openstack.network.v2.port.Port object
-    :param subnets: subnet mapping as returned by PodSubnetsDriver.get_subnets
-    :return: os-vif Network object
-    """
-
-    # NOTE(gryf): Because we didn't convert macvlan driver, neutron_port can
-    # be either a dict or an object
-    try:
-        network_id = neutron_port.get('network_id')
-        port_id = neutron_port.get('id')
-    except TypeError:
-        network_id = neutron_port.network_id
-        port_id = neutron_port.id
-
-    try:
-        network = next(net.obj_clone() for net in subnets.values()
-                       if net.id == network_id)
-    except StopIteration:
-        raise k_exc.IntegrityError(_(
-            "Port %(port_id)s belongs to network %(network_id)s, "
-            "but requested networks are: %(requested_networks)s") % {
-                'port_id': port_id,
-                'network_id': network_id,
-                'requested_networks': [net.id for net in subnets.values()]})
-
-    network.subnets = osv_subnet.SubnetList(
-        objects=_make_vif_subnets(neutron_port, subnets))
-
-    return network
-
-
-# TODO(a.perevalov) generalize it with get_veth_pair_names,
-# but it's only reasonable if we're going to add vhostuser support
-# to the kuryr project
-def _get_vhu_vif_name(port_id):
-    ifname = osv_const.OVS_VHOSTUSER_PREFIX + port_id
-    ifname = ifname[:kl_const.NIC_NAME_LEN]
-    return ifname
-
-
-def _get_vif_name(neutron_port):
-    """Gets a VIF device name for port.
-
-    :param neutron_port: dict containing port information as returned by
-                         neutron client's 'show_port', or a port object
-                         returned by the openstack client.
-    """
-
-    try:
-        port_id = neutron_port['id']
-    except TypeError:
-        port_id = neutron_port.id
-
-    vif_name, _ = kl_utils.get_veth_pair_names(port_id)
-    return vif_name
-
-
-def _get_ovs_hybrid_bridge_name(os_port):
-    """Gets the name of the Linux bridge for a hybrid OpenVSwitch port.
-
-    :param os_port: openstack.network.v2.port.Port object
-    """
-    return ('qbr' + os_port.id)[:kl_const.NIC_NAME_LEN]
-
-
-def _is_port_active(neutron_port):
-    """Checks if port is active.
-
-    :param neutron_port: dict containing port information as returned by
-                         neutron client's 'show_port' or
-                         openstack.network.v2.port.Port object
-    """
-    try:
-        return (neutron_port['status'] == kl_const.PORT_STATUS_ACTIVE)
-    except TypeError:
-        return (neutron_port.status == kl_const.PORT_STATUS_ACTIVE)
-
-
-def neutron_to_osvif_vif_ovs(vif_plugin, os_port, subnets):
-    """Converts Neutron port to VIF object for os-vif 'ovs' plugin.
-
-    :param vif_plugin: name of the os-vif plugin to use (i.e.
'ovs') - :param os_port: openstack.network.v2.port.Port object - :param subnets: subnet mapping as returned by PodSubnetsDriver.get_subnets - :return: os-vif VIF object - """ - profile = osv_vif.VIFPortProfileOpenVSwitch(interface_id=os_port.id) - - details = os_port.binding_vif_details or {} - ovs_bridge = details.get('bridge_name', - config.CONF.neutron_defaults.ovs_bridge) - if not ovs_bridge: - raise oslo_cfg.RequiredOptError('ovs_bridge', 'neutron_defaults') - - network = _make_vif_network(os_port, subnets) - network.bridge = ovs_bridge - vhostuser_mode = details.get('vhostuser_mode', False) - - LOG.debug('Detected vhostuser_mode=%s for port %s', vhostuser_mode, - os_port.id) - if vhostuser_mode: - # TODO(a.perevalov) obtain path to mount point from pod's mountVolumes - vhostuser_mount_point = (config.CONF.vhostuser.mount_point) - if not vhostuser_mount_point: - raise oslo_cfg.RequiredOptError('vhostuser_mount_point', - 'neutron_defaults') - vif_name = _get_vhu_vif_name(os_port.id) - vif = osv_vif.VIFVHostUser( - id=os_port.id, - address=os_port.mac_address, - network=network, - has_traffic_filtering=details.get('port_filter', False), - preserve_on_delete=False, - active=_is_port_active(os_port), - port_profile=profile, - plugin='ovs', - path=os.path.join(vhostuser_mount_point, vif_name), - mode=vhostuser_mode, - vif_name=vif_name, - bridge_name=network.bridge) - elif details.get('ovs_hybrid_plug'): - vif = osv_vif.VIFBridge( - id=os_port.id, - address=os_port.mac_address, - network=network, - has_traffic_filtering=details.get('port_filter', False), - preserve_on_delete=False, - active=_is_port_active(os_port), - port_profile=profile, - plugin=vif_plugin, - vif_name=_get_vif_name(os_port), - bridge_name=_get_ovs_hybrid_bridge_name(os_port)) - else: - vif = osv_vif.VIFOpenVSwitch( - id=os_port.id, - address=os_port.mac_address, - network=network, - has_traffic_filtering=details.get('port_filter', False), - preserve_on_delete=False, - active=_is_port_active(os_port), - port_profile=profile, - plugin=vif_plugin, - vif_name=_get_vif_name(os_port), - bridge_name=network.bridge) - - return vif - - -def neutron_to_osvif_vif_nested_vlan(os_port, subnets, vlan_id): - """Converts Neutron port to VIF object for VLAN nested containers. - - :param os_port: openstack.network.v2.port.Port object - :param subnets: subnet mapping as returned by PodSubnetsDriver.get_subnets - :param vlan_id: VLAN id associated to the VIF object for the pod - :return: kuryr-k8s native VIF object for VLAN nested - """ - details = os_port.binding_vif_details or {} - - return k_vif.VIFVlanNested( - id=os_port.id, - address=os_port.mac_address, - network=_make_vif_network(os_port, subnets), - has_traffic_filtering=details.get('port_filter', False), - preserve_on_delete=False, - active=_is_port_active(os_port), - plugin=const.K8S_OS_VIF_NOOP_PLUGIN, - vif_name=_get_vif_name(os_port), - vlan_id=vlan_id) - - -def neutron_to_osvif_vif_nested_macvlan(neutron_port, subnets): - """Converts Neutron port to VIF object for MACVLAN nested containers. 
-
-    :param neutron_port: dict containing port information as returned by
-                         neutron client's 'show_port'
-    :param subnets: subnet mapping as returned by PodSubnetsDriver.get_subnets
-    :return: kuryr-k8s native VIF object for MACVLAN nested
-    """
-    details = neutron_port.get('binding:vif_details', {})
-
-    return k_vif.VIFMacvlanNested(
-        id=neutron_port['id'],
-        address=neutron_port['mac_address'],
-        network=_make_vif_network(neutron_port, subnets),
-        has_traffic_filtering=details.get('port_filter', False),
-        preserve_on_delete=False,
-        active=_is_port_active(neutron_port),
-        plugin=const.K8S_OS_VIF_NOOP_PLUGIN,
-        vif_name=_get_vif_name(neutron_port))
-
-
-def neutron_to_osvif_vif_dpdk(os_port, subnets, pod):
-    """Converts Neutron port to VIF object for nested dpdk containers.
-
-    :param os_port: dict containing port information as returned by
-                    neutron client's 'show_port'
-    :param subnets: subnet mapping as returned by PodSubnetsDriver.get_subnets
-    :param pod: pod object received by k8s and containing profile details
-    :return: os-vif VIF object
-    """
-
-    details = os_port.get('binding:vif_details', {})
-    profile = osv_vif.VIFPortProfileK8sDPDK(
-        l3_setup=False,
-        selflink=utils.get_res_link(pod))
-
-    return k_vif.VIFDPDKNested(
-        id=os_port['id'],
-        port_profile=profile,
-        address=os_port['mac_address'],
-        network=_make_vif_network(os_port, subnets),
-        has_traffic_filtering=details.get('port_filter', False),
-        preserve_on_delete=False,
-        active=_is_port_active(os_port),
-        plugin=const.K8S_OS_VIF_NOOP_PLUGIN,
-        pci_address="",
-        dev_driver="",
-        vif_name=_get_vif_name(os_port))
-
-
-def neutron_to_osvif_vif(vif_translator, os_port, subnets):
-    """Converts Neutron port to os-vif VIF object.
-
-    :param vif_translator: name of the translator for the os-vif plugin to use
-    :param os_port: openstack.network.v2.port.Port object
-    :param subnets: subnet mapping as returned by PodSubnetsDriver.get_subnets
-    :return: os-vif VIF object
-    """
-    try:
-        mgr = _VIF_MANAGERS[vif_translator]
-    except KeyError:
-        mgr = stv_driver.DriverManager(
-            namespace=_VIF_TRANSLATOR_NAMESPACE,
-            name=vif_translator, invoke_on_load=False)
-        _VIF_MANAGERS[vif_translator] = mgr
-
-    return mgr.driver(vif_translator, os_port, subnets)
-
-
-def osvif_to_neutron_fixed_ips(subnets):
-    fixed_ips = []
-
-    for subnet_id, network in subnets.items():
-        ips = []
-        if len(network.subnets.objects) > 1:
-            raise k_exc.IntegrityError(_(
-                "Network object for subnet %(subnet_id)s is invalid, "
-                "must contain a single subnet, but %(num_subnets)s found") % {
-                    'subnet_id': subnet_id,
-                    'num_subnets': len(network.subnets.objects)})
-
-        for subnet in network.subnets.objects:
-            if subnet.obj_attr_is_set('ips'):
-                ips.extend([str(ip.address) for ip in subnet.ips.objects])
-        if ips:
-            fixed_ips.extend([{'subnet_id': subnet_id, 'ip_address': ip}
-                              for ip in ips])
-        else:
-            fixed_ips.append({'subnet_id': subnet_id})
-
-    return fixed_ips
diff --git a/kuryr_kubernetes/tests/__init__.py b/kuryr_kubernetes/tests/__init__.py
deleted file mode 100644
index e69de29bb..000000000
diff --git a/kuryr_kubernetes/tests/base.py b/kuryr_kubernetes/tests/base.py
deleted file mode 100644
index b84e00c0d..000000000
--- a/kuryr_kubernetes/tests/base.py
+++ /dev/null
@@ -1,24 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License.
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -from kuryr_kubernetes import config -import os_vif -from oslotest import base - - -class TestCase(base.BaseTestCase): - - """Test case base class for all unit tests.""" - def setUp(self): - super(TestCase, self).setUp() - args = [] - config.init(args=args) - os_vif.initialize() diff --git a/kuryr_kubernetes/tests/fake.py b/kuryr_kubernetes/tests/fake.py deleted file mode 100644 index 57e00ea31..000000000 --- a/kuryr_kubernetes/tests/fake.py +++ /dev/null @@ -1,182 +0,0 @@ -# Copyright (c) 2017 Red Hat. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import uuid - -from openstack.network.v2 import port as os_port -from openstack.network.v2 import security_group_rule as os_sgr -from os_vif import objects as osv_objects -from os_vif.objects import vif as osv_vif -from oslo_serialization import jsonutils - -from kuryr_kubernetes import constants - - -def _fake_vif(cls=osv_vif.VIFOpenVSwitch): - vif = cls( - id=uuid.uuid4(), - vif_name='h_interface', - bridge_name='bridge', - address='3e:94:b7:31:a0:83', - port_profile=osv_objects.vif.VIFPortProfileOpenVSwitch( - interface_id='89eccd45-43e9-43d8-b4cc-4c13db13f782', - profile_id=str(uuid.uuid4()), - ), - ) - vif.network = osv_objects.network.Network(id=uuid.uuid4(), mtu=1) - subnet = osv_objects.subnet.Subnet( - uuid=uuid.uuid4(), - dns=['192.168.0.1'], - cidr='192.168.0.0/24', - gateway='192.168.0.1', - routes=osv_objects.route.RouteList(objects=[]), - ) - subnet.ips = osv_objects.fixed_ip.FixedIPList(objects=[]) - subnet.ips.objects.append( - osv_objects.fixed_ip.FixedIP(address='192.168.0.2')) - vif.network.subnets.objects.append(subnet) - vif.active = True - return vif - - -def _fake_vif_dict(obj=None): - if obj: - return obj.obj_to_primitive() - else: - return _fake_vif().obj_to_primitive() - - -def _fake_vif_string(dictionary=None): - if dictionary: - return jsonutils.dumps(dictionary) - else: - return jsonutils.dumps(_fake_vif_dict()) - - -def _fake_vifs(cls=osv_vif.VIFOpenVSwitch, prefix='eth'): - return {'eth0': _fake_vif(cls), prefix+'1': _fake_vif(cls)} - - -def _fake_vifs_dict(obj=None): - if obj: - return { - ifname: vif.obj_to_primitive() for - ifname, vif in obj.items() - } - else: - return { - ifname: vif.obj_to_primitive() for - ifname, vif in _fake_vifs().items() - } - - -def _fake_vifs_string(dictionary=None): - if dictionary: - return jsonutils.dumps(dictionary) - else: - return jsonutils.dumps(_fake_vifs_dict()) - - -def get_port_obj(port_id='07cfe856-11cc-43d9-9200-ff4dc02d3620', - device_owner='compute:kuryr', ip_address=None, - vif_details=None, **kwargs): - - fixed_ips = [{'subnet_id': 
'e1942bb1-5f51-4646-9885-365b66215592', - 'ip_address': '10.10.0.5'}, - {'subnet_id': '4894baaf-df06-4a54-9885-9cd99d1cc245', - 'ip_address': 'fd35:7db5:e3fc:0:f816:3eff:fe80:d421'}] - if ip_address: - fixed_ips[0]['ip_address'] = ip_address - security_group_ids = ['cfb3dfc4-7a43-4ba1-b92d-b8b2650d7f88'] - - if not vif_details: - vif_details = {'port_filter': True, 'ovs_hybrid_plug': False} - - port_data = {'allowed_address_pairs': [], - 'binding_host_id': 'kuryr-devstack', - 'binding_profile': {}, - 'binding_vif_details': vif_details, - 'binding_vif_type': 'ovs', - 'binding_vnic_type': 'normal', - 'created_at': '2017-06-09T13:23:24Z', - 'data_plane_status': None, - 'description': '', - 'device_id': '', - 'device_owner': device_owner, - 'dns_assignment': None, - 'dns_domain': None, - 'dns_name': None, - 'extra_dhcp_opts': [], - 'fixed_ips': fixed_ips, - 'id': port_id, - 'ip_address': None, - 'is_admin_state_up': True, - 'is_port_security_enabled': True, - 'location': None, - 'mac_address': 'fa:16:3e:80:d4:21', - 'name': constants.KURYR_PORT_NAME, - 'network_id': 'ba44f957-c467-412b-b985-ae720514bc46', - 'option_name': None, - 'option_value': None, - 'project_id': 'b6e8fb2bde594673923afc19cf168f3a', - 'qos_policy_id': None, - 'revision_number': 9, - 'security_group_ids': security_group_ids, - 'status': u'DOWN', - 'subnet_id': None, - 'tags': [], - 'trunk_details': None, - 'updated_at': u'2019-12-04T15:06:09Z'} - port_data.update(kwargs) - return os_port.Port(**port_data) - - -def get_sgr_obj(sgr_id='7621d1e0-a2d2-4496-94eb-ffd375d20877', - sg_id='cfb3dfc4-7a43-4ba1-b92d-b8b2650d7f88', - protocol='tcp', direction='ingress'): - - sgr_data = {'description': '', - 'direction': direction, - 'ether_type': 'IPv4', - 'id': sgr_id, - 'port_range_max': 8080, - 'port_range_min': 8080, - 'project_id': '5ea46368c7fe436bb8732738c149fbce', - 'protocol': protocol, - 'remote_group_id': None, - 'remote_ip_prefix': None, - 'security_group_id': sg_id, - 'tenant_id': '5ea46368c7fe436bb8732738c149fbce'} - - return os_sgr.SecurityGroupRule(**sgr_data) - - -def get_k8s_pod(name='pod-5bb648d658-55n76', namespace='namespace', - uid='683da866-6bb1-4da2-bf6a-a5f4137c38e7'): - - return {'apiVersion': 'v1', - 'kind': 'Pod', - 'metadata': {'creationTimestamp': '2020-12-22T09:04:29Z', - 'finalizers': ['kuryr.openstack.org/pod-finalizer'], - 'generateName': 'pod-5bb648d658-', - 'labels': {'app': 'pod', - 'pod-template-hash': '5bb648d658'}, - 'operation': 'Update', - 'name': name, - 'namespace': namespace, - 'resourceVersion': '19416', - 'uid': uid}, - 'spec': {}, - 'status': {}} diff --git a/kuryr_kubernetes/tests/unit/__init__.py b/kuryr_kubernetes/tests/unit/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/kuryr_kubernetes/tests/unit/cmd/__init__.py b/kuryr_kubernetes/tests/unit/cmd/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/kuryr_kubernetes/tests/unit/cmd/eventlet/__init__.py b/kuryr_kubernetes/tests/unit/cmd/eventlet/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/kuryr_kubernetes/tests/unit/cmd/eventlet/test_controller.py b/kuryr_kubernetes/tests/unit/cmd/eventlet/test_controller.py deleted file mode 100644 index 992972849..000000000 --- a/kuryr_kubernetes/tests/unit/cmd/eventlet/test_controller.py +++ /dev/null @@ -1,34 +0,0 @@ -# Copyright (c) 2016 Mirantis, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from unittest import mock - -from kuryr_kubernetes.tests import base as test_base - - -class TestControllerCmd(test_base.TestCase): - - @mock.patch('kuryr_kubernetes.controller.service.start') - @mock.patch('eventlet.monkey_patch') - def test_start(self, m_evmp, m_start): - # NOTE(ivc): eventlet.monkey_patch is invoked during the module - # import, so the controller cmd has to be imported locally to verify - # that monkey_patch is called - from kuryr_kubernetes.cmd.eventlet import controller - - controller.start() - - m_evmp.assert_called() - m_start.assert_called() diff --git a/kuryr_kubernetes/tests/unit/cmd/test_daemon.py b/kuryr_kubernetes/tests/unit/cmd/test_daemon.py deleted file mode 100644 index fa4a5c1e5..000000000 --- a/kuryr_kubernetes/tests/unit/cmd/test_daemon.py +++ /dev/null @@ -1,27 +0,0 @@ -# Copyright (c) 2017 NEC Corporation. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from unittest import mock - -from kuryr_kubernetes.tests import base as test_base - - -class TestDaemonCmd(test_base.TestCase): - @mock.patch('kuryr_kubernetes.cni.daemon.service.start') - def test_start(self, m_start): - from kuryr_kubernetes.cmd import daemon # To make it import a mock. - daemon.start() - - m_start.assert_called() diff --git a/kuryr_kubernetes/tests/unit/cmd/test_status.py b/kuryr_kubernetes/tests/unit/cmd/test_status.py deleted file mode 100644 index fc00c0b21..000000000 --- a/kuryr_kubernetes/tests/unit/cmd/test_status.py +++ /dev/null @@ -1,148 +0,0 @@ -# Copyright 2018 Red Hat -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
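For context on the tests below: UpgradeCheckResult and UpgradeCommands implement the standard OpenStack upgrade-check pattern, in which each check returns a code (0 Success, 1 Warning, 2 Failure) and the command exits with the worst one. A hedged usage sketch of the class under test; unlike the tests, which mock the client setup, this needs a reachable Kubernetes API:

    from kuryr_kubernetes.cmd import status

    cmd = status.UpgradeCommands()
    code = cmd.upgrade_check()   # prints a result table per check
    print('exit code:', code)    # 0, 1 or 2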
- -import io -from unittest import mock - -from oslo_serialization import jsonutils - -from kuryr_kubernetes.cmd import status -from kuryr_kubernetes import constants -from kuryr_kubernetes.objects import vif -from kuryr_kubernetes.tests import base as test_base - - -class TestStatusCmd(test_base.TestCase): - @mock.patch('kuryr_kubernetes.clients.get_kubernetes_client') - @mock.patch('kuryr_kubernetes.clients.setup_kubernetes_client') - def setUp(self, m_client_setup, m_client_get): - super(TestStatusCmd, self).setUp() - self.cmd = status.UpgradeCommands() - - def test_upgrade_result_get_details(self): - res = status.UpgradeCheckResult(0, 'a ' * 50) - - self.assertEqual( - (('a ' * 30).rstrip() + '\n' + (' ' * 9) + ('a ' * 20)).rstrip(), - res.get_details()) - - def test__get_annotation_missing(self): - pod = { - 'metadata': { - 'annotations': {} - } - } - - self.assertIsNone(self.cmd._get_annotation(pod)) - - def test__get_annotation_existing(self): - mock_obj = vif.PodState( - default_vif=vif.VIFMacvlanNested(vif_name='foo')) - - pod = { - 'metadata': { - 'annotations': { - constants.K8S_ANNOTATION_VIF: jsonutils.dumps( - mock_obj.obj_to_primitive()) - } - } - } - - obj = self.cmd._get_annotation(pod) - self.assertEqual(mock_obj, obj) - - @mock.patch('sys.stdout', new_callable=io.StringIO) - def _test_upgrade_check(self, code, code_name, m_stdout): - method_success_m = mock.Mock() - method_success_m.return_value = status.UpgradeCheckResult(0, 'foo') - method_code_m = mock.Mock() - method_code_m.return_value = status.UpgradeCheckResult(code, 'bar') - - self.cmd.check_methods = {'baz': method_success_m, - 'blah': method_code_m} - self.assertEqual(code, self.cmd.upgrade_check()) - - output = m_stdout.getvalue() - self.assertIn('baz', output) - self.assertIn('bar', output) - self.assertIn('foo', output) - self.assertIn('blah', output) - self.assertIn('Success', output) - self.assertIn(code_name, output) - - def test_upgrade_check_success(self): - self._test_upgrade_check(0, 'Success') - - def test_upgrade_check_warning(self): - self._test_upgrade_check(1, 'Warning') - - def test_upgrade_check_failure(self): - self._test_upgrade_check(2, 'Failure') - - def _test__check_annotations(self, ann_objs, code): - pods = { - 'items': [ - { - 'metadata': { - 'annotations': { - constants.K8S_ANNOTATION_VIF: ann - } - } - } for ann in ann_objs - ] - } - self.cmd.k8s = mock.Mock(get=mock.Mock(return_value=pods)) - res = self.cmd._check_annotations() - self.assertEqual(code, res.code) - - def test__check_annotations_succeed(self): - ann_objs = [ - vif.PodState(default_vif=vif.VIFMacvlanNested(vif_name='foo')), - vif.PodState(default_vif=vif.VIFMacvlanNested(vif_name='bar')), - ] - ann_objs = [jsonutils.dumps(ann.obj_to_primitive()) - for ann in ann_objs] - - self._test__check_annotations(ann_objs, 0) - - def test__check_annotations_failure(self): - ann_objs = [ - vif.PodState(default_vif=vif.VIFMacvlanNested(vif_name='foo')), - vif.VIFMacvlanNested(vif_name='bar'), - ] - ann_objs = [jsonutils.dumps(ann.obj_to_primitive()) - for ann in ann_objs] - - self._test__check_annotations(ann_objs, 2) - - def test__check_annotations_malformed_and_old(self): - ann_objs = [ - vif.PodState(default_vif=vif.VIFMacvlanNested(vif_name='foo')), - vif.VIFMacvlanNested(vif_name='bar'), - ] - ann_objs = [jsonutils.dumps(ann.obj_to_primitive()) - for ann in ann_objs] - ann_objs.append('{}') - - self._test__check_annotations(ann_objs, 2) - - def test__check_annotations_malformed(self): - ann_objs = [ - 
vif.PodState(default_vif=vif.VIFMacvlanNested(vif_name='foo')), - ] - ann_objs = [jsonutils.dumps(ann.obj_to_primitive()) - for ann in ann_objs] - ann_objs.append('{}') - - self._test__check_annotations(ann_objs, 1) diff --git a/kuryr_kubernetes/tests/unit/cni/__init__.py b/kuryr_kubernetes/tests/unit/cni/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/kuryr_kubernetes/tests/unit/cni/plugins/__init__.py b/kuryr_kubernetes/tests/unit/cni/plugins/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/kuryr_kubernetes/tests/unit/cni/plugins/test_k8s_cni_registry.py b/kuryr_kubernetes/tests/unit/cni/plugins/test_k8s_cni_registry.py deleted file mode 100644 index 678b70ad6..000000000 --- a/kuryr_kubernetes/tests/unit/cni/plugins/test_k8s_cni_registry.py +++ /dev/null @@ -1,258 +0,0 @@ -# Copyright 2017 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from unittest import mock - -from oslo_config import cfg - -from kuryr_kubernetes.cni.plugins import k8s_cni_registry -from kuryr_kubernetes.cni import utils -from kuryr_kubernetes import exceptions -from kuryr_kubernetes.tests import base -from kuryr_kubernetes.tests import fake -from kuryr_kubernetes.tests.unit import kuryr_fixtures - - -class TestK8sCNIRegistryPlugin(base.TestCase): - def setUp(self): - super(TestK8sCNIRegistryPlugin, self).setUp() - self.k8s_mock = self.useFixture(kuryr_fixtures.MockK8sClient()).client - self.default_iface = 'baz' - self.additional_iface = 'eth1' - self.kp = {'apiVersion': 'openstack.org/v1', - 'kind': 'KuryrPort', - 'metadata': {'name': 'foo', 'uid': 'bar', - 'namespace': 'default'}, - 'spec': {'podUid': 'bar', 'podStatic': False}} - self.vifs = fake._fake_vifs() - registry = {'default/foo': {'kp': self.kp, 'vifs': self.vifs, - 'containerid': None, - 'vif_unplugged': False, - 'del_received': False}} - healthy = mock.Mock() - self.plugin = k8s_cni_registry.K8sCNIRegistryPlugin(registry, healthy) - self.params = mock.Mock( - args=utils.CNIArgs('K8S_POD_NAME=foo;K8S_POD_NAMESPACE=default;' - 'K8S_POD_UID=bar'), - CNI_IFNAME=self.default_iface, CNI_NETNS=123, - CNI_CONTAINERID='cont_id') - - @mock.patch('oslo_concurrency.lockutils.lock') - @mock.patch('kuryr_kubernetes.cni.binding.base.connect') - def test_add_present(self, m_connect, m_lock): - self.k8s_mock.get.return_value = self.kp - - self.plugin.add(self.params) - - m_lock.assert_called_with('default/foo', external=True) - m_connect.assert_any_call(mock.ANY, mock.ANY, self.default_iface, - 123, report_health=mock.ANY, - is_default_gateway=True, - container_id='cont_id') - m_connect.assert_any_call(mock.ANY, mock.ANY, self.additional_iface, - 123, report_health=mock.ANY, - is_default_gateway=False, - container_id='cont_id') - self.assertEqual('cont_id', - self.plugin.registry['default/foo']['containerid']) - - @mock.patch('oslo_concurrency.lockutils.lock') - @mock.patch('kuryr_kubernetes.cni.binding.base.connect') - def test_add_no_uid(self, m_connect, m_lock): - self.k8s_mock.get.return_value = 
self.kp - - self.params.args = utils.CNIArgs( - 'K8S_POD_NAME=foo;K8S_POD_NAMESPACE=default') - self.plugin.add(self.params) - - m_lock.assert_called_with('default/foo', external=True) - m_connect.assert_any_call(mock.ANY, mock.ANY, self.default_iface, - 123, report_health=mock.ANY, - is_default_gateway=True, - container_id='cont_id') - m_connect.assert_any_call(mock.ANY, mock.ANY, self.additional_iface, - 123, report_health=mock.ANY, - is_default_gateway=False, - container_id='cont_id') - self.k8s_mock.get.assert_any_call( - '/api/v1/namespaces/default/pods/foo') - self.assertEqual('cont_id', - self.plugin.registry['default/foo']['containerid']) - - @mock.patch('kuryr_kubernetes.cni.binding.base.connect') - def test_add_wrong_uid(self, m_connect): - cfg.CONF.set_override('vif_annotation_timeout', 0, group='cni_daemon') - self.addCleanup(cfg.CONF.set_override, 'vif_annotation_timeout', 120, - group='cni_daemon') - self.k8s_mock.get.return_value = self.kp - - self.params.args = utils.CNIArgs( - 'K8S_POD_NAME=foo;K8S_POD_NAMESPACE=default;K8S_POD_UID=blob') - self.assertRaises(exceptions.CNIPodUidMismatch, self.plugin.add, - self.params) - - m_connect.assert_not_called() - self.k8s_mock.get.assert_not_called() - - @mock.patch('oslo_concurrency.lockutils.lock') - @mock.patch('kuryr_kubernetes.cni.binding.base.connect') - def test_add_wrong_uid_static(self, m_connect, m_lock): - cfg.CONF.set_override('vif_annotation_timeout', 0, group='cni_daemon') - self.addCleanup(cfg.CONF.set_override, 'vif_annotation_timeout', 120, - group='cni_daemon') - self.k8s_mock.get.return_value = self.kp - - self.params.args = utils.CNIArgs( - 'K8S_POD_NAME=foo;K8S_POD_NAMESPACE=default;K8S_POD_UID=blob') - self.kp['spec']['podStatic'] = True - self.plugin.add(self.params) - - m_lock.assert_called_with('default/foo', external=True) - m_connect.assert_any_call(mock.ANY, mock.ANY, self.default_iface, - 123, report_health=mock.ANY, - is_default_gateway=True, - container_id='cont_id') - m_connect.assert_any_call(mock.ANY, mock.ANY, self.additional_iface, - 123, report_health=mock.ANY, - is_default_gateway=False, - container_id='cont_id') - self.k8s_mock.get.assert_any_call( - '/api/v1/namespaces/default/pods/foo') - self.assertEqual('cont_id', - self.plugin.registry['default/foo']['containerid']) - - @mock.patch('oslo_concurrency.lockutils.lock') - @mock.patch('kuryr_kubernetes.cni.binding.base.connect') - def test_add_wrong_uid_none_static(self, m_connect, m_lock): - cfg.CONF.set_override('vif_annotation_timeout', 0, group='cni_daemon') - self.addCleanup(cfg.CONF.set_override, 'vif_annotation_timeout', 120, - group='cni_daemon') - self.k8s_mock.get.side_effect = [ - {'metadata': { - 'annotations': {'kubernetes.io/config.source': 'file'}}}, - self.kp] - - self.params.args = utils.CNIArgs( - 'K8S_POD_NAME=foo;K8S_POD_NAMESPACE=default;K8S_POD_UID=blob') - del self.kp['spec']['podStatic'] - self.plugin.add(self.params) - - m_lock.assert_called_with('default/foo', external=True) - m_connect.assert_any_call(mock.ANY, mock.ANY, self.default_iface, - 123, report_health=mock.ANY, - is_default_gateway=True, - container_id='cont_id') - m_connect.assert_any_call(mock.ANY, mock.ANY, self.additional_iface, - 123, report_health=mock.ANY, - is_default_gateway=False, - container_id='cont_id') - self.k8s_mock.get.assert_any_call( - '/api/v1/namespaces/default/pods/foo') - self.assertEqual('cont_id', - self.plugin.registry['default/foo']['containerid']) - - @mock.patch('oslo_concurrency.lockutils.lock') - 
@mock.patch('kuryr_kubernetes.cni.binding.base.disconnect') - def test_del_present(self, m_disconnect, m_lock): - self.plugin.delete(self.params) - - m_lock.assert_called_with('default/foo', external=True) - m_disconnect.assert_any_call(mock.ANY, mock.ANY, self.default_iface, - 123, report_health=mock.ANY, - is_default_gateway=True, - container_id='cont_id') - m_disconnect.assert_any_call(mock.ANY, mock.ANY, self.additional_iface, - 123, report_health=mock.ANY, - is_default_gateway=False, - container_id='cont_id') - self.assertIn('default/foo', self.plugin.registry) - self.assertEqual(True, - self.plugin.registry['default/foo']['vif_unplugged']) - - @mock.patch('oslo_concurrency.lockutils.lock') - @mock.patch('kuryr_kubernetes.cni.binding.base.disconnect') - def test_remove_pod_from_registry_after_del(self, m_disconnect, m_lock): - self.plugin.registry['default/foo']['del_received'] = True - self.plugin.delete(self.params) - - m_lock.assert_called_with('default/foo', external=True) - self.assertNotIn('default/foo', self.plugin.registry) - m_disconnect.assert_any_call(mock.ANY, mock.ANY, self.default_iface, - 123, report_health=mock.ANY, - is_default_gateway=True, - container_id='cont_id') - m_disconnect.assert_any_call(mock.ANY, mock.ANY, self.additional_iface, - 123, report_health=mock.ANY, - is_default_gateway=False, - container_id='cont_id') - - @mock.patch('oslo_concurrency.lockutils.lock') - @mock.patch('kuryr_kubernetes.cni.binding.base.disconnect') - def test_del_wrong_container_id(self, m_disconnect, m_lock): - registry = {'default/foo': {'kp': self.kp, 'vifs': self.vifs, - 'containerid': 'different'}} - healthy = mock.Mock() - self.plugin = k8s_cni_registry.K8sCNIRegistryPlugin(registry, healthy) - self.plugin.delete(self.params) - - m_disconnect.assert_not_called() - m_lock.assert_called_with('default/foo', external=True) - - @mock.patch('oslo_concurrency.lockutils.lock') - @mock.patch('time.sleep', mock.Mock()) - @mock.patch('kuryr_kubernetes.cni.binding.base.connect') - def test_add_present_on_5_try(self, m_connect, m_lock): - se = [KeyError] * 5 - se.append({'kp': self.kp, 'vifs': self.vifs, 'containerid': None, - 'vif_unplugged': False, 'del_received': False}) - se.append({'kp': self.kp, 'vifs': self.vifs, 'containerid': None, - 'vif_unplugged': False, 'del_received': False}) - se.append({'kp': self.kp, 'vifs': self.vifs, 'containerid': None, - 'vif_unplugged': False, 'del_received': False}) - m_getitem = mock.Mock(side_effect=se) - m_setitem = mock.Mock() - m_registry = mock.Mock(__getitem__=m_getitem, __setitem__=m_setitem, - __contains__=mock.Mock(return_value=False)) - self.plugin.registry = m_registry - self.plugin.add(self.params) - - m_lock.assert_called_with('default/foo', external=True) - m_setitem.assert_called_once_with('default/foo', - {'kp': self.kp, - 'vifs': self.vifs, - 'containerid': 'cont_id', - 'vif_unplugged': False, - 'del_received': False}) - m_connect.assert_any_call(mock.ANY, mock.ANY, self.default_iface, - 123, report_health=mock.ANY, - is_default_gateway=True, - container_id='cont_id') - m_connect.assert_any_call(mock.ANY, mock.ANY, self.additional_iface, - 123, report_health=mock.ANY, - is_default_gateway=False, - container_id='cont_id') - - @mock.patch('time.sleep', mock.Mock()) - @mock.patch('oslo_concurrency.lockutils.lock', mock.Mock( - return_value=mock.Mock(__enter__=mock.Mock(), __exit__=mock.Mock()))) - def test_add_not_present(self): - cfg.CONF.set_override('vif_annotation_timeout', 0, group='cni_daemon') - 
self.addCleanup(cfg.CONF.set_override, 'vif_annotation_timeout', 120, - group='cni_daemon') - - m_getitem = mock.Mock(side_effect=KeyError) - m_registry = mock.Mock(__getitem__=m_getitem, __contains__=False) - self.plugin.registry = m_registry - self.assertRaises(exceptions.CNIKuryrPortTimeout, self.plugin.add, - self.params) diff --git a/kuryr_kubernetes/tests/unit/cni/test_api.py b/kuryr_kubernetes/tests/unit/cni/test_api.py deleted file mode 100644 index 6bb4a14ab..000000000 --- a/kuryr_kubernetes/tests/unit/cni/test_api.py +++ /dev/null @@ -1,102 +0,0 @@ -# Copyright (c) 2017 NEC Corporation. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from io import StringIO -from unittest import mock - -from oslo_config import cfg -from oslo_serialization import jsonutils -import requests - -from kuryr_kubernetes.cni import api -from kuryr_kubernetes.tests import base as test_base -from kuryr_kubernetes.tests import fake - -CONF = cfg.CONF - - -class TestCNIRunnerMixin(object): - def test_run_invalid(self, *args): - m_fin = StringIO() - m_fout = StringIO() - code = self.runner.run( - {'CNI_COMMAND': 'INVALID', 'CNI_ARGS': 'foo=bar'}, m_fin, m_fout) - - self.assertEqual(1, code) - - def test_run_write_version(self, *args): - m_fin = StringIO() - m_fout = StringIO() - code = self.runner.run( - {'CNI_COMMAND': 'VERSION', 'CNI_ARGS': 'foo=bar'}, m_fin, m_fout) - result = jsonutils.loads(m_fout.getvalue()) - - self.assertEqual(0, code) - self.assertEqual(api.CNIRunner.SUPPORTED_VERSIONS, - result['supportedVersions']) - self.assertEqual(api.CNIRunner.VERSION, result['cniVersion']) - - -@mock.patch('requests.post') -class TestCNIDaemonizedRunner(test_base.TestCase, TestCNIRunnerMixin): - def setUp(self): - super(TestCNIDaemonizedRunner, self).setUp() - self.runner = api.CNIDaemonizedRunner() - self.port = int(CONF.cni_daemon.bind_address.split(':')[1]) - - def _test_run(self, cni_cmd, path, m_post): - m_fin = StringIO() - m_fout = StringIO() - env = { - 'CNI_COMMAND': cni_cmd, - 'CNI_CONTAINERID': 'a4181c680a39', - 'CNI_ARGS': 'foo=bar', - 'CNI_IFNAME': 'eth0', - } - result = self.runner.run(env, m_fin, m_fout) - m_post.assert_called_with( - 'http://127.0.0.1:%d/%s' % (self.port, path), - json=mock.ANY, headers={'Connection': 'close'}) - return result - - def test_run_add(self, m_post): - m_response = mock.Mock(status_code=202) - m_response.json = mock.Mock(return_value=fake._fake_vif_dict()) - m_post.return_value = m_response - result = self._test_run('ADD', 'addNetwork', m_post) - self.assertEqual(0, result) - - def test_run_add_invalid(self, m_post): - m_response = mock.Mock(status_code=400) - m_response.json = mock.Mock() - m_post.return_value = m_response - result = self._test_run('ADD', 'addNetwork', m_post) - self.assertEqual(1, result) - m_response.json.assert_not_called() - - def test_run_del(self, m_post): - m_post.return_value = mock.Mock(status_code=204) - result = self._test_run('DEL', 'delNetwork', m_post) - self.assertEqual(0, result) - - def 
test_run_del_invalid(self, m_post): - m_post.return_value = mock.Mock(status_code=400) - result = self._test_run('DEL', 'delNetwork', m_post) - self.assertEqual(1, result) - - def test_run_socket_error(self, m_post): - m_post.side_effect = requests.ConnectionError - result = self._test_run('DEL', 'delNetwork', m_post) - self.assertEqual(1, result) diff --git a/kuryr_kubernetes/tests/unit/cni/test_binding.py b/kuryr_kubernetes/tests/unit/cni/test_binding.py deleted file mode 100644 index 8f066db52..000000000 --- a/kuryr_kubernetes/tests/unit/cni/test_binding.py +++ /dev/null @@ -1,383 +0,0 @@ -# Copyright 2017 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -import collections -import os -from unittest import mock -import uuid - - -from os_vif import objects as osv_objects -from os_vif.objects import fields as osv_fields -from oslo_config import cfg -from oslo_utils import uuidutils - -from kuryr_kubernetes.cni.binding import base -from kuryr_kubernetes.cni.binding import nested -from kuryr_kubernetes.cni.binding import vhostuser -from kuryr_kubernetes import exceptions -from kuryr_kubernetes import objects -from kuryr_kubernetes.tests import base as test_base -from kuryr_kubernetes.tests import fake - -CONF = cfg.CONF - - -class TestDriverMixin(test_base.TestCase): - def setUp(self): - super(TestDriverMixin, self).setUp() - self.instance_info = osv_objects.instance_info.InstanceInfo( - uuid=uuid.uuid4(), name='foo') - self.ifname = 'c_interface' - self.netns = '/proc/netns/1234' - - # Mock IPDB context managers - self.ipdbs = {} - self.m_bridge_iface = mock.Mock(__exit__=mock.Mock(return_value=None)) - self.m_c_iface = mock.Mock() - self.m_h_iface = mock.Mock() - self.h_ipdb, self.h_ipdb_exit = self._mock_ipdb_context_manager(None) - self.c_ipdb, self.c_ipdb_exit = self._mock_ipdb_context_manager( - self.netns) - self.m_create = mock.Mock() - self.h_ipdb.create = mock.Mock( - return_value=mock.Mock( - __enter__=mock.Mock(return_value=self.m_create), - __exit__=mock.Mock(return_value=None))) - self.c_ipdb.create = mock.Mock( - return_value=mock.Mock( - __enter__=mock.Mock(return_value=self.m_create), - __exit__=mock.Mock(return_value=None))) - - def _mock_ipdb_context_manager(self, netns): - mock_ipdb = mock.Mock( - interfaces={ - 'bridge': mock.Mock( - __enter__=mock.Mock(return_value=self.m_bridge_iface), - __exit__=mock.Mock(return_value=None), - mtu=1, - ), - 'c_interface': mock.Mock( - __enter__=mock.Mock(return_value=self.m_c_iface), - __exit__=mock.Mock(return_value=None), - ), - 'h_interface': mock.Mock( - __enter__=mock.Mock(return_value=self.m_h_iface), - __exit__=mock.Mock(return_value=None), - ), - } - ) - mock_exit = mock.Mock(return_value=None) - mock_ipdb.__exit__ = mock_exit - mock_ipdb.__enter__ = mock.Mock(return_value=mock_ipdb) - self.ipdbs[netns] = mock_ipdb - - return mock_ipdb, mock_exit - - @mock.patch('kuryr_kubernetes.cni.binding.base._need_configure_l3') - @mock.patch('kuryr_kubernetes.cni.binding.base.get_ipdb') - 
@mock.patch('os_vif.plug') - def _test_connect(self, m_vif_plug, m_get_ipdb, m_need_l3, report=None): - def get_ipdb(netns=None): - return self.ipdbs[netns] - - m_get_ipdb.side_effect = get_ipdb - m_need_l3.return_value = True - - base.connect(self.vif, self.instance_info, self.ifname, self.netns, - report) - m_vif_plug.assert_called_once_with(self.vif, self.instance_info) - self.m_c_iface.add_ip.assert_called_once_with('192.168.0.2/24') - if report: - report.assert_called_once() - - @mock.patch('kuryr_kubernetes.cni.binding.base.get_ipdb') - @mock.patch('os_vif.unplug') - def _test_disconnect(self, m_vif_unplug, m_get_ipdb, report=None): - def get_ipdb(netns=None): - return self.ipdbs[netns] - m_get_ipdb.side_effect = get_ipdb - - base.disconnect(self.vif, self.instance_info, self.ifname, self.netns, - report) - m_vif_unplug.assert_called_once_with(self.vif, self.instance_info) - if report: - report.assert_called_once() - - -class TestOpenVSwitchDriver(TestDriverMixin, test_base.TestCase): - def setUp(self): - super(TestOpenVSwitchDriver, self).setUp() - self.vif = fake._fake_vif(osv_objects.vif.VIFOpenVSwitch) - - @mock.patch('kuryr_kubernetes.cni.plugins.k8s_cni_registry.' - 'K8sCNIRegistryPlugin.report_drivers_health') - @mock.patch('os.getpid', mock.Mock(return_value=123)) - @mock.patch('kuryr_kubernetes.linux_net_utils.create_ovs_vif_port') - def test_connect(self, mock_create_ovs, m_report): - self._test_connect(report=m_report) - self.assertEqual(3, self.h_ipdb_exit.call_count) - self.assertEqual(2, self.c_ipdb_exit.call_count) - self.c_ipdb.create.assert_called_once_with( - ifname=self.ifname, peer='h_interface', kind='veth') - self.assertEqual(1, self.m_create.mtu) - self.assertEqual(str(self.vif.address), - self.m_create.address) - self.m_create.up.assert_called_once_with() - self.assertEqual(123, self.m_h_iface.net_ns_pid) - self.assertEqual(1, self.m_h_iface.mtu) - self.m_h_iface.up.assert_called_once_with() - - mock_create_ovs.assert_called_once_with( - 'bridge', 'h_interface', '89eccd45-43e9-43d8-b4cc-4c13db13f782', - '3e:94:b7:31:a0:83', 'kuryr') - - @mock.patch('kuryr_kubernetes.cni.plugins.k8s_cni_registry.' 
- 'K8sCNIRegistryPlugin.report_drivers_health') - @mock.patch('kuryr_kubernetes.linux_net_utils.delete_ovs_vif_port') - def test_disconnect(self, mock_delete_ovs, m_report): - self._test_disconnect(report=m_report) - mock_delete_ovs.assert_called_once_with('bridge', 'h_interface') - - -class TestBridgeDriver(TestDriverMixin, test_base.TestCase): - def setUp(self): - super(TestBridgeDriver, self).setUp() - self.vif = fake._fake_vif(osv_objects.vif.VIFBridge) - - @mock.patch('os.getpid', mock.Mock(return_value=123)) - def test_connect(self): - self._test_connect() - - self.m_h_iface.remove.assert_called_once_with() - - self.assertEqual(3, self.h_ipdb_exit.call_count) - self.assertEqual(2, self.c_ipdb_exit.call_count) - self.c_ipdb.create.assert_called_once_with( - ifname=self.ifname, peer='h_interface', kind='veth') - self.assertEqual(1, self.m_create.mtu) - self.assertEqual(str(self.vif.address), - self.m_create.address) - self.m_create.up.assert_called_once_with() - self.assertEqual(123, self.m_h_iface.net_ns_pid) - self.assertEqual(1, self.m_h_iface.mtu) - self.m_h_iface.up.assert_called_once_with() - - self.m_bridge_iface.add_port.assert_called_once_with('h_interface') - - def test_disconnect(self): - self._test_disconnect() - - -class TestNestedDriver(TestDriverMixin, test_base.TestCase): - def setUp(self): - super(TestNestedDriver, self).setUp() - ifaces = { - 'lo': {'flags': 0x8, 'ipaddr': (('127.0.0.1', 8),)}, - 'first': {'flags': 0, 'ipaddr': (('192.168.0.1', 8),)}, - 'kubelet': {'flags': 0, 'ipaddr': (('192.168.1.1', 8),)}, - 'bridge': {'flags': 0, 'ipaddr': (('192.168.2.1', 8),)}, - } - self.h_ipdb = mock.Mock(interfaces=ifaces) - self.h_ipdb_loopback = mock.Mock(interfaces=ifaces) - self.sconn = collections.namedtuple( - 'sconn', ['fd', 'family', 'type', 'laddr', 'raddr', 'status', - 'pid']) - self.addr = collections.namedtuple('addr', ['ip', 'port']) - - @mock.patch.multiple(nested.NestedDriver, __abstractmethods__=set()) - def test_detect_config(self): - driver = nested.NestedDriver() - self.addCleanup(CONF.clear_override, 'link_iface', group='binding') - CONF.set_override('link_iface', 'bridge', group='binding') - iface = driver._detect_iface_name(self.h_ipdb) - self.assertEqual('bridge', iface) - - @mock.patch.multiple(nested.NestedDriver, __abstractmethods__=set()) - @mock.patch('psutil.net_connections') - def test_detect_kubelet_port(self, m_net_connections): - driver = nested.NestedDriver() - m_net_connections.return_value = [ - self.sconn(-1, 2, 2, laddr=self.addr(ip='192.168.1.1', port=53), - raddr=(), status='LISTEN', pid=None), - self.sconn(-1, 2, 2, laddr=self.addr(ip='192.168.1.1', port=10250), - raddr=(), status='ESTABLISHED', pid=None), - self.sconn(-1, 2, 2, laddr=self.addr(ip='192.168.1.1', port=10250), - raddr=(), status='LISTEN', pid=None), - ] - iface = driver._detect_iface_name(self.h_ipdb) - self.assertEqual('kubelet', iface) - - @mock.patch.multiple(nested.NestedDriver, __abstractmethods__=set()) - @mock.patch('psutil.net_connections') - def test_detect_non_loopback(self, m_net_connections): - driver = nested.NestedDriver() - m_net_connections.return_value = [] - - iface = driver._detect_iface_name(self.h_ipdb) - self.assertEqual('first', iface) - - @mock.patch.multiple(nested.NestedDriver, __abstractmethods__=set()) - @mock.patch('psutil.net_connections') - def test_detect_none(self, m_net_connections): - driver = nested.NestedDriver() - m_net_connections.return_value = [] - - self.h_ipdb.interfaces = { - 'lo': {'flags': 0x8, 'ipaddr': (('127.0.0.1', 
8),)}, - } - self.assertRaises(exceptions.CNIBindingFailure, - driver._detect_iface_name, self.h_ipdb) - - -class TestNestedVlanDriver(TestDriverMixin, test_base.TestCase): - def setUp(self): - super(TestNestedVlanDriver, self).setUp() - self.vif = fake._fake_vif(objects.vif.VIFVlanNested) - self.vif.vlan_id = 7 - CONF.set_override('link_iface', 'bridge', group='binding') - self.addCleanup(CONF.clear_override, 'link_iface', group='binding') - - def test_connect(self): - self._test_connect() - - self.assertEqual(2, self.h_ipdb_exit.call_count) - self.assertEqual(3, self.c_ipdb_exit.call_count) - - self.assertEqual(self.ifname, self.m_h_iface.ifname) - self.assertEqual(1, self.m_h_iface.mtu) - self.assertEqual(str(self.vif.address), self.m_h_iface.address) - self.m_h_iface.up.assert_called_once_with() - - def test_connect_mtu_mismatch(self): - self.vif.network.mtu = 2 - self.assertRaises(exceptions.CNIBindingFailure, self._test_connect) - - def test_disconnect(self): - self._test_disconnect() - - -class TestNestedMacvlanDriver(TestDriverMixin, test_base.TestCase): - def setUp(self): - super(TestNestedMacvlanDriver, self).setUp() - self.vif = fake._fake_vif(objects.vif.VIFMacvlanNested) - CONF.set_override('link_iface', 'bridge', group='binding') - self.addCleanup(CONF.clear_override, 'link_iface', group='binding') - - def test_connect(self): - self._test_connect() - - self.assertEqual(2, self.h_ipdb_exit.call_count) - self.assertEqual(3, self.c_ipdb_exit.call_count) - - self.assertEqual(self.ifname, self.m_h_iface.ifname) - self.assertEqual(1, self.m_h_iface.mtu) - self.assertEqual(str(self.vif.address), self.m_h_iface.address) - self.m_h_iface.up.assert_called_once_with() - - def test_connect_mtu_mismatch(self): - self.vif.network.mtu = 2 - self.assertRaises(exceptions.CNIBindingFailure, self._test_connect) - - def test_disconnect(self): - self._test_disconnect() - - -class TestVHostUserDriver(TestDriverMixin, test_base.TestCase): - def setUp(self): - super(TestVHostUserDriver, self).setUp() - self.vu_mount_point = '/var/run/cni' - self.vu_ovs_path = '/var/run/openvswitch' - CONF.set_override('mount_point', self.vu_mount_point, - group='vhostuser') - CONF.set_override('ovs_vhu_path', self.vu_ovs_path, - group='vhostuser') - self.vif = fake._fake_vif(osv_objects.vif.VIFVHostUser) - self.vif.path = self.vu_mount_point - self.vif.address = '64:0f:2b:5f:0c:1c' - self.port_name = vhostuser._get_vhostuser_port_name(self.vif) - self.cont_id = uuidutils.generate_uuid() - - @mock.patch('kuryr_kubernetes.cni.binding.base._need_configure_l3') - @mock.patch('kuryr_kubernetes.cni.plugins.k8s_cni_registry.' - 'K8sCNIRegistryPlugin.report_drivers_health') - @mock.patch('os.rename') - @mock.patch('os.path.exists', mock.Mock(return_value=True)) - @mock.patch('kuryr_kubernetes.cni.binding.vhostuser.VIFVHostUserDriver.' 
- '_write_config') - @mock.patch('kuryr_kubernetes.cni.binding.vhostuser._check_sock_file') - @mock.patch('os_vif.plug') - def test_connect_client(self, m_vif_plug, m_check_sock, m_write_conf, - m_os_rename, m_report, m_need_l3): - m_need_l3.return_value = False - self.vif.mode = osv_fields.VIFVHostUserMode.CLIENT - m_check_sock.return_value = True - base.connect(self.vif, self.instance_info, self.ifname, self.netns, - m_report, container_id=self.cont_id) - vu_dst_socket = os.path.join(self.vu_mount_point, self.port_name) - vu_src_socket = os.path.join(self.vu_ovs_path, self.port_name) - - m_vif_plug.assert_called_once_with(self.vif, self.instance_info) - m_os_rename.assert_called_once_with(vu_src_socket, vu_dst_socket) - m_write_conf.assert_called_once_with(self.cont_id, self.ifname, - self.port_name, self.vif) - m_report.assert_called_once() - - @mock.patch('kuryr_kubernetes.cni.binding.base._need_configure_l3') - @mock.patch('kuryr_kubernetes.cni.plugins.k8s_cni_registry.' - 'K8sCNIRegistryPlugin.report_drivers_health') - @mock.patch('kuryr_kubernetes.cni.binding.vhostuser.VIFVHostUserDriver.' - '_write_config') - @mock.patch('os_vif.plug') - def test_connect_server(self, m_vif_plug, m_write_conf, - m_report, m_need_l3): - m_need_l3.return_value = False - self.vif.mode = osv_fields.VIFVHostUserMode.SERVER - base.connect(self.vif, self.instance_info, self.ifname, self.netns, - m_report, container_id=self.cont_id) - m_vif_plug.assert_called_once_with(self.vif, self.instance_info) - m_write_conf.assert_called_once_with(self.cont_id, self.ifname, - self.port_name, self.vif) - m_report.assert_called_once() - - @mock.patch('kuryr_kubernetes.cni.plugins.k8s_cni_registry.' - 'K8sCNIRegistryPlugin.report_drivers_health') - @mock.patch('kuryr_kubernetes.cni.binding.vhostuser._check_sock_file', - mock.Mock(return_value=False)) - @mock.patch('kuryr_kubernetes.cni.binding.vhostuser.VIFVHostUserDriver.' - '_write_config', mock.Mock()) - @mock.patch('os_vif.plug') - def test_connect_nosocket(self, m_vif_plug, m_report): - self.vif.mode = osv_fields.VIFVHostUserMode.CLIENT - self.assertRaises(exceptions.CNIError, base.connect, self.vif, - self.instance_info, self.ifname, self.netns, - m_report, container_id=self.cont_id) - - @mock.patch('kuryr_kubernetes.cni.plugins.k8s_cni_registry.' - 'K8sCNIRegistryPlugin.report_drivers_health') - @mock.patch('kuryr_kubernetes.cni.binding.vhostuser._get_vhu_sock') - @mock.patch('os.remove') - @mock.patch('os.path.exists', mock.Mock(return_value=True)) - @mock.patch('os_vif.unplug') - def test_disconnect(self, m_os_unplug, m_os_remove, m_get_vhu_sock, - m_report): - m_get_vhu_sock.return_value = self.port_name - base.disconnect(self.vif, self.instance_info, self.ifname, self.netns, - m_report, container_id=self.cont_id) - conf_file_path = '{}/{}-{}'.format(self.vu_mount_point, - self.cont_id, self.ifname) - vhu_sock_path = '{}/{}'.format(self.vu_mount_point, - self.port_name) - os_remove_calls = [mock.call(vhu_sock_path), mock.call(conf_file_path)] - m_os_remove.assert_has_calls(os_remove_calls) diff --git a/kuryr_kubernetes/tests/unit/cni/test_handlers.py b/kuryr_kubernetes/tests/unit/cni/test_handlers.py deleted file mode 100644 index 75dff35b6..000000000 --- a/kuryr_kubernetes/tests/unit/cni/test_handlers.py +++ /dev/null @@ -1,68 +0,0 @@ -# Copyright 2021 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from unittest import mock
-
-from kuryr_kubernetes.cni import handlers
-from kuryr_kubernetes.tests import base
-
-
-class TestCNIDaemonHandlers(base.TestCase):
-    def setUp(self):
-        super().setUp()
-        self.registry = {}
-        self.pod = {'metadata': {'namespace': 'testing',
-                                 'name': 'default'},
-                    'vif_unplugged': False,
-                    'del_received': False}
-        self.healthy = mock.Mock()
-        self.port_handler = handlers.CNIKuryrPortHandler(self.registry)
-        self.pod_handler = handlers.CNIPodHandler(self.registry)
-
-    @mock.patch('oslo_concurrency.lockutils.lock')
-    def test_kp_on_deleted(self, m_lock):
-        pod = self.pod
-        pod['vif_unplugged'] = True
-        pod_name = 'testing/default'
-        self.registry[pod_name] = pod
-        self.port_handler.on_deleted(pod)
-        self.assertNotIn(pod_name, self.registry)
-
-    @mock.patch('oslo_concurrency.lockutils.lock')
-    def test_kp_on_deleted_false(self, m_lock):
-        pod = self.pod
-        pod_name = 'testing/default'
-        self.registry[pod_name] = pod
-        self.port_handler.on_deleted(pod)
-        self.assertIn(pod_name, self.registry)
-        self.assertIs(True, pod['del_received'])
-
-    @mock.patch('oslo_concurrency.lockutils.lock')
-    def test_pod_on_finalize(self, m_lock):
-        pod = self.pod
-        pod_name = 'testing/default'
-        self.pod_handler.on_finalize(pod)
-        self.assertIn(pod_name, self.registry)
-        self.assertIsNone(self.registry[pod_name])
-        m_lock.assert_called_once_with(pod_name, external=True)
-
-    @mock.patch('oslo_concurrency.lockutils.lock')
-    def test_pod_on_finalize_exists(self, m_lock):
-        pod = self.pod
-        pod_name = 'testing/default'
-        self.registry[pod_name] = pod
-        self.pod_handler.on_finalize(pod)
-        self.assertIn(pod_name, self.registry)
-        self.assertIsNotNone(self.registry[pod_name])
-        m_lock.assert_called_once_with(pod_name, external=True)
diff --git a/kuryr_kubernetes/tests/unit/cni/test_health.py b/kuryr_kubernetes/tests/unit/cni/test_health.py
deleted file mode 100644
index 8aaedfa17..000000000
--- a/kuryr_kubernetes/tests/unit/cni/test_health.py
+++ /dev/null
@@ -1,127 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
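The four handler tests above all exercise the same two-phase bookkeeping: the CNI DEL call and the KuryrPort deletion event can arrive in either order, so each side only drops the registry entry once the other side has finished. A stripped-down sketch of that logic (the dict layout and function name are simplifications, not the actual handler API):

    def on_kuryrport_deleted(registry, key):
        entry = registry.get(key)
        if entry is None:
            return
        if entry['vif_unplugged']:
            # CNI DEL already unplugged the VIF; safe to forget the pod.
            del registry[key]
        else:
            # CNI DEL has not run yet; leave a marker so it cleans up later.
            entry['del_received'] = True


    registry = {'testing/default': {'vif_unplugged': True,
                                    'del_received': False}}
    on_kuryrport_deleted(registry, 'testing/default')
    assert 'testing/default' not in registry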
- -from ctypes import c_bool -from kuryr_kubernetes.cni import health -from kuryr_kubernetes.tests import base -import multiprocessing -import os -import tempfile -from unittest import mock - -from oslo_config import cfg - - -class TestCNIHealthServer(base.TestCase): - - def setUp(self): - super(TestCNIHealthServer, self).setUp() - healthy = multiprocessing.Value(c_bool, True) - self.srv = health.CNIHealthServer(healthy) - self.srv.application.testing = True - self.test_client = self.srv.application.test_client() - - @mock.patch('kuryr_kubernetes.cni.health._has_cap') - @mock.patch('kuryr_kubernetes.cni.health.CNIHealthServer.' - 'verify_k8s_connection') - def test_readiness_status(self, m_verify_k8s_conn, cap_tester): - cap_tester.return_value = True - m_verify_k8s_conn.return_value = True, 200 - resp = self.test_client.get('/ready') - self.assertEqual(200, resp.status_code) - - @mock.patch('kuryr_kubernetes.cni.health._has_cap') - @mock.patch('kuryr_kubernetes.cni.health.CNIHealthServer.' - 'verify_k8s_connection') - def test_readiness_status_net_admin_error(self, m_verify_k8s_conn, - cap_tester): - cap_tester.return_value = False - m_verify_k8s_conn.return_value = True, 200 - resp = self.test_client.get('/ready') - self.assertEqual(500, resp.status_code) - - @mock.patch('kuryr_kubernetes.cni.health._has_cap') - @mock.patch('kuryr_kubernetes.cni.health.CNIHealthServer.' - 'verify_k8s_connection') - def test_readiness_status_k8s_error(self, m_verify_k8s_conn, cap_tester): - cap_tester.return_value = True - m_verify_k8s_conn.return_value = False - resp = self.test_client.get('/ready') - self.assertEqual(500, resp.status_code) - - @mock.patch('pyroute2.IPDB.release') - def test_liveness_status(self, m_ipdb): - self.srv._components_healthy.value = True - resp = self.test_client.get('/alive') - m_ipdb.assert_called() - self.assertEqual(200, resp.status_code) - - def test_liveness_status_components_error(self): - self.srv._components_healthy.value = False - resp = self.test_client.get('/alive') - self.assertEqual(500, resp.status_code) - - @mock.patch('pyroute2.IPDB.release') - def test_liveness_status_ipdb_error(self, m_ipdb): - m_ipdb.side_effect = Exception - resp = self.test_client.get('/alive') - self.assertEqual(500, resp.status_code) - - @mock.patch('kuryr_kubernetes.cni.health._get_memsw_usage') - def test_liveness_status_mem_usage_error(self, get_memsw_usage): - get_memsw_usage.return_value = 5368709120 / health.BYTES_AMOUNT - cfg.CONF.set_override('max_memory_usage', 4096, - group='cni_health_server') - - resp = self.test_client.get('/alive') - self.assertEqual(500, resp.status_code) - - -class TestCNIHealthUtils(base.TestCase): - def test_has_cap(self): - with tempfile.NamedTemporaryFile() as fake_status: - fake_status.write(b'CapInh:\t0000000000000000\n' - b'CapPrm:\t0000000000000000\n' - b'CapEff:\t0000000000000000\n' - b'CapBnd:\t0000003fffffffff\n') - fake_status.flush() - self.assertTrue( - health._has_cap(health.CAP_NET_ADMIN, - 'CapBnd:\t', - fake_status.name)) - - def test__get_mem_usage(self): - mem_usage = 500 # Arbitrary mem usage amount - fake_cg_path = tempfile.mkdtemp(suffix='kuryr') - usage_in_bytes_path = os.path.join(fake_cg_path, health.MEMSW_FILENAME) - try: - with open(usage_in_bytes_path, 'w') as cgroup_mem_usage: - cgroup_mem_usage.write('{}\n'.format( - mem_usage * health.BYTES_AMOUNT)) - self.assertEqual(health._get_memsw_usage(fake_cg_path), mem_usage) - finally: - os.unlink(usage_in_bytes_path) - os.rmdir(fake_cg_path) - - 
@mock.patch('kuryr_kubernetes.cni.utils.running_under_container_runtime') - def test__get_cni_cgroup_path_system(self, running_containerized): - running_containerized.return_value = False - fake_path = '/kuryr/rules' - cfg.CONF.set_override('cg_path', fake_path, - group='cni_health_server') - self.assertEqual(health._get_cni_cgroup_path(), fake_path) - - @mock.patch('kuryr_kubernetes.cni.utils.running_under_container_runtime') - def test__get_cni_cgroup_path_container(self, running_containerized): - running_containerized.return_value = True - self.assertEqual(health._get_cni_cgroup_path(), - health.TOP_CGROUP_MEMORY_PATH) diff --git a/kuryr_kubernetes/tests/unit/cni/test_main.py b/kuryr_kubernetes/tests/unit/cni/test_main.py deleted file mode 100644 index e360b8f3c..000000000 --- a/kuryr_kubernetes/tests/unit/cni/test_main.py +++ /dev/null @@ -1,43 +0,0 @@ -# Copyright (c) 2017 NEC Corporation. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from unittest import mock - -from kuryr_kubernetes.cni import main -from kuryr_kubernetes.tests import base as test_base - - -class TestCNIMain(test_base.TestCase): - @mock.patch('kuryr_kubernetes.cni.main.jsonutils.load') - @mock.patch('sys.exit') - @mock.patch('sys.stdin') - @mock.patch('kuryr_kubernetes.cni.utils.CNIConfig') - @mock.patch('kuryr_kubernetes.cni.api') - @mock.patch('kuryr_kubernetes.config.init') - @mock.patch('kuryr_kubernetes.config.setup_logging') - @mock.patch('kuryr_kubernetes.cni.api.CNIDaemonizedRunner') - def test_daemonized_run(self, m_cni_dr, m_setup_logging, m_config_init, - m_api, m_conf, m_sys, m_sysexit, m_json): - m_conf.debug = mock.Mock() - m_conf.debug.return_value = True - m_cni_dr.return_value = mock.MagicMock() - m_cni_daemon = m_cni_dr.return_value - - main.run() - - m_config_init.assert_called() - m_setup_logging.assert_called() - m_cni_daemon.run.assert_called() - m_sysexit.assert_called() diff --git a/kuryr_kubernetes/tests/unit/cni/test_service.py b/kuryr_kubernetes/tests/unit/cni/test_service.py deleted file mode 100644 index ef1ece7fe..000000000 --- a/kuryr_kubernetes/tests/unit/cni/test_service.py +++ /dev/null @@ -1,109 +0,0 @@ -# Copyright 2017 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
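The _has_cap test above reduces to parsing a /proc/<pid>/status line such as 'CapBnd:\t0000003fffffffff' and testing one bit of the hex mask. A rough standalone equivalent (the helper name is ours, not the module's; CAP_NET_ADMIN really is bit 12 in the Linux capability set):

    CAP_NET_ADMIN = 12  # bit index of CAP_NET_ADMIN on Linux


    def has_cap(cap, prefix, status_path='/proc/self/status'):
        # Scan for the line starting with e.g. 'CapBnd:\t', parse the hex
        # bitmask that follows, and test the requested capability bit.
        with open(status_path) as status_file:
            for line in status_file:
                if line.startswith(prefix):
                    caps = int(line[len(prefix):], 16)
                    return bool(caps & (1 << cap))
        return False


    print(has_cap(CAP_NET_ADMIN, 'CapBnd:\t'))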
- -import queue -from unittest import mock - -from oslo_serialization import jsonutils - -from kuryr_kubernetes.cni.daemon import service -from kuryr_kubernetes.cni.plugins import k8s_cni_registry -from kuryr_kubernetes import exceptions -from kuryr_kubernetes.tests import base -from kuryr_kubernetes.tests import fake -from kuryr_kubernetes.tests.unit import kuryr_fixtures - - -class TestDaemonServer(base.TestCase): - def setUp(self): - super(TestDaemonServer, self).setUp() - healthy = mock.Mock() - self.k8s_mock = self.useFixture(kuryr_fixtures.MockK8sClient()) - self.plugin = k8s_cni_registry.K8sCNIRegistryPlugin({}, healthy) - self.health_registry = mock.Mock() - self.metrics = queue.Queue() - self.srv = service.DaemonServer( - self.plugin, self.health_registry, self.metrics) - - self.srv.application.testing = True - self.test_client = self.srv.application.test_client() - cni_args = 'foo=bar;K8S_POD_NAMESPACE=test;K8S_POD_NAME=test' - params = {'config_kuryr': {}, 'CNI_ARGS': cni_args, - 'CNI_CONTAINERID': 'baz', 'CNI_COMMAND': 'ADD'} - self.params_str = jsonutils.dumps(params) - - @mock.patch('kuryr_kubernetes.cni.plugins.k8s_cni_registry.' - 'K8sCNIRegistryPlugin.add') - def test_add(self, m_add): - vif = fake._fake_vif() - m_add.return_value = vif - - resp = self.test_client.post('/addNetwork', data=self.params_str, - content_type='application/json') - - m_add.assert_called_once_with(mock.ANY) - self.assertEqual( - fake._fake_vif_string(vif.obj_to_primitive()).encode(), resp.data) - self.assertEqual(202, resp.status_code) - - @mock.patch('kuryr_kubernetes.cni.plugins.k8s_cni_registry.' - 'K8sCNIRegistryPlugin.add') - def test_add_timeout(self, m_add): - m_add.side_effect = exceptions.CNIKuryrPortTimeout('bar') - - resp = self.test_client.post('/addNetwork', data=self.params_str, - content_type='application/json') - - m_add.assert_called_once_with(mock.ANY) - self.assertEqual(504, resp.status_code) - - @mock.patch('kuryr_kubernetes.cni.plugins.k8s_cni_registry.' - 'K8sCNIRegistryPlugin.add') - def test_add_error(self, m_add): - m_add.side_effect = Exception - - resp = self.test_client.post('/addNetwork', data=self.params_str, - content_type='application/json') - - m_add.assert_called_once_with(mock.ANY) - self.assertEqual(500, resp.status_code) - - @mock.patch('kuryr_kubernetes.cni.plugins.k8s_cni_registry.' - 'K8sCNIRegistryPlugin.delete') - def test_delete(self, m_delete): - resp = self.test_client.post('/delNetwork', data=self.params_str, - content_type='application/json') - - m_delete.assert_called_once_with(mock.ANY) - self.assertEqual(204, resp.status_code) - - @mock.patch('kuryr_kubernetes.cni.plugins.k8s_cni_registry.' - 'K8sCNIRegistryPlugin.delete') - def test_delete_timeout(self, m_delete): - m_delete.side_effect = exceptions.CNIKuryrPortTimeout('foo') - resp = self.test_client.post('/delNetwork', data=self.params_str, - content_type='application/json') - - m_delete.assert_called_once_with(mock.ANY) - self.assertEqual(204, resp.status_code) - - @mock.patch('kuryr_kubernetes.cni.plugins.k8s_cni_registry.' 
- 'K8sCNIRegistryPlugin.delete') - def test_delete_error(self, m_delete): - m_delete.side_effect = Exception - resp = self.test_client.post('/delNetwork', data=self.params_str, - content_type='application/json') - - m_delete.assert_called_once_with(mock.ANY) - self.assertEqual(500, resp.status_code) diff --git a/kuryr_kubernetes/tests/unit/cni/test_utils.py b/kuryr_kubernetes/tests/unit/cni/test_utils.py deleted file mode 100644 index c2abe4ada..000000000 --- a/kuryr_kubernetes/tests/unit/cni/test_utils.py +++ /dev/null @@ -1,34 +0,0 @@ -# Copyright Red Hat, Inc. 2018 -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -import tempfile - -import ddt -from kuryr_kubernetes.cni import utils -from kuryr_kubernetes.tests import base - - -@ddt.ddt -class TestCNIUtils(base.TestCase): - @ddt.data(*utils.CONTAINER_RUNTIME_CGROUP_IDS) - def test_running_under_container_runtime(self, container_runtime_id): - with tempfile.NamedTemporaryFile() as proc_one_cgroup: - proc_one_cgroup.write(container_runtime_id.encode()) - proc_one_cgroup.write(b'\n') - proc_one_cgroup.flush() - self.assertTrue( - utils.running_under_container_runtime(proc_one_cgroup.name)) - - def test_not_running_under_container_runtime(self): - with tempfile.NamedTemporaryFile() as proc_one_cgroup: - self.assertFalse( - utils.running_under_container_runtime(proc_one_cgroup.name)) diff --git a/kuryr_kubernetes/tests/unit/controller/__init__.py b/kuryr_kubernetes/tests/unit/controller/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/kuryr_kubernetes/tests/unit/controller/drivers/__init__.py b/kuryr_kubernetes/tests/unit/controller/drivers/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/kuryr_kubernetes/tests/unit/controller/drivers/test_annotation_project.py b/kuryr_kubernetes/tests/unit/controller/drivers/test_annotation_project.py deleted file mode 100644 index be6d13815..000000000 --- a/kuryr_kubernetes/tests/unit/controller/drivers/test_annotation_project.py +++ /dev/null @@ -1,122 +0,0 @@ -# Copyright (c) 2022 Troila -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
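The test_utils.py cases above pin down how the daemon decides it is containerized: PID 1's cgroup file mentions a known container-runtime cgroup id. A sketch of that check (the id tuple here is illustrative only; the canonical values live in kuryr_kubernetes.cni.utils.CONTAINER_RUNTIME_CGROUP_IDS):

    # Illustrative ids only; see cni/utils.py for the real tuple.
    CONTAINER_RUNTIME_CGROUP_IDS = ('docker', 'crio-', 'kubepods')


    def running_under_container_runtime(proc_one_cgroup_path='/proc/1/cgroup'):
        with open(proc_one_cgroup_path) as cgroup_file:
            content = cgroup_file.read()
        # Any runtime id in PID 1's cgroups means we run inside a container.
        return any(rt_id in content for rt_id in CONTAINER_RUNTIME_CGROUP_IDS)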
-
-from unittest import mock
-
-from oslo_config import cfg
-
-from kuryr_kubernetes import constants
-from kuryr_kubernetes.controller.drivers import annotation_project
-from kuryr_kubernetes.tests import base as test_base
-
-
-class TestAnnotationProjectDriverBase(test_base.TestCase):
-
-    project_id = 'fake_project_id'
-
-    def _get_project_from_namespace(self, resource, driver):
-        m_get_k8s_res = mock.patch('kuryr_kubernetes.controller.drivers.'
-                                   'utils.get_k8s_resource').start()
-        m_get_k8s_res.return_value = {
-            'metadata': {
-                'name': 'fake_namespace',
-                'annotations': {
-                    constants.K8s_ANNOTATION_PROJECT: self.project_id}}}
-        project_id = driver.get_project(resource)
-        self.assertEqual(self.project_id, project_id)
-
-    def _get_project_from_configure_option(self, resource, driver):
-        m_cfg = mock.patch('kuryr_kubernetes.config.CONF').start()
-        m_cfg.neutron_defaults.project = self.project_id
-        m_get_k8s_res = mock.patch('kuryr_kubernetes.controller.drivers.'
-                                   'utils.get_k8s_resource').start()
-        m_get_k8s_res.return_value = {
-            'metadata': {
-                'name': 'fake_namespace',
-                'annotations': {}}}
-        project_id = driver.get_project(resource)
-        self.assertEqual(self.project_id, project_id)
-
-    def _project_id_not_set(self, resource, driver):
-        m_cfg = mock.patch('kuryr_kubernetes.config.CONF').start()
-        m_cfg.neutron_defaults.project = ""
-        m_get_k8s_res = mock.patch('kuryr_kubernetes.controller.drivers.'
-                                   'utils.get_k8s_resource').start()
-        m_get_k8s_res.return_value = {
-            'metadata': {
-                'name': 'fake_namespace',
-                'annotations': {}}}
-        self.assertRaises(cfg.RequiredOptError, driver.get_project, resource)
-
-
-class TestAnnotationPodProjectDriver(TestAnnotationProjectDriverBase):
-
-    pod = {'metadata': {'namespace': 'fake_namespace'}}
-
-    def test_get_project(self):
-        driver = annotation_project.AnnotationPodProjectDriver()
-        self._get_project_from_namespace(self.pod, driver)
-        self._get_project_from_configure_option(self.pod, driver)
-        self._project_id_not_set(self.pod, driver)
-
-
-class TestAnnotationServiceProjectDriver(TestAnnotationProjectDriverBase):
-
-    service = {'metadata': {'namespace': 'fake_namespace'}}
-
-    def test_get_project(self):
-        driver = annotation_project.AnnotationServiceProjectDriver()
-        self._get_project_from_namespace(self.service, driver)
-        self._get_project_from_configure_option(self.service, driver)
-        self._project_id_not_set(self.service, driver)
-
-
-class TestAnnotationNetworkPolicyProjectDriver(
-        TestAnnotationProjectDriverBase):
-
-    network_policy = {'metadata': {'namespace': 'fake_namespace'}}
-
-    def test_get_project(self):
-        driver = annotation_project.AnnotationNetworkPolicyProjectDriver()
-        self._get_project_from_namespace(self.network_policy, driver)
-        self._get_project_from_configure_option(self.network_policy, driver)
-        self._project_id_not_set(self.network_policy, driver)
-
-
-class TestAnnotationNamespaceProjectDriver(test_base.TestCase):
-
-    project_id = 'fake_project_id'
-    driver = annotation_project.AnnotationNamespaceProjectDriver()
-
-    def test_get_project_from_annotation(self):
-        namespace = {'metadata': {
-            'annotations': {
-                constants.K8s_ANNOTATION_PROJECT: self.project_id}}}
-        project_id = self.driver.get_project(namespace)
-        self.assertEqual(self.project_id, project_id)
-
-    @mock.patch('kuryr_kubernetes.config.CONF')
-    def test_get_project_from_configure_option(self, m_cfg):
-        m_cfg.neutron_defaults.project = self.project_id
-        namespace = {'metadata': {'name': 'fake_namespace'}}
-        project_id = self.driver.get_project(namespace)
-
self.assertEqual(self.project_id, project_id) - - @mock.patch('kuryr_kubernetes.config.CONF') - def test_project_not_set(self, m_cfg): - m_cfg.neutron_defaults.project = "" - namespace = {'metadata': {'name': 'fake_namespace'}} - self.assertRaises( - cfg.RequiredOptError, self.driver.get_project, namespace) diff --git a/kuryr_kubernetes/tests/unit/controller/drivers/test_base.py b/kuryr_kubernetes/tests/unit/controller/drivers/test_base.py deleted file mode 100644 index c96e98691..000000000 --- a/kuryr_kubernetes/tests/unit/controller/drivers/test_base.py +++ /dev/null @@ -1,114 +0,0 @@ -# Copyright (c) 2016 Mirantis, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import abc -from unittest import mock - - -from kuryr_kubernetes.controller.drivers import base as d_base -from kuryr_kubernetes.tests import base as test_base - - -class _TestDriver(d_base.DriverBase, metaclass=abc.ABCMeta): - ALIAS = 'test_alias' - - @abc.abstractmethod - def test(self): - raise NotImplementedError() - - -class TestDriverBase(test_base.TestCase): - - @mock.patch.object(d_base, '_DRIVER_MANAGERS') - @mock.patch('kuryr_kubernetes.config.CONF') - @mock.patch('stevedore.driver.DriverManager') - def test_get_instance(self, m_stv_mgr, m_cfg, m_mgrs): - m_drv = mock.MagicMock(spec=_TestDriver) - m_mgr = mock.MagicMock() - m_mgr.driver = m_drv - m_mgrs.__getitem__.return_value = m_mgr - - self.assertEqual(m_drv, _TestDriver.get_instance()) - m_cfg.assert_not_called() - m_stv_mgr.assert_not_called() - - @mock.patch.object(d_base, '_DRIVER_MANAGERS') - @mock.patch('kuryr_kubernetes.config.CONF') - @mock.patch('stevedore.driver.DriverManager') - def test_get_instance_not_loaded(self, m_stv_mgr, m_cfg, m_mgrs): - alias = _TestDriver.ALIAS - cfg_name = '%s_driver' % (alias) - mgr_key = '%s:_from_cfg:default' % (alias) - drv_name = 'driver_impl' - namespace = '%s.%s' % (d_base._DRIVER_NAMESPACE_BASE, alias) - m_cfg.kubernetes.__getitem__.return_value = drv_name - m_drv = mock.MagicMock(spec=_TestDriver) - m_mgr = mock.MagicMock() - m_mgr.driver = m_drv - m_stv_mgr.return_value = m_mgr - m_mgrs.__getitem__.side_effect = KeyError - - self.assertEqual(m_drv, _TestDriver.get_instance()) - m_cfg.kubernetes.__getitem__.assert_called_with(cfg_name) - m_stv_mgr.assert_called_with(namespace=namespace, name=drv_name, - invoke_on_load=True) - m_mgrs.__setitem__.assert_called_once_with(mgr_key, m_mgr) - - @mock.patch.object(d_base, '_DRIVER_MANAGERS') - @mock.patch('kuryr_kubernetes.config.CONF') - @mock.patch('stevedore.driver.DriverManager') - def test_get_instance_invalid_type(self, m_stv_mgr, m_cfg, m_mgrs): - class _InvalidDriver(object): - pass - - m_drv = mock.MagicMock(spec=_InvalidDriver) - m_mgr = mock.MagicMock() - m_mgr.driver = m_drv - m_mgrs.__getitem__.return_value = m_mgr - - self.assertRaises(TypeError, _TestDriver.get_instance) - m_cfg.assert_not_called() - m_stv_mgr.assert_not_called() - - -class TestMultiVIFDriver(test_base.TestCase): - - @mock.patch.object(d_base, '_MULTI_VIF_DRIVERS', 
[]) - @mock.patch('kuryr_kubernetes.config.CONF') - def test_get_enabled_drivers(self, m_cfg): - cfg_name = 'multi_vif_drivers' - drv_name = 'driver_impl' - m_cfg.kubernetes.__getitem__.return_value = [drv_name] - m_drv = mock.MagicMock() - d_base.MultiVIFDriver.get_instance = mock.MagicMock(return_value=m_drv) - - self.assertIn(m_drv, d_base.MultiVIFDriver.get_enabled_drivers()) - m_cfg.kubernetes.__getitem__.assert_called_once_with(cfg_name) - - @mock.patch.object(d_base, '_MULTI_VIF_DRIVERS', []) - @mock.patch('kuryr_kubernetes.config.CONF') - def test_get_enabled_drivers_multiple(self, m_cfg): - cfg_name = 'multi_vif_drivers' - drv1_name = 'driver_impl_1' - drv2_name = 'driver_impl_2' - m_cfg.kubernetes.__getitem__.return_value = [drv1_name, drv2_name] - m_drv1 = mock.MagicMock() - m_drv2 = mock.MagicMock() - d_base.MultiVIFDriver.get_instance = mock.MagicMock() - d_base.MultiVIFDriver.get_instance.side_effect = [m_drv1, m_drv2] - - self.assertIn(m_drv1, d_base.MultiVIFDriver.get_enabled_drivers()) - self.assertIn(m_drv2, d_base.MultiVIFDriver.get_enabled_drivers()) - m_cfg.kubernetes.__getitem__.assert_called_once_with(cfg_name) diff --git a/kuryr_kubernetes/tests/unit/controller/drivers/test_default_project.py b/kuryr_kubernetes/tests/unit/controller/drivers/test_default_project.py deleted file mode 100644 index 7c1e29baf..000000000 --- a/kuryr_kubernetes/tests/unit/controller/drivers/test_default_project.py +++ /dev/null @@ -1,89 +0,0 @@ -# Copyright (c) 2016 Mirantis, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
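test_base.py above is really testing a small load-and-cache layer over stevedore. Condensed into a sketch (the manager key and namespace strings mirror what the tests assert; the config lookup that supplies driver_name is elided):

    from stevedore import driver as stv_driver

    _DRIVER_NAMESPACE_BASE = 'kuryr_kubernetes.controller.drivers'
    _DRIVER_MANAGERS = {}


    def get_instance(alias, driver_name):
        key = '%s:_from_cfg:default' % alias
        try:
            manager = _DRIVER_MANAGERS[key]
        except KeyError:
            # First use of this alias: load the driver via stevedore and
            # cache the manager so later calls reuse the same instance.
            manager = stv_driver.DriverManager(
                namespace='%s.%s' % (_DRIVER_NAMESPACE_BASE, alias),
                name=driver_name,
                invoke_on_load=True)
            _DRIVER_MANAGERS[key] = manager
        return manager.driver

The cache is what the first test case checks: once a manager is stored under the alias key, neither the config nor stevedore is consulted again.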
- -from unittest import mock - -from oslo_config import cfg - -from kuryr_kubernetes.controller.drivers import default_project -from kuryr_kubernetes.tests import base as test_base - - -class TestDefaultPodProjectDriver(test_base.TestCase): - - @mock.patch('kuryr_kubernetes.config.CONF') - def test_get_project(self, m_cfg): - project_id = mock.sentinel.project_id - pod = mock.sentinel.pod - m_cfg.neutron_defaults.project = project_id - driver = default_project.DefaultPodProjectDriver() - - self.assertEqual(project_id, driver.get_project(pod)) - - def test_get_project_not_set(self): - pod = mock.sentinel.pod - driver = default_project.DefaultPodProjectDriver() - self.assertRaises(cfg.RequiredOptError, driver.get_project, pod) - - -class TestDefaultServiceProjectDriver(test_base.TestCase): - - @mock.patch('kuryr_kubernetes.config.CONF') - def test_get_project(self, m_cfg): - project_id = mock.sentinel.project_id - service = mock.sentinel.service - m_cfg.neutron_defaults.project = project_id - driver = default_project.DefaultServiceProjectDriver() - - self.assertEqual(project_id, driver.get_project(service)) - - def test_get_project_not_set(self): - service = mock.sentinel.service - driver = default_project.DefaultServiceProjectDriver() - self.assertRaises(cfg.RequiredOptError, driver.get_project, service) - - -class TestDefaultNamespaceProjectDriver(test_base.TestCase): - - @mock.patch('kuryr_kubernetes.config.CONF') - def test_get_project(self, m_cfg): - project_id = mock.sentinel.project_id - namespace = mock.sentinel.namespace - m_cfg.neutron_defaults.project = project_id - driver = default_project.DefaultNamespaceProjectDriver() - - self.assertEqual(project_id, driver.get_project(namespace)) - - def test_get_project_not_set(self): - namespace = mock.sentinel.namespace - driver = default_project.DefaultNamespaceProjectDriver() - self.assertRaises(cfg.RequiredOptError, driver.get_project, namespace) - - -class TestDefaultNetworkPolicyProjectDriver(test_base.TestCase): - - @mock.patch('kuryr_kubernetes.config.CONF') - def test_get_project(self, m_cfg): - project_id = mock.sentinel.project_id - policy = mock.sentinel.policy - m_cfg.neutron_defaults.project = project_id - driver = default_project.DefaultNetworkPolicyProjectDriver() - - self.assertEqual(project_id, driver.get_project(policy)) - - def test_get_project_not_set(self): - policy = mock.sentinel.policy - driver = default_project.DefaultNetworkPolicyProjectDriver() - self.assertRaises(cfg.RequiredOptError, driver.get_project, policy) diff --git a/kuryr_kubernetes/tests/unit/controller/drivers/test_default_security_groups.py b/kuryr_kubernetes/tests/unit/controller/drivers/test_default_security_groups.py deleted file mode 100644 index bc6c5ad08..000000000 --- a/kuryr_kubernetes/tests/unit/controller/drivers/test_default_security_groups.py +++ /dev/null @@ -1,69 +0,0 @@ -# Copyright (c) 2016 Mirantis, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
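Every "default" driver tested above follows one pattern: read a single option from the neutron_defaults group and raise RequiredOptError when it is unset. A minimal sketch, assuming the neutron_defaults options are registered as in kuryr's config module:

    from oslo_config import cfg


    def get_project(resource):
        # The resource argument is deliberately unused: the default drivers
        # return the same configured project for every pod/service/namespace.
        project = cfg.CONF.neutron_defaults.project
        if not project:
            raise cfg.RequiredOptError('project', 'neutron_defaults')
        return project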
-
-from unittest import mock
-
-from oslo_config import cfg
-
-from kuryr_kubernetes.controller.drivers import default_security_groups
-from kuryr_kubernetes.tests import base as test_base
-
-
-class TestDefaultPodSecurityGroupsDriver(test_base.TestCase):
-
-    @mock.patch('kuryr_kubernetes.config.CONF')
-    def test_get_security_groups(self, m_cfg):
-        sg_list = [mock.sentinel.sg_id]
-        project_id = mock.sentinel.project_id
-        pod = mock.sentinel.pod
-        m_cfg.neutron_defaults.pod_security_groups = sg_list
-        driver = default_security_groups.DefaultPodSecurityGroupsDriver()
-
-        ret = driver.get_security_groups(pod, project_id)
-
-        self.assertEqual(sg_list, ret)
-        self.assertIsNot(sg_list, ret)
-
-    def test_get_security_groups_not_set(self):
-        project_id = mock.sentinel.project_id
-        pod = mock.sentinel.pod
-        driver = default_security_groups.DefaultPodSecurityGroupsDriver()
-
-        self.assertRaises(cfg.RequiredOptError, driver.get_security_groups,
-                          pod, project_id)
-
-
-class TestDefaultServiceSecurityGroupsDriver(test_base.TestCase):
-
-    @mock.patch('kuryr_kubernetes.config.CONF')
-    def test_get_security_groups(self, m_cfg):
-        sg_list = [mock.sentinel.sg_id]
-        project_id = mock.sentinel.project_id
-        service = mock.sentinel.service
-        m_cfg.neutron_defaults.pod_security_groups = sg_list
-        driver = default_security_groups.DefaultServiceSecurityGroupsDriver()
-
-        ret = driver.get_security_groups(service, project_id)
-
-        self.assertEqual(sg_list, ret)
-        self.assertIsNot(sg_list, ret)
-
-    def test_get_security_groups_not_set(self):
-        project_id = mock.sentinel.project_id
-        service = mock.sentinel.service
-        driver = default_security_groups.DefaultServiceSecurityGroupsDriver()
-
-        self.assertRaises(cfg.RequiredOptError, driver.get_security_groups,
-                          service, project_id)
diff --git a/kuryr_kubernetes/tests/unit/controller/drivers/test_default_subnet.py b/kuryr_kubernetes/tests/unit/controller/drivers/test_default_subnet.py
deleted file mode 100644
index 90dc1fdee..000000000
--- a/kuryr_kubernetes/tests/unit/controller/drivers/test_default_subnet.py
+++ /dev/null
@@ -1,78 +0,0 @@
-# Copyright (c) 2016 Mirantis, Inc.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
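
The subnet tests below stack two @mock.patch decorators, and the mock arguments arrive in reverse order because decorators apply bottom-up: the innermost patch (CONF) supplies the first argument, hence signatures like def test_get_subnets(self, m_cfg, m_get_subnet). A tiny self-contained sketch of that ordering, assuming module-level names UTILS and CONF that exist only in this example:

import sys
import unittest
from unittest import mock

UTILS = 'original-utils'
CONF = 'original-conf'


class TestStackedPatchOrder(unittest.TestCase):

    # Decorators apply bottom-up, so the patch nearest the function (CONF)
    # supplies the first mock argument after self.
    @mock.patch(__name__ + '.UTILS')
    @mock.patch(__name__ + '.CONF')
    def test_argument_order(self, m_conf, m_utils):
        mod = sys.modules[__name__]
        self.assertIs(m_conf, mod.CONF)
        self.assertIs(m_utils, mod.UTILS)


if __name__ == '__main__':
    unittest.main()
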
-
-from unittest import mock
-
-from oslo_config import cfg
-
-from kuryr_kubernetes.controller.drivers import default_subnet
-from kuryr_kubernetes.tests import base as test_base
-
-
-class TestDefaultPodSubnetDriver(test_base.TestCase):
-
-    @mock.patch('kuryr_kubernetes.utils.get_subnet')
-    @mock.patch('kuryr_kubernetes.config.CONF')
-    def test_get_subnets(self, m_cfg, m_get_subnet):
-        subnet_id = mock.sentinel.subnet_id
-        subnet = mock.sentinel.subnet
-        pod = mock.sentinel.pod
-        project_id = mock.sentinel.project_id
-        m_cfg.neutron_defaults.pod_subnet = subnet_id
-        m_get_subnet.return_value = subnet
-        driver = default_subnet.DefaultPodSubnetDriver()
-
-        subnets = driver.get_subnets(pod, project_id)
-
-        self.assertEqual({subnet_id: subnet}, subnets)
-        m_get_subnet.assert_called_once_with(subnet_id)
-
-    @mock.patch('kuryr_kubernetes.utils.get_subnet')
-    def test_get_subnets_not_set(self, m_get_subnet):
-        pod = mock.sentinel.pod
-        project_id = mock.sentinel.project_id
-        driver = default_subnet.DefaultPodSubnetDriver()
-
-        self.assertRaises(cfg.RequiredOptError, driver.get_subnets,
-                          pod, project_id)
-        m_get_subnet.assert_not_called()
-
-
-class TestDefaultServiceSubnetDriver(test_base.TestCase):
-
-    @mock.patch('kuryr_kubernetes.utils.get_subnet')
-    @mock.patch('kuryr_kubernetes.config.CONF')
-    def test_get_subnets(self, m_cfg, m_get_subnet):
-        subnet_id = mock.sentinel.subnet_id
-        subnet = mock.sentinel.subnet
-        service = mock.sentinel.service
-        project_id = mock.sentinel.project_id
-        m_cfg.neutron_defaults.service_subnet = subnet_id
-        m_get_subnet.return_value = subnet
-        driver = default_subnet.DefaultServiceSubnetDriver()
-
-        subnets = driver.get_subnets(service, project_id)
-
-        self.assertEqual({subnet_id: subnet}, subnets)
-        m_get_subnet.assert_called_once_with(subnet_id)
-
-    @mock.patch('kuryr_kubernetes.utils.get_subnet')
-    def test_get_subnets_not_set(self, m_get_subnet):
-        service = mock.sentinel.service
-        project_id = mock.sentinel.project_id
-        driver = default_subnet.DefaultServiceSubnetDriver()
-        self.assertRaises(cfg.RequiredOptError, driver.get_subnets,
-                          service, project_id)
-        m_get_subnet.assert_not_called()
diff --git a/kuryr_kubernetes/tests/unit/controller/drivers/test_lb_public_ip.py b/kuryr_kubernetes/tests/unit/controller/drivers/test_lb_public_ip.py
deleted file mode 100644
index 1bc78769d..000000000
--- a/kuryr_kubernetes/tests/unit/controller/drivers/test_lb_public_ip.py
+++ /dev/null
@@ -1,390 +0,0 @@
-# Copyright (c) 2017 RedHat, Inc.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
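
Almost every test in the floating-IP driver file below uses the same isolation trick: build mock.Mock(spec=cls), then invoke the method under test unbound, passing the mock in as self, so the method body runs for real while sibling helpers and client attributes stay mocked. A minimal runnable sketch of the trick, with a hypothetical Driver class standing in for FloatingIpServicePubIPDriver:

import unittest
from unittest import mock


class Driver:
    """Hypothetical driver; only `acquire` is exercised for real."""

    def helper(self):
        raise RuntimeError('not under test')

    def acquire(self, spec_type):
        if spec_type != 'LoadBalancer':
            return None
        return self.helper()


class TestUnboundCallPattern(unittest.TestCase):

    def test_acquire_clusterip(self):
        cls = Driver
        m_driver = mock.Mock(spec=cls)       # every attribute is a mock
        # Call the real method unbound with the mock as `self`: only
        # `acquire` runs for real, collaborators like `helper` stay mocked.
        result = cls.acquire(m_driver, 'ClusterIP')
        self.assertIsNone(result)
        m_driver.helper.assert_not_called()


if __name__ == '__main__':
    unittest.main()
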
- -from openstack import exceptions as os_exc -from openstack.network.v2 import floating_ip as os_fip -from openstack.network.v2 import subnet as os_subnet -from unittest import mock - -from oslo_config import cfg - -from kuryr_kubernetes.controller.drivers import lb_public_ip\ - as d_lb_public_ip -from kuryr_kubernetes.controller.drivers import public_ip -from kuryr_kubernetes.objects import lbaas as obj_lbaas -from kuryr_kubernetes.tests import base as test_base -from kuryr_kubernetes.tests.unit import kuryr_fixtures as k_fix - -CONF = cfg.CONF - - -class TestFloatingIpServicePubIPDriverDriver(test_base.TestCase): - - def test_acquire_service_pub_ip_info_clusterip(self): - cls = d_lb_public_ip.FloatingIpServicePubIPDriver - m_driver = mock.Mock(spec=cls) - m_driver._drv_pub_ip = public_ip.FipPubIpDriver() - project_id = mock.sentinel.project_id - cur_service_pub_ip_info = None - service = {'spec': {'type': 'ClusterIP'}} - - result = cls.acquire_service_pub_ip_info(m_driver, service, project_id, - cur_service_pub_ip_info) - self.assertIsNone(result) - - def test_acquire_service_pub_ip_info_usr_specified_ip(self): - cls = d_lb_public_ip.FloatingIpServicePubIPDriver - m_driver = mock.Mock(spec=cls) - m_driver._drv_pub_ip = public_ip.FipPubIpDriver() - os_net = self.useFixture(k_fix.MockNetworkClient()).client - - fip = os_fip.FloatingIP( - floating_ip_address='1.2.3.4', - port_id=None, - id='a2a62ea7-e3bf-40df-8c09-aa0c29876a6b', - ) - os_net.ips.return_value = (ip for ip in [fip]) - project_id = mock.sentinel.project_id - spec_type = 'LoadBalancer' - spec_lb_ip = '1.2.3.4' - CONF.set_override('external_svc_net', - '9767e1bd-40a7-4294-8e59-29dd77edb0e3', - group='neutron_defaults') - - expected_resp = { - 'ip_id': fip.id, - 'ip_addr': fip.floating_ip_address, - 'alloc_method': 'user' - } - - result = cls.acquire_service_pub_ip_info(m_driver, spec_type, - spec_lb_ip, project_id) - self.assertEqual(result, expected_resp) - - def test_acquire_service_pub_ip_info_user_specified_non_exist_fip(self): - cls = d_lb_public_ip.FloatingIpServicePubIPDriver - m_driver = mock.Mock(spec=cls) - m_driver._drv_pub_ip = public_ip.FipPubIpDriver() - os_net = self.useFixture(k_fix.MockNetworkClient()).client - - fip = os_fip.FloatingIP( - floating_ip_address='1.2.3.5', - port_id=None, - ) - os_net.ips.return_value = (ip for ip in [fip]) - - project_id = mock.sentinel.project_id - - spec_type = 'LoadBalancer' - spec_lb_ip = '1.2.3.4' - - result = cls.acquire_service_pub_ip_info(m_driver, spec_type, - spec_lb_ip, project_id) - self.assertIsNone(result) - - def test_acquire_service_pub_ip_info_user_specified_occupied_fip(self): - cls = d_lb_public_ip.FloatingIpServicePubIPDriver - m_driver = mock.Mock(spec=cls) - m_driver._drv_pub_ip = public_ip.FipPubIpDriver() - os_net = self.useFixture(k_fix.MockNetworkClient()).client - - fip = os_fip.FloatingIP( - floating_ip_address='1.2.3.4', - port_id='ec29d641-fec4-4f67-928a-124a76b3a8e6', - ) - os_net.ips.return_value = (ip for ip in [fip]) - - project_id = mock.sentinel.project_id - spec_type = 'LoadBalancer' - spec_lb_ip = '1.2.3.4' - - result = cls.acquire_service_pub_ip_info(m_driver, spec_type, - spec_lb_ip, project_id) - self.assertIsNone(result) - - @mock.patch('kuryr_kubernetes.config.CONF') - def test_acquire_service_pub_ip_info_pool_net_not_defined(self, m_cfg): - driver = d_lb_public_ip.FloatingIpServicePubIPDriver() - public_net = '' - m_cfg.neutron_defaults.external_svc_net = public_net - os_net = self.useFixture(k_fix.MockNetworkClient()).client - 
os_net.ips.return_value = (ip for ip in []) - project_id = mock.sentinel.project_id - spec_type = 'LoadBalancer' - spec_lb_ip = None - - result = driver.acquire_service_pub_ip_info( - spec_type, spec_lb_ip, project_id) - self.assertIsNone(result) - - @mock.patch('kuryr_kubernetes.config.CONF') - def test_acquire_service_pub_ip_info_pool_subnet_is_none(self, m_cfg): - cls = d_lb_public_ip.FloatingIpServicePubIPDriver - m_driver = mock.Mock(spec=cls) - m_driver._drv_pub_ip = public_ip.FipPubIpDriver() - os_net = self.useFixture(k_fix.MockNetworkClient()).client - public_net = mock.sentinel.public_subnet - m_cfg.neutron_defaults.external_svc_net = public_net - m_cfg.neutron_defaults.external_svc_subnet = None - - os_net.get_subnet.return_value = os_subnet.Subnet( - network_id='ec29d641-fec4-4f67-928a-124a76b3a8e6', - ) - fip = os_fip.FloatingIP( - floating_ip_address='1.2.3.5', - id='ec29d641-fec4-4f67-928a-124a76b3a888', - ) - os_net.create_ip.return_value = fip - - project_id = mock.sentinel.project_id - spec_type = 'LoadBalancer' - spec_lb_ip = None - - expected_resp = { - 'ip_id': fip.id, - 'ip_addr': fip.floating_ip_address, - 'alloc_method': 'pool' - } - - result = cls.acquire_service_pub_ip_info(m_driver, spec_type, - spec_lb_ip, project_id) - self.assertEqual(result, expected_resp) - - @mock.patch('kuryr_kubernetes.config.CONF') - def test_acquire_service_pub_ip_info_alloc_from_pool(self, m_cfg): - cls = d_lb_public_ip.FloatingIpServicePubIPDriver - m_driver = mock.Mock(spec=cls) - m_driver._drv_pub_ip = public_ip.FipPubIpDriver() - os_net = self.useFixture(k_fix.MockNetworkClient()).client - m_cfg.neutron_defaults.external_svc_subnet = (mock.sentinel - .external_svc_subnet) - - os_net.get_subnet.return_value = os_subnet.Subnet( - network_id='ec29d641-fec4-4f67-928a-124a76b3a8e6', - ) - fip = os_fip.FloatingIP( - floating_ip_address='1.2.3.5', - id='ec29d641-fec4-4f67-928a-124a76b3a888', - ) - os_net.create_ip.return_value = fip - - project_id = mock.sentinel.project_id - spec_type = 'LoadBalancer' - spec_lb_ip = None - - expected_resp = { - 'ip_id': fip.id, - 'ip_addr': fip.floating_ip_address, - 'alloc_method': 'pool' - } - - result = cls.acquire_service_pub_ip_info(m_driver, spec_type, - spec_lb_ip, project_id) - self.assertEqual(result, expected_resp) - - def test_release_pub_ip_empty_lb_ip_info(self): - cls = d_lb_public_ip.FloatingIpServicePubIPDriver - m_driver = mock.Mock(spec=cls) - service_pub_ip_info = None - - rc = cls.release_pub_ip(m_driver, service_pub_ip_info) - self.assertIs(rc, True) - - def test_release_pub_ip_alloc_method_non_pool(self): - cls = d_lb_public_ip.FloatingIpServicePubIPDriver - m_driver = mock.Mock(spec=cls) - - fip = os_fip.FloatingIP( - floating_ip_address='1.2.3.5', - id='ec29d641-fec4-4f67-928a-124a76b3a888', - ) - - service_pub_ip_info = { - 'ip_id': fip.id, - 'ip_addr': fip.floating_ip_address, - 'alloc_method': 'kk' - } - - rc = cls.release_pub_ip(m_driver, service_pub_ip_info) - self.assertIs(rc, True) - - def test_release_pub_ip_alloc_method_user(self): - cls = d_lb_public_ip.FloatingIpServicePubIPDriver - m_driver = mock.Mock(spec=cls) - - fip = os_fip.FloatingIP( - floating_ip_address='1.2.3.5', - id='ec29d641-fec4-4f67-928a-124a76b3a888', - ) - - service_pub_ip_info = { - 'ip_id': fip.id, - 'ip_addr': fip.floating_ip_address, - 'alloc_method': 'user' - } - - rc = cls.release_pub_ip(m_driver, service_pub_ip_info) - self.assertIs(rc, True) - - def test_release_pub_ip_alloc_method_pool_neutron_exception(self): - cls = 
d_lb_public_ip.FloatingIpServicePubIPDriver - m_driver = mock.Mock(spec=cls) - m_driver._drv_pub_ip = public_ip.FipPubIpDriver() - os_net = self.useFixture(k_fix.MockNetworkClient()).client - os_net.delete_ip.side_effect = os_exc.SDKException - - fip = os_fip.FloatingIP( - floating_ip_address='1.2.3.5', - id='ec29d641-fec4-4f67-928a-124a76b3a888', - ) - - service_pub_ip_info = { - 'ip_id': fip.id, - 'ip_addr': fip.floating_ip_address, - 'alloc_method': 'pool' - } - rc = cls.release_pub_ip(m_driver, service_pub_ip_info) - self.assertIs(rc, False) - - def test_release_pub_ip_alloc_method_pool_neutron_succeeded(self): - cls = d_lb_public_ip.FloatingIpServicePubIPDriver - m_driver = mock.Mock(spec=cls) - m_driver._drv_pub_ip = public_ip.FipPubIpDriver() - self.useFixture(k_fix.MockNetworkClient()).client - - fip = os_fip.FloatingIP( - floating_ip_address='1.2.3.5', - id='ec29d641-fec4-4f67-928a-124a76b3a888', - ) - - service_pub_ip_info = { - 'ip_id': fip.id, - 'ip_addr': fip.floating_ip_address, - 'alloc_method': 'pool' - } - rc = cls.release_pub_ip(m_driver, service_pub_ip_info) - self.assertIs(rc, True) - - def test_associate_pub_ip_empty_params(self): - cls = d_lb_public_ip.FloatingIpServicePubIPDriver - m_driver = mock.Mock(spec=cls) - os_net = self.useFixture(k_fix.MockNetworkClient()).client - os_net.update_floatingip.return_value = None - - service_pub_ip_info = None - vip_port_id = None - - result = cls.associate_pub_ip(m_driver, service_pub_ip_info, - vip_port_id) - self.assertIsNone(result) - - def test_associate_lb_fip_id_not_exist(self): - cls = d_lb_public_ip.FloatingIpServicePubIPDriver - m_driver = mock.Mock(spec=cls) - os_net = self.useFixture(k_fix.MockNetworkClient()).client - os_net.update_floatingip.return_value = None - m_driver._drv_pub_ip = public_ip.FipPubIpDriver() - - fip = os_fip.FloatingIP( - floating_ip_address='1.2.3.5', - id='ec29d641-fec4-4f67-928a-124a76b3a888', - ) - service_pub_ip_info = (obj_lbaas - .LBaaSPubIp(ip_id=0, - ip_addr=fip.floating_ip_address, - alloc_method='pool')) - service_pub_ip_info = { - 'ip_id': 0, - 'ip_addr': fip.floating_ip_address, - 'alloc_method': 'pool' - } - - vip_port_id = 'ec29d641-fec4-4f67-928a-124a76b3a777' - - result = cls.associate_pub_ip(m_driver, service_pub_ip_info, - vip_port_id) - self.assertIsNone(result) - - def test_associate_lb_fip_id_not_exist_neutron_exception(self): - cls = d_lb_public_ip.FloatingIpServicePubIPDriver - m_driver = mock.Mock(spec=cls) - m_driver._drv_pub_ip = public_ip.FipPubIpDriver() - os_net = self.useFixture(k_fix.MockNetworkClient()).client - os_net.update_ip.side_effect = os_exc.SDKException - - fip = os_fip.FloatingIP( - floating_ip_address='1.2.3.5', - id='ec29d641-fec4-4f67-928a-124a76b3a888', - ) - - service_pub_ip_info = { - 'ip_id': fip.id, - 'ip_addr': fip.floating_ip_address, - 'alloc_method': 'pool' - } - vip_port_id = 'ec29d641-fec4-4f67-928a-124a76b3a777' - - self.assertRaises(os_exc.SDKException, cls.associate_pub_ip, - m_driver, service_pub_ip_info, vip_port_id) - - def test_disassociate_pub_ip_empty_param(self): - cls = d_lb_public_ip.FloatingIpServicePubIPDriver - m_driver = mock.Mock(spec=cls) - self.useFixture(k_fix.MockNetworkClient()).client - service_pub_ip_info = None - - result = cls.disassociate_pub_ip(m_driver, service_pub_ip_info) - - self.assertIsNone(result) - - def test_disassociate_pub_ip_fip_id_not_exist(self): - cls = d_lb_public_ip.FloatingIpServicePubIPDriver - m_driver = mock.Mock(spec=cls) - m_driver._drv_pub_ip = public_ip.FipPubIpDriver() - os_net = 
self.useFixture(k_fix.MockNetworkClient()).client - os_net.update_floatingip.return_value = None - fip = os_fip.FloatingIP( - floating_ip_address='1.2.3.5', - id='ec29d641-fec4-4f67-928a-124a76b3a888', - ) - service_pub_ip_info = { - 'ip_id': 0, - 'ip_addr': fip.floating_ip_address, - 'alloc_method': 'pool' - } - - result = cls.disassociate_pub_ip(m_driver, service_pub_ip_info) - - self.assertIsNone(result) - - def test_disassociate_pub_ip_neutron_exception(self): - cls = d_lb_public_ip.FloatingIpServicePubIPDriver - m_driver = mock.Mock(spec=cls) - m_driver._drv_pub_ip = public_ip.FipPubIpDriver() - os_net = self.useFixture(k_fix.MockNetworkClient()).client - os_net.update_ip.side_effect = os_exc.SDKException - fip = os_fip.FloatingIP( - floating_ip_address='1.2.3.5', - id='ec29d641-fec4-4f67-928a-124a76b3a888', - ) - - service_pub_ip_info = { - 'ip_id': fip.id, - 'ip_addr': fip.floating_ip_address, - 'alloc_method': 'pool' - } - - self.assertRaises(os_exc.SDKException, cls.disassociate_pub_ip, - m_driver, service_pub_ip_info) diff --git a/kuryr_kubernetes/tests/unit/controller/drivers/test_lbaasv2.py b/kuryr_kubernetes/tests/unit/controller/drivers/test_lbaasv2.py deleted file mode 100644 index d6b6e2e05..000000000 --- a/kuryr_kubernetes/tests/unit/controller/drivers/test_lbaasv2.py +++ /dev/null @@ -1,1602 +0,0 @@ -# Copyright (c) 2016 Mirantis, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from unittest import mock - -from openstack import exceptions as os_exc -from openstack.load_balancer.v2 import listener as o_lis -from openstack.load_balancer.v2 import load_balancer as o_lb -from openstack.load_balancer.v2 import member as o_mem -from openstack.load_balancer.v2 import pool as o_pool -from openstack.network.v2 import port as os_port -from oslo_config import cfg - -from kuryr_kubernetes import constants as k_const -from kuryr_kubernetes.controller.drivers import lbaasv2 as d_lbaasv2 -from kuryr_kubernetes import exceptions as k_exc -from kuryr_kubernetes.objects import lbaas as obj_lbaas -from kuryr_kubernetes.tests import base as test_base -from kuryr_kubernetes.tests import fake -from kuryr_kubernetes.tests.unit import kuryr_fixtures as k_fix - -CONF = cfg.CONF - -OCTAVIA_VERSIONS = { - 'regionOne': { - 'public': { - 'load-balancer': [ - { - 'status': 'SUPPORTED', - 'version': '2.0', - 'raw_status': u'SUPPORTED', - }, - { - 'status': 'SUPPORTED', - 'version': '2.1', - 'raw_status': u'SUPPORTED', - }, - { - 'status': 'CURRENT', - 'version': '2.2', - 'raw_status': u'CURRENT', - }, - ], - }, - }, -} - -BAD_OCTAVIA_VERSIONS = { - 'regionOne': { - 'public': { - 'load-balancer': [ - { - 'status': 'CURRENT', - 'version': None, - 'raw_status': u'CURRENT', - }, - ], - }, - }, -} - - -class TestLBaaSv2Driver(test_base.TestCase): - @mock.patch('kuryr_kubernetes.controller.drivers.lbaasv2.LBaaSv2Driver.' 
- 'get_octavia_version', return_value=(2, 5)) - def test_add_tags(self, _m_get): - CONF.set_override('resource_tags', ['foo'], group='neutron_defaults') - self.addCleanup(CONF.clear_override, 'resource_tags', - group='neutron_defaults') - d = d_lbaasv2.LBaaSv2Driver() - req = {} - d.add_tags('loadbalancer', req) - self.assertEqual({'tags': ['foo']}, req) - - @mock.patch('kuryr_kubernetes.controller.drivers.lbaasv2.LBaaSv2Driver.' - 'get_octavia_version', return_value=(2, 5)) - def test_add_tags_no_tag(self, _m_get): - d = d_lbaasv2.LBaaSv2Driver() - req = {} - d.add_tags('loadbalancer', req) - self.assertEqual({}, req) - - @mock.patch('kuryr_kubernetes.controller.drivers.lbaasv2.LBaaSv2Driver.' - 'get_octavia_version', return_value=(2, 4)) - def test_add_tags_no_support(self, _m_get): - CONF.set_override('resource_tags', ['foo'], group='neutron_defaults') - self.addCleanup(CONF.clear_override, 'resource_tags', - group='neutron_defaults') - d = d_lbaasv2.LBaaSv2Driver() - for res in ('loadbalancer', 'listener', 'pool'): - req = {} - d.add_tags(res, req) - self.assertEqual({'description': 'foo'}, req, - 'No description added to resource %s' % res) - - @mock.patch('kuryr_kubernetes.controller.drivers.lbaasv2.LBaaSv2Driver.' - 'get_octavia_version', return_value=(2, 4)) - def test_add_tags_no_support_resource_no_description(self, _m_get): - CONF.set_override('resource_tags', ['foo'], group='neutron_defaults') - self.addCleanup(CONF.clear_override, 'resource_tags', - group='neutron_defaults') - d = d_lbaasv2.LBaaSv2Driver() - for res in ('member', 'rule'): - req = {} - d.add_tags(res, req) - self.assertEqual({}, req, 'Unnecessary description added to ' - 'resource %s' % res) - - def test_get_octavia_version(self): - lbaas = self.useFixture(k_fix.MockLBaaSClient()).client - lbaas.get_all_version_data.return_value = OCTAVIA_VERSIONS - self.assertEqual((2, 2), - d_lbaasv2.LBaaSv2Driver.get_octavia_version(None)) - - def test_get_octavia_version_is_none(self): - lbaas = self.useFixture(k_fix.MockLBaaSClient()).client - lbaas.get_all_version_data.return_value = BAD_OCTAVIA_VERSIONS - self.assertRaises(k_exc.UnreachableOctavia, - d_lbaasv2.LBaaSv2Driver.get_octavia_version, None) - - def test_ensure_loadbalancer(self): - os_net = self.useFixture(k_fix.MockNetworkClient()).client - cls = d_lbaasv2.LBaaSv2Driver - m_driver = mock.Mock(spec=d_lbaasv2.LBaaSv2Driver) - expected_resp = { - 'provide': 'octavia', - 'port_id': 'D3FA400A-F543-4B91-9CD3-047AF0CE42E2', - 'security_groups': [] - } - project_id = 'TEST_PROJECT' - subnet_id = 'D3FA400A-F543-4B91-9CD3-047AF0CE42D1' - ip = '1.2.3.4' - sg_ids = ['foo', 'bar'] - lb_name = 'just_a_name' - - m_driver._ensure_loadbalancer.return_value = expected_resp - os_net.update_port = mock.Mock() - resp = cls.ensure_loadbalancer(m_driver, lb_name, project_id, - subnet_id, ip, sg_ids, 'ClusterIP') - m_driver._ensure_loadbalancer.assert_called_once_with( - mock.ANY) - req = m_driver._ensure_loadbalancer.call_args[0][0] - self.assertEqual(lb_name, req['name']) - self.assertEqual(project_id, req['project_id']) - self.assertEqual(subnet_id, req['subnet_id']) - self.assertEqual(ip, str(req['ip'])) - self.assertEqual(expected_resp, resp) - os_net.update_port.assert_not_called() - - def test_ensure_loadbalancer_not_ready(self): - cls = d_lbaasv2.LBaaSv2Driver - m_driver = mock.Mock(spec=d_lbaasv2.LBaaSv2Driver) - name = 'TEST_NAME' - project_id = 'TEST_PROJECT' - subnet_id = 'D3FA400A-F543-4B91-9CD3-047AF0CE42D1' - ip = '1.2.3.4' - # TODO(ivc): handle security groups - 
sg_ids = [] - - m_driver._ensure_loadbalancer.return_value = None - self.assertRaises(k_exc.ResourceNotReady, cls.ensure_loadbalancer, - m_driver, name, project_id, subnet_id, ip, - sg_ids, 'ClusterIP') - - def test_cascade_release_loadbalancer(self): - self.useFixture(k_fix.MockNetworkClient()).client - lbaas = self.useFixture(k_fix.MockLBaaSClient()).client - lbaas.lbaas_loadbalancer_path = "boo %s" - cls = d_lbaasv2.LBaaSv2Driver - m_driver = mock.Mock(spec=d_lbaasv2.LBaaSv2Driver) - loadbalancer = { - 'name': 'TEST_NAME', - 'project_id': 'TEST_PROJECT', - 'subnet_id': 'D3FA400A-F543-4B91-9CD3-047AF0CE42D1', - 'ip': '1.2.3.4', - 'security_groups': [], - 'id': '00EE9E11-91C2-41CF-8FD4-7970579E5C4C', - 'provider': None - } - - cls.release_loadbalancer(m_driver, loadbalancer) - - m_driver._release.assert_called_once_with( - loadbalancer, loadbalancer, lbaas.delete_load_balancer, - loadbalancer['id'], cascade=True) - - def _test_ensure_listener(self): - cls = d_lbaasv2.LBaaSv2Driver - m_driver = mock.Mock(spec=d_lbaasv2.LBaaSv2Driver) - expected_resp = mock.sentinel.expected_resp - project_id = 'TEST_PROJECT' - loadbalancer_id = '00EE9E11-91C2-41CF-8FD4-7970579E5C4C' - protocol = 'TCP' - port = 1234 - loadbalancer = { - 'name': 'TEST_NAME', - 'project_id': project_id, - 'subnet_id': 'D3FA400A-F543-4B91-9CD3-047AF0CE42D1', - 'ip': '1.2.3.4', - 'id': '00EE9E11-91C2-41CF-8FD4-7970579E5C4C', - 'provider': 'amphora' - } - # TODO(ivc): handle security groups - m_driver._ensure_provisioned.return_value = expected_resp - - resp = cls.ensure_listener(m_driver, loadbalancer, protocol, port) - - m_driver._ensure_provisioned.assert_called_once_with( - loadbalancer, mock.ANY, m_driver._create_listener, - m_driver._find_listener, d_lbaasv2._LB_STS_POLL_SLOW_INTERVAL) - listener = m_driver._ensure_provisioned.call_args[0][1] - - self.assertEqual("%s:%s:%s" % (loadbalancer['name'], protocol, port), - listener['name']) - self.assertEqual(project_id, listener['project_id']) - self.assertEqual(loadbalancer_id, listener['loadbalancer_id']) - self.assertEqual(protocol, listener['protocol']) - self.assertEqual(port, listener['port']) - self.assertEqual(expected_resp, resp) - - def test_ensure_listener_bad_request_exception(self): - cls = d_lbaasv2.LBaaSv2Driver - m_driver = mock.Mock(spec=d_lbaasv2.LBaaSv2Driver) - port = 1234 - protocol = 'TCP' - loadbalancer = { - 'name': 'TEST_NAME', - 'project_id': 'TEST_PROJECT', - 'subnet_id': 'D3FA400A-F543-4B91-9CD3-047AF0CE42D1', - 'ip': '1.2.3.4', - 'id': '00EE9E11-91C2-41CF-8FD4-7970579E5C4C', - 'provider': 'amphora' - } - m_driver._ensure_provisioned.side_effect = os_exc.BadRequestException - - resp = cls.ensure_listener(m_driver, loadbalancer, - protocol, port) - self.assertIsNone(resp) - - def test_release_listener(self): - os_net = self.useFixture(k_fix.MockNetworkClient()).client - lbaas = self.useFixture(k_fix.MockLBaaSClient()).client - os_net.security_group_rules.return_value = (x for x in []) - cls = d_lbaasv2.LBaaSv2Driver - m_driver = mock.Mock(spec=d_lbaasv2.LBaaSv2Driver) - m_driver._get_vip_port.return_value = os_port.Port( - security_group_ids=[mock.sentinel.sg_id], - ) - loadbalancer = { - 'name': 'TEST_NAME', - 'project_id': 'TEST_PROJECT', - 'subnet_id': 'D3FA400A-F543-4B91-9CD3-047AF0CE42D1', - 'ip': '1.2.3.4', - 'id': '00EE9E11-91C2-41CF-8FD4-7970579E5C4C', - 'security_groups': [], - 'provider': 'amphora' - } - listener = { - 'name': 'TEST_NAME', - 'project_id': 'TEST_PROJECT', - 'loadbalancer_id': '00EE9E11-91C2-41CF-8FD4-7970579E5C4C', - 
'protocol': 'TCP', - 'port': 1234, - 'id': 'A57B7771-6050-4CA8-A63C-443493EC98AB' - } - - cls.release_listener(m_driver, loadbalancer, listener) - - m_driver._release.assert_called_once_with(loadbalancer, listener, - lbaas.delete_listener, - listener['id']) - - def test_ensure_pool(self): - cls = d_lbaasv2.LBaaSv2Driver - m_driver = mock.Mock(spec=d_lbaasv2.LBaaSv2Driver) - expected_resp = mock.sentinel.expected_resp - loadbalancer = { - 'project_id': 'TEST_PROJECT', - 'id': '00EE9E11-91C2-41CF-8FD4-7970579E5C4C', - } - listener = { - 'id': 'A57B7771-6050-4CA8-A63C-443493EC98AB', - 'name': 'TEST_LISTENER_NAME', - 'protocol': 'TCP', - } - m_driver._ensure_provisioned.return_value = expected_resp - - resp = cls.ensure_pool(m_driver, loadbalancer, listener) - - m_driver._ensure_provisioned.assert_called_once_with( - loadbalancer, mock.ANY, m_driver._create_pool, - m_driver._find_pool) - pool = m_driver._ensure_provisioned.call_args[0][1] - self.assertEqual(listener['name'], pool['name']) - self.assertEqual(loadbalancer['project_id'], pool['project_id']) - self.assertEqual(listener['id'], pool['listener_id']) - self.assertEqual(listener['protocol'], pool['protocol']) - self.assertEqual(expected_resp, resp) - - def test_release_pool(self): - lbaas = self.useFixture(k_fix.MockLBaaSClient()).client - cls = d_lbaasv2.LBaaSv2Driver - m_driver = mock.Mock(spec=d_lbaasv2.LBaaSv2Driver) - loadbalancer = mock.Mock() - pool = { - 'name': 'TEST_NAME', - 'project_id': 'TEST_PROJECT', - 'loadbalancer_id': '00EE9E11-91C2-41CF-8FD4-7970579E5C4C', - 'listener_id': 'A57B7771-6050-4CA8-A63C-443493EC98AB', - 'protocol': 'TCP', - 'id': 'D4F35594-27EB-4F4C-930C-31DD40F53B77' - } - - cls.release_pool(m_driver, loadbalancer, pool) - - m_driver._release.assert_called_once_with(loadbalancer, pool, - lbaas.delete_pool, - pool['id']) - - def test_ensure_member(self): - lbaas = self.useFixture(k_fix.MockLBaaSClient()).client - cls = d_lbaasv2.LBaaSv2Driver - m_driver = mock.Mock(spec=d_lbaasv2.LBaaSv2Driver) - expected_resp = mock.sentinel.expected_resp - loadbalancer = { - 'id': '00EE9E11-91C2-41CF-8FD4-7970579E5C4C', - 'project_id': 'TEST_PROJECT' - } - pool = { - 'id': 'D4F35594-27EB-4F4C-930C-31DD40F53B77', - 'project_id': 'TEST_PROJECT' - } - - subnet_id = 'D3FA400A-F543-4B91-9CD3-047AF0CE42D1' - ip = '1.2.3.4' - port = 1234 - namespace = 'TEST_NAMESPACE' - name = 'TEST_NAME' - target_ref = {'namespace': namespace, 'name': name} - m_driver._ensure_provisioned.return_value = expected_resp - - resp = cls.ensure_member(m_driver, loadbalancer, pool, - subnet_id, ip, port, - target_ref['namespace'], target_ref['name']) - - m_driver._ensure_provisioned.assert_called_once_with( - loadbalancer, mock.ANY, m_driver._create_member, - m_driver._find_member, update=lbaas.update_member) - member = m_driver._ensure_provisioned.call_args[0][1] - self.assertEqual("%s/%s:%s" % (namespace, name, port), member['name']) - self.assertEqual(pool['project_id'], member['project_id']) - self.assertEqual(pool['id'], member['pool_id']) - self.assertEqual(subnet_id, member['subnet_id']) - self.assertEqual(ip, str(member['ip'])) - self.assertEqual(port, member['port']) - self.assertEqual(expected_resp, resp) - - def test_release_member(self): - lbaas = self.useFixture(k_fix.MockLBaaSClient()).client - cls = d_lbaasv2.LBaaSv2Driver - m_driver = mock.Mock(spec=d_lbaasv2.LBaaSv2Driver) - loadbalancer = { - 'name': 'TEST_NAME', - 'project_id': 'TEST_PROJECT', - 'subnet_id': 'D3FA400A-F543-4B91-9CD3-047AF0CE42D1', - 'ip': '1.2.3.4', - 
'security_groups': [], - 'provider': None - } - - member = { - 'name': 'TEST_NAME', - 'project_id': 'TEST_PROJECT', - 'pool_id': 'D4F35594-27EB-4F4C-930C-31DD40F53B77', - 'subnet_id': 'D3FA400A-F543-4B91-9CD3-047AF0CE42D1', - 'ip': '1.2.3.4', - 'port': 1234, - 'id': '3A70CEC0-392D-4BC1-A27C-06E63A0FD54F' - } - - cls.release_member(m_driver, loadbalancer, member) - - m_driver._release.assert_called_once_with(loadbalancer, member, - lbaas.delete_member, - member['id'], - member['pool_id']) - - def test_create_loadbalancer(self): - cls = d_lbaasv2.LBaaSv2Driver - m_driver = mock.Mock(spec=d_lbaasv2.LBaaSv2Driver) - lbaas = self.useFixture(k_fix.MockLBaaSClient()).client - loadbalancer = { - 'name': 'TEST_NAME', - 'project_id': 'TEST_PROJECT', - 'subnet_id': 'D3FA400A-F543-4B91-9CD3-047AF0CE42D1', - 'ip': '1.2.3.4', - 'security_groups': [], - 'provider': None - } - - loadbalancer_id = '00EE9E11-91C2-41CF-8FD4-7970579E5C4C' - req = { - 'name': loadbalancer['name'], - 'project_id': loadbalancer['project_id'], - 'vip_address': str(loadbalancer['ip']), - 'vip_subnet_id': loadbalancer['subnet_id'], - } - resp = o_lb.LoadBalancer(id=loadbalancer_id, provider='haproxy') - lbaas.create_load_balancer.return_value = resp - m_driver._get_vip_port.return_value = os_port.Port( - id=mock.sentinel.port_id, - ) - - ret = cls._create_loadbalancer(m_driver, loadbalancer) - lbaas.create_load_balancer.assert_called_once_with(**req) - self.assertEqual(loadbalancer, ret) - self.assertEqual(loadbalancer_id, ret['id']) - - def test_create_loadbalancer_provider_defined(self): - cls = d_lbaasv2.LBaaSv2Driver - m_driver = mock.Mock(spec=d_lbaasv2.LBaaSv2Driver) - lbaas = self.useFixture(k_fix.MockLBaaSClient()).client - loadbalancer = { - 'name': 'TEST_NAME', - 'project_id': 'TEST_PROJECT', - 'subnet_id': 'D3FA400A-F543-4B91-9CD3-047AF0CE42D1', - 'ip': '1.2.3.4', - 'security_groups': [], - 'provider': 'amphora' - } - loadbalancer_id = '00EE9E11-91C2-41CF-8FD4-7970579E5C4C' - req = { - 'name': loadbalancer['name'], - 'project_id': loadbalancer['project_id'], - 'vip_address': str(loadbalancer['ip']), - 'vip_subnet_id': loadbalancer['subnet_id'], - 'provider': loadbalancer['provider'], - } - resp = o_lb.LoadBalancer(id=loadbalancer_id, provider='amphora') - lbaas.create_load_balancer.return_value = resp - m_driver._get_vip_port.return_value = os_port.Port( - id=mock.sentinel.port_id, - ) - - ret = cls._create_loadbalancer(m_driver, loadbalancer) - lbaas.create_load_balancer.assert_called_once_with(**req) - self.assertEqual(loadbalancer, ret) - self.assertEqual(loadbalancer_id, ret['id']) - - def test_create_loadbalancer_provider_mismatch(self): - cls = d_lbaasv2.LBaaSv2Driver - m_driver = mock.Mock(spec=d_lbaasv2.LBaaSv2Driver) - lbaas = self.useFixture(k_fix.MockLBaaSClient()).client - loadbalancer = { - 'name': 'TEST_NAME', - 'project_id': 'TEST_PROJECT', - 'subnet_id': 'D3FA400A-F543-4B91-9CD3-047AF0CE42D1', - 'ip': '1.2.3.4', - 'security_groups': [], - 'provider': 'amphora' - } - loadbalancer_id = '00EE9E11-91C2-41CF-8FD4-7970579E5C4C' - req = { - 'name': loadbalancer['name'], - 'project_id': loadbalancer['project_id'], - 'vip_address': str(loadbalancer['ip']), - 'vip_subnet_id': loadbalancer['subnet_id'], - 'provider': loadbalancer['provider'], - } - resp = o_lb.LoadBalancer(id=loadbalancer_id, provider='haproxy') - lbaas.create_load_balancer.return_value = resp - m_driver._get_vip_port.return_value = os_port.Port( - id=mock.sentinel.port_id, - ) - - ret = cls._create_loadbalancer(m_driver, loadbalancer) - 
lbaas.create_load_balancer.assert_called_once_with(**req) - self.assertIsNone(ret) - - def test_find_loadbalancer(self): - lbaas = self.useFixture(k_fix.MockLBaaSClient()).client - cls = d_lbaasv2.LBaaSv2Driver - m_driver = mock.Mock(spec=d_lbaasv2.LBaaSv2Driver) - loadbalancer = { - 'name': 'TEST_NAME', - 'project_id': 'TEST_PROJECT', - 'subnet_id': 'D3FA400A-F543-4B91-9CD3-047AF0CE42D1', - 'ip': '1.2.3.4', - 'security_groups': [], - 'provider': 'haproxy' - } - loadbalancer_id = '00EE9E11-91C2-41CF-8FD4-7970579E5C4C' - resp = iter([o_lb.LoadBalancer(id=loadbalancer_id, provider='haproxy', - provisioning_status='ACTIVE')]) - lbaas.load_balancers.return_value = resp - m_driver._get_vip_port.return_value = os_port.Port( - id=mock.sentinel.port_id, - ) - - ret = cls._find_loadbalancer(m_driver, loadbalancer) - lbaas.load_balancers.assert_called_once_with( - name=loadbalancer['name'], - project_id=loadbalancer['project_id'], - vip_address=str(loadbalancer['ip']), - vip_subnet_id=loadbalancer['subnet_id'], - provider='haproxy') - self.assertEqual(loadbalancer, ret) - self.assertEqual(loadbalancer_id, ret['id']) - m_driver.release_loadbalancer.assert_not_called() - - def test_find_loadbalancer_not_found(self): - lbaas = self.useFixture(k_fix.MockLBaaSClient()).client - cls = d_lbaasv2.LBaaSv2Driver - m_driver = mock.Mock(spec=d_lbaasv2.LBaaSv2Driver) - loadbalancer = obj_lbaas.LBaaSLoadBalancer( - name='TEST_NAME', project_id='TEST_PROJECT', ip='1.2.3.4', - subnet_id='D3FA400A-F543-4B91-9CD3-047AF0CE42D1') - loadbalancer = { - 'name': 'TEST_NAME', - 'project_id': 'TEST_PROJECT', - 'subnet_id': 'D3FA400A-F543-4B91-9CD3-047AF0CE42D1', - 'ip': '1.2.3.4', - 'provider': None - } - resp = iter([]) - lbaas.load_balancers.return_value = resp - - ret = cls._find_loadbalancer(m_driver, loadbalancer) - lbaas.load_balancers.assert_called_once_with( - name=loadbalancer['name'], - project_id=loadbalancer['project_id'], - vip_address=str(loadbalancer['ip']), - vip_subnet_id=loadbalancer['subnet_id'], - provider=None) - self.assertIsNone(ret) - m_driver.release_loadbalancer.assert_not_called() - - def test_find_loadbalancer_error(self): - lbaas = self.useFixture(k_fix.MockLBaaSClient()).client - cls = d_lbaasv2.LBaaSv2Driver - m_driver = mock.Mock(spec=d_lbaasv2.LBaaSv2Driver) - loadbalancer = { - 'name': 'test_namespace/test_name', - 'project_id': 'TEST_PROJECT', - 'subnet_id': 'D3FA400A-F543-4B91-9CD3-047AF0CE42D1', - 'ip': '1.2.3.4', - 'provider': None - } - loadbalancer_id = '00EE9E11-91C2-41CF-8FD4-7970579E5C4C' - resp = iter([o_lb.LoadBalancer(id=loadbalancer_id, provider='haproxy', - provisioning_status='ERROR')]) - lbaas.load_balancers.return_value = resp - m_driver._get_vip_port.return_value = os_port.Port( - id=mock.sentinel.port_id, - ) - - ret = cls._find_loadbalancer(m_driver, loadbalancer) - lbaas.load_balancers.assert_called_once_with( - name=loadbalancer['name'], - project_id=loadbalancer['project_id'], - vip_address=str(loadbalancer['ip']), - vip_subnet_id=loadbalancer['subnet_id'], - provider=None) - self.assertIsNone(ret) - m_driver.release_loadbalancer.assert_called_once() - - def test_create_listener(self): - cls = d_lbaasv2.LBaaSv2Driver - m_driver = mock.Mock(spec=d_lbaasv2.LBaaSv2Driver) - lbaas = self.useFixture(k_fix.MockLBaaSClient()).client - listener = { - 'name': 'TEST_NAME', - 'project_id': 'TEST_PROJECT', - 'loadbalancer_id': '00EE9E11-91C2-41CF-8FD4-7970579E5C4C', - 'protocol': 'TCP', - 'port': 1234 - } - listener_id = 'A57B7771-6050-4CA8-A63C-443493EC98AB' - - req = { - 'name': 
listener['name'], - 'project_id': listener['project_id'], - 'loadbalancer_id': listener['loadbalancer_id'], - 'protocol': listener['protocol'], - 'protocol_port': listener['port']} - resp = o_lis.Listener(id=listener_id) - lbaas.create_listener.return_value = resp - - ret = cls._create_listener(m_driver, listener) - lbaas.create_listener.assert_called_once_with(**req) - self.assertEqual(listener, ret) - self.assertEqual(listener_id, ret['id']) - - def test_create_listener_with_different_timeouts(self): - cls = d_lbaasv2.LBaaSv2Driver - m_driver = mock.Mock(spec=d_lbaasv2.LBaaSv2Driver) - lbaas = self.useFixture(k_fix.MockLBaaSClient()).client - listener = { - 'name': 'TEST_NAME', - 'project_id': 'TEST_PROJECT', - 'loadbalancer_id': '00EE9E11-91C2-41CF-8FD4-7970579E5C4C', - 'protocol': 'TCP', - 'port': 5678, - 'timeout_client_data': 75000, - 'timeout_member_data': 0 - } - listener_id = 'A57B7771-6050-4CA8-A63C-443493EC98AB' - - req = { - 'name': listener['name'], - 'project_id': listener['project_id'], - 'loadbalancer_id': listener['loadbalancer_id'], - 'protocol': listener['protocol'], - 'protocol_port': listener['port'], - 'timeout_client_data': listener['timeout_client_data']} - resp = o_lis.Listener(id=listener_id) - lbaas.create_listener.return_value = resp - - ret = cls._create_listener(m_driver, listener) - lbaas.create_listener.assert_called_once_with(**req) - self.assertEqual(listener, ret) - self.assertEqual(listener_id, ret['id']) - - def test_find_listener(self): - lbaas = self.useFixture(k_fix.MockLBaaSClient()).client - cls = d_lbaasv2.LBaaSv2Driver - m_driver = mock.Mock(spec=d_lbaasv2.LBaaSv2Driver) - loadbalancer = { - 'id': '00EE9E11-91C2-41CF-8FD4-7970579E5C4C', - } - listener = { - 'name': 'TEST_NAME', - 'project_id': 'TEST_PROJECT', - 'loadbalancer_id': '00EE9E11-91C2-41CF-8FD4-7970579E5C4C', - 'protocol': 'TCP', - 'port': 1234 - } - listener_id = 'A57B7771-6050-4CA8-A63C-443493EC98AB' - lbaas.listeners.return_value = iter([o_lis.Listener(id=listener_id)]) - - ret = cls._find_listener(m_driver, listener, loadbalancer) - lbaas.listeners.assert_called_once_with( - name=listener['name'], - project_id=listener['project_id'], - load_balancer_id=listener['loadbalancer_id'], - protocol=listener['protocol'], - protocol_port=listener['port']) - self.assertEqual(listener, ret) - self.assertEqual(listener_id, ret['id']) - - def test_find_listener_not_found(self): - lbaas = self.useFixture(k_fix.MockLBaaSClient()).client - cls = d_lbaasv2.LBaaSv2Driver - m_driver = mock.Mock(spec=d_lbaasv2.LBaaSv2Driver) - loadbalancer = { - 'id': '00EE9E11-91C2-41CF-8FD4-7970579E5C4C', - } - listener = { - 'name': 'TEST_NAME', - 'project_id': 'TEST_PROJECT', - 'loadbalancer_id': '00EE9E11-91C2-41CF-8FD4-7970579E5C4C', - 'protocol': 'TCP', - 'port': 1234 - } - resp = iter([]) - lbaas.listeners.return_value = resp - - ret = cls._find_listener(m_driver, listener, loadbalancer) - lbaas.listeners.assert_called_once_with( - name=listener['name'], - project_id=listener['project_id'], - load_balancer_id=listener['loadbalancer_id'], - protocol=listener['protocol'], - protocol_port=listener['port']) - self.assertIsNone(ret) - - def test_create_pool(self): - cls = d_lbaasv2.LBaaSv2Driver - m_driver = mock.Mock(spec=d_lbaasv2.LBaaSv2Driver) - lbaas = self.useFixture(k_fix.MockLBaaSClient()).client - lb_algorithm = 'ROUND_ROBIN' - pool = { - 'name': 'TEST_NAME', - 'project_id': 'TEST_PROJECT', - 'loadbalancer_id': '00EE9E11-91C2-41CF-8FD4-7970579E5C4C', - 'listener_id': 'A57B7771-6050-4CA8-A63C-443493EC98AB', 
- 'protocol': 'TCP' - } - pool_id = 'D4F35594-27EB-4F4C-930C-31DD40F53B77' - - req = { - 'name': pool['name'], - 'project_id': pool['project_id'], - 'listener_id': pool['listener_id'], - 'loadbalancer_id': pool['loadbalancer_id'], - 'protocol': pool['protocol'], - 'lb_algorithm': lb_algorithm} - resp = o_pool.Pool(id=pool_id) - lbaas.create_pool.return_value = resp - - ret = cls._create_pool(m_driver, pool) - lbaas.create_pool.assert_called_once_with(**req) - self.assertEqual(pool, ret) - self.assertEqual(pool_id, ret['id']) - - def test_create_pool_with_different_lb_algorithm(self): - cls = d_lbaasv2.LBaaSv2Driver - m_driver = mock.Mock(spec=d_lbaasv2.LBaaSv2Driver) - lbaas = self.useFixture(k_fix.MockLBaaSClient()).client - lb_algorithm = 'SOURCE_IP_PORT' - pool = { - 'name': 'TEST_NAME', - 'project_id': 'TEST_PROJECT', - 'loadbalancer_id': '00EE9E11-91C2-41CF-8FD4-7970579E5C4C', - 'listener_id': 'A57B7771-6050-4CA8-A63C-443493EC98AB', - 'protocol': 'TCP' - } - pool_id = 'D4F35594-27EB-4F4C-930C-31DD40F53B77' - req = { - 'name': pool['name'], - 'project_id': pool['project_id'], - 'listener_id': pool['listener_id'], - 'loadbalancer_id': pool['loadbalancer_id'], - 'protocol': pool['protocol'], - 'lb_algorithm': lb_algorithm} - resp = o_pool.Pool(id=pool_id) - lbaas.create_pool.return_value = resp - CONF.set_override('lb_algorithm', lb_algorithm, - group='octavia_defaults') - self.addCleanup(CONF.clear_override, 'lb_algorithm', - group='octavia_defaults') - - ret = cls._create_pool(m_driver, pool) - lbaas.create_pool.assert_called_once_with(**req) - self.assertEqual(pool, ret) - self.assertEqual(pool_id, ret['id']) - - def test_create_pool_conflict(self): - cls = d_lbaasv2.LBaaSv2Driver - m_driver = mock.Mock(spec=d_lbaasv2.LBaaSv2Driver) - lbaas = self.useFixture(k_fix.MockLBaaSClient()).client - lb_algorithm = 'ROUND_ROBIN' - pool = { - 'name': 'TEST_NAME', - 'project_id': 'TEST_PROJECT', - 'loadbalancer_id': '00EE9E11-91C2-41CF-8FD4-7970579E5C4C', - 'listener_id': 'A57B7771-6050-4CA8-A63C-443493EC98AB', - 'protocol': 'TCP' - } - req = { - 'name': pool['name'], - 'project_id': pool['project_id'], - 'listener_id': pool['listener_id'], - 'loadbalancer_id': pool['loadbalancer_id'], - 'protocol': pool['protocol'], - 'lb_algorithm': lb_algorithm} - lbaas.create_pool.side_effect = os_exc.BadRequestException - - self.assertRaises(os_exc.BadRequestException, cls._create_pool, - m_driver, pool) - lbaas.create_pool.assert_called_once_with(**req) - - def test_find_pool_by_listener(self): - lbaas = self.useFixture(k_fix.MockLBaaSClient()).client - cls = d_lbaasv2.LBaaSv2Driver - m_driver = mock.Mock(spec=d_lbaasv2.LBaaSv2Driver) - loadbalancer = { - 'id': '00EE9E11-91C2-41CF-8FD4-7970579E5C4C', - } - pool = { - 'name': 'TEST_NAME', - 'project_id': 'TEST_PROJECT', - 'loadbalancer_id': '00EE9E11-91C2-41CF-8FD4-7970579E5C4C', - 'listener_id': 'A57B7771-6050-4CA8-A63C-443493EC98AB', - 'protocol': 'TCP' - } - pool_id = 'D4F35594-27EB-4F4C-930C-31DD40F53B77' - resp = [o_pool.Pool(id=pool_id, - listeners=[{"id": pool['listener_id']}])] - lbaas.pools.return_value = resp - - ret = cls._find_pool(m_driver, pool, loadbalancer) - lbaas.pools.assert_called_once_with( - name=pool['name'], - project_id=pool['project_id'], - loadbalancer_id=pool['loadbalancer_id'], - protocol=pool['protocol']) - self.assertEqual(pool, ret) - self.assertEqual(pool_id, ret['id']) - - def test_find_pool_by_listener_not_found(self): - lbaas = self.useFixture(k_fix.MockLBaaSClient()).client - cls = d_lbaasv2.LBaaSv2Driver - m_driver = 
mock.Mock(spec=d_lbaasv2.LBaaSv2Driver) - loadbalancer = { - 'id': '00EE9E11-91C2-41CF-8FD4-7970579E5C4C' - } - pool = { - 'name': 'TEST_NAME', - 'project_id': 'TEST_PROJECT', - 'loadbalancer_id': '00EE9E11-91C2-41CF-8FD4-7970579E5C4C', - 'listener_id': 'A57B7771-6050-4CA8-A63C-443493EC98AB', - 'protocol': 'TCP' - } - resp = [] - lbaas.pools.return_value = resp - - ret = cls._find_pool(m_driver, pool, loadbalancer) - lbaas.pools.assert_called_once_with( - name=pool['name'], - project_id=pool['project_id'], - loadbalancer_id=pool['loadbalancer_id'], - protocol=pool['protocol']) - self.assertIsNone(ret) - - def test_create_member(self): - cls = d_lbaasv2.LBaaSv2Driver - m_driver = mock.Mock(spec=d_lbaasv2.LBaaSv2Driver) - lbaas = self.useFixture(k_fix.MockLBaaSClient()).client - member = { - 'name': 'TEST_NAME', - 'project_id': 'TEST_PROJECT', - 'pool_id': 'D4F35594-27EB-4F4C-930C-31DD40F53B77', - 'subnet_id': 'D3FA400A-F543-4B91-9CD3-047AF0CE42D1', - 'ip': '1.2.3.4', - 'port': 1234 - } - member_id = '3A70CEC0-392D-4BC1-A27C-06E63A0FD54F' - req = { - 'name': member['name'], - 'project_id': member['project_id'], - 'subnet_id': member['subnet_id'], - 'address': str(member['ip']), - 'protocol_port': member['port']} - resp = o_mem.Member(id=member_id) - lbaas.create_member.return_value = resp - - ret = cls._create_member(m_driver, member) - lbaas.create_member.assert_called_once_with(member['pool_id'], **req) - self.assertEqual(member, ret) - self.assertEqual(member_id, ret['id']) - - def test_find_member(self): - lbaas = self.useFixture(k_fix.MockLBaaSClient()).client - cls = d_lbaasv2.LBaaSv2Driver - m_driver = mock.Mock(spec=d_lbaasv2.LBaaSv2Driver) - loadbalancer = obj_lbaas.LBaaSLoadBalancer() - member = { - 'name': 'TEST_NAME', - 'project_id': 'TEST_PROJECT', - 'pool_id': 'D4F35594-27EB-4F4C-930C-31DD40F53B77', - 'subnet_id': 'D3FA400A-F543-4B91-9CD3-047AF0CE42D1', - 'ip': '1.2.3.4', - 'port': 1234 - } - member_id = '3A70CEC0-392D-4BC1-A27C-06E63A0FD54F' - resp = iter([o_mem.Member(id=member_id, name='TEST_NAME')]) - lbaas.members.return_value = resp - ret = cls._find_member(m_driver, member, loadbalancer) - lbaas.members.assert_called_once_with( - member['pool_id'], - project_id=member['project_id'], - subnet_id=member['subnet_id'], - address=member['ip'], - protocol_port=member['port']) - # the member dict is copied, so the id is added to the return obj - member['id'] = member_id - self.assertEqual(member, ret) - self.assertEqual(member_id, ret['id']) - - def test_find_member_not_found(self): - lbaas = self.useFixture(k_fix.MockLBaaSClient()).client - cls = d_lbaasv2.LBaaSv2Driver - m_driver = mock.Mock(spec=d_lbaasv2.LBaaSv2Driver) - loadbalancer = obj_lbaas.LBaaSLoadBalancer() - member = { - 'name': 'TEST_NAME', - 'project_id': 'TEST_PROJECT', - 'pool_id': 'D4F35594-27EB-4F4C-930C-31DD40F53B77', - 'subnet_id': 'D3FA400A-F543-4B91-9CD3-047AF0CE42D1', - 'ip': '1.2.3.4', - 'port': 1234 - } - resp = iter([]) - lbaas.members.return_value = resp - - ret = cls._find_member(m_driver, member, loadbalancer) - lbaas.members.assert_called_once_with( - member['pool_id'], - project_id=member['project_id'], - subnet_id=member['subnet_id'], - address=member['ip'], - protocol_port=member['port']) - self.assertIsNone(ret) - - def test_ensure(self): - cls = d_lbaasv2.LBaaSv2Driver - m_driver = mock.Mock(spec=d_lbaasv2.LBaaSv2Driver) - obj = mock.Mock() - lb = mock.Mock() - m_create = mock.Mock() - m_find = mock.Mock() - expected_result = mock.sentinel.expected_result - m_create.return_value = 
expected_result - - ret = cls._ensure(m_driver, m_create, m_find, - obj, lb) - m_create.assert_called_once_with(obj) - self.assertEqual(expected_result, ret) - - def _verify_ensure_with_exception(self, exception_value): - cls = d_lbaasv2.LBaaSv2Driver - m_driver = mock.Mock(spec=d_lbaasv2.LBaaSv2Driver) - obj = mock.Mock() - lb = mock.Mock() - m_create = mock.Mock() - m_find = mock.Mock() - expected_result = None - m_create.side_effect = exception_value - m_find.return_value = expected_result - - ret = cls._ensure(m_driver, m_create, m_find, - obj, lb) - m_create.assert_called_once_with(obj) - m_find.assert_called_once_with(obj, lb) - self.assertEqual(expected_result, ret) - - def test_ensure_with_conflict(self): - self._verify_ensure_with_exception( - os_exc.ConflictException(http_status=409)) - - def test_ensure_with_internalservererror(self): - self._verify_ensure_with_exception( - os_exc.HttpException(http_status=500)) - - def test_request(self): - cls = d_lbaasv2.LBaaSv2Driver - m_driver = mock.Mock(spec=d_lbaasv2.LBaaSv2Driver) - loadbalancer = mock.sentinel.loadbalancer - obj = mock.sentinel.obj - create = mock.sentinel.create - find = mock.sentinel.find - timer = [mock.sentinel.t0] - m_driver._provisioning_timer.return_value = timer - m_driver._ensure.side_effect = os_exc.BadRequestException() - - self.assertRaises(os_exc.BadRequestException, - cls._ensure_provisioned, m_driver, - loadbalancer, obj, create, find) - - m_driver._wait_for_provisioning.assert_has_calls( - [mock.call(loadbalancer, t, d_lbaasv2._LB_STS_POLL_FAST_INTERVAL) - for t in timer]) - m_driver._ensure.assert_has_calls( - [mock.call(create, find, obj, loadbalancer) for _ in timer]) - - def test_ensure_not_ready(self): - cls = d_lbaasv2.LBaaSv2Driver - m_driver = mock.Mock(spec=d_lbaasv2.LBaaSv2Driver) - loadbalancer = mock.sentinel.loadbalancer - obj = mock.sentinel.obj - create = mock.sentinel.create - find = mock.sentinel.find - timer = [mock.sentinel.t0, mock.sentinel.t1] - m_driver._provisioning_timer.return_value = timer - m_driver._ensure.return_value = None - - self.assertRaises(k_exc.ResourceNotReady, cls._ensure_provisioned, - m_driver, - loadbalancer, obj, create, find) - - m_driver._wait_for_provisioning.assert_has_calls( - [mock.call(loadbalancer, t, d_lbaasv2._LB_STS_POLL_FAST_INTERVAL) - for t in timer]) - m_driver._ensure.assert_has_calls( - [mock.call(create, find, obj, loadbalancer) for _ in timer]) - - def test_release(self): - cls = d_lbaasv2.LBaaSv2Driver - m_driver = mock.Mock(spec=d_lbaasv2.LBaaSv2Driver) - loadbalancer = mock.sentinel.loadbalancer - obj = mock.sentinel.obj - m_delete = mock.Mock() - timer = [mock.sentinel.t0, mock.sentinel.t1] - m_driver._provisioning_timer.return_value = timer - - cls._release(m_driver, loadbalancer, obj, m_delete) - - m_driver._wait_for_provisioning.assert_not_called() - m_delete.assert_called_once() - - def test_release_with_wait(self): - cls = d_lbaasv2.LBaaSv2Driver - m_driver = mock.Mock(spec=d_lbaasv2.LBaaSv2Driver) - loadbalancer = mock.sentinel.loadbalancer - obj = mock.sentinel.obj - m_delete = mock.Mock() - timer = [mock.sentinel.t0, mock.sentinel.t1] - m_driver._provisioning_timer.return_value = timer - m_delete.side_effect = [os_exc.BadRequestException, None] - - cls._release(m_driver, loadbalancer, obj, m_delete) - - m_driver._wait_for_provisioning.assert_called_once_with(loadbalancer, - mock.ANY) - self.assertEqual(2, m_delete.call_count) - - def test_release_not_found(self): - cls = d_lbaasv2.LBaaSv2Driver - m_driver = 
mock.Mock(spec=d_lbaasv2.LBaaSv2Driver) - loadbalancer = mock.sentinel.loadbalancer - obj = mock.sentinel.obj - m_delete = mock.Mock() - timer = [mock.sentinel.t0, mock.sentinel.t1] - m_driver._provisioning_timer.return_value = timer - m_delete.side_effect = os_exc.NotFoundException - - cls._release(m_driver, loadbalancer, obj, m_delete) - - m_driver._wait_for_provisioning.assert_not_called() - self.assertEqual(1, m_delete.call_count) - - def test_release_not_ready(self): - cls = d_lbaasv2.LBaaSv2Driver - m_driver = mock.Mock(spec=d_lbaasv2.LBaaSv2Driver) - loadbalancer = mock.sentinel.loadbalancer - obj = mock.sentinel.obj - m_delete = mock.Mock() - timer = [mock.sentinel.t0, mock.sentinel.t1] - m_driver._provisioning_timer.return_value = timer - m_delete.side_effect = os_exc.ConflictException - - self.assertRaises(k_exc.ResourceNotReady, cls._release, m_driver, - loadbalancer, obj, m_delete) - - call_count = len(timer) - self.assertEqual(call_count, - m_driver._wait_for_provisioning.call_count) - self.assertEqual(call_count, m_delete.call_count) - - def test_wait_for_provisioning(self): - lbaas = self.useFixture(k_fix.MockLBaaSClient()).client - cls = d_lbaasv2.LBaaSv2Driver - m_driver = mock.Mock(spec=d_lbaasv2.LBaaSv2Driver) - loadbalancer = { - 'name': 'TEST_NAME', - 'project_id': 'TEST_PROJECT', - 'subnet_id': 'D3FA400A-F543-4B91-9CD3-047AF0CE42D1', - 'ip': '1.2.3.4', - 'provider': None, - 'id': '00EE9E11-91C2-41CF-8FD4-7970579E5C4C' - } - timeout = mock.sentinel.timeout - timer = [mock.sentinel.t0, mock.sentinel.t1] - m_driver._provisioning_timer.return_value = timer - resp = o_lb.LoadBalancer(provisioning_status='ACTIVE') - lbaas.get_load_balancer.return_value = resp - - cls._wait_for_provisioning(m_driver, loadbalancer, timeout) - - lbaas.get_load_balancer.assert_called_once_with(loadbalancer['id']) - - def test_wait_for_provisioning_not_ready(self): - lbaas = self.useFixture(k_fix.MockLBaaSClient()).client - cls = d_lbaasv2.LBaaSv2Driver - m_driver = mock.Mock(spec=d_lbaasv2.LBaaSv2Driver) - loadbalancer = { - 'name': 'TEST_NAME', - 'project_id': 'TEST_PROJECT', - 'subnet_id': 'D3FA400A-F543-4B91-9CD3-047AF0CE42D1', - 'ip': '1.2.3.4', - 'provider': None, - 'id': '00EE9E11-91C2-41CF-8FD4-7970579E5C4C' - } - timeout = mock.sentinel.timeout - timer = [mock.sentinel.t0, mock.sentinel.t1] - m_driver._provisioning_timer.return_value = timer - resp = o_lb.LoadBalancer(provisioning_status='NOT_ACTIVE') - lbaas.get_load_balancer.return_value = resp - - self.assertRaises(k_exc.ResourceNotReady, cls._wait_for_provisioning, - m_driver, loadbalancer, timeout) - - self.assertEqual(len(timer), lbaas.get_load_balancer.call_count) - - def test_provisioning_timer(self): - # REVISIT(ivc): add test if _provisioning_timer is to stay - self.skipTest("not implemented") - - -class TestLBaaSv2AppyMembersSecurityGroup(test_base.TestCase): - - def setUp(self): - super().setUp() - self.lb = {'id': 'a4de5f1a-ac03-45b1-951d-39f108d52e7d', - 'ip': '10.0.0.142', - 'name': 'default/lb', - 'port_id': '5be1b3c4-7d44-4597-9294-cadafdf1ec69', - 'project_id': '7ef23242bb3f4773a58da681421ab26e', - 'provider': 'amphora', - 'security_groups': ['328900a2-c328-41cc-946f-56ae8720ec0d'], - 'subnet_id': 'c85e2e10-1fad-4218-ad10-7de4aa5de7ce'} - self.port = 80 - self.target_port = 8080 - self.protocol = 'TCP' - self.sg_rule_name = 'default/lb:TCP:80' - self.listener_id = '858869ec-e4fa-4715-b22f-bd08889c6235' - self.new_sgs = ['48cfc812-a442-44bf-989f-8dbaf23a7007'] - self.vip = fake.get_port_obj() - - 
@mock.patch('kuryr_kubernetes.clients.get_network_client') - def test__apply_members_security_groups_no_enforce(self, gnc): - CONF.set_override('enforce_sg_rules', False, group='octavia_defaults') - self.addCleanup(CONF.clear_override, 'enforce_sg_rules', - group='octavia_defaults') - cls = d_lbaasv2.LBaaSv2Driver - m_driver = mock.Mock(spec=d_lbaasv2.LBaaSv2Driver) - m_driver._get_vip_port.return_value = None - - cls._apply_members_security_groups(m_driver, self.lb, self.port, - self.target_port, self.protocol, - self.sg_rule_name, - self.listener_id, self.new_sgs) - - m_driver._get_vip_port.assert_not_called() - - @mock.patch('kuryr_kubernetes.clients.get_network_client') - def test__apply_members_security_groups_no_vip(self, gnc): - cls = d_lbaasv2.LBaaSv2Driver - m_driver = mock.Mock(spec=d_lbaasv2.LBaaSv2Driver) - m_driver._get_vip_port.return_value = None - - cls._apply_members_security_groups(m_driver, self.lb, self.port, - self.target_port, self.protocol, - self.sg_rule_name, - self.listener_id, self.new_sgs) - - m_driver._get_vip_port.assert_called_once_with(self.lb) - - @mock.patch('kuryr_kubernetes.clients.get_network_client') - def test__apply_members_security_groups_no_sg(self, gnc): - self.new_sgs = None - self.vip.security_group_ids = [] - cls = d_lbaasv2.LBaaSv2Driver - m_driver = mock.Mock(spec=d_lbaasv2.LBaaSv2Driver) - m_driver._get_vip_port.return_value = self.vip - - self.assertRaises(k_exc.ResourceNotReady, - cls._apply_members_security_groups, m_driver, - self.lb, self.port, self.target_port, self.protocol, - self.sg_rule_name, self.listener_id, self.new_sgs) - - m_driver._get_vip_port.assert_called_once_with(self.lb) - - @mock.patch('kuryr_kubernetes.clients.get_network_client') - def test__apply_members_security_groups_conf_with_octavia_acls(self, gnc): - self.new_sgs = None - cls = d_lbaasv2.LBaaSv2Driver - m_driver = mock.Mock(spec=d_lbaasv2.LBaaSv2Driver) - m_driver._get_vip_port = mock.Mock(return_value=self.vip) - m_driver._octavia_acls = True - m_driver._create_listeners_acls = mock.Mock() - - cls._apply_members_security_groups(m_driver, self.lb, self.port, - self.target_port, self.protocol, - self.sg_rule_name, self.listener_id, - self.new_sgs) - - m_driver._get_vip_port.assert_called_once_with(self.lb) - m_driver._create_listeners_acls.assert_called_once_with( - self.lb, self.port, self.target_port, self.protocol, - self.vip.security_group_ids[0], self.new_sgs, self.listener_id) - - def test__apply_members_security_groups_new_sgs(self): - os_net = self.useFixture(k_fix.MockNetworkClient()).client - cls = d_lbaasv2.LBaaSv2Driver - m_driver = mock.Mock(spec=d_lbaasv2.LBaaSv2Driver) - m_driver._get_vip_port.return_value = self.vip - m_driver._octavia_acls = False - os_net.security_group_rules.return_value = [] - CONF.set_override('pod_security_groups', [], group='neutron_defaults') - self.addCleanup(CONF.clear_override, 'pod_security_groups', - group='neutron_defaults') - - cls._apply_members_security_groups(m_driver, self.lb, self.port, - self.target_port, self.protocol, - self.sg_rule_name, self.listener_id, - self.new_sgs) - - m_driver._get_vip_port.assert_called_once_with(self.lb) - os_net.security_group_rules.assert_has_calls([ - mock.call(security_group_id=self.vip.security_group_ids[0], - project_id=self.lb['project_id']), - mock.call(security_group_id=self.new_sgs[0])]) - - def test__apply_members_security_groups_conf_lb_sgs(self): - os_net = self.useFixture(k_fix.MockNetworkClient()).client - cls = d_lbaasv2.LBaaSv2Driver - m_driver = 
mock.Mock(spec=d_lbaasv2.LBaaSv2Driver) - m_driver._get_vip_port.return_value = self.vip - m_driver._octavia_acls = False - sgr = fake.get_sgr_obj() - os_net.security_group_rules.side_effect = ([], [sgr]) - self.new_sgs = [] - CONF.set_override('pod_security_groups', [], group='neutron_defaults') - self.addCleanup(CONF.clear_override, 'pod_security_groups', - group='neutron_defaults') - - cls._apply_members_security_groups(m_driver, self.lb, self.port, - self.target_port, self.protocol, - self.sg_rule_name, - self.listener_id, self.new_sgs) - - m_driver._get_vip_port.assert_called_once_with(self.lb) - os_net.security_group_rules.assert_has_calls([ - mock.call(security_group_id=self.vip.security_group_ids[0], - project_id=self.lb['project_id']), - mock.call(security_group_id=self.lb['security_groups'][0])]) - os_net.create_security_group_rule.assert_called_once_with( - direction='ingress', - ether_type=k_const.IPv4, - port_range_min=self.port, - port_range_max=self.port, - protocol=self.protocol, - remote_ip_prefix=sgr.remote_ip_prefix, - security_group_id=sgr.security_group_id, - description=self.sg_rule_name) - - def test__apply_members_security_groups_conf_lb_sgs_conflict(self): - os_net = self.useFixture(k_fix.MockNetworkClient()).client - cls = d_lbaasv2.LBaaSv2Driver - m_driver = mock.Mock(spec=d_lbaasv2.LBaaSv2Driver) - m_driver._get_vip_port.return_value = self.vip - m_driver._octavia_acls = False - sgr = fake.get_sgr_obj() - os_net.security_group_rules.side_effect = ([], [sgr]) - os_net.create_security_group_rule.side_effect = (os_exc - .ConflictException) - self.new_sgs = [] - CONF.set_override('pod_security_groups', [], group='neutron_defaults') - self.addCleanup(CONF.clear_override, 'pod_security_groups', - group='neutron_defaults') - - cls._apply_members_security_groups(m_driver, self.lb, self.port, - self.target_port, self.protocol, - self.sg_rule_name, - self.listener_id, self.new_sgs) - - m_driver._get_vip_port.assert_called_once_with(self.lb) - os_net.security_group_rules.assert_has_calls([ - mock.call(security_group_id=self.vip.security_group_ids[0], - project_id=self.lb['project_id']), - mock.call(security_group_id=self.lb['security_groups'][0])]) - os_net.create_security_group_rule.assert_called_once_with( - direction='ingress', - ether_type=k_const.IPv4, - port_range_min=self.port, - port_range_max=self.port, - protocol=self.protocol, - remote_ip_prefix=None, - security_group_id=self.vip.security_group_ids[0], - description=self.sg_rule_name) - - def test__apply_members_security_groups_conf_lb_sgs_sdkexception(self): - os_net = self.useFixture(k_fix.MockNetworkClient()).client - cls = d_lbaasv2.LBaaSv2Driver - m_driver = mock.Mock(spec=d_lbaasv2.LBaaSv2Driver) - m_driver._get_vip_port.return_value = self.vip - m_driver._octavia_acls = False - sgr = fake.get_sgr_obj() - os_net.security_group_rules.side_effect = ([], [sgr]) - os_net.create_security_group_rule.side_effect = os_exc.SDKException - self.new_sgs = [] - CONF.set_override('pod_security_groups', [], group='neutron_defaults') - self.addCleanup(CONF.clear_override, 'pod_security_groups', - group='neutron_defaults') - - cls._apply_members_security_groups(m_driver, self.lb, self.port, - self.target_port, self.protocol, - self.sg_rule_name, - self.listener_id, self.new_sgs) - - m_driver._get_vip_port.assert_called_once_with(self.lb) - os_net.security_group_rules.assert_has_calls([ - mock.call(security_group_id=self.vip.security_group_ids[0], - project_id=self.lb['project_id']), - 
mock.call(security_group_id=self.lb['security_groups'][0])]) - os_net.create_security_group_rule.assert_called_once_with( - direction='ingress', - ether_type=k_const.IPv4, - port_range_min=self.port, - port_range_max=self.port, - protocol=self.protocol, - remote_ip_prefix=None, - security_group_id=self.vip.security_group_ids[0], - description=self.sg_rule_name) - - @mock.patch("kuryr_kubernetes.utils.get_service_subnet_version", - return_value=k_const.IP_VERSION_6) - def test__apply_members_security_groups_ipv6_add_default(self, gssv): - os_net = self.useFixture(k_fix.MockNetworkClient()).client - cls = d_lbaasv2.LBaaSv2Driver - m_driver = mock.Mock(spec=d_lbaasv2.LBaaSv2Driver) - m_driver._get_vip_port.return_value = self.vip - m_driver._octavia_acls = False - os_net.security_group_rules.return_value = [] - CONF.set_override('pod_security_groups', self.new_sgs, - group='neutron_defaults') - self.addCleanup(CONF.clear_override, 'pod_security_groups', - group='neutron_defaults') - - cls._apply_members_security_groups(m_driver, self.lb, self.port, - self.target_port, self.protocol, - self.sg_rule_name, self.listener_id, - self.new_sgs) - - m_driver._get_vip_port.assert_called_once_with(self.lb) - os_net.security_group_rules.assert_called_once_with( - security_group_id=self.vip.security_group_ids[0], - project_id=self.lb['project_id']) - os_net.create_security_group_rule.assert_called_once_with( - direction='ingress', - ether_type=k_const.IPv6, - port_range_min=self.port, - port_range_max=self.port, - protocol=self.protocol, - security_group_id=self.vip.security_group_ids[0], - description=self.sg_rule_name) - - def test__apply_members_security_groups_add_default_conflict(self): - os_net = self.useFixture(k_fix.MockNetworkClient()).client - cls = d_lbaasv2.LBaaSv2Driver - m_driver = mock.Mock(spec=d_lbaasv2.LBaaSv2Driver) - m_driver._get_vip_port.return_value = self.vip - m_driver._octavia_acls = False - os_net.security_group_rules.return_value = [] - CONF.set_override('pod_security_groups', self.new_sgs, - group='neutron_defaults') - self.addCleanup(CONF.clear_override, 'pod_security_groups', - group='neutron_defaults') - os_net.create_security_group_rule.side_effect = (os_exc - .ConflictException) - - cls._apply_members_security_groups(m_driver, self.lb, self.port, - self.target_port, self.protocol, - self.sg_rule_name, self.listener_id, - self.new_sgs) - - m_driver._get_vip_port.assert_called_once_with(self.lb) - os_net.security_group_rules.assert_called_once_with( - security_group_id=self.vip.security_group_ids[0], - project_id=self.lb['project_id']) - os_net.create_security_group_rule.assert_called_once_with( - direction='ingress', - ether_type=k_const.IPv4, - port_range_min=self.port, - port_range_max=self.port, - protocol=self.protocol, - security_group_id=self.vip.security_group_ids[0], - description=self.sg_rule_name) - - def test__apply_members_security_groups_add_default_sdk_exception(self): - os_net = self.useFixture(k_fix.MockNetworkClient()).client - cls = d_lbaasv2.LBaaSv2Driver - m_driver = mock.Mock(spec=d_lbaasv2.LBaaSv2Driver) - m_driver._get_vip_port.return_value = self.vip - m_driver._octavia_acls = False - os_net.security_group_rules.return_value = [] - CONF.set_override('pod_security_groups', self.new_sgs, - group='neutron_defaults') - self.addCleanup(CONF.clear_override, 'pod_security_groups', - group='neutron_defaults') - os_net.create_security_group_rule.side_effect = os_exc.SDKException - - cls._apply_members_security_groups(m_driver, self.lb, self.port, - 
self.target_port, self.protocol, - self.sg_rule_name, self.listener_id, - self.new_sgs) - - m_driver._get_vip_port.assert_called_once_with(self.lb) - os_net.security_group_rules.assert_called_once_with( - security_group_id=self.vip.security_group_ids[0], - project_id=self.lb['project_id']) - os_net.create_security_group_rule.assert_called_once_with( - direction='ingress', - ether_type=k_const.IPv4, - port_range_min=self.port, - port_range_max=self.port, - protocol=self.protocol, - security_group_id=self.vip.security_group_ids[0], - description=self.sg_rule_name) - - def test__apply_members_security_groups_same_sg(self): - os_net = self.useFixture(k_fix.MockNetworkClient()).client - cls = d_lbaasv2.LBaaSv2Driver - m_driver = mock.Mock(spec=d_lbaasv2.LBaaSv2Driver) - self.vip.security_group_ids = self.new_sgs - m_driver._get_vip_port.return_value = self.vip - m_driver._octavia_acls = False - os_net.security_group_rules.return_value = [] - CONF.set_override('pod_security_groups', [], group='neutron_defaults') - self.addCleanup(CONF.clear_override, 'pod_security_groups', - group='neutron_defaults') - - cls._apply_members_security_groups(m_driver, self.lb, self.port, - self.target_port, self.protocol, - self.sg_rule_name, self.listener_id, - self.new_sgs) - - m_driver._get_vip_port.assert_called_once_with(self.lb) - os_net.security_group_rules.assert_called_once_with( - security_group_id=self.vip.security_group_ids[0], - project_id=self.lb['project_id']) - - def test__apply_members_security_groups_unmatched_target_port(self): - os_net = self.useFixture(k_fix.MockNetworkClient()).client - cls = d_lbaasv2.LBaaSv2Driver - m_driver = mock.Mock(spec=d_lbaasv2.LBaaSv2Driver) - m_driver._get_vip_port.return_value = self.vip - m_driver._octavia_acls = False - sgr = fake.get_sgr_obj() - self.target_port = 9090 - os_net.security_group_rules.side_effect = ([], [sgr]) - self.new_sgs = [] - CONF.set_override('pod_security_groups', [], group='neutron_defaults') - self.addCleanup(CONF.clear_override, 'pod_security_groups', - group='neutron_defaults') - - cls._apply_members_security_groups(m_driver, self.lb, self.port, - self.target_port, self.protocol, - self.sg_rule_name, - self.listener_id, self.new_sgs) - - m_driver._get_vip_port.assert_called_once_with(self.lb) - os_net.security_group_rules.assert_has_calls([ - mock.call(security_group_id=self.vip.security_group_ids[0], - project_id=self.lb['project_id']), - mock.call(security_group_id=self.lb['security_groups'][0])]) - os_net.create_security_group_rule.assert_not_called() - - def test__apply_members_security_groups_egress(self): - os_net = self.useFixture(k_fix.MockNetworkClient()).client - cls = d_lbaasv2.LBaaSv2Driver - m_driver = mock.Mock(spec=d_lbaasv2.LBaaSv2Driver) - m_driver._get_vip_port.return_value = self.vip - m_driver._octavia_acls = False - sgr = fake.get_sgr_obj(direction='egress') - os_net.security_group_rules.side_effect = ([], [sgr]) - self.new_sgs = [] - CONF.set_override('pod_security_groups', [], group='neutron_defaults') - self.addCleanup(CONF.clear_override, 'pod_security_groups', - group='neutron_defaults') - - cls._apply_members_security_groups(m_driver, self.lb, self.port, - self.target_port, self.protocol, - self.sg_rule_name, - self.listener_id, self.new_sgs) - - m_driver._get_vip_port.assert_called_once_with(self.lb) - os_net.security_group_rules.assert_has_calls([ - mock.call(security_group_id=self.vip.security_group_ids[0], - project_id=self.lb['project_id']), - mock.call(security_group_id=self.lb['security_groups'][0])]) - 
os_net.create_security_group_rule.assert_not_called() - - def test__apply_members_security_groups_no_delete_lbaas_rules(self): - os_net = self.useFixture(k_fix.MockNetworkClient()).client - cls = d_lbaasv2.LBaaSv2Driver - m_driver = mock.Mock(spec=d_lbaasv2.LBaaSv2Driver) - m_driver._get_vip_port.return_value = self.vip - m_driver._octavia_acls = False - self.lb['security_groups'] = [] - self.new_sgs = [] - sgr = fake.get_sgr_obj() - os_net.security_group_rules.return_value = [sgr] - - cls._apply_members_security_groups(m_driver, self.lb, self.port, - self.target_port, self.protocol, - self.sg_rule_name, - self.listener_id, self.new_sgs) - - m_driver._get_vip_port.assert_called_once_with(self.lb) - os_net.security_group_rules.assert_called_once_with( - security_group_id=self.vip.security_group_ids[0], - project_id=self.lb['project_id']) - os_net.create_security_group_rule.assert_not_called() - - def test__apply_members_security_groups_delete_matched_lbaas_rules(self): - os_net = self.useFixture(k_fix.MockNetworkClient()).client - cls = d_lbaasv2.LBaaSv2Driver - m_driver = mock.Mock(spec=d_lbaasv2.LBaaSv2Driver) - m_driver._get_vip_port.return_value = self.vip - m_driver._octavia_acls = False - sgr = fake.get_sgr_obj() - os_net.security_group_rules.side_effect = ([sgr], [sgr]) - self.new_sgs = [] - CONF.set_override('pod_security_groups', [], group='neutron_defaults') - self.addCleanup(CONF.clear_override, 'pod_security_groups', - group='neutron_defaults') - - cls._apply_members_security_groups(m_driver, self.lb, self.port, - self.target_port, self.protocol, - self.sg_rule_name, - self.listener_id, self.new_sgs) - - m_driver._get_vip_port.assert_called_once_with(self.lb) - os_net.security_group_rules.assert_has_calls([ - mock.call(security_group_id=self.vip.security_group_ids[0], - project_id=self.lb['project_id']), - mock.call(security_group_id=self.lb['security_groups'][0])]) - os_net.create_security_group_rule.assert_called_once_with( - direction='ingress', - ether_type=k_const.IPv4, - port_range_min=self.port, - port_range_max=self.port, - protocol=self.protocol, - remote_ip_prefix=sgr.remote_ip_prefix, - security_group_id=sgr.security_group_id, - description=self.sg_rule_name) - os_net.delete_security_group_rule.assert_called_once_with(sgr.id) - - def test__apply_members_security_groups_delete_unmatched_lbaas_rules(self): - os_net = self.useFixture(k_fix.MockNetworkClient()).client - cls = d_lbaasv2.LBaaSv2Driver - m_driver = mock.Mock(spec=d_lbaasv2.LBaaSv2Driver) - m_driver._get_vip_port.return_value = self.vip - m_driver._octavia_acls = False - sgr = fake.get_sgr_obj() - os_net.security_group_rules.side_effect = ([sgr], [sgr]) - self.new_sgs = [] - CONF.set_override('pod_security_groups', [], group='neutron_defaults') - self.addCleanup(CONF.clear_override, 'pod_security_groups', - group='neutron_defaults') - self.port = 8080 - - cls._apply_members_security_groups(m_driver, self.lb, self.port, - self.target_port, self.protocol, - self.sg_rule_name, - self.listener_id, self.new_sgs) - - m_driver._get_vip_port.assert_called_once_with(self.lb) - os_net.security_group_rules.assert_has_calls([ - mock.call(security_group_id=self.vip.security_group_ids[0], - project_id=self.lb['project_id']), - mock.call(security_group_id=self.lb['security_groups'][0])]) - os_net.create_security_group_rule.assert_called_once_with( - direction='ingress', - ether_type=k_const.IPv4, - port_range_min=self.port, - port_range_max=self.port, - protocol=self.protocol, - remote_ip_prefix=sgr.remote_ip_prefix, - 
security_group_id=sgr.security_group_id, - description=self.sg_rule_name) - m_driver._delete_rule_if_no_match.assert_called_once_with(sgr, [sgr]) - - def test__apply_members_security_groups_delete_no_default_lbaas_rules( - self): - os_net = self.useFixture(k_fix.MockNetworkClient()).client - cls = d_lbaasv2.LBaaSv2Driver - m_driver = mock.Mock(spec=d_lbaasv2.LBaaSv2Driver) - m_driver._get_vip_port.return_value = self.vip - m_driver._octavia_acls = False - sgr = fake.get_sgr_obj() - os_net.security_group_rules.side_effect = ([sgr], [sgr]) - self.new_sgs = [] - CONF.set_override('pod_security_groups', [], group='neutron_defaults') - self.addCleanup(CONF.clear_override, 'pod_security_groups', - group='neutron_defaults') - m_driver._is_default_rule.return_value = False - - cls._apply_members_security_groups(m_driver, self.lb, self.port, - self.target_port, self.protocol, - self.sg_rule_name, - self.listener_id, self.new_sgs) - - m_driver._get_vip_port.assert_called_once_with(self.lb) - os_net.security_group_rules.assert_has_calls([ - mock.call(security_group_id=self.vip.security_group_ids[0], - project_id=self.lb['project_id']), - mock.call(security_group_id=self.lb['security_groups'][0])]) - os_net.create_security_group_rule.assert_called_once_with( - direction='ingress', - ether_type=k_const.IPv4, - port_range_min=self.port, - port_range_max=self.port, - protocol=self.protocol, - remote_ip_prefix=sgr.remote_ip_prefix, - security_group_id=sgr.security_group_id, - description=self.sg_rule_name) diff --git a/kuryr_kubernetes/tests/unit/controller/drivers/test_multi_vif.py b/kuryr_kubernetes/tests/unit/controller/drivers/test_multi_vif.py deleted file mode 100644 index cde0e1235..000000000 --- a/kuryr_kubernetes/tests/unit/controller/drivers/test_multi_vif.py +++ /dev/null @@ -1,206 +0,0 @@ -# Copyright 2018 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
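Before the multi-VIF tests, a remark on the security-group cases that ended just above: they pin down both the exact openstacksdk call shape and the error policy, where a 409 conflict (an identical rule already exists) counts as success and other SDK failures are merely logged. A hedged sketch of that pattern outside Kuryr; the connection setup and the function name are assumptions:

import openstack
from openstack import exceptions as os_exc

conn = openstack.connect(cloud='devstack')  # assumed clouds.yaml entry


def ensure_ingress_rule(sg_id, port, protocol, description):
    try:
        conn.network.create_security_group_rule(
            security_group_id=sg_id,
            direction='ingress',
            ether_type='IPv4',
            protocol=protocol,
            port_range_min=port,
            port_range_max=port,
            description=description)
    except os_exc.ConflictException:
        pass  # rule already present: idempotent success
    except os_exc.SDKException as exc:
        print('could not create rule %s: %s' % (description, exc))


ensure_ingress_rule('328900a2-c328-41cc-946f-56ae8720ec0d',
                    80, 'tcp', 'default/lb:TCP:80')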
- -from unittest import mock - -from kuryr_kubernetes import constants -from kuryr_kubernetes.controller.drivers import base as drivers -from kuryr_kubernetes.controller.drivers import multi_vif -from kuryr_kubernetes import exceptions -from kuryr_kubernetes.tests import base as test_base -from oslo_serialization import jsonutils - - -def get_pod_obj(): - return { - 'status': { - 'qosClass': 'BestEffort', - 'hostIP': '192.168.1.2', - }, - 'kind': 'Pod', - 'spec': { - 'schedulerName': 'default-scheduler', - 'containers': [{ - 'name': 'busybox', - 'image': 'busybox', - 'resources': {} - }], - 'nodeName': 'kuryr-devstack' - }, - 'metadata': { - 'name': 'busybox-sleep1', - 'namespace': 'default', - 'resourceVersion': '53808', - 'uid': '452176db-4a85-11e7-80bd-fa163e29dbbb', - 'annotations': { - 'openstack.org/kuryr-vif': {}, - 'k8s.v1.cni.cncf.io/networks': - "net-a,net-b,other-ns/net-c" - } - } - } - - -def get_nets(): - return [ - {"name": "net-a"}, - {"name": "net-b"}, - { - "name": "net-c", - "namespace": "other-ns" - } - ] - - -def get_crd_objs(): - return [ - { - 'name': 'net-a', - 'metadata': { - 'annotations': { - 'openstack.org/kuryr-config': - '''{"subnetId": "subnet-a"}''' - } - } - }, - { - 'name': 'net-b', - 'metadata': { - 'annotations': { - 'openstack.org/kuryr-config': - '''{"subnetId": "subnet-b"}''' - } - } - }, - { - 'name': 'net-c', - 'metadata': { - 'annotations': { - 'openstack.org/kuryr-config': - '''{"subnetId": "subnet-c"}''' - } - } - } - ] - - -def get_subnet_objs(): - return [ - {'subnet-a': mock.sentinel.subneta}, - {'subnet-b': mock.sentinel.subnetb}, - {'subnet-c': mock.sentinel.subnetc}, - ] - - -class TestNPWGMultiVIFDriver(test_base.TestCase): - - def setUp(self): - super(TestNPWGMultiVIFDriver, self).setUp() - self._project_id = mock.sentinel.project_id - self._subnet = mock.sentinel.subnet - self._vif = mock.sentinel.vif - self._subnets = [self._subnet] - self._security_groups = mock.sentinel.security_groups - self._pod = get_pod_obj() - self._vif_pool_drv = mock.Mock(spec=drivers.VIFPoolDriver) - self._request_vif = self._vif_pool_drv.request_vif - self._request_vif.return_value = self._vif - - self._cls = multi_vif.NPWGMultiVIFDriver - self._drv = mock.Mock(spec=self._cls) - self._drv._get_networks = mock.Mock() - self._drv._drv_vif_pool = self._vif_pool_drv - - @mock.patch.object(drivers.VIFPoolDriver, 'set_vif_driver') - @mock.patch.object(drivers.VIFPoolDriver, 'get_instance') - def test_init(self, m_get_vif_pool_driver, m_set_vifs_driver): - m_get_vif_pool_driver.return_value = self._vif_pool_drv - self._vif_pool_drv.set_vif_driver = m_set_vifs_driver - - m_drv = multi_vif.NPWGMultiVIFDriver() - self.assertEqual(self._vif_pool_drv, m_drv._drv_vif_pool) - m_get_vif_pool_driver.assert_called_once_with( - specific_driver='multi_pool') - m_set_vifs_driver.assert_called_once() - - @mock.patch('kuryr_kubernetes.utils.get_subnet') - @mock.patch('kuryr_kubernetes.clients.get_kubernetes_client') - def test_request_additional_vifs(self, m_get_client, m_get_subnet): - vifs = [mock.sentinel.vif_a, mock.sentinel.vif_b, mock.sentinel.vif_c] - self._request_vif.side_effect = vifs - net_crds = get_crd_objs() - client = mock.Mock() - m_get_client.return_value = client - m_get_subnet.side_effect = [mock.sentinel.subneta, - mock.sentinel.subnetb, - mock.sentinel.subnetc] - client.get = mock.Mock() - client.get.side_effect = net_crds - self._drv._get_networks.return_value = get_nets() - - self.assertEqual(vifs, self._cls.request_additional_vifs( - self._drv, self._pod, 
self._project_id, self._security_groups)) - - def test_get_networks_str(self): - networks = get_nets() - self.assertEqual(networks, - self._cls._get_networks(self._drv, self._pod)) - - def test_get_networks_json(self): - networks = get_nets() - self._pod['metadata']['annotations'][ - 'k8s.v1.cni.cncf.io/networks'] = jsonutils.dumps(networks) - self.assertEqual(networks, - self._cls._get_networks(self._drv, self._pod)) - - def test_get_networks_with_invalid_annotation(self): - self._pod['metadata']['annotations'][ - constants.K8S_ANNOTATION_NPWG_NETWORK] = 'ns/net-a/invalid' - self.assertRaises(exceptions.InvalidKuryrNetworkAnnotation, - self._cls._get_networks, self._drv, self._pod) - - def test_get_networks_without_annotation(self): - pod = { - 'metadata': { - 'annotations': { - } - } - } - - self.assertEqual([], self._cls._get_networks(self._drv, pod)) - - @mock.patch('kuryr_kubernetes.clients.get_kubernetes_client') - def test_request_additional_vifs_without_networks(self, m_get_client): - self._drv._get_networks.return_value = [] - - self.assertEqual([], - self._cls.request_additional_vifs( - self._drv, self._pod, self._project_id, - self._security_groups)) - m_get_client.assert_not_called() - - @mock.patch('kuryr_kubernetes.clients.get_kubernetes_client') - def test_request_additional_vifs_with_invalid_network(self, m_get_client): - net_crds = get_crd_objs() - client = mock.Mock() - m_get_client.return_value = client - client.get = mock.Mock() - client.get.side_effect = net_crds - networks = [{'invalid_key': 'net-x'}] - self._drv._get_networks.return_value = networks - - self.assertRaises(exceptions.InvalidKuryrNetworkAnnotation, - self._cls.request_additional_vifs, - self._drv, self._pod, self._project_id, - self._security_groups) diff --git a/kuryr_kubernetes/tests/unit/controller/drivers/test_namespace_subnet.py b/kuryr_kubernetes/tests/unit/controller/drivers/test_namespace_subnet.py deleted file mode 100644 index dbc596fdd..000000000 --- a/kuryr_kubernetes/tests/unit/controller/drivers/test_namespace_subnet.py +++ /dev/null @@ -1,296 +0,0 @@ -# Copyright (c) 2018 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License.
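The three test_get_networks_* variants that ended just above fix both accepted encodings of the k8s.v1.cni.cncf.io/networks annotation: a JSON list of objects, or a comma-separated string where ns/name selects another namespace and anything with more slashes is rejected. A simplified parser reconstructed purely from those expectations (a sketch, not the driver's actual _get_networks):

from oslo_serialization import jsonutils


def parse_networks(annotation):
    if not annotation:
        return []
    try:
        return jsonutils.loads(annotation)  # JSON list form
    except ValueError:
        nets = []
        for item in annotation.split(','):  # "name" or "ns/name" form
            parts = item.split('/')
            if len(parts) == 1:
                nets.append({'name': parts[0]})
            elif len(parts) == 2:
                nets.append({'name': parts[1], 'namespace': parts[0]})
            else:
                raise ValueError('invalid networks annotation: %s' % item)
        return nets


print(parse_networks('net-a,net-b,other-ns/net-c'))
# -> [{'name': 'net-a'}, {'name': 'net-b'},
#     {'name': 'net-c', 'namespace': 'other-ns'}]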
- -from unittest import mock - -from openstack import exceptions as os_exc -from openstack.network.v2 import network as os_network -from openstack.network.v2 import subnet as os_subnet -from oslo_config import cfg as oslo_cfg - -from kuryr_kubernetes.controller.drivers import namespace_subnet as subnet_drv -from kuryr_kubernetes import exceptions as k_exc -from kuryr_kubernetes.tests import base as test_base -from kuryr_kubernetes.tests.unit import kuryr_fixtures as k_fix - - -def get_pod_obj(): - return { - 'status': { - 'qosClass': 'BestEffort', - 'hostIP': '192.168.1.2', - }, - 'kind': 'Pod', - 'spec': { - 'schedulerName': 'default-scheduler', - 'containers': [{ - 'name': 'busybox', - 'image': 'busybox', - 'resources': {} - }], - 'nodeName': 'kuryr-devstack' - }, - 'metadata': { - 'name': 'busybox-sleep1', - 'namespace': 'default', - 'resourceVersion': '53808', - 'uid': '452176db-4a85-11e7-80bd-fa163e29dbbb', - 'annotations': { - 'openstack.org/kuryr-vif': {} - } - }} - - -class TestNamespacePodSubnetDriver(test_base.TestCase): - - @mock.patch('kuryr_kubernetes.utils.get_subnet') - def test_get_subnets(self, m_get_subnet): - pod = get_pod_obj() - pod_namespace = pod['metadata']['namespace'] - subnet_id = mock.sentinel.subnet_id - subnet = mock.sentinel.subnet - - cls = subnet_drv.NamespacePodSubnetDriver - m_driver = mock.MagicMock(spec=cls) - - m_driver._get_namespace_subnet_id.return_value = subnet_id - m_get_subnet.return_value = subnet - - subnets = cls.get_namespace_subnet(m_driver, pod_namespace) - - self.assertEqual({subnet_id: subnet}, subnets) - m_driver._get_namespace_subnet_id.assert_called_once_with( - pod_namespace) - m_get_subnet.assert_called_once_with(subnet_id) - - @mock.patch('kuryr_kubernetes.utils.get_subnet') - def test_get_subnets_namespace_not_ready(self, m_get_subnet): - pod = get_pod_obj() - pod_namespace = pod['metadata']['namespace'] - - cls = subnet_drv.NamespacePodSubnetDriver - m_driver = mock.MagicMock(spec=cls) - - m_driver._get_namespace_subnet_id.side_effect = ( - k_exc.ResourceNotReady(pod_namespace)) - - self.assertRaises(k_exc.ResourceNotReady, cls.get_namespace_subnet, - m_driver, pod_namespace) - - m_driver._get_namespace_subnet_id.assert_called_once_with( - pod_namespace) - m_get_subnet.assert_not_called() - - def test__get_namespace_subnet_id(self): - cls = subnet_drv.NamespacePodSubnetDriver - m_driver = mock.MagicMock(spec=cls) - - namespace = mock.sentinel.namespace - subnet_id = mock.sentinel.subnet_id - crd = { - 'status': { - 'subnetId': subnet_id - } - } - - kubernetes = self.useFixture(k_fix.MockK8sClient()).client - kubernetes.get.return_value = crd - - subnet_id_resp = cls._get_namespace_subnet_id(m_driver, namespace) - kubernetes.get.assert_called() - self.assertEqual(subnet_id, subnet_id_resp) - - def test__get_namespace_subnet_id_get_crd_exception(self): - cls = subnet_drv.NamespacePodSubnetDriver - m_driver = mock.MagicMock(spec=cls) - - namespace = mock.sentinel.namespace - - kubernetes = self.useFixture(k_fix.MockK8sClient()).client - kubernetes.get.side_effect = k_exc.K8sClientException - - self.assertRaises(k_exc.K8sClientException, - cls._get_namespace_subnet_id, m_driver, namespace) - kubernetes.get.assert_called() - - def test_delete_namespace_subnet(self): - cls = subnet_drv.NamespacePodSubnetDriver - m_driver = mock.MagicMock(spec=cls) - - net_id = mock.sentinel.net_id - subnet_id = mock.sentinel.subnet_id - os_net = self.useFixture(k_fix.MockNetworkClient()).client - os_net.ports.return_value = [] - 
os_net.remove_interface_from_router.return_value = {} - - cls._delete_namespace_network_resources(m_driver, subnet_id, net_id) - - os_net.remove_interface_from_router.assert_called_once() - os_net.delete_network.assert_called_once_with(net_id) - - def test_delete_namespace_subnet_openstacksdk_error(self): - cls = subnet_drv.NamespacePodSubnetDriver - m_driver = mock.MagicMock(spec=cls) - - net_id = mock.sentinel.net_id - subnet_id = mock.sentinel.subnet_id - os_net = self.useFixture(k_fix.MockNetworkClient()).client - os_net.delete_network.side_effect = os_exc.ConflictException - os_net.ports.return_value = [] - os_net.remove_interface_from_router.return_value = {} - - self.assertRaises(k_exc.ResourceNotReady, - cls._delete_namespace_network_resources, m_driver, - subnet_id, net_id) - - os_net.remove_interface_from_router.assert_called_once() - os_net.delete_network.assert_called_once_with(net_id) - os_net.ports.assert_called_with(network_id=net_id) - - def test_create_network(self): - cls = subnet_drv.NamespacePodSubnetDriver - m_driver = mock.MagicMock(spec=cls) - - ns_uid = 'e65542a5-7e82-4b59-b3c5-c04b485d19eb' - namespace = {'metadata': {'name': 'test', 'uid': ns_uid}} - project_id = mock.sentinel.project_id - os_net = self.useFixture(k_fix.MockNetworkClient()).client - os_net.networks.return_value = iter([]) - net = os_network.Network(id=mock.sentinel.net) - os_net.create_network.return_value = net - - net_id_resp = cls.create_network(m_driver, namespace, project_id) - - self.assertEqual(net_id_resp, net['id']) - os_net.create_network.assert_called_once() - os_net.networks.assert_called_once() - - def test_create_network_existing(self): - cls = subnet_drv.NamespacePodSubnetDriver - m_driver = mock.MagicMock(spec=cls) - - ns_uid = '4f7ea026-3ae4-4baa-84df-1942977fe1be' - namespace = {'metadata': {'name': 'test', 'uid': ns_uid}} - project_id = mock.sentinel.project_id - os_net = self.useFixture(k_fix.MockNetworkClient()).client - net = os_network.Network( - id=mock.sentinel.net, - description=ns_uid, - name='test', - ) - os_net.networks.return_value = iter([net]) - - net_id_resp = cls.create_network(m_driver, namespace, project_id) - - self.assertEqual(net_id_resp, net['id']) - os_net.create_network.assert_not_called() - os_net.networks.assert_called_once() - - def test_create_subnet(self): - cls = subnet_drv.NamespacePodSubnetDriver - m_driver = mock.MagicMock(spec=cls) - - ns_uid = '95e2a3c5-f723-4936-b598-cf3a59861bcf' - namespace = {'metadata': {'name': 'test', 'uid': ns_uid}} - project_id = mock.sentinel.project_id - net_id = mock.sentinel.net_id - subnet = os_subnet.Subnet( - id=mock.sentinel.subnet, - cidr=mock.sentinel.cidr, - ) - os_net = self.useFixture(k_fix.MockNetworkClient()).client - os_net.subnets.return_value = iter([]) - os_net.create_subnet.return_value = subnet - - subnet_id, subnet_cidr = cls.create_subnet(m_driver, namespace, - project_id, net_id) - - self.assertEqual(subnet_id, subnet['id']) - self.assertEqual(subnet_cidr, subnet['cidr']) - os_net.create_subnet.assert_called_once() - os_net.subnets.assert_called_once() - - def test_create_subnet_existing(self): - cls = subnet_drv.NamespacePodSubnetDriver - m_driver = mock.MagicMock(spec=cls) - - ns_uid = '7f3a59b4-dd81-490d-9904-8294a6c93326' - namespace = {'metadata': {'name': 'test', 'uid': ns_uid}} - project_id = mock.sentinel.project_id - net_id = mock.sentinel.net_id - subnet = os_subnet.Subnet( - id=mock.sentinel.subnet, - cidr=mock.sentinel.cidr, - ) - os_net = 
self.useFixture(k_fix.MockNetworkClient()).client - os_net.subnets.return_value = iter([subnet]) - - subnet_id, subnet_cidr = cls.create_subnet(m_driver, namespace, - project_id, net_id) - - self.assertEqual(subnet_id, subnet['id']) - self.assertEqual(subnet_cidr, subnet['cidr']) - os_net.create_subnet.assert_not_called() - os_net.subnets.assert_called_once() - - def test_add_subnet_to_router(self): - cls = subnet_drv.NamespacePodSubnetDriver - m_driver = mock.MagicMock(spec=cls) - - subnet_id = mock.sentinel.subnet_id - os_net = self.useFixture(k_fix.MockNetworkClient()).client - os_net.add_interface_to_router.return_value = {} - router_id = 'router1' - oslo_cfg.CONF.set_override('pod_router', - router_id, - group='namespace_subnet') - - router_id_resp = cls.add_subnet_to_router(m_driver, subnet_id) - self.assertEqual(router_id_resp, router_id) - os_net.add_interface_to_router.assert_called_once() - - def test_add_subnet_to_router_already_connected(self): - cls = subnet_drv.NamespacePodSubnetDriver - m_driver = mock.MagicMock(spec=cls) - - subnet_id = mock.sentinel.subnet_id - os_net = self.useFixture(k_fix.MockNetworkClient()).client - os_net.add_interface_to_router.side_effect = ( - os_exc.BadRequestException) - router_id = 'router1' - oslo_cfg.CONF.set_override('pod_router', - router_id, - group='namespace_subnet') - - router_id_resp = cls.add_subnet_to_router(m_driver, subnet_id) - self.assertEqual(router_id_resp, router_id) - os_net.add_interface_to_router.assert_called_once() - - def test_add_subnet_to_router_exception(self): - cls = subnet_drv.NamespacePodSubnetDriver - m_driver = mock.MagicMock(spec=cls) - - subnet_id = mock.sentinel.subnet_id - os_net = self.useFixture(k_fix.MockNetworkClient()).client - os_net.add_interface_to_router.side_effect = ( - os_exc.SDKException) - router_id = 'router1' - oslo_cfg.CONF.set_override('pod_router', - router_id, - group='namespace_subnet') - - self.assertRaises(os_exc.SDKException, - cls.add_subnet_to_router, m_driver, subnet_id) - os_net.add_interface_to_router.assert_called_once() diff --git a/kuryr_kubernetes/tests/unit/controller/drivers/test_nested_dpdk.py b/kuryr_kubernetes/tests/unit/controller/drivers/test_nested_dpdk.py deleted file mode 100644 index 8f0fd4eee..000000000 --- a/kuryr_kubernetes/tests/unit/controller/drivers/test_nested_dpdk.py +++ /dev/null @@ -1,227 +0,0 @@ -# Copyright (C) 2020 Intel Corporation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
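The add_subnet_to_router cases that closed the previous file lean on a standard oslo.config test idiom: override an option for the duration of one test and register a cleanup so the override cannot leak into other tests. A generic, self-contained version; the option is registered explicitly here, whereas in Kuryr that happens at import time:

import testtools
from oslo_config import cfg

CONF = cfg.CONF
CONF.register_opts([cfg.StrOpt('pod_router')], group='namespace_subnet')


class RouterConfigTest(testtools.TestCase):
    def test_configured_router_wins(self):
        CONF.set_override('pod_router', 'router1',
                          group='namespace_subnet')
        # undo the override even if the assertion below fails
        self.addCleanup(CONF.clear_override, 'pod_router',
                        group='namespace_subnet')
        self.assertEqual('router1', CONF.namespace_subnet.pod_router)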
- -from unittest import mock - -import ddt - -from kuryr_kubernetes.controller.drivers import nested_dpdk_vif -from kuryr_kubernetes.tests import base as test_base -from kuryr_kubernetes.tests.unit import kuryr_fixtures as k_fix - -from openstack import exceptions as o_exc - - -@ddt.ddt -class TestNestedDpdkVIFDriver(test_base.TestCase): - - @mock.patch( - 'kuryr_kubernetes.os_vif_util.neutron_to_osvif_vif_dpdk') - @mock.patch('kuryr_kubernetes.controller.drivers.utils.get_network_id') - def test_request_vif(self, m_get_network_id, m_to_vif): - cls = nested_dpdk_vif.NestedDpdkPodVIFDriver - m_driver = mock.Mock(spec=cls) - os_net = self.useFixture(k_fix.MockNetworkClient()).client - compute = self.useFixture(k_fix.MockComputeClient()).client - - pod = mock.sentinel.pod - project_id = mock.sentinel.project_id - subnets = mock.sentinel.subnets - security_groups = mock.sentinel.security_groups - vm_id = mock.sentinel.parent_port_id - net_id = mock.sentinel.net_id - port_id = mock.sentinel.port_id - port = mock.sentinel.port - - parent_port = mock.MagicMock() - vif = mock.Mock() - result = mock.Mock() - - parent_port.device_id = vm_id - result.port_id = port_id - compute.create_server_interface.return_value = result - m_to_vif.return_value = vif - m_driver._get_parent_port.return_value = parent_port - m_get_network_id.return_value = net_id - os_net.get_port.return_value = port - - self.assertEqual(vif, cls.request_vif(m_driver, pod, project_id, - subnets, security_groups)) - - m_driver._get_parent_port.assert_called_once_with(pod) - m_get_network_id.assert_called_once_with(subnets) - compute.create_server_interface.assert_called_once_with( - vm_id, net_id=net_id) - os_net.get_port.assert_called_once_with(result.port_id) - m_to_vif.assert_called_once_with(port, subnets, pod) - - @mock.patch( - 'kuryr_kubernetes.os_vif_util.neutron_to_osvif_vif_dpdk') - @mock.patch('kuryr_kubernetes.controller.drivers.utils.get_network_id') - def test_request_vif_parent_not_found(self, m_get_network_id, m_to_vif): - cls = nested_dpdk_vif.NestedDpdkPodVIFDriver - m_driver = mock.Mock(spec=cls) - os_net = self.useFixture(k_fix.MockNetworkClient()).client - compute = self.useFixture(k_fix.MockComputeClient()).client - - pod = mock.sentinel.pod - project_id = mock.sentinel.project_id - subnets = mock.sentinel.subnets - security_groups = mock.sentinel.security_groups - vm_id = mock.sentinel.parent_port_id - net_id = mock.sentinel.net_id - port_id = mock.sentinel.port_id - port = mock.sentinel.port - - parent_port = mock.MagicMock() - vif = mock.Mock() - result = mock.Mock() - - parent_port.__getitem__.return_value = vm_id - result.port_id = port_id - compute.create_server_interface.return_value = result - m_to_vif.return_value = vif - m_driver._get_parent_port.side_effect = \ - o_exc.SDKException - m_get_network_id.return_value = net_id - os_net.get_port.return_value = port - - self.assertRaises(o_exc.SDKException, cls.request_vif, - m_driver, pod, project_id, subnets, security_groups) - - m_driver._get_parent_port.assert_called_once_with(pod) - m_get_network_id.assert_not_called() - compute.create_server_interface.assert_not_called() - os_net.get_port.assert_not_called() - m_to_vif.assert_not_called() - - @mock.patch( - 'kuryr_kubernetes.os_vif_util.neutron_to_osvif_vif_dpdk') - @mock.patch('kuryr_kubernetes.controller.drivers.utils.get_network_id') - def test_request_vif_attach_failed(self, m_get_network_id, m_to_vif): - cls = nested_dpdk_vif.NestedDpdkPodVIFDriver - m_driver = mock.Mock(spec=cls) - os_net = 
self.useFixture(k_fix.MockNetworkClient()).client - compute = self.useFixture(k_fix.MockComputeClient()).client - - pod = mock.sentinel.pod - project_id = mock.sentinel.project_id - subnets = mock.sentinel.subnets - security_groups = mock.sentinel.security_groups - vm_id = mock.sentinel.parent_port_id - net_id = mock.sentinel.net_id - port_id = mock.sentinel.port_id - port = mock.sentinel.port - - parent_port = mock.MagicMock() - vif = mock.Mock() - result = mock.Mock() - - parent_port.device_id = vm_id - result.port_id = port_id - m_to_vif.return_value = vif - m_driver._get_parent_port.return_value = parent_port - m_get_network_id.return_value = net_id - os_net.get_port.return_value = port - compute.create_server_interface.side_effect = o_exc.SDKException - - self.assertRaises(o_exc.SDKException, cls.request_vif, - m_driver, pod, project_id, subnets, security_groups) - - m_driver._get_parent_port.assert_called_once_with(pod) - m_get_network_id.assert_called_once_with(subnets) - compute.create_server_interface.assert_called_once_with( - vm_id, net_id=net_id) - os_net.get_port.assert_not_called() - m_to_vif.assert_not_called() - - def test_release_vif(self): - cls = nested_dpdk_vif.NestedDpdkPodVIFDriver - m_driver = mock.Mock(spec=cls) - compute = self.useFixture(k_fix.MockComputeClient()).client - - port_id = mock.sentinel.port_id - pod = mock.sentinel.pod - vif = mock.Mock() - vif.id = port_id - - vm_id = mock.sentinel.vm_id - vm_port = mock.MagicMock() - vm_port.device_id = vm_id - - m_driver._get_parent_port.return_value = vm_port - - cls.release_vif(m_driver, pod, vif) - - m_driver._get_parent_port.assert_called_once_with(pod) - compute.delete_server_interface.assert_called_once_with( - vif.id, server=vm_id) - - def test_release_parent_not_found(self): - cls = nested_dpdk_vif.NestedDpdkPodVIFDriver - m_driver = mock.Mock(spec=cls) - compute = self.useFixture(k_fix.MockComputeClient()).client - - pod = mock.sentinel.pod - vif = mock.Mock() - vif.id = mock.sentinel.vif_id - - vm_id = mock.sentinel.parent_port_id - parent_port = mock.MagicMock() - parent_port.__getitem__.return_value = vm_id - - m_driver._get_parent_port.side_effect = \ - o_exc.SDKException - - self.assertRaises(o_exc.SDKException, cls.release_vif, - m_driver, pod, vif) - - m_driver._get_parent_port.assert_called_once_with(pod) - compute.delete_server_interface.assert_not_called() - - def test_release_detach_failed(self): - cls = nested_dpdk_vif.NestedDpdkPodVIFDriver - m_driver = mock.Mock(spec=cls) - compute = self.useFixture(k_fix.MockComputeClient()).client - - pod = mock.sentinel.pod - vif = mock.Mock() - vif.id = mock.sentinel.vif_id - - vm_id = mock.sentinel.parent_port_id - parent_port = mock.MagicMock() - parent_port.device_id = vm_id - - compute.delete_server_interface.side_effect = o_exc.SDKException - - m_driver._get_parent_port.return_value = parent_port - - self.assertRaises(o_exc.SDKException, cls.release_vif, - m_driver, pod, vif) - - m_driver._get_parent_port.assert_called_once_with(pod) - compute.delete_server_interface.assert_called_once_with( - vif.id, server=vm_id) - - @ddt.data((False), (True)) - def test_activate_vif(self, active_value): - cls = nested_dpdk_vif.NestedDpdkPodVIFDriver - m_driver = mock.Mock(spec=cls) - vif = mock.Mock() - vif.active = active_value - - cls.activate_vif(m_driver, vif) - - self.assertEqual(vif.active, True) diff --git a/kuryr_kubernetes/tests/unit/controller/drivers/test_nested_macvlan_vif.py b/kuryr_kubernetes/tests/unit/controller/drivers/test_nested_macvlan_vif.py 
deleted file mode 100644 index 0c3dde09d..000000000 --- a/kuryr_kubernetes/tests/unit/controller/drivers/test_nested_macvlan_vif.py +++ /dev/null @@ -1,510 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from unittest import mock - -import ddt -import threading - -from kuryr.lib import utils as lib_utils -from openstack import exceptions as o_exc - -from kuryr_kubernetes.controller.drivers import nested_macvlan_vif -from kuryr_kubernetes import exceptions as k_exc -from kuryr_kubernetes.tests import base as test_base -from kuryr_kubernetes.tests import fake -from kuryr_kubernetes.tests.unit import kuryr_fixtures as k_fix - - -@ddt.ddt -class TestNestedMacvlanPodVIFDriver(test_base.TestCase): - - @mock.patch( - 'kuryr_kubernetes.os_vif_util.neutron_to_osvif_vif_nested_macvlan') - def test_request_vif(self, m_to_vif): - cls = nested_macvlan_vif.NestedMacvlanPodVIFDriver - cls._tag_on_creation = True - m_driver = mock.Mock(spec=cls) - os_net = self.useFixture(k_fix.MockNetworkClient()).client - - pod = mock.sentinel.pod - project_id = mock.sentinel.project_id - subnets = mock.sentinel.subnets - security_groups = mock.sentinel.security_groups - container_mac = mock.sentinel.mac_address - container_ip = mock.sentinel.ip_address - container_port = fake.get_port_obj(mac_address=container_mac, - ip_address=container_ip) - - vif = mock.Mock() - port_request = {'foo': mock.sentinel.port_request} - vm_port = fake.get_port_obj() - - m_to_vif.return_value = vif - m_driver._get_port_request.return_value = port_request - m_driver._get_parent_port.return_value = vm_port - m_driver._try_update_port.return_value = 0 - m_driver.lock = mock.MagicMock(spec=threading.Lock()) - os_net.create_port.return_value = container_port - - self.assertEqual(vif, cls.request_vif(m_driver, pod, project_id, - subnets, security_groups)) - - m_driver._get_port_request.assert_called_once_with( - pod, project_id, subnets, security_groups) - os_net.create_port.assert_called_once_with(**port_request) - m_driver._get_parent_port.assert_called_once_with(pod) - m_driver._try_update_port.assert_called_once() - m_to_vif.assert_called_once_with(container_port, subnets) - - @mock.patch( - 'kuryr_kubernetes.os_vif_util.neutron_to_osvif_vif_nested_macvlan') - def test_request_vif_port_create_failed(self, m_to_vif): - cls = nested_macvlan_vif.NestedMacvlanPodVIFDriver - cls._tag_on_creation = True - m_driver = mock.Mock(spec=cls) - os_net = self.useFixture(k_fix.MockNetworkClient()).client - - pod = mock.sentinel.pod - project_id = mock.sentinel.project_id - subnets = mock.sentinel.subnets - security_groups = mock.sentinel.security_groups - - port_request = {'foo': mock.sentinel.port_request} - m_driver._get_port_request.return_value = port_request - os_net.create_port.side_effect = o_exc.SDKException - - self.assertRaises(o_exc.SDKException, cls.request_vif, - m_driver, pod, project_id, subnets, security_groups) - m_driver._get_port_request.assert_called_once_with( - pod, project_id, subnets, security_groups) - 
os_net.create_port.assert_called_once_with(**port_request) - m_driver._try_update_port.assert_not_called() - m_to_vif.assert_not_called() - - @mock.patch( - 'kuryr_kubernetes.os_vif_util.neutron_to_osvif_vif_nested_macvlan') - def test_request_vif_parent_not_found(self, m_to_vif): - cls = nested_macvlan_vif.NestedMacvlanPodVIFDriver - cls._tag_on_creation = True - m_driver = mock.Mock(spec=cls) - os_net = self.useFixture(k_fix.MockNetworkClient()).client - - pod = mock.sentinel.pod - project_id = mock.sentinel.project_id - subnets = mock.sentinel.subnets - security_groups = mock.sentinel.security_groups - container_mac = mock.sentinel.mac_address - container_ip = mock.sentinel.ip_address - container_port = fake.get_port_obj(mac_address=container_mac, - ip_address=container_ip) - - port_request = mock.sentinel.port_request - m_driver._get_port_request.return_value = port_request - m_driver.lock = mock.MagicMock(spec=threading.Lock()) - os_net.create_port.return_value = container_port - m_driver._get_parent_port.side_effect = o_exc.SDKException - - self.assertRaises(o_exc.SDKException, cls.request_vif, - m_driver, pod, project_id, subnets, security_groups) - m_driver._get_port_request.assert_called_once_with( - pod, project_id, subnets, security_groups) - os_net.create_port.assert_not_called() - m_driver._get_parent_port.assert_called_once_with(pod) - m_driver._try_update_port.assert_not_called() - m_to_vif.assert_not_called() - - def test_release_vif(self): - cls = nested_macvlan_vif.NestedMacvlanPodVIFDriver - m_driver = mock.Mock(spec=cls) - os_net = self.useFixture(k_fix.MockNetworkClient()).client - - port_id = lib_utils.get_hash() - pod = mock.sentinel.pod - vif = mock.Mock() - vif.id = port_id - - container_mac = mock.sentinel.mac_address - container_ip = mock.sentinel.ip_address - container_port = fake.get_port_obj( - port_id=port_id, ip_address=container_ip, - mac_address=container_mac) - os_net.get_port.return_value = container_port - - vm_port = fake.get_port_obj() - m_driver._get_parent_port.return_value = vm_port - m_driver._try_update_port.return_value = 0 - m_driver.lock = mock.MagicMock(spec=threading.Lock()) - - cls.release_vif(m_driver, pod, vif) - - os_net.get_port.assert_called_once_with(port_id) - m_driver._get_parent_port.assert_called_once_with(pod) - m_driver._try_update_port.assert_called_once() - os_net.delete_port.assert_called_once_with(vif.id, - ignore_missing=False) - - def test_release_vif_not_found(self): - cls = nested_macvlan_vif.NestedMacvlanPodVIFDriver - m_driver = mock.Mock(spec=cls) - os_net = self.useFixture(k_fix.MockNetworkClient()).client - - pod = mock.sentinel.pod - vif = mock.Mock() - vif.id = lib_utils.get_hash() - - os_net.get_port.side_effect = o_exc.NotFoundException - - self.assertRaises(o_exc.NotFoundException, cls.release_vif, - m_driver, pod, vif) - m_driver._remove_from_allowed_address_pairs.assert_not_called() - os_net.delete_port.assert_not_called() - - def test_release_vif_parent_not_found(self): - cls = nested_macvlan_vif.NestedMacvlanPodVIFDriver - m_driver = mock.Mock(spec=cls) - os_net = self.useFixture(k_fix.MockNetworkClient()).client - - port_id = lib_utils.get_hash() - pod = mock.sentinel.pod - vif = mock.Mock() - vif.id = port_id - - container_mac = mock.sentinel.mac_address - container_ip = mock.sentinel.ip_address - container_port = fake.get_port_obj( - port_id=port_id, ip_address=container_ip, - mac_address=container_mac) - os_net.get_port.return_value = container_port - - m_driver.lock = 
mock.MagicMock(spec=threading.Lock()) - m_driver._get_parent_port.side_effect = o_exc.SDKException - - self.assertRaises(o_exc.SDKException, cls.release_vif, - m_driver, pod, vif) - os_net.get_port.assert_called_with(port_id) - self.assertEqual(os_net.get_port.call_count, 1) - m_driver._get_parent_port.assert_called_with(pod) - self.assertEqual(m_driver._get_parent_port.call_count, 1) - m_driver._remove_from_allowed_address_pairs.assert_not_called() - os_net.delete_port.assert_not_called() - - def test_release_vif_delete_failed(self): - cls = nested_macvlan_vif.NestedMacvlanPodVIFDriver - m_driver = mock.Mock(spec=cls) - os_net = self.useFixture(k_fix.MockNetworkClient()).client - - port_id = lib_utils.get_hash() - pod = mock.sentinel.pod - vif = mock.Mock() - vif.id = port_id - - container_mac = mock.sentinel.mac_address - container_ip = mock.sentinel.ip_addresses - container_port = fake.get_port_obj( - port_id=port_id, ip_address=container_ip, - mac_address=container_mac) - os_net.get_port.return_value = container_port - os_net.delete_port.side_effect = o_exc.NotFoundException - - vm_port = fake.get_port_obj() - m_driver._get_parent_port.return_value = vm_port - m_driver._try_update_port.return_value = 0 - m_driver.lock = mock.MagicMock(spec=threading.Lock()) - - cls.release_vif(m_driver, pod, vif) - - os_net.get_port.assert_called_once_with(port_id) - m_driver._get_parent_port.assert_called_once_with(pod) - m_driver._try_update_port.assert_called_once() - os_net.delete_port.assert_called_once_with(vif.id, - ignore_missing=False) - - @ddt.data((False), (True)) - def test_activate_vif(self, active_value): - cls = nested_macvlan_vif.NestedMacvlanPodVIFDriver - m_driver = mock.Mock(spec=cls) - vif = mock.Mock() - vif.active = active_value - - cls.activate_vif(m_driver, vif) - - self.assertEqual(vif.active, True) - - @ddt.data((None), ('fa:16:3e:71:cb:80')) - def test_add_to_allowed_address_pairs(self, m_mac): - cls = nested_macvlan_vif.NestedMacvlanPodVIFDriver - m_driver = mock.Mock(spec=cls) - self.useFixture(k_fix.MockNetworkClient()).client - - port_id = lib_utils.get_hash() - vm_port = fake.get_port_obj(port_id) - - mac_addr = 'fa:16:3e:1b:30:00' if m_mac else vm_port['mac_address'] - address_pairs = [ - {'ip_address': '10.0.0.30', - 'mac_address': mac_addr}, - {'ip_address': 'fe80::f816:3eff:fe1c:36a9', - 'mac_address': mac_addr}, - ] - vm_port['allowed_address_pairs'].extend(address_pairs) - - ip_addr = '10.0.0.29' - address_pairs.append( - {'ip_address': ip_addr, - 'mac_address': m_mac if m_mac else vm_port['mac_address']} - ) - - cls._add_to_allowed_address_pairs(m_driver, vm_port, - frozenset([ip_addr]), m_mac) - - m_driver._update_port_address_pairs.assert_called_once_with( - port_id, address_pairs, revision_number=9) - - def test_add_to_allowed_address_pairs_no_ip_addresses(self): - cls = nested_macvlan_vif.NestedMacvlanPodVIFDriver - m_driver = mock.Mock(spec=cls) - self.useFixture(k_fix.MockNetworkClient()).client - - port_id = lib_utils.get_hash() - vm_port = fake.get_port_obj(port_id) - - self.assertRaises(k_exc.IntegrityError, - cls._add_to_allowed_address_pairs, m_driver, - vm_port, frozenset()) - - def test_add_to_allowed_address_pairs_same_ip(self): - cls = nested_macvlan_vif.NestedMacvlanPodVIFDriver - m_driver = mock.Mock(spec=cls) - self.useFixture(k_fix.MockNetworkClient()).client - - port_id = lib_utils.get_hash() - vm_port = fake.get_port_obj(port_id) - address_pairs = [ - {'ip_address': '10.0.0.30', - 'mac_address': 'fa:16:3e:1b:30:00'}, - {'ip_address': 
'fe80::f816:3eff:fe1c:36a9', - 'mac_address': 'fa:16:3e:1b:30:00'}, - ] - vm_port['allowed_address_pairs'].extend(address_pairs) - - mac_addr = 'fa:16:3e:71:cb:80' - ip_addr = '10.0.0.30' - address_pairs.append({'ip_address': ip_addr, 'mac_address': mac_addr}) - - cls._add_to_allowed_address_pairs(m_driver, vm_port, - frozenset([ip_addr]), mac_addr) - - m_driver._update_port_address_pairs.assert_called_once_with( - port_id, address_pairs, revision_number=9) - - def test_add_to_allowed_address_pairs_already_present(self): - cls = nested_macvlan_vif.NestedMacvlanPodVIFDriver - m_driver = mock.Mock(spec=cls) - self.useFixture(k_fix.MockNetworkClient()).client - - port_id = lib_utils.get_hash() - vm_port = fake.get_port_obj(port_id) - address_pairs = [ - {'ip_address': '10.0.0.30', - 'mac_address': 'fa:16:3e:1b:30:00'}, - {'ip_address': 'fe80::f816:3eff:fe1c:36a9', - 'mac_address': 'fa:16:3e:1b:30:00'}, - ] - vm_port['allowed_address_pairs'].extend(address_pairs) - - mac_addr = 'fa:16:3e:1b:30:00' - ip_addr = '10.0.0.30' - - self.assertRaises(k_exc.AllowedAddressAlreadyPresent, - cls._add_to_allowed_address_pairs, m_driver, - vm_port, frozenset([ip_addr]), mac_addr) - - @ddt.data((None), ('fa:16:3e:71:cb:80')) - def test_remove_from_allowed_address_pairs(self, m_mac): - cls = nested_macvlan_vif.NestedMacvlanPodVIFDriver - m_driver = mock.Mock(spec=cls) - self.useFixture(k_fix.MockNetworkClient()).client - - port_id = lib_utils.get_hash() - vm_port = fake.get_port_obj(port_id) - - mac_addr = 'fa:16:3e:1b:30:00' if m_mac else vm_port['mac_address'] - address_pairs = [ - {'ip_address': '10.0.0.30', - 'mac_address': mac_addr}, - {'ip_address': 'fe80::f816:3eff:fe1c:36a9', - 'mac_address': mac_addr}, - ] - vm_port['allowed_address_pairs'].extend(address_pairs) - - ip_addr = '10.0.0.29' - vm_port['allowed_address_pairs'].append( - {'ip_address': ip_addr, - 'mac_address': m_mac if m_mac else vm_port['mac_address']} - ) - - cls._remove_from_allowed_address_pairs( - m_driver, vm_port, frozenset([ip_addr]), m_mac) - - m_driver._update_port_address_pairs.assert_called_once_with( - port_id, address_pairs, revision_number=9) - - def test_remove_from_allowed_address_pairs_no_ip_addresses(self): - cls = nested_macvlan_vif.NestedMacvlanPodVIFDriver - m_driver = mock.Mock(spec=cls) - self.useFixture(k_fix.MockNetworkClient()).client - - port_id = lib_utils.get_hash() - vm_port = fake.get_port_obj(port_id) - - self.assertRaises(k_exc.IntegrityError, - cls._remove_from_allowed_address_pairs, m_driver, - vm_port, frozenset()) - - @ddt.data((None), ('fa:16:3e:71:cb:80')) - def test_remove_from_allowed_address_pairs_missing(self, m_mac): - cls = nested_macvlan_vif.NestedMacvlanPodVIFDriver - m_driver = mock.Mock(spec=cls) - self.useFixture(k_fix.MockNetworkClient()).client - - port_id = lib_utils.get_hash() - vm_port = fake.get_port_obj(port_id) - - mac_addr = 'fa:16:3e:1b:30:00' if m_mac else vm_port['mac_address'] - address_pairs = [ - {'ip_address': '10.0.0.30', - 'mac_address': mac_addr}, - {'ip_address': 'fe80::f816:3eff:fe1c:36a9', - 'mac_address': mac_addr}, - ] - mac_addr = m_mac if m_mac else vm_port['mac_address'] - vm_port['allowed_address_pairs'].extend(address_pairs) - vm_port['allowed_address_pairs'].append({'ip_address': '10.0.0.28', - 'mac_address': mac_addr}) - ip_addr = ['10.0.0.29', '10.0.0.28'] - - cls._remove_from_allowed_address_pairs( - m_driver, vm_port, frozenset(ip_addr), m_mac) - - m_driver._update_port_address_pairs.assert_called_once_with( - port_id, address_pairs, revision_number=9) - 
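The revision_number=9 expected by _update_port_address_pairs in the tests above (the fake port object evidently carries revision 9) reflects Neutron's optimistic locking: passing if_revision makes the update conditional on the port still being at the revision that was read, and Neutron refuses with HTTP 412 if it moved, turning a lost update into a retryable error. A sketch of the read-modify-retry loop such a driver needs; the connection setup is an assumption, and the broad HttpException catch is deliberate since the exact exception class raised for a 412 depends on the openstacksdk version:

import openstack
from openstack import exceptions as os_exc

conn = openstack.connect(cloud='devstack')  # assumed clouds.yaml entry


def set_allowed_address_pairs(port_id, pairs, attempts=3):
    # the _try_update_port tests below pass the same bound (3)
    for _ in range(attempts):
        port = conn.network.get_port(port_id)
        try:
            conn.network.update_port(port,
                                     allowed_address_pairs=pairs,
                                     if_revision=port.revision_number)
            return
        except os_exc.HttpException:
            # conditional update refused (the revision moved underneath
            # us): re-read the port and try again
            continue
    raise RuntimeError('port %s kept changing, giving up' % port_id)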
- @ddt.data((None), ('fa:16:3e:71:cb:80')) - def test_remove_from_allowed_address_pairs_no_update(self, m_mac): - cls = nested_macvlan_vif.NestedMacvlanPodVIFDriver - m_driver = mock.Mock(spec=cls) - self.useFixture(k_fix.MockNetworkClient()).client - - port_id = lib_utils.get_hash() - vm_port = fake.get_port_obj(port_id) - - mac_addr = 'fa:16:3e:1b:30:00' if m_mac else vm_port['mac_address'] - address_pairs = [ - {'ip_address': '10.0.0.30', - 'mac_address': mac_addr}, - {'ip_address': 'fe80::f816:3eff:fe1c:36a9', - 'mac_address': mac_addr}, - ] - vm_port['allowed_address_pairs'].extend(address_pairs) - - ip_addr = ['10.0.0.29'] - - cls._remove_from_allowed_address_pairs( - m_driver, vm_port, frozenset(ip_addr), m_mac) - - m_driver._update_port_address_pairs.assert_not_called() - - def test_update_port_address_pairs(self): - cls = nested_macvlan_vif.NestedMacvlanPodVIFDriver - m_driver = mock.Mock(spec=cls) - os_net = self.useFixture(k_fix.MockNetworkClient()).client - - port_id = lib_utils.get_hash() - pairs = mock.sentinel.allowed_address_pairs - - cls._update_port_address_pairs(m_driver, port_id, pairs, - revision_number=9) - - os_net.update_port.assert_called_with( - port_id, allowed_address_pairs=pairs, if_revision=9) - - def test_update_port_address_pairs_failure(self): - cls = nested_macvlan_vif.NestedMacvlanPodVIFDriver - m_driver = mock.Mock(spec=cls) - os_net = self.useFixture(k_fix.MockNetworkClient()).client - - port_id = lib_utils.get_hash() - pairs = mock.sentinel.allowed_address_pairs - os_net.update_port.side_effect = o_exc.SDKException - - self.assertRaises(o_exc.SDKException, - cls._update_port_address_pairs, m_driver, - port_id, pairs, revision_number=9) - - os_net.update_port.assert_called_with( - port_id, allowed_address_pairs=pairs, if_revision=9) - - @mock.patch('kuryr_kubernetes.controller.drivers.nested_macvlan_vif.' - 'NestedMacvlanPodVIFDriver._add_to_allowed_address_pairs') - def test_try_update_port(self, aaapf_mock): - cls = nested_macvlan_vif.NestedMacvlanPodVIFDriver - m_driver = mock.Mock(spec=cls) - m_driver.lock = mock.MagicMock(spec=threading.Lock()) - self.useFixture(k_fix.MockNetworkClient()).client - - port_id = lib_utils.get_hash() - vm_port = fake.get_port_obj(port_id) - - mac_addr = 'fa:16:3e:1b:30:00' - address_pairs = [ - {'ip_address': '10.0.0.30', - 'mac_address': mac_addr}, - {'ip_address': 'fe80::f816:3eff:fe1c:36a9', - 'mac_address': mac_addr}, - ] - vm_port['allowed_address_pairs'].extend(address_pairs) - - ip_addr = ['10.0.0.29'] - attempts = cls._try_update_port(m_driver, 3, - cls._add_to_allowed_address_pairs, - vm_port, frozenset(ip_addr), mac_addr) - self.assertEqual(attempts, 0) - aaapf_mock.assert_called_once() - - @mock.patch('kuryr_kubernetes.controller.drivers.nested_macvlan_vif.' 
- 'NestedMacvlanPodVIFDriver._add_to_allowed_address_pairs') - def test_try_update_port_failure(self, aaapf_mock): - cls = nested_macvlan_vif.NestedMacvlanPodVIFDriver - m_driver = mock.Mock(spec=cls) - m_driver.lock = mock.MagicMock(spec=threading.Lock()) - self.useFixture(k_fix.MockNetworkClient()).client - - port_id = lib_utils.get_hash() - vm_port = fake.get_port_obj(port_id) - - mac_addr = 'fa:16:3e:1b:30:00' - address_pairs = [ - {'ip_address': '10.0.0.30', - 'mac_address': mac_addr}, - {'ip_address': 'fe80::f816:3eff:fe1c:36a9', - 'mac_address': mac_addr}, - ] - vm_port['allowed_address_pairs'].extend(address_pairs) - - ip_addr = ['10.0.0.29'] - - aaapf_mock.side_effect = o_exc.SDKException - self.assertRaises(o_exc.SDKException, - cls._try_update_port, m_driver, 1, - cls._add_to_allowed_address_pairs, - vm_port, frozenset(ip_addr), mac_addr) diff --git a/kuryr_kubernetes/tests/unit/controller/drivers/test_nested_vif.py b/kuryr_kubernetes/tests/unit/controller/drivers/test_nested_vif.py deleted file mode 100644 index 9ed004d27..000000000 --- a/kuryr_kubernetes/tests/unit/controller/drivers/test_nested_vif.py +++ /dev/null @@ -1,129 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from unittest import mock - -from kuryr.lib import exceptions as kl_exc -from oslo_config import cfg as oslo_cfg - -from kuryr_kubernetes.controller.drivers import nested_vif -from kuryr_kubernetes.controller.drivers import node_subnets -from kuryr_kubernetes.tests import base as test_base -from kuryr_kubernetes.tests.unit import kuryr_fixtures as k_fix - - -class TestNestedPodVIFDriver(test_base.TestCase): - - def test_get_parent_port(self): - cls = nested_vif.NestedPodVIFDriver - m_driver = mock.Mock(spec=cls) - self.useFixture(k_fix.MockNetworkClient()).client - - node_fixed_ip = mock.sentinel.node_fixed_ip - pod_status = mock.MagicMock() - pod_status.__getitem__.return_value = node_fixed_ip - - pod = mock.MagicMock() - pod.__getitem__.return_value = pod_status - parent_port = mock.sentinel.parent_port - - m_driver._get_parent_port_by_host_ip.return_value = parent_port - - cls._get_parent_port(m_driver, pod) - m_driver._get_parent_port_by_host_ip.assert_called_once() - - def test_get_parent_port_by_host_ip(self): - cls = nested_vif.NestedPodVIFDriver - m_driver = mock.Mock( - spec=cls, nodes_subnets_driver=node_subnets.ConfigNodesSubnets()) - os_net = self.useFixture(k_fix.MockNetworkClient()).client - - node_subnet_id1 = 'node_subnet_id1' - node_subnet_id2 = 'node_subnet_id2' - oslo_cfg.CONF.set_override('worker_nodes_subnets', - [node_subnet_id2], - group='pod_vif_nested') - - node_fixed_ip = mock.sentinel.node_fixed_ip - - ports = [ - mock.Mock(fixed_ips=[{'subnet_id': node_subnet_id1}]), - mock.Mock(fixed_ips=[{'subnet_id': node_subnet_id2}]), - ] - os_net.ports.return_value = iter(ports) - - self.assertEqual(ports[1], cls._get_parent_port_by_host_ip( - m_driver, node_fixed_ip)) - fixed_ips = ['ip_address=%s' % str(node_fixed_ip)] - os_net.ports.assert_called_once_with(fixed_ips=fixed_ips) - - def 
test_get_parent_port_by_host_ip_multiple(self): - cls = nested_vif.NestedPodVIFDriver - m_driver = mock.Mock( - spec=cls, nodes_subnets_driver=node_subnets.ConfigNodesSubnets()) - os_net = self.useFixture(k_fix.MockNetworkClient()).client - - node_subnet_id1 = 'node_subnet_id1' - node_subnet_id2 = 'node_subnet_id2' - node_subnet_id3 = 'node_subnet_id3' - oslo_cfg.CONF.set_override('worker_nodes_subnets', - [node_subnet_id3, node_subnet_id2], - group='pod_vif_nested') - - node_fixed_ip = mock.sentinel.node_fixed_ip - - ports = [ - mock.Mock(fixed_ips=[{'subnet_id': node_subnet_id1}]), - mock.Mock(fixed_ips=[{'subnet_id': node_subnet_id2}]), - ] - os_net.ports.return_value = (p for p in ports) - - self.assertEqual(ports[1], cls._get_parent_port_by_host_ip( - m_driver, node_fixed_ip)) - fixed_ips = ['ip_address=%s' % str(node_fixed_ip)] - os_net.ports.assert_called_with(fixed_ips=fixed_ips) - - def test_get_parent_port_by_host_ip_subnet_id_not_configured(self): - cls = nested_vif.NestedPodVIFDriver - m_driver = mock.Mock( - spec=cls, nodes_subnets_driver=node_subnets.ConfigNodesSubnets()) - self.useFixture(k_fix.MockNetworkClient()).client - oslo_cfg.CONF.set_override('worker_nodes_subnets', - '', - group='pod_vif_nested') - node_fixed_ip = mock.sentinel.node_fixed_ip - self.assertRaises(oslo_cfg.RequiredOptError, - cls._get_parent_port_by_host_ip, - m_driver, node_fixed_ip) - - def test_get_parent_port_by_host_ip_trunk_not_found(self): - cls = nested_vif.NestedPodVIFDriver - m_driver = mock.Mock( - spec=cls, nodes_subnets_driver=node_subnets.ConfigNodesSubnets()) - os_net = self.useFixture(k_fix.MockNetworkClient()).client - - node_subnet_id = 'node_subnet_id' - - oslo_cfg.CONF.set_override('worker_nodes_subnets', - [node_subnet_id], - group='pod_vif_nested') - - node_fixed_ip = mock.sentinel.node_fixed_ip - - ports = (p for p in []) - os_net.ports.return_value = ports - - self.assertRaises(kl_exc.NoResourceException, - cls._get_parent_port_by_host_ip, m_driver, - node_fixed_ip) - fixed_ips = ['ip_address=%s' % str(node_fixed_ip)] - os_net.ports.assert_called_once_with(fixed_ips=fixed_ips) diff --git a/kuryr_kubernetes/tests/unit/controller/drivers/test_nested_vlan_vif.py b/kuryr_kubernetes/tests/unit/controller/drivers/test_nested_vlan_vif.py deleted file mode 100644 index b01c4c363..000000000 --- a/kuryr_kubernetes/tests/unit/controller/drivers/test_nested_vlan_vif.py +++ /dev/null @@ -1,651 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
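The nested-VLAN tests that follow revolve around Neutron trunk subports: a child port is created on the pod network, attached to the node's trunk under a free VLAN segmentation id, and detached plus deleted on release. A minimal sketch of that round trip with openstacksdk, assuming a Connection named conn, a known trunk_id and pod network id; the VLAN id is illustrative:

    # create the child port that will back the pod VIF
    port = conn.network.create_port(network_id=pod_net_id,
                                    admin_state_up=True)

    # attach it to the node's trunk; the segmentation id must not collide
    # with ids already in use on that trunk (the driver tracks these)
    conn.network.add_trunk_subports(trunk_id, [{
        'port_id': port.id,
        'segmentation_type': 'vlan',
        'segmentation_id': 101,
    }])

    # release path: detach the subport, then delete the port
    conn.network.delete_trunk_subports(trunk_id, [{'port_id': port.id}])
    conn.network.delete_port(port.id)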
- -import eventlet -from unittest import mock - -from kuryr.lib import constants as kl_const -from kuryr.lib import exceptions as kl_exc -from openstack import exceptions as os_exc -from openstack.network.v2 import port as os_port -from openstack.network.v2 import trunk as os_trunk -from oslo_config import cfg as oslo_cfg - -from kuryr_kubernetes import constants -from kuryr_kubernetes.controller.drivers import nested_vlan_vif -from kuryr_kubernetes import exceptions as k_exc -from kuryr_kubernetes.tests import base as test_base -from kuryr_kubernetes.tests.unit import kuryr_fixtures as k_fix - - -class TestNestedVlanPodVIFDriver(test_base.TestCase): - - @mock.patch( - 'kuryr_kubernetes.os_vif_util.neutron_to_osvif_vif_nested_vlan') - def test_request_vif(self, m_to_vif): - cls = nested_vlan_vif.NestedVlanPodVIFDriver - cls._tag_on_creation = True - m_driver = mock.Mock(spec=cls) - os_net = self.useFixture(k_fix.MockNetworkClient()).client - - pod = mock.sentinel.pod - project_id = mock.sentinel.project_id - subnets = mock.sentinel.subnets - security_groups = mock.sentinel.security_groups - - parent_port = mock.sentinel.parent_port - trunk_id = mock.sentinel.trunk_id - port_id = mock.sentinel.port_id - port = os_port.Port(id=port_id) - port_request = {'project_id': project_id, - 'name': mock.sentinel.name, - 'network_id': mock.sentinel.network_id, - 'fixed_ips': mock.sentinel.fixed_ips, - 'admin_state_up': True} - vlan_id = mock.sentinel.vlan_id - - vif = mock.Mock() - - m_to_vif.return_value = vif - m_driver._get_parent_port.return_value = parent_port - m_driver._get_trunk_id.return_value = trunk_id - m_driver._get_port_request.return_value = port_request - m_driver._add_subport.return_value = vlan_id - os_net.ports.return_value = (p for p in [parent_port]) - os_net.create_port.return_value = port - - self.assertEqual(vif, cls.request_vif(m_driver, pod, project_id, - subnets, security_groups)) - - m_driver._get_parent_port.assert_called_once_with(pod) - m_driver._get_trunk_id.assert_called_once_with(parent_port) - m_driver._get_port_request.assert_called_once_with( - pod, project_id, subnets, security_groups) - os_net.create_port.assert_called_once_with(**port_request) - m_driver._add_subport.assert_called_once_with(trunk_id, port_id) - m_to_vif.assert_called_once_with(port, subnets, vlan_id) - - @mock.patch( - 'kuryr_kubernetes.os_vif_util.neutron_to_osvif_vif_nested_vlan') - def test_request_vifs(self, m_to_vif): - cls = nested_vlan_vif.NestedVlanPodVIFDriver - cls._tag_on_creation = True - m_driver = mock.Mock(spec=cls) - os_net = self.useFixture(k_fix.MockNetworkClient()).client - - pod = mock.sentinel.pod - project_id = mock.sentinel.project_id - subnets = mock.sentinel.subnets - security_groups = mock.sentinel.security_groups - num_ports = 2 - - parent_port = mock.sentinel.parent_port - trunk_id = mock.sentinel.trunk_id - port_request = mock.sentinel.port_request - subports_info = [{'segmentation_id': 1, - 'port_id': '', - 'segmentation_type': 'vlan'}, - {'segmentation_id': 2, - 'port_id': '', - 'segmentation_type': 'vlan'}] - port = os_port.Port(id=mock.sentinel.id) - vif = mock.sentinel.vif - - bulk_rq = [port_request for _ in range(len(subports_info))] - - m_driver._get_parent_port.return_value = parent_port - m_driver._get_trunk_id.return_value = trunk_id - m_driver._create_subports_info.return_value = (port_request, - subports_info) - os_net.create_ports.return_value = (p for p in [port, port]) - m_to_vif.return_value = vif - semaphore = 
mock.MagicMock(spec=eventlet.semaphore.Semaphore(20)) - - self.assertEqual([vif, vif], cls.request_vifs( - m_driver, pod, project_id, subnets, security_groups, num_ports, - semaphore)) - - m_driver._get_parent_port.assert_called_once_with(pod) - m_driver._get_trunk_id.assert_called_once_with(parent_port) - m_driver._create_subports_info.assert_called_once_with( - pod, project_id, subnets, security_groups, trunk_id, num_ports, - unbound=True) - os_net.create_ports.assert_called_once_with(bulk_rq) - os_net.add_trunk_subports.assert_called_once_with(trunk_id, - subports_info) - os_net.delete_port.assert_not_called() - - calls = [mock.call(port, subnets, info['segmentation_id']) - for info in subports_info] - m_to_vif.assert_has_calls(calls) - - def test_request_vifs_no_vlans(self): - cls = nested_vlan_vif.NestedVlanPodVIFDriver - cls._tag_on_creation = False - m_driver = mock.Mock(spec=cls) - self.useFixture(k_fix.MockNetworkClient()).client - - pod = mock.sentinel.pod - project_id = mock.sentinel.project_id - subnets = mock.sentinel.subnets - security_groups = mock.sentinel.security_groups - num_ports = 2 - - parent_port = mock.sentinel.parent_port - trunk_id = mock.sentinel.trunk_id - port_request = mock.sentinel.port_request - subports_info = [] - - m_driver._get_parent_port.return_value = parent_port - m_driver._get_trunk_id.return_value = trunk_id - m_driver._create_subports_info.return_value = (port_request, - subports_info) - semaphore = mock.MagicMock(spec=eventlet.semaphore.Semaphore(20)) - - self.assertEqual([], cls.request_vifs(m_driver, pod, project_id, - subnets, security_groups, - num_ports, semaphore)) - - m_driver._get_parent_port.assert_called_once_with(pod) - m_driver._get_trunk_id.assert_called_once_with(parent_port) - m_driver._create_subports_info.assert_called_once_with( - pod, project_id, subnets, security_groups, - trunk_id, num_ports, unbound=True) - - def test_request_vifs_bulk_creation_exception(self): - cls = nested_vlan_vif.NestedVlanPodVIFDriver - cls._tag_on_creation = True - m_driver = mock.Mock(spec=cls) - os_net = self.useFixture(k_fix.MockNetworkClient()).client - - pod = mock.sentinel.pod - project_id = mock.sentinel.project_id - subnets = mock.sentinel.subnets - security_groups = mock.sentinel.security_groups - num_ports = 2 - - parent_port = mock.sentinel.parent_port - trunk_id = mock.sentinel.trunk_id - port_request = mock.sentinel.port_request - subports_info = [{'segmentation_id': 1, - 'port_id': '', - 'segmentation_type': 'vlan'}, - {'segmentation_id': 2, - 'port_id': '', - 'segmentation_type': 'vlan'}] - - bulk_rq = [port_request for _ in range(len(subports_info))] - - m_driver._get_parent_port.return_value = parent_port - m_driver._get_trunk_id.return_value = trunk_id - m_driver._create_subports_info.return_value = (port_request, - subports_info) - os_net.create_ports.side_effect = os_exc.SDKException - semaphore = mock.MagicMock(spec=eventlet.semaphore.Semaphore(20)) - - self.assertRaises( - os_exc.SDKException, cls.request_vifs, - m_driver, pod, project_id, subnets, security_groups, num_ports, - semaphore) - - m_driver._get_parent_port.assert_called_once_with(pod) - m_driver._get_trunk_id.assert_called_once_with(parent_port) - m_driver._create_subports_info.assert_called_once_with( - pod, project_id, subnets, security_groups, - trunk_id, num_ports, unbound=True) - os_net.create_ports.assert_called_once_with(bulk_rq) - - @mock.patch('kuryr_kubernetes.controller.drivers.utils.delete_ports') - def test_request_vifs_trunk_subports_conflict(self, 
m_del_ports): - cls = nested_vlan_vif.NestedVlanPodVIFDriver - cls._tag_on_creation = True - m_driver = mock.Mock(spec=cls) - os_net = self.useFixture(k_fix.MockNetworkClient()).client - - pod = mock.sentinel.pod - project_id = mock.sentinel.project_id - subnets = mock.sentinel.subnets - security_groups = mock.sentinel.security_groups - num_ports = 2 - - parent_port = mock.sentinel.parent_port - trunk_id = mock.sentinel.trunk_id - port_request = mock.sentinel.port_request - subports_info = [{'segmentation_id': 1, - 'port_id': '', - 'segmentation_type': 'vlan'}, - {'segmentation_id': 2, - 'port_id': '', - 'segmentation_type': 'vlan'}] - port = os_port.Port(id=mock.sentinel.id) - - bulk_rq = [port_request for _ in range(len(subports_info))] - - m_driver._get_parent_port.return_value = parent_port - m_driver._get_trunk_id.return_value = trunk_id - m_driver._create_subports_info.return_value = (port_request, - subports_info) - os_net.create_ports.return_value = (p for p in [port, port]) - os_net.add_trunk_subports.side_effect = os_exc.ConflictException - semaphore = mock.MagicMock(spec=eventlet.semaphore.Semaphore(20)) - - self.assertEqual([], cls.request_vifs(m_driver, pod, project_id, - subnets, security_groups, num_ports, semaphore)) - - m_driver._get_parent_port.assert_called_once_with(pod) - m_driver._get_trunk_id.assert_called_once_with(parent_port) - m_driver._create_subports_info.assert_called_once_with( - pod, project_id, subnets, security_groups, - trunk_id, num_ports, unbound=True) - os_net.create_ports.assert_called_once_with(bulk_rq) - os_net.add_trunk_subports.assert_called_once_with(trunk_id, - subports_info) - m_del_ports.assert_called_once_with([port, port]) - - @mock.patch('kuryr_kubernetes.controller.drivers.utils.delete_ports') - def test_request_vifs_trunk_subports_exception(self, m_del_ports): - cls = nested_vlan_vif.NestedVlanPodVIFDriver - cls._tag_on_creation = False - m_driver = mock.Mock(spec=cls) - os_net = self.useFixture(k_fix.MockNetworkClient()).client - - pod = mock.sentinel.pod - project_id = mock.sentinel.project_id - subnets = mock.sentinel.subnets - security_groups = mock.sentinel.security_groups - num_ports = 2 - - parent_port = mock.sentinel.parent_port - trunk_id = mock.sentinel.trunk_id - port_request = mock.sentinel.port_request - subports_info = [{'segmentation_id': 1, - 'port_id': '', - 'segmentation_type': 'vlan'}, - {'segmentation_id': 2, - 'port_id': '', - 'segmentation_type': 'vlan'}] - port = os_port.Port(id=mock.sentinel.id) - - bulk_rq = [port_request for _ in range(len(subports_info))] - - m_driver._get_parent_port.return_value = parent_port - m_driver._get_trunk_id.return_value = trunk_id - m_driver._create_subports_info.return_value = (port_request, - subports_info) - os_net.create_ports.return_value = (p for p in [port, port]) - os_net.add_trunk_subports.side_effect = os_exc.SDKException - semaphore = mock.MagicMock(spec=eventlet.semaphore.Semaphore(20)) - - self.assertEqual([], cls.request_vifs(m_driver, pod, project_id, - subnets, security_groups, num_ports, semaphore)) - - m_driver._get_parent_port.assert_called_once_with(pod) - m_driver._get_trunk_id.assert_called_once_with(parent_port) - m_driver._create_subports_info.assert_called_once_with( - pod, project_id, subnets, security_groups, - trunk_id, num_ports, unbound=True) - os_net.create_ports.assert_called_once_with(bulk_rq) - os_net.add_trunk_subports.assert_called_once_with(trunk_id, - subports_info) - m_del_ports.assert_called_once_with([port, port]) - - def 
test_release_vif(self): - cls = nested_vlan_vif.NestedVlanPodVIFDriver - m_driver = mock.Mock(spec=cls) - os_net = self.useFixture(k_fix.MockNetworkClient()).client - parent_port = mock.sentinel.parent_port - trunk_id = mock.sentinel.trunk_id - - m_driver._get_parent_port.return_value = parent_port - m_driver._get_trunk_id.return_value = trunk_id - pod = mock.sentinel.pod - vif = mock.Mock() - - cls.release_vif(m_driver, pod, vif) - - m_driver._get_parent_port.assert_called_once_with(pod) - m_driver._get_trunk_id.assert_called_once_with(parent_port) - m_driver._remove_subport.assert_called_once_with(trunk_id, vif.id) - os_net.delete_port.assert_called_once_with(vif.id) - - def test_release_vif_not_found(self): - cls = nested_vlan_vif.NestedVlanPodVIFDriver - m_driver = mock.Mock(spec=cls) - os_net = self.useFixture(k_fix.MockNetworkClient()).client - parent_port = mock.sentinel.parent_port - trunk_id = mock.sentinel.trunk_id - - m_driver._get_parent_port.return_value = parent_port - m_driver._get_trunk_id.return_value = trunk_id - pod = mock.sentinel.pod - vlan_id = mock.sentinel.vlan_id - vif = mock.Mock() - m_driver._port_vlan_mapping = {vif.id: vlan_id} - self.assertTrue(vif.id in m_driver._port_vlan_mapping) - - cls.release_vif(m_driver, pod, vif) - - m_driver._get_parent_port.assert_called_once_with(pod) - m_driver._get_trunk_id.assert_called_once_with(parent_port) - m_driver._remove_subport.assert_called_once_with(trunk_id, vif.id) - os_net.delete_port.assert_called_once_with(vif.id) - - def _test_get_port_request(self, m_to_fips, security_groups, - m_get_network_id, m_get_port_name, - unbound=False): - cls = nested_vlan_vif.NestedVlanPodVIFDriver - cls._tag_on_creation = True - m_driver = mock.Mock(spec=cls) - - pod = mock.sentinel.pod - project_id = mock.sentinel.project_id - subnets = mock.sentinel.subnets - port_name = mock.sentinel.port_name - network_id = mock.sentinel.project_id - fixed_ips = mock.sentinel.fixed_ips - - m_get_port_name.return_value = port_name - m_get_network_id.return_value = network_id - m_to_fips.return_value = fixed_ips - - oslo_cfg.CONF.set_override('port_debug', - True, - group='kubernetes') - - expected = {'project_id': project_id, - 'name': port_name, - 'network_id': network_id, - 'fixed_ips': fixed_ips, - 'device_owner': kl_const.DEVICE_OWNER, - 'admin_state_up': True} - - if security_groups: - expected['security_groups'] = security_groups - - if unbound: - expected['name'] = constants.KURYR_PORT_NAME - - ret = cls._get_port_request(m_driver, pod, project_id, subnets, - security_groups, unbound) - - self.assertEqual(expected, ret) - if unbound: - m_get_port_name.assert_not_called() - else: - m_get_port_name.assert_called_once_with(pod) - m_get_network_id.assert_called_once_with(subnets) - m_to_fips.assert_called_once_with(subnets) - - @mock.patch('kuryr_kubernetes.controller.drivers.utils.get_port_name') - @mock.patch('kuryr_kubernetes.controller.drivers.utils.get_network_id') - @mock.patch('kuryr_kubernetes.os_vif_util.osvif_to_neutron_fixed_ips') - def test_get_port_request(self, m_to_fips, m_get_network_id, - m_get_port_name): - security_groups = mock.sentinel.security_groups - self._test_get_port_request(m_to_fips, security_groups, - m_get_network_id, m_get_port_name) - - @mock.patch('kuryr_kubernetes.controller.drivers.utils.get_port_name') - @mock.patch('kuryr_kubernetes.controller.drivers.utils.get_network_id') - @mock.patch('kuryr_kubernetes.os_vif_util.osvif_to_neutron_fixed_ips') - def test_get_port_request_no_sg(self, m_to_fips, 
m_get_network_id, - m_get_port_name): - security_groups = [] - self._test_get_port_request(m_to_fips, security_groups, - m_get_network_id, m_get_port_name) - - @mock.patch('kuryr_kubernetes.controller.drivers.utils.get_port_name') - @mock.patch('kuryr_kubernetes.controller.drivers.utils.get_network_id') - @mock.patch('kuryr_kubernetes.os_vif_util.osvif_to_neutron_fixed_ips') - def test_get_port_request_unbound(self, m_to_fips, m_get_network_id, - m_get_port_name): - security_groups = mock.sentinel.security_groups - self._test_get_port_request(m_to_fips, security_groups, - m_get_network_id, m_get_port_name, - unbound=True) - - @mock.patch('kuryr.lib.segmentation_type_drivers.allocate_segmentation_id') - def test__create_subports_info(self, m_allocate_seg_id): - cls = nested_vlan_vif.NestedVlanPodVIFDriver - m_driver = mock.Mock(spec=cls) - - pod = mock.sentinel.pod - project_id = mock.sentinel.project_id - subnets = mock.sentinel.subnets - security_groups = mock.sentinel.security_groups - trunk_id = mock.sentinel.trunk_id - num_ports = 2 - in_use_vlan = set([1]) - port = mock.sentinel.port - subports_info = [{'segmentation_id': i + 2, - 'port_id': '', - 'segmentation_type': 'vlan'} - for i in range(num_ports)] - - m_driver._get_in_use_vlan_ids_set.return_value = in_use_vlan - m_driver._get_port_request.return_value = port - m_allocate_seg_id.side_effect = [2, 3] - - port_res, subports_res = cls._create_subports_info( - m_driver, pod, project_id, subnets, security_groups, trunk_id, - num_ports, unbound=False) - - self.assertEqual(port_res, port) - self.assertEqual(subports_res, subports_info) - - m_driver._get_in_use_vlan_ids_set.assert_called_once_with(trunk_id) - m_driver._get_port_request.assert_called_once_with( - pod, project_id, subnets, security_groups, False) - self.assertEqual(m_allocate_seg_id.call_count, 2) - - @mock.patch('kuryr.lib.segmentation_type_drivers.allocate_segmentation_id') - def test__create_subports_info_not_enough_vlans(self, m_allocate_seg_id): - cls = nested_vlan_vif.NestedVlanPodVIFDriver - m_driver = mock.Mock(spec=cls) - - pod = mock.sentinel.pod - project_id = mock.sentinel.project_id - subnets = mock.sentinel.subnets - security_groups = mock.sentinel.security_groups - trunk_id = mock.sentinel.trunk_id - num_ports = 2 - in_use_vlan = set([1]) - port = mock.sentinel.port - subports_info = [{'segmentation_id': 2, - 'port_id': '', - 'segmentation_type': 'vlan'}] - - m_driver._get_in_use_vlan_ids_set.return_value = in_use_vlan - m_driver._get_port_request.return_value = port - m_allocate_seg_id.side_effect = [ - 2, kl_exc.SegmentationIdAllocationFailure - ] - - port_res, subports_res = cls._create_subports_info( - m_driver, pod, project_id, subnets, security_groups, trunk_id, - num_ports, unbound=False) - - self.assertEqual(port_res, port) - self.assertEqual(subports_res, subports_info) - - m_driver._get_in_use_vlan_ids_set.assert_called_once_with(trunk_id) - m_driver._get_port_request.assert_called_once_with( - pod, project_id, subnets, security_groups, False) - self.assertEqual(m_allocate_seg_id.call_count, 2) - - @mock.patch('kuryr.lib.segmentation_type_drivers.allocate_segmentation_id') - def test__create_subports_info_no_vlans(self, m_allocate_seg_id): - cls = nested_vlan_vif.NestedVlanPodVIFDriver - m_driver = mock.Mock(spec=cls) - - pod = mock.sentinel.pod - project_id = mock.sentinel.project_id - subnets = mock.sentinel.subnets - security_groups = mock.sentinel.security_groups - trunk_id = mock.sentinel.trunk_id - num_ports = 2 - in_use_vlan = set([1]) - 
port = mock.sentinel.port - - m_driver._get_in_use_vlan_ids_set.return_value = in_use_vlan - m_driver._get_port_request.return_value = port - m_allocate_seg_id.side_effect = kl_exc.SegmentationIdAllocationFailure - - port_res, subports_res = cls._create_subports_info( - m_driver, pod, project_id, subnets, security_groups, trunk_id, - num_ports, unbound=False) - - self.assertEqual(port_res, port) - self.assertEqual(subports_res, []) - - m_driver._get_in_use_vlan_ids_set.assert_called_once_with(trunk_id) - m_driver._get_port_request.assert_called_once_with( - pod, project_id, subnets, security_groups, False) - self.assertEqual(m_allocate_seg_id.call_count, 1) - - def test_get_trunk_id(self): - cls = nested_vlan_vif.NestedVlanPodVIFDriver - m_driver = mock.Mock(spec=cls) - trunk_id = mock.sentinel.trunk_id - port = {'trunk_details': {'trunk_id': trunk_id}} - - self.assertEqual(trunk_id, cls._get_trunk_id(m_driver, port)) - - def test_get_trunk_id_details_missing(self): - cls = nested_vlan_vif.NestedVlanPodVIFDriver - m_driver = mock.Mock(spec=cls) - trunk_id = mock.sentinel.trunk_id - port = {'trunk_details_missing': {'trunk_id_missing': trunk_id}} - self.assertRaises(k_exc.K8sNodeTrunkPortFailure, - cls._get_trunk_id, m_driver, port) - - def test_add_subport(self): - cls = nested_vlan_vif.NestedVlanPodVIFDriver - m_driver = mock.Mock(spec=cls) - os_net = self.useFixture(k_fix.MockNetworkClient()).client - trunk_id = mock.sentinel.trunk_id - subport = mock.sentinel.subport - vlan_id = mock.sentinel.vlan_id - m_driver._get_vlan_id.return_value = vlan_id - subport_dict = [{'segmentation_id': vlan_id, - 'port_id': subport, - 'segmentation_type': 'vlan'}] - nested_vlan_vif.DEFAULT_MAX_RETRY_COUNT = 1 - self.assertEqual(vlan_id, cls._add_subport(m_driver, trunk_id, - subport)) - m_driver._get_vlan_id.assert_called_once_with(trunk_id) - os_net.add_trunk_subports.assert_called_once_with(trunk_id, - subport_dict) - - def test_add_subport_get_vlanid_failure(self): - cls = nested_vlan_vif.NestedVlanPodVIFDriver - m_driver = mock.Mock(spec=cls) - self.useFixture(k_fix.MockNetworkClient()).client - trunk_id = mock.sentinel.trunk_id - subport = mock.sentinel.subport - m_driver._get_vlan_id.side_effect = os_exc.SDKException - nested_vlan_vif.DEFAULT_MAX_RETRY_COUNT = 1 - self.assertRaises(os_exc.SDKException, cls._add_subport, m_driver, - trunk_id, subport) - - m_driver._get_vlan_id.assert_called_once_with(trunk_id) - - def test_add_subport_with_vlan_id_conflict(self): - cls = nested_vlan_vif.NestedVlanPodVIFDriver - m_driver = mock.Mock(spec=cls) - os_net = self.useFixture(k_fix.MockNetworkClient()).client - trunk_id = mock.sentinel.trunk_id - subport = mock.sentinel.subport - vlan_id = mock.sentinel.vlan_id - m_driver._get_vlan_id.return_value = vlan_id - subport_dict = [{'segmentation_id': vlan_id, - 'port_id': subport, - 'segmentation_type': 'vlan'}] - os_net.add_trunk_subports.side_effect = os_exc.ConflictException - nested_vlan_vif.DEFAULT_MAX_RETRY_COUNT = 1 - self.assertRaises(os_exc.ConflictException, cls._add_subport, m_driver, - trunk_id, subport) - - os_net.add_trunk_subports.assert_called_once_with(trunk_id, - subport_dict) - - def test__remove_subports(self): - cls = nested_vlan_vif.NestedVlanPodVIFDriver - m_driver = mock.Mock(spec=cls) - os_net = self.useFixture(k_fix.MockNetworkClient()).client - trunk_id = mock.sentinel.trunk_id - subport_id = mock.sentinel.subport_id - subportid_dict = [{'port_id': subport_id}] - cls._remove_subports(m_driver, trunk_id, [subport_id]) - - 
os_net.delete_trunk_subports.assert_called_once_with(trunk_id, - subportid_dict) - - def test__remove_subports_duplicate(self): - cls = nested_vlan_vif.NestedVlanPodVIFDriver - m_driver = mock.Mock(spec=cls) - os_net = self.useFixture(k_fix.MockNetworkClient()).client - trunk_id = mock.sentinel.trunk_id - subport_id = mock.sentinel.subport_id - subportid_dict = [{'port_id': subport_id}] - cls._remove_subports(m_driver, trunk_id, [subport_id, subport_id]) - - os_net.delete_trunk_subports.assert_called_once_with(trunk_id, - subportid_dict) - - @mock.patch('kuryr.lib.segmentation_type_drivers.allocate_segmentation_id') - def test_get_vlan_id(self, mock_alloc_seg_id): - cls = nested_vlan_vif.NestedVlanPodVIFDriver - m_driver = mock.Mock(spec=cls) - vlanid_set = mock.sentinel.vlanid_set - trunk_id = mock.sentinel.trunk_id - m_driver._get_in_use_vlan_ids_set.return_value = vlanid_set - cls._get_vlan_id(m_driver, trunk_id) - - mock_alloc_seg_id.assert_called_once_with(vlanid_set) - - @mock.patch('kuryr.lib.segmentation_type_drivers.allocate_segmentation_id') - def test_get_vlan_id_exhausted(self, mock_alloc_seg_id): - cls = nested_vlan_vif.NestedVlanPodVIFDriver - m_driver = mock.Mock(spec=cls) - vlanid_set = mock.sentinel.vlanid_set - trunk_id = mock.sentinel.trunk_id - m_driver._get_in_use_vlan_ids_set.return_value = vlanid_set - mock_alloc_seg_id.side_effect = kl_exc.SegmentationIdAllocationFailure - self.assertRaises(kl_exc.SegmentationIdAllocationFailure, - cls._get_vlan_id, m_driver, trunk_id) - - mock_alloc_seg_id.assert_called_once_with(vlanid_set) - - @mock.patch('kuryr.lib.segmentation_type_drivers.release_segmentation_id') - def test_release_vlan_id(self, mock_rel_seg_id): - cls = nested_vlan_vif.NestedVlanPodVIFDriver - m_driver = mock.Mock(spec=cls) - vlanid = mock.sentinel.vlanid - cls._release_vlan_id(m_driver, vlanid) - - mock_rel_seg_id.assert_called_once_with(vlanid) - - def test_get_in_use_vlan_ids_set(self): - cls = nested_vlan_vif.NestedVlanPodVIFDriver - m_driver = mock.Mock(spec=cls) - os_net = self.useFixture(k_fix.MockNetworkClient()).client - - vlan_ids = set() - trunk_id = mock.sentinel.trunk_id - vlan_ids.add('100') - - port = {"segmentation_id": '100'} # Trunk.sub_ports is a list of dicts - trunk_obj = os_trunk.Trunk(sub_ports=[port]) - os_net.get_trunk.return_value = trunk_obj - self.assertEqual(vlan_ids, - cls._get_in_use_vlan_ids_set(m_driver, trunk_id)) diff --git a/kuryr_kubernetes/tests/unit/controller/drivers/test_network_policy.py b/kuryr_kubernetes/tests/unit/controller/drivers/test_network_policy.py deleted file mode 100644 index 328aedaa5..000000000 --- a/kuryr_kubernetes/tests/unit/controller/drivers/test_network_policy.py +++ /dev/null @@ -1,644 +0,0 @@ -# Copyright 2018 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
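The network-policy tests that follow assert on security-group rule bodies produced by kuryr_kubernetes.controller.drivers.utils.create_security_group_rule_body. Reconstructed from the CRD fixtures below, a stand-in with the same output shape looks roughly like this; it is a reading aid for the assertions, not the real helper, which also accepts further arguments (e.g. pods=) seen in some tests:

    def sg_rule_body(direction, port=None, protocol=None, ethertype='IPv4',
                     cidr=None,
                     description='Kuryr-Kubernetes NetPolicy SG rule'):
        rule = {
            'description': description,
            'direction': direction,      # 'ingress' or 'egress'
            'ethertype': ethertype,      # 'IPv4' or 'IPv6'
        }
        if protocol:
            rule['protocol'] = protocol  # e.g. 'tcp'
        if port:
            rule['port_range_min'] = port
            rule['port_range_max'] = port
        if cidr:
            rule['remote_ip_prefix'] = cidr
        return {'sgRule': rule}

    # e.g. the "no rules" test below expects calls equivalent to
    # sg_rule_body('ingress', ethertype='IPv4') and
    # sg_rule_body('ingress', ethertype='IPv6'), and likewise for egress.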
- -from unittest import mock - -from oslo_config import cfg - -from kuryr_kubernetes.controller.drivers import network_policy -from kuryr_kubernetes import exceptions -from kuryr_kubernetes.tests import base as test_base -from kuryr_kubernetes.tests.unit import kuryr_fixtures as k_fix -from kuryr_kubernetes import utils - -CONF = cfg.CONF - - -def get_pod_obj(): - return { - 'status': { - 'qosClass': 'BestEffort', - 'hostIP': '192.168.1.2', - }, - 'kind': 'Pod', - 'spec': { - 'schedulerName': 'default-scheduler', - 'containers': [{ - 'name': 'busybox', - 'image': 'busybox', - 'resources': {} - }], - 'nodeName': 'kuryr-devstack' - }, - 'metadata': { - 'name': 'busybox-sleep1', - 'namespace': 'default', - 'resourceVersion': '53808', - 'uid': '452176db-4a85-11e7-80bd-fa163e29dbbb', - 'annotations': { - 'openstack.org/kuryr-vif': {} - } - }} - - -def get_namespace_obj(): - return { - "kind": "Namespace", - "metadata": { - "annotations": { - "openstack.org/kuryr-namespace-label": - "{\"projetc\": \"myproject\"}", - "openstack.org/kuryr-net-crd": "ns-myproject" - }, - "labels": { - "project": "myproject" - }, - "name": "myproject"}} - - -class TestNetworkPolicyDriver(test_base.TestCase): - - def setUp(self): - super(TestNetworkPolicyDriver, self).setUp() - self._project_id = mock.sentinel.project_id - self._policy_name = 'np-test' - self._policy_uid = mock.sentinel.policy_uid - self._policy_link = mock.sentinel.policy_link - self._sg_id = mock.sentinel.sg_id - self._i_rules = [{'sgRule': {'id': ''}}] - self._e_rules = [{'sgRule': {'id': ''}}] - - self._policy = { - 'apiVersion': 'networking.k8s.io/v1', - 'kind': 'NetworkPolicy', - 'metadata': { - 'name': self._policy_name, - 'resourceVersion': '2259309', - 'generation': 1, - 'creationTimestamp': '2018-09-18T14:09:51Z', - 'namespace': 'default', - 'annotations': {}, - 'uid': self._policy_uid - }, - 'spec': { - 'egress': [{'ports': - [{'port': 5978, 'protocol': 'TCP'}], - 'to': - [{'namespaceSelector': { - 'matchLabels': { - 'project': 'myproject'}}}]}], - 'ingress': [{'ports': - [{'port': 6379, 'protocol': 'TCP'}], - 'from': - [{'namespaceSelector': { - 'matchLabels': { - 'project': 'myproject'}}}]}], - 'policyTypes': ['Ingress', 'Egress'], - 'podSelector': {}, - } - } - - self.crd = { - 'metadata': {'name': 'foobar', - 'namespace': 'default'}, - 'spec': { - 'egressSgRules': [ - {'sgRule': - {'description': 'Kuryr-Kubernetes NetPolicy SG rule', - 'direction': 'egress', - 'ethertype': 'IPv4', - 'port_range_max': 5978, - 'port_range_min': 5978, - 'protocol': 'tcp', - }}], - 'ingressSgRules': [ - {'sgRule': - {'description': 'Kuryr-Kubernetes NetPolicy SG rule', - 'direction': 'ingress', - 'ethertype': 'IPv4', - 'port_range_max': 6379, - 'port_range_min': 6379, - 'protocol': 'tcp', - }}], - 'podSelector': {}, - 'policyTypes': self._policy['spec']['policyTypes'] - }, - 'status': { - 'securityGroupId': self._sg_id, - 'securityGroupRules': [], - 'podSelector': {}, - } - } - - self.old_crd = { - 'metadata': {'name': 'np-foobar', - 'namespace': 'default'}, - 'spec': { - 'egressSgRules': [ - {'security_group_rule': - {'description': 'Kuryr-Kubernetes NetPolicy SG rule', - 'direction': 'egress', - 'ethertype': 'IPv4', - 'port_range_max': 5978, - 'port_range_min': 5978, - 'protocol': 'tcp', - 'security_group_id': self._sg_id, - 'id': mock.sentinel.id - }}], - 'ingressSgRules': [ - {'security_group_rule': - {'description': 'Kuryr-Kubernetes NetPolicy SG rule', - 'direction': 'ingress', - 'ethertype': 'IPv4', - 'port_range_max': 6379, - 'port_range_min': 
6379, - 'protocol': 'tcp', - 'security_group_id': self._sg_id, - 'id': mock.sentinel.id - }}], - 'podSelector': {}, - 'networkpolicy_spec': self._policy['spec'], - 'securityGroupId': self._sg_id, - 'securityGroupName': mock.sentinel.sg_name}} - - self.neutron = self.useFixture(k_fix.MockNetworkClient()).client - self.kubernetes = self.useFixture(k_fix.MockK8sClient()).client - self._driver = network_policy.NetworkPolicyDriver() - - @mock.patch.object(network_policy.NetworkPolicyDriver, - '_get_default_np_rules') - @mock.patch.object(network_policy.NetworkPolicyDriver, - '_get_knp_crd', return_value=False) - @mock.patch.object(network_policy.NetworkPolicyDriver, - '_create_knp_crd') - @mock.patch.object(network_policy.NetworkPolicyDriver, - '_parse_network_policy_rules') - @mock.patch.object(utils, 'get_subnet_cidr') - def test_ensure_network_policy(self, m_utils, m_parse, m_add_crd, - m_get_crd, m_get_default): - m_utils.get_subnet_cidr.return_value = mock.sentinel.cidr - m_parse.return_value = (self._i_rules, self._e_rules) - self.kubernetes.get = mock.Mock(return_value={}) - self._driver.ensure_network_policy(self._policy) - m_get_crd.assert_called_once() - m_add_crd.assert_called_once() - m_get_default.assert_called_once() - - @mock.patch('kuryr_kubernetes.controller.drivers.utils.' - 'create_security_group_rule_body') - @mock.patch.object(network_policy.NetworkPolicyDriver, - '_get_default_np_rules') - @mock.patch.object(network_policy.NetworkPolicyDriver, - '_get_knp_crd', return_value=False) - @mock.patch.object(network_policy.NetworkPolicyDriver, - '_create_knp_crd') - @mock.patch.object(network_policy.NetworkPolicyDriver, - '_parse_network_policy_rules') - @mock.patch.object(utils, 'get_subnet_cidr') - def test_ensure_network_policy_services(self, m_utils, m_parse, m_add_crd, - m_get_crd, m_get_default, - m_create_sgr): - CONF.set_override('enforce_sg_rules', False, group='octavia_defaults') - self.addCleanup(CONF.set_override, 'enforce_sg_rules', True, - group='octavia_defaults') - m_utils.get_subnet_cidr.return_value = mock.sentinel.cidr - m_parse.return_value = (self._i_rules, self._e_rules) - svcs = [ - {'metadata': {'name': 'foo', 'deletionTimestamp': 'foobar'}}, - {'metadata': {'name': 'bar'}, 'spec': {'clusterIP': 'None'}}, - {'metadata': {'name': 'baz'}, 'spec': {'clusterIP': None}}, - {'metadata': {'name': ''}, 'spec': {'clusterIP': '192.168.0.130'}}, - ] - self.kubernetes.get = mock.Mock(return_value={'items': svcs}) - self._driver.ensure_network_policy(self._policy) - m_create_sgr.assert_called_once_with('ingress', cidr='192.168.0.130', - description=mock.ANY) - m_get_crd.assert_called_once() - m_add_crd.assert_called_once() - m_get_default.assert_called_once() - - @mock.patch.object(network_policy.NetworkPolicyDriver, - '_get_default_np_rules') - @mock.patch.object(network_policy.NetworkPolicyDriver, - '_get_knp_crd') - @mock.patch.object(network_policy.NetworkPolicyDriver, - '_parse_network_policy_rules') - @mock.patch.object(utils, 'get_subnet_cidr') - def test_ensure_network_policy_with_k8s_exc(self, m_utils, m_parse, - m_get_crd, m_get_default): - m_utils.get_subnet_cidr.return_value = mock.sentinel.cidr - m_parse.return_value = (self._i_rules, self._e_rules) - m_get_crd.side_effect = exceptions.K8sClientException - self.kubernetes.get = mock.Mock(return_value={}) - self.assertRaises(exceptions.K8sClientException, - self._driver.ensure_network_policy, self._policy) - m_get_default.assert_called_once() - - @mock.patch.object(network_policy.NetworkPolicyDriver, - 
'_get_default_np_rules') - @mock.patch.object(network_policy.NetworkPolicyDriver, - '_get_knp_crd', return_value=None) - @mock.patch.object(network_policy.NetworkPolicyDriver, '_create_knp_crd') - @mock.patch.object(network_policy.NetworkPolicyDriver, - '_parse_network_policy_rules') - @mock.patch.object(utils, 'get_subnet_cidr') - def test_ensure_network_policy_error_add_crd( - self, m_utils, m_parse, m_add_crd, m_get_crd, m_get_default): - m_utils.get_subnet_cidr.return_value = mock.sentinel.cidr - m_parse.return_value = (self._i_rules, self._e_rules) - m_add_crd.side_effect = exceptions.K8sClientException - self.kubernetes.get = mock.Mock(return_value={}) - self.assertRaises(exceptions.K8sClientException, - self._driver.ensure_network_policy, self._policy) - m_get_crd.assert_called() - m_get_default.assert_called_once() - - def test_get_namespaces(self): - namespace_selector = {'namespaceSelector': { - 'matchLabels': {'project': 'myproject'}}} - self.kubernetes.get.side_effect = [{'items': [get_namespace_obj()]}] - - resp = self._driver._get_namespaces(namespace_selector) - self.assertEqual([get_namespace_obj()], resp) - self.kubernetes.get.assert_called() - - def test_get_namespaces_no_matches(self): - namespace_selector = {'matchLabels': {'test': 'test'}} - self.kubernetes.get.return_value = {'items': []} - - resp = self._driver._get_namespaces(namespace_selector) - self.assertEqual([], resp) - self.kubernetes.get.assert_called_once() - - @mock.patch('kuryr_kubernetes.controller.drivers.utils.get_services') - @mock.patch.object(network_policy.NetworkPolicyDriver, - '_get_resource_details') - @mock.patch.object(network_policy.NetworkPolicyDriver, - '_get_namespaces') - @mock.patch('kuryr_kubernetes.controller.drivers.utils.' - 'create_security_group_rule_body') - def test_parse_network_policy_rules_with_rules( - self, m_create, m_get_namespaces, - m_get_resource_details, m_get_svcs): - subnet_cidr = '10.10.0.0/24' - namespace = 'myproject' - m_get_namespaces.return_value = [get_namespace_obj()] - m_get_resource_details.return_value = subnet_cidr, namespace - self._driver._parse_network_policy_rules(self._policy) - m_get_namespaces.assert_called() - m_get_resource_details.assert_called() - m_create.assert_called() - - @mock.patch.object(network_policy.NetworkPolicyDriver, - '_get_namespaces') - @mock.patch('kuryr_kubernetes.controller.drivers.utils.' 
- 'create_security_group_rule_body') - def test_parse_network_policy_rules_with_no_rules(self, m_create, - m_get_ns): - policy = self._policy.copy() - policy['spec']['ingress'] = [{}] - policy['spec']['egress'] = [{}] - self._driver._parse_network_policy_rules(policy) - m_get_ns.assert_not_called() - calls = [mock.call('ingress', ethertype='IPv4'), - mock.call('ingress', ethertype='IPv6'), - mock.call('egress', ethertype='IPv4'), - mock.call('egress', ethertype='IPv6')] - m_create.assert_has_calls(calls) - - @mock.patch.object(network_policy.NetworkPolicyDriver, - '_create_all_pods_sg_rules') - def test_parse_network_policy_rules_with_no_pod_selector( - self, m_create_all_pods_sg_rules): - policy = self._policy.copy() - policy['spec']['ingress'] = [{'ports': - [{'port': 6379, 'protocol': 'TCP'}]}] - policy['spec']['egress'] = [{'ports': - [{'port': 6379, 'protocol': 'TCP'}]}] - self._driver._parse_network_policy_rules(policy) - m_create_all_pods_sg_rules.assert_called() - - @mock.patch.object(network_policy.NetworkPolicyDriver, - '_create_sg_rule_on_number_port') - @mock.patch.object(network_policy.NetworkPolicyDriver, - '_get_namespaces') - def test_parse_network_policy_rules_with_ipblock(self, - m_get_namespaces, - m_create_sg_rule): - policy = self._policy.copy() - policy['spec']['ingress'] = [{'from': - [{'ipBlock': - {'cidr': '172.17.0.0/16', - 'except': ['172.17.1.0/24']}}], - 'ports': [{'port': 6379, - 'protocol': 'TCP'}]}] - policy['spec']['egress'] = [{'ports': [{'port': 5978, 'protocol': - 'TCP'}], - 'to': [{'ipBlock': - {'cidr': '10.0.0.0/24'}}]}] - self._driver._parse_network_policy_rules(policy) - m_create_sg_rule.assert_called() - - @mock.patch('kuryr_kubernetes.controller.drivers.utils.get_services') - @mock.patch.object(network_policy.NetworkPolicyDriver, - '_get_resource_details') - @mock.patch.object(network_policy.NetworkPolicyDriver, - '_get_namespaces') - @mock.patch('kuryr_kubernetes.controller.drivers.utils.' 
- 'create_security_group_rule_body') - def test_parse_network_policy_rules_with_no_ports( - self, m_create, m_get_namespaces, m_get_resource_details, - m_get_svcs): - subnet_cidr = '10.10.0.0/24' - namespace = 'myproject' - m_get_namespaces.return_value = [get_namespace_obj()] - m_get_resource_details.return_value = subnet_cidr, namespace - policy = self._policy.copy() - selectors = {'namespaceSelector': { - 'matchLabels': { - 'project': 'myproject'}}} - policy['spec']['egress'] = [{'to': [selectors]}] - policy['spec']['ingress'] = [{'from': [selectors]}] - self._driver._parse_network_policy_rules(policy) - m_get_namespaces.assert_called() - m_get_resource_details.assert_called() - calls = [mock.call('ingress', cidr=subnet_cidr, namespace=namespace), - mock.call('egress', cidr=subnet_cidr, namespace=namespace)] - m_create.assert_has_calls(calls) - - @mock.patch.object(network_policy.NetworkPolicyDriver, 'namespaced_pods') - def test_affected_pods(self, m_namespaced): - self._driver.affected_pods(self._policy) - m_namespaced.assert_called_once_with(self._policy) - self.kubernetes.assert_not_called() - - @mock.patch.object(network_policy.NetworkPolicyDriver, 'namespaced_pods') - def test_affected_pods_with_podselector(self, m_namespaced): - self.kubernetes.get.return_value = {'items': []} - selector = {'matchLabels': {'test': 'test'}} - self._driver.affected_pods(self._policy, selector) - m_namespaced.assert_not_called() - - @mock.patch.object(network_policy.NetworkPolicyDriver, 'namespaced_pods') - def test_affected_pods_with_empty_podselector(self, m_namespaced): - m_namespaced.return_value = [] - pod_selector = {} - self._driver.affected_pods(self._policy, pod_selector) - m_namespaced.assert_called_with(self._policy) - - def test_namespaced_pods(self): - self.kubernetes.get.return_value = {'items': []} - - resp = self._driver.namespaced_pods(self._policy) - self.assertEqual([], resp) - - @mock.patch.object(network_policy.NetworkPolicyDriver, - '_del_knp_crd', return_value=False) - def test_release_network_policy(self, m_del_crd): - self._driver.release_network_policy(self.crd) - m_del_crd.assert_called_once_with(self.crd) - - @mock.patch.object(network_policy.NetworkPolicyDriver, - '_create_sg_rules_with_container_ports') - @mock.patch('kuryr_kubernetes.controller.drivers.utils.get_ports') - @mock.patch('kuryr_kubernetes.controller.drivers.utils.get_pods') - def test__create_sg_rule_body_on_text_port_ingress(self, - m_get_pods, - m_get_ports, - m_create_sgr_cont): - pod = mock.sentinel.pod - port = mock.sentinel.port - container_ports = mock.sentinel.ports - resources = [mock.sentinel.resource] - crd_rules = mock.sentinel.crd_rules - pod_selector = {} - namespace = mock.sentinel.namespace - direction = 'ingress' - - m_get_pods.return_value = {'items': [pod]} - m_get_ports.return_value = container_ports - - self._driver._create_sg_rule_body_on_text_port(direction, - port, - resources, - crd_rules, - pod_selector, - namespace) - - m_get_pods.assert_called_with(pod_selector, namespace) - m_get_ports.assert_called_with(pod, port) - - @mock.patch('kuryr_kubernetes.controller.drivers.utils.' 
- 'create_security_group_rule_body') - @mock.patch.object(network_policy.NetworkPolicyDriver, - '_create_sg_rules_with_container_ports') - @mock.patch('kuryr_kubernetes.controller.drivers.utils.get_ports') - @mock.patch('kuryr_kubernetes.controller.drivers.utils.get_pods') - def test__create_sg_rule_body_on_text_port_ingress_all(self, - m_get_pods, - m_get_ports, - m_create_sgr_cont, - m_create_sgr): - pod = mock.sentinel.pod - port = mock.sentinel.port - container_ports = mock.sentinel.ports - resources = [mock.sentinel.resource] - crd_rules = mock.sentinel.crd_rules - pod_selector = {} - namespace = mock.sentinel.namespace - direction = 'ingress' - cidrs = ['0.0.0.0/0'] - - m_get_pods.return_value = {'items': [pod]} - m_get_ports.return_value = container_ports - - self._driver._create_sg_rule_body_on_text_port(direction, - port, - resources, - crd_rules, - pod_selector, - namespace, - allowed_cidrs=cidrs) - - m_get_pods.assert_called_with(pod_selector, namespace) - m_get_ports.assert_called_with(pod, port) - m_create_sgr.assert_not_called() - - @mock.patch('kuryr_kubernetes.controller.drivers.utils.' - 'create_security_group_rule_body') - @mock.patch('kuryr_kubernetes.controller.drivers.utils.get_ports') - @mock.patch('kuryr_kubernetes.controller.drivers.utils.get_pods') - def test__create_sg_rule_body_on_text_port_ingress_match(self, - m_get_pods, - m_get_ports, - m_create_sgr): - - def _create_sgr_cont(container_ports, allow_all, resource, - matched_pods, crd_rules, direction, port, - pod_selector=None, policy_namespace=None): - matched_pods[container_ports[0][1]] = 'foo' - - pod = mock.sentinel.pod - port = {'protocol': 'TCP', 'port': 22} - container_ports = [("pod", mock.sentinel.container_port)] - resources = [mock.sentinel.resource] - crd_rules = [] - pod_selector = {} - namespace = mock.sentinel.namespace - direction = 'ingress' - cidrs = ['0.0.0.0/0'] - self._driver._create_sg_rules_with_container_ports = _create_sgr_cont - - m_get_pods.return_value = {'items': [pod]} - m_get_ports.return_value = container_ports - - self._driver._create_sg_rule_body_on_text_port(direction, - port, - resources, - crd_rules, - pod_selector, - namespace, - allowed_cidrs=cidrs) - - m_get_pods.assert_called_with(pod_selector, namespace) - m_get_ports.assert_called_with(pod, port) - - m_create_sgr.assert_called_once_with(direction, container_ports[0][1], - protocol=port['protocol'], - cidr=cidrs[0], - pods='foo') - - @mock.patch.object(network_policy.NetworkPolicyDriver, - '_create_sg_rules_with_container_ports') - @mock.patch('kuryr_kubernetes.controller.drivers.utils.get_ports') - @mock.patch('kuryr_kubernetes.controller.drivers.utils.get_pods') - def test__create_sg_rule_body_on_text_port_egress(self, - m_get_pods, - m_get_ports, - m_create_sgr_cont): - pod = mock.sentinel.pod - port = mock.sentinel.port - container_ports = mock.sentinel.ports - resources = [{'spec': 'foo'}] - crd_rules = mock.sentinel.crd_rules - pod_selector = {} - namespace = mock.sentinel.namespace - direction = 'egress' - - m_get_pods.return_value = {'items': [pod]} - m_get_ports.return_value = container_ports - - self._driver._create_sg_rule_body_on_text_port(direction, - port, - resources, - crd_rules, - pod_selector, - namespace) - - m_get_ports.assert_called_with(resources[0], port) - - @mock.patch.object(network_policy.NetworkPolicyDriver, - '_create_sg_rules_with_container_ports') - @mock.patch('kuryr_kubernetes.controller.drivers.utils.get_ports') - def test__create_sg_rule_body_on_text_port_egress_all(self, - 
m_get_ports, - m_create_sgr_cont): - port = {'protocol': 'TCP', 'port': 22} - container_ports = mock.sentinel.ports - resources = [{'spec': 'foo'}] - crd_rules = [] - pod_selector = {} - namespace = mock.sentinel.namespace - direction = 'egress' - cidrs = ['0.0.0.0/0'] - - m_get_ports.return_value = container_ports - - self._driver._create_sg_rule_body_on_text_port(direction, - port, - resources, - crd_rules, - pod_selector, - namespace, - allowed_cidrs=cidrs) - - m_get_ports.assert_called_with(resources[0], port) - self.assertEqual(len(crd_rules), 0) - - @mock.patch('kuryr_kubernetes.utils.get_subnet_cidr') - @mock.patch('kuryr_kubernetes.controller.drivers.utils.' - 'create_security_group_rule_body') - @mock.patch('kuryr_kubernetes.controller.drivers.utils.get_ports') - @mock.patch('kuryr_kubernetes.controller.drivers.utils.get_pods') - def test__create_sg_rule_body_on_text_port_egress_match(self, - m_get_pods, - m_get_ports, - m_create_sgr, - m_get_subnet_cidr): - - def _create_sgr_cont(container_ports, allow_all, resource, - matched_pods, crd_rules, sg_id, direction, port, - pod_selector=None, policy_namespace=None): - matched_pods[container_ports[0][1]] = 'foo' - - pod = mock.sentinel.pod - port = {'protocol': 'TCP', 'port': 22} - container_ports = [("pod", mock.sentinel.container_port)] - resources = [{'spec': 'foo'}] - crd_rules = [] - pod_selector = {} - namespace = mock.sentinel.namespace - direction = 'egress' - cidrs = ['0.0.0.0/0'] - self._driver._create_sg_rules_with_container_ports = _create_sgr_cont - m_get_subnet_cidr.return_value = '10.0.0.128/26' - m_create_sgr.side_effect = [mock.sentinel.sgr1, mock.sentinel.sgr2, - mock.sentinel.sgr3] - - m_get_pods.return_value = {'items': [pod]} - m_get_ports.return_value = container_ports - - self._driver._create_sg_rule_body_on_text_port(direction, - port, - resources, - crd_rules, - pod_selector, - namespace, - allowed_cidrs=cidrs) - - m_get_ports.assert_called_with(resources[0], port) - m_create_sgr.assert_called_once_with(direction, container_ports[0][1], - protocol=port['protocol'], - cidr=cidrs[0], pods='foo') - - def test__create_all_pods_sg_rules(self): - port = {'protocol': 'TCP', 'port': 22} - direction = 'ingress' - rules = [] - - self._driver._create_all_pods_sg_rules(port, direction, rules, '', - None) - self.assertEqual(len(rules), 2) - - def test__create_default_sg_rule(self): - for direction in ('ingress', 'egress'): - rules = [] - - self._driver._create_default_sg_rule(direction, rules) - self.assertEqual(len(rules), 2) - self.assertListEqual(rules, [{'sgRule': { - 'ethertype': e, - 'direction': direction, - 'description': 'Kuryr-Kubernetes NetPolicy SG rule' - }} for e in ('IPv4', 'IPv6')]) diff --git a/kuryr_kubernetes/tests/unit/controller/drivers/test_network_policy_security_groups.py b/kuryr_kubernetes/tests/unit/controller/drivers/test_network_policy_security_groups.py deleted file mode 100644 index b41bc4fd1..000000000 --- a/kuryr_kubernetes/tests/unit/controller/drivers/test_network_policy_security_groups.py +++ /dev/null @@ -1,616 +0,0 @@ -# Copyright 2018 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from unittest import mock - -from kuryr_kubernetes.controller.drivers import network_policy_security_groups -from kuryr_kubernetes.tests import base as test_base -from kuryr_kubernetes.tests.unit import kuryr_fixtures as k_fix - -from oslo_config import cfg - - -def get_no_match_crd_namespace_obj(): - return { - "kind": "Namespace", - "metadata": { - "annotations": { - "openstack.org/kuryr-namespace-label": '{"name": "dev"}', - "openstack.org/kuryr-net-crd": "ns-dev" - }, - "labels": {"name": "prod"}, - "name": "prod"}} - - -def get_match_crd_namespace_obj(): - return { - "kind": "Namespace", - "metadata": { - "annotations": { - "openstack.org/kuryr-namespace-label": '{"name": "dev"}', - "openstack.org/kuryr-net-crd": "ns-dev" - }, - "labels": { - "name": "dev" - }, - "name": "dev"}} - - -def get_match_crd_pod_obj(): - return { - 'kind': 'Pod', - 'metadata': { - 'name': mock.sentinel.pod_name, - 'namespace': 'dev', - 'labels': { - 'tier': 'backend'}, - 'annotations': { - 'openstack.org/kuryr-pod-label': '{"tier": "backend"}'}}, - 'status': {'podIP': mock.sentinel.podIP}} - - -def get_sg_rule(): - pod_ip = get_match_crd_pod_obj()['status'].get('podIP') - return { - "namespace": 'dev', - "sgRule": { - "description": "Kuryr-Kubernetes NetPolicy SG rule", - "direction": "ingress", - "ethertype": "IPv4", - "id": 'f15ff50a-e8a4-4872-81bf-a04cbb8cb388', - "port_range_max": 6379, - "port_range_min": 6379, - "protocol": "tcp", - "remote_ip_prefix": pod_ip, - "security_group_id": '36923e76-026c-422b-8dfd-7292e7c88228'}} - - -def get_matched_crd_obj(): - return { - "kind": "KuryrNetworkPolicy", - "metadata": {"name": "np-test-network-policy", - "namespace": "default"}, - "spec": { - "egressSgRules": [], - "ingressSgRules": [get_sg_rule()], - "networkpolicy_spec": { - "ingress": [ - {"from": [ - {"namespaceSelector": { - "matchLabels": {"name": "dev"}}}], - "ports": [ - {"port": 6379, - "protocol": "TCP"}]}], - "podSelector": {"matchLabels": {"app": "demo"}}, - "policyTypes": ["Ingress"]}, - "podSelector": {"matchLabels": {"app": "demo"}}, - "securityGroupId": '36923e76-026c-422b-8dfd-7292e7c88228'}} - - -def get_crd_obj_no_match(): - return { - "kind": "KuryrNetworkPolicy", - "metadata": {"name": "np-test-network-policy", - "namespace": "default"}, - "spec": { - "egressSgRules": [], - "ingressSgRules": [], - "networkpolicy_spec": { - "ingress": [ - {"from": [ - {"namespaceSelector": { - "matchLabels": {"name": "dev"}}}], - "ports": [ - {"port": 6379, - "protocol": "TCP"}]}], - "podSelector": {"matchLabels": {"app": "demo"}}, - "policyTypes": ["Ingress"]}, - "podSelector": {"matchLabels": {"app": "demo"}}, - "securityGroupId": '36923e76-026c-422b-8dfd-7292e7c88228'}} - - -def get_crd_obj_with_all_selectors(): - return { - "kind": "KuryrNetworkPolicy", - "metadata": {"name": "np-test-network-policy", - "namespace": "default"}, - "spec": { - "egressSgRules": [], - "ingressSgRules": [], - "networkpolicy_spec": { - "ingress": [ - {"from": [ - {"namespaceSelector": { - "matchLabels": {"name": "dev"}}, - "podSelector": { - "matchLabels": {"tier": "backend"}}}], - "ports": [ - {"port": 6379, - 
"protocol": "TCP"}]}], - "podSelector": {"matchLabels": {"app": "demo"}}, - "policyTypes": ["Ingress"]}, - "podSelector": {"matchLabels": {"app": "demo"}}, - "securityGroupId": '36923e76-026c-422b-8dfd-7292e7c88228'}} - - -class TestNetworkPolicySecurityGroupsDriver(test_base.TestCase): - - def setUp(self): - super(TestNetworkPolicySecurityGroupsDriver, self).setUp() - self._project_id = mock.sentinel.project_id - self._sg_id = mock.sentinel.sg_id - self._sg_id2 = mock.sentinel._sg_id2 - self._namespace = 'default' - self._crd = { - 'metadata': {'name': mock.sentinel.name}, - 'spec': { - 'egressSgRules': [ - {'sgRule': - {'description': 'Kuryr-Kubernetes NetPolicy SG rule', - 'direction': 'egress', - 'ethertype': 'IPv4', - 'port_range_max': 5978, - 'port_range_min': 5978, - 'protocol': 'tcp', - 'security_group_id': self._sg_id, - 'id': mock.sentinel.id - }}], - 'ingressSgRules': [ - {'sgRule': - {'description': 'Kuryr-Kubernetes NetPolicy SG rule', - 'direction': 'ingress', - 'ethertype': 'IPv4', - 'port_range_max': 6379, - 'port_range_min': 6379, - 'protocol': 'tcp', - 'security_group_id': self._sg_id, - 'id': mock.sentinel.id - }}], - 'podSelector': { - 'matchExpressions': [ - { - 'key': 'environment', - 'operator': 'In', - 'values': [ - 'production']}], - 'matchLabels': { - 'run': 'demo' - }}}, - 'status': { - 'securityGroupId': self._sg_id, - }, - } - - self._crd2 = { - 'metadata': {'name': mock.sentinel.name3}, - 'spec': { - 'ingressSgRules': [ - {'sgRule': - {'description': 'Kuryr-Kubernetes NetPolicy SG rule', - 'direction': 'ingress', - 'ethertype': 'IPv4', - 'port_range_max': 8080, - 'port_range_min': 8080, - 'protocol': 'tcp', - 'security_group_id': self._sg_id2, - 'id': mock.sentinel.id - }}], - 'podSelector': {}}, - 'status': { - 'securityGroupId': self._sg_id2, - 'securityGroupName': mock.sentinel.sg_name}} - - self._crds = [self._crd] - - self._multiple_crds = [self._crd, self._crd2] - - self._pod = { - 'apiVersion': 'v1', - 'kind': 'Pod', - 'metadata': { - 'name': mock.sentinel.pod_name, - 'namespace': self._namespace, - 'labels': { - 'run': 'demo', - 'environment': 'production'}}, - 'spec': { - 'containers': [{ - 'image': 'quay.io/kuryr/demo', - 'imagePullPolicy': 'Always', - 'name': mock.sentinel.pod_name - }] - }} - - self._pod2 = { - 'apiVersion': 'v1', - 'kind': 'Pod', - 'metadata': { - 'name': mock.sentinel.pod_name, - 'namespace': self._namespace, - 'labels': { - 'run': 'demo', - 'environment': 'development'}, - 'annotations': { - 'openstack.org/kuryr-pod-label': '{' - '"run": "demo","environment": "development"}'}}, - 'spec': { - 'containers': [{ - 'image': 'quay.io/kuryr/demo', - 'imagePullPolicy': 'Always', - 'name': mock.sentinel.pod_name - }] - }} - - self._pod_without_label = { - 'apiVersion': 'v1', - 'kind': 'Pod', - 'metadata': { - 'name': mock.sentinel.pod_name, - 'namespace': self._namespace}, - 'spec': { - 'containers': [{ - 'image': 'quay.io/kuryr/demo', - 'imagePullPolicy': 'Always', - 'name': mock.sentinel.pod_name - }] - }} - - self.kubernetes = self.useFixture(k_fix.MockK8sClient()).client - self._driver = ( - network_policy_security_groups.NetworkPolicySecurityGroupsDriver()) - - self._pod_ip = mock.sentinel.pod_ip - self._pod_dev_namespace = { - 'apiVersion': 'v1', - 'kind': 'Pod', - 'metadata': { - 'name': mock.sentinel.pod_name, - 'namespace': 'dev', - 'labels': { - 'tier': 'backend'}, - 'annotations': { - 'openstack.org/kuryr-pod-label': '{"tier": "backend"}'}}, - 'spec': { - 'containers': [{ - 'image': 'quay.io/kuryr/demo', - 'imagePullPolicy': 
'Always', - 'name': mock.sentinel.pod_name - }]}, - 'status': {'podIP': self._pod_ip}} - - self._crd_sg_id = mock.sentinel.crd_sg_id - self._sg_rule_body = { - 'sgRule': { - 'direction': 'ingress', - 'protocol': 'tcp', - 'description': 'Kuryr-Kubernetes NetPolicy SG rule', - 'ethertype': 'IPv4', - 'port_range_max': 6379, - 'security_group_id': self._crd_sg_id, - 'port_range_min': 6379, - 'remote_ip_prefix': self._pod_ip}} - - self._new_rule_id = mock.sentinel.id - self._crd_with_rule = { - "apiVersion": "openstack.org/v1", - "kind": "KuryrNetworkPolicy", - "metadata": {"name": "np-test-network-policy", - "namespace": "default"}, - "spec": { - "egressSgRules": [], - "ingressSgRules": [{ - "sgRule": { - "description": "Kuryr-Kubernetes NetPolicy SG rule", - "direction": "ingress", - "ethertype": "IPv4", - "id": self._new_rule_id, - "port_range_max": 6379, - "port_range_min": 6379, - "protocol": "tcp", - "remote_ip_prefix": self._pod_ip, - "security_group_id": self._crd_sg_id}}], - "networkpolicy_spec": { - "ingress": [ - {"from": [ - {"namespaceSelector": { - "matchLabels": {"name": "dev"}}, - "podSelector": { - "matchLabels": {"tier": "backend"}}}], - "ports": [ - {"port": 6379, - "protocol": "TCP"}]}], - "podSelector": {"matchLabels": {"app": "demo"}}, - "policyTypes": ["Ingress"]}, - "podSelector": {"matchLabels": {"app": "demo"}}, - "securityGroupId": self._crd_sg_id}} - - @mock.patch('kuryr_kubernetes.controller.drivers.utils.' - 'match_selector', return_value=True) - @mock.patch('kuryr_kubernetes.controller.drivers.utils.get_pod_ip') - def test__create_sg_rules(self, m_get_pod_ip, - m_match_selector): - sgr_id = mock.sentinel.sgr_id - crd = get_crd_obj_with_all_selectors() - pod = get_match_crd_pod_obj() - m_get_pod_ip.return_value = pod['status'].get('podIP') - matched = False - new_sg_rule = self._sg_rule_body - - policy = crd['spec']['networkpolicy_spec'] - rule_list = policy.get('ingress', None) - pod_ns = pod['metadata']['namespace'] - - for rule_block in rule_list: - for rule in rule_block.get('from', []): - pod_selector = rule.get('podSelector') - matched = network_policy_security_groups._create_sg_rules( - crd, pod, pod_selector, rule_block, 'ingress', matched) - new_sg_rule['namespace'] = pod_ns - new_sg_rule['sgRule']['id'] = sgr_id - m_match_selector.assert_called_once_with( - pod_selector, pod['metadata']['labels']) - m_get_pod_ip.assert_called_once_with(pod) - self.assertEqual(matched, True) - - @mock.patch('kuryr_kubernetes.controller.drivers.utils.' - 'get_pod_ip') - @mock.patch('kuryr_kubernetes.controller.drivers.utils.' - 'match_selector', return_value=False) - def test__create_sg_rules_no_match(self, m_match_selector, m_get_pod_ip): - crd = get_crd_obj_with_all_selectors() - pod = self._pod2 - - policy = crd['spec']['networkpolicy_spec'] - rule_list = policy.get('ingress', None) - - for rule_block in rule_list: - for rule in rule_block.get('from', []): - pod_selector = rule.get('podSelector') - matched = network_policy_security_groups._create_sg_rules( - crd, pod, pod_selector, rule_block, 'ingress', False) - self.assertEqual(matched, False) - - @mock.patch('kuryr_kubernetes.controller.drivers.utils.bump_networkpolicy') - @mock.patch('kuryr_kubernetes.controller.drivers.utils.' 
- 'get_kuryrnetworkpolicy_crds') - @mock.patch('kuryr_kubernetes.controller.drivers.utils.get_pod_ip') - def test_delete_sg_rules(self, m_get_pod_ip, m_get_knp_crds, m_bump): - crd = self._crd_with_rule - m_get_pod_ip.return_value = self._pod_ip - m_get_knp_crds.return_value = [crd] - pod = self._pod_dev_namespace - - self._driver.delete_sg_rules(pod) - - m_get_knp_crds.assert_called_once() - m_get_pod_ip.assert_called_once_with(pod) - m_bump.assert_called_once() - - @mock.patch('kuryr_kubernetes.config.CONF') - @mock.patch('kuryr_kubernetes.controller.drivers.utils.' - 'get_kuryrnetworkpolicy_crds') - def test_get_sgs_for_pod_without_label(self, m_get_crds, m_cfg): - m_get_crds.return_value = self._crds - sg_list = [str(mock.sentinel.sg_id)] - m_cfg.neutron_defaults.pod_security_groups = sg_list - - sgs = self._driver.get_security_groups(self._pod_without_label, - self._project_id) - - m_get_crds.assert_called_once_with(namespace=self._namespace) - self.assertEqual(sg_list, sgs) - - @mock.patch('kuryr_kubernetes.controller.drivers.utils.' - 'match_expressions') - @mock.patch('kuryr_kubernetes.controller.drivers.utils.' - 'match_labels') - @mock.patch('kuryr_kubernetes.controller.drivers.utils.' - 'get_kuryrnetworkpolicy_crds') - def test_get_sgs_for_pod_with_label(self, m_get_crds, m_match_labels, - m_match_expressions): - m_get_crds.return_value = self._crds - m_match_expressions.return_value = True - m_match_labels.return_value = True - pod_labels = self._pod['metadata']['labels'] - resp = self._driver.get_security_groups(self._pod, self._project_id) - - m_get_crds.assert_called_once_with(namespace=self._namespace) - m_match_expressions.assert_called_once_with( - self._crd['spec']['podSelector']['matchExpressions'], pod_labels) - m_match_labels.assert_called_once_with( - self._crd['spec']['podSelector']['matchLabels'], pod_labels) - self.assertEqual(resp, [self._sg_id]) - - @mock.patch('kuryr_kubernetes.config.CONF') - @mock.patch('kuryr_kubernetes.controller.drivers.utils.' - 'match_expressions') - @mock.patch('kuryr_kubernetes.controller.drivers.utils.' - 'match_labels') - @mock.patch('kuryr_kubernetes.controller.drivers.utils.' - 'get_kuryrnetworkpolicy_crds') - def test_get_sgs_for_pod_with_label_no_match(self, m_get_crds, - m_match_labels, - m_match_expressions, m_cfg): - m_get_crds.return_value = self._crds - m_match_expressions.return_value = False - m_match_labels.return_value = True - sg_list = [mock.sentinel.sg_id] - m_cfg.neutron_defaults.pod_security_groups = sg_list - pod_labels = self._pod2['metadata']['labels'] - - sgs = self._driver.get_security_groups(self._pod2, self._project_id) - - m_get_crds.assert_called_once_with(namespace=self._namespace) - m_match_expressions.assert_called_once_with( - self._crd['spec']['podSelector']['matchExpressions'], pod_labels) - m_match_labels.assert_called_once_with( - self._crd['spec']['podSelector']['matchLabels'], pod_labels) - self.assertEqual(sg_list, sgs) - - @mock.patch('kuryr_kubernetes.controller.drivers.utils.' - 'get_kuryrnetworkpolicy_crds') - def test_get_sgs_no_crds(self, m_get_crds): - m_get_crds.return_value = [] - cfg.CONF.set_override('pod_security_groups', [], - group='neutron_defaults') - - self.assertRaises(cfg.RequiredOptError, - self._driver.get_security_groups, self._pod, - self._project_id) - m_get_crds.assert_called_with(namespace=self._namespace) - - @mock.patch('kuryr_kubernetes.controller.drivers.utils.' - 'match_expressions') - @mock.patch('kuryr_kubernetes.controller.drivers.utils.' 
- 'match_labels') - @mock.patch('kuryr_kubernetes.controller.drivers.utils.' - 'get_kuryrnetworkpolicy_crds') - def test_get_sgs_multiple_crds(self, m_get_crds, m_match_labels, - m_match_expressions): - m_match_expressions.return_value = True - m_match_labels.return_value = True - m_get_crds.return_value = self._multiple_crds - - resp = self._driver.get_security_groups(self._pod, self._project_id) - - m_get_crds.assert_called_once_with(namespace=self._namespace) - self.assertEqual([self._sg_id, self._sg_id2], resp) - - @mock.patch('kuryr_kubernetes.controller.drivers.utils.bump_networkpolicy') - @mock.patch('kuryr_kubernetes.controller.drivers.utils.' - 'get_kuryrnetworkpolicy_crds') - def test_delete_namespace_sg_rule(self, m_get_knp_crd, m_bump): - cls = network_policy_security_groups.NetworkPolicySecurityGroupsDriver - m_driver = mock.MagicMock(spec=cls) - - m_get_knp_crd.return_value = [get_matched_crd_obj()] - - cls.delete_namespace_sg_rules(m_driver, get_match_crd_namespace_obj()) - - m_get_knp_crd.assert_called_once() - m_bump.assert_called_once() - - @mock.patch('kuryr_kubernetes.controller.drivers.utils.bump_networkpolicy') - @mock.patch('kuryr_kubernetes.controller.drivers.utils.' - 'delete_security_group_rule') - @mock.patch('kuryr_kubernetes.controller.drivers.utils.' - 'get_kuryrnetworkpolicy_crds') - def test_delete_namespace_sg_rule_no_match( - self, m_get_knp_crd, m_delete_sg_rule, m_bump): - cls = network_policy_security_groups.NetworkPolicySecurityGroupsDriver - m_driver = mock.MagicMock(spec=cls) - - m_get_knp_crd.return_value = [get_matched_crd_obj()] - - cls.delete_namespace_sg_rules(m_driver, - get_no_match_crd_namespace_obj()) - - m_get_knp_crd.assert_called_once() - m_delete_sg_rule.assert_not_called() - m_bump.assert_not_called() - - @mock.patch('kuryr_kubernetes.controller.drivers.utils.get_pods') - @mock.patch('kuryr_kubernetes.controller.drivers.utils.match_selector') - def test__parse_rules(self, m_match_selector, m_get_pods): - crd = get_crd_obj_no_match() - policy = crd['spec']['networkpolicy_spec'] - i_rule = policy.get('ingress')[0] - ns_selector = i_rule['from'][0].get('namespaceSelector') - ns = get_match_crd_namespace_obj() - - m_match_selector.return_value = True - - matched = network_policy_security_groups._parse_rules( - 'ingress', crd, policy, namespace=ns) - - m_match_selector.assert_called_once_with(ns_selector, - ns['metadata']['labels']) - - self.assertEqual(matched, True) - - @mock.patch('kuryr_kubernetes.controller.drivers.utils.match_selector') - def test__parse_rules_no_match(self, m_match_selector): - crd = get_crd_obj_no_match() - policy = crd['spec']['networkpolicy_spec'] - i_rule = policy.get('ingress')[0] - ns_selector = i_rule['from'][0].get('namespaceSelector') - ns = get_no_match_crd_namespace_obj() - - m_match_selector.return_value = False - - matched = network_policy_security_groups._parse_rules( - 'ingress', crd, policy, namespace=ns) - - m_match_selector.assert_called_once_with(ns_selector, - ns['metadata']['labels']) - - self.assertEqual(matched, False) - - @mock.patch('kuryr_kubernetes.controller.drivers.utils.get_pods') - @mock.patch('kuryr_kubernetes.controller.drivers.utils.get_pod_ip') - @mock.patch('kuryr_kubernetes.controller.drivers.utils.match_selector') - def test__parse_rules_all_selectors(self, m_match_selector, m_get_pod_ip, - m_get_pods): - crd = get_crd_obj_with_all_selectors() - policy = crd['spec']['networkpolicy_spec'] - i_rule = policy.get('ingress')[0] - ns_selector = i_rule['from'][0].get('namespaceSelector') 
- pod_selector = i_rule['from'][0].get('podSelector') - ns = get_match_crd_namespace_obj() - pod = get_match_crd_pod_obj() - - m_match_selector.return_value = True - m_get_pod_ip.return_value = pod['status']['podIP'] - m_get_pods.return_value = {"items": [pod]} - - matched = network_policy_security_groups._parse_rules( - 'ingress', crd, policy, namespace=ns) - - m_match_selector.assert_called_once_with(ns_selector, - ns['metadata']['labels']) - m_get_pods.assert_called_once_with(pod_selector, - ns['metadata']['name']) - m_get_pod_ip.assert_called_once_with(pod) - - self.assertEqual(matched, True) - - @mock.patch('kuryr_kubernetes.controller.drivers.' - 'network_policy_security_groups._parse_selectors_on_pod') - def test__parse_rules_multiple_selectors(self, m_parse_selectors_on_pod): - no_selector = None - matched_selector = True - pod = mock.sentinel.pod - m_parse_selectors_on_pod.side_effect = [matched_selector]*2 - - direction = "ingress" - pod_selector = mock.sentinel.pod_selector - namespace_selector = mock.sentinel.namespace_selector - rule_block = {'from': [{'podSelector': pod_selector}, - {'namespaceSelector': namespace_selector}]} - policy = { - "ingress": [rule_block], - "policyTypes": ["Ingress"] - } - crd = {"spec": {"ingressSgRules": []}} - - matched = network_policy_security_groups._parse_rules( - direction, crd, policy, pod=pod) - - calls = [mock.call(crd, pod, pod_selector, no_selector, rule_block, - direction, not matched_selector), - mock.call(crd, pod, no_selector, namespace_selector, - rule_block, direction, matched_selector)] - m_parse_selectors_on_pod.assert_has_calls(calls) - - self.assertEqual(matched, matched_selector) diff --git a/kuryr_kubernetes/tests/unit/controller/drivers/test_neutron_vif.py b/kuryr_kubernetes/tests/unit/controller/drivers/test_neutron_vif.py deleted file mode 100644 index 81905cdd5..000000000 --- a/kuryr_kubernetes/tests/unit/controller/drivers/test_neutron_vif.py +++ /dev/null @@ -1,346 +0,0 @@ -# Copyright (c) 2016 Mirantis, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
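
# For context on the network-policy security-group tests deleted above: they
# exercise how a matched pod is expanded into a per-pod Neutron SG rule stored
# on the KuryrNetworkPolicy CRD. A minimal sketch of that translation, using
# only the dict shapes visible in the fixtures; create_sg_rule_body is a
# hypothetical helper, not the driver's actual function.

def create_sg_rule_body(sg_id, pod_ip, namespace, port=6379, protocol='tcp'):
    """Build the sgRule dict recorded per matched pod (shape as in get_sg_rule)."""
    return {
        'namespace': namespace,
        'sgRule': {
            'description': 'Kuryr-Kubernetes NetPolicy SG rule',
            'direction': 'ingress',
            'ethertype': 'IPv4',
            'port_range_min': port,
            'port_range_max': port,
            'protocol': protocol,
            'remote_ip_prefix': pod_ip,   # scope the rule to the matched pod's IP
            'security_group_id': sg_id,
        },
    }
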
-import eventlet -from unittest import mock - -from kuryr.lib import constants as kl_const -from openstack import exceptions as os_exc -from openstack.network.v2 import port as os_port -from oslo_config import cfg as oslo_cfg - -from kuryr_kubernetes import constants -from kuryr_kubernetes.controller.drivers import neutron_vif -from kuryr_kubernetes.controller.drivers import utils -from kuryr_kubernetes import exceptions as k_exc -from kuryr_kubernetes.tests import base as test_base -from kuryr_kubernetes.tests.unit import kuryr_fixtures as k_fix - - -class NeutronPodVIFDriver(test_base.TestCase): - - @mock.patch('kuryr_kubernetes.os_vif_util.neutron_to_osvif_vif') - def test_request_vif(self, m_to_vif): - cls = neutron_vif.NeutronPodVIFDriver - cls._tag_on_creation = True - m_driver = mock.Mock(spec=cls) - os_net = self.useFixture(k_fix.MockNetworkClient()).client - - pod = mock.sentinel.pod - project_id = mock.sentinel.project_id - subnets = mock.sentinel.subnets - security_groups = mock.sentinel.security_groups - port = os_port.Port(id='910b1183-1f4a-450a-a298-0e80ad06ec8b') - port_request = {'fake_req': mock.sentinel.port_request} - vif = mock.sentinel.vif - vif_plugin = mock.sentinel.vif_plugin - port.binding_vif_type = vif_plugin - - m_to_vif.return_value = vif - m_driver._get_port_request.return_value = port_request - os_net.create_port.return_value = port - - self.assertEqual(vif, cls.request_vif(m_driver, pod, project_id, - subnets, security_groups)) - - m_driver._get_port_request.assert_called_once_with( - pod, project_id, subnets, security_groups) - os_net.create_port.assert_called_once_with(**port_request) - m_to_vif.assert_called_once_with(vif_plugin, port, subnets) - - @mock.patch('kuryr_kubernetes.os_vif_util.neutron_to_osvif_vif') - def test_request_vifs(self, m_to_vif): - cls = neutron_vif.NeutronPodVIFDriver - cls._tag_on_creation = True - m_driver = mock.Mock(spec=cls) - os_net = self.useFixture(k_fix.MockNetworkClient()).client - - pod = mock.sentinel.pod - project_id = mock.sentinel.project_id - subnets = mock.sentinel.subnets - security_groups = mock.sentinel.security_groups - num_ports = 2 - - port_request = mock.sentinel.port_request - m_driver._get_port_request.return_value = port_request - port = os_port.Port(id='910b1183-1f4a-450a-a298-0e80ad06ec8b') - vif_plugin = mock.sentinel.vif_plugin - port.binding_vif_type = vif_plugin - vif = mock.sentinel.vif - bulk_rq = [port_request for _ in range(num_ports)] - - os_net.create_ports.return_value = (p for p in [port, port]) - m_to_vif.return_value = vif - semaphore = mock.MagicMock(spec=eventlet.semaphore.Semaphore(20)) - - self.assertEqual([vif, vif], cls.request_vifs( - m_driver, pod, project_id, subnets, security_groups, num_ports, - semaphore)) - - m_driver._get_port_request.assert_called_once_with( - pod, project_id, subnets, security_groups, unbound=True) - os_net.create_ports.assert_called_once_with(bulk_rq) - calls = [mock.call(vif_plugin, port, subnets), - mock.call(vif_plugin, port, subnets)] - m_to_vif.assert_has_calls(calls) - - @mock.patch('kuryr_kubernetes.os_vif_util.neutron_to_osvif_vif') - def test_request_vifs_unbound(self, m_to_vif): - cls = neutron_vif.NeutronPodVIFDriver - cls._tag_on_creation = True - m_driver = mock.Mock(spec=cls) - os_net = self.useFixture(k_fix.MockNetworkClient()).client - - pod = mock.sentinel.pod - project_id = mock.sentinel.project_id - subnets = mock.sentinel.subnets - security_groups = mock.sentinel.security_groups - num_ports = 2 - - port_request = 
mock.sentinel.port_request - m_driver._get_port_request.return_value = port_request - port_id = mock.sentinel.port_id - port1 = os_port.Port(id=port_id, binding_vif_type='unbound') - vif_plugin = mock.sentinel.vif_plugin - port2 = os_port.Port(id=port_id, binding_vif_type=vif_plugin) - port1_1 = os_port.Port(id=port_id, binding_vif_type=vif_plugin) - vif = mock.sentinel.vif - bulk_rq = [port_request for _ in range(num_ports)] - semaphore = mock.MagicMock(spec=eventlet.semaphore.Semaphore(20)) - - os_net.create_ports.return_value = (p for p in [port1, port2]) - os_net.get_port.return_value = port1_1 - m_to_vif.return_value = vif - - self.assertEqual([vif, vif], cls.request_vifs( - m_driver, pod, project_id, subnets, security_groups, num_ports, - semaphore)) - - m_driver._get_port_request.assert_called_once_with( - pod, project_id, subnets, security_groups, unbound=True) - os_net.create_ports.assert_called_once_with(bulk_rq) - os_net.get_port.assert_called_once_with(port_id) - calls = [mock.call(vif_plugin, port1, subnets), - mock.call(vif_plugin, port2, subnets)] - m_to_vif.assert_has_calls(calls) - - @mock.patch('kuryr_kubernetes.os_vif_util.neutron_to_osvif_vif') - def test_request_vifs_exception(self, m_to_vif): - cls = neutron_vif.NeutronPodVIFDriver - cls._tag_on_creation = False - m_driver = mock.Mock(spec=cls) - os_net = self.useFixture(k_fix.MockNetworkClient()).client - - pod = mock.sentinel.pod - project_id = mock.sentinel.project_id - subnets = mock.sentinel.subnets - security_groups = mock.sentinel.security_groups - num_ports = 2 - - port_request = mock.sentinel.port_request - m_driver._get_port_request.return_value = port_request - bulk_rq = [port_request for _ in range(num_ports)] - - semaphore = mock.MagicMock(spec=eventlet.semaphore.Semaphore(20)) - os_net.create_ports.side_effect = os_exc.SDKException - - self.assertRaises(os_exc.SDKException, cls.request_vifs, - m_driver, pod, project_id, subnets, - security_groups, num_ports, semaphore) - - m_driver._get_port_request.assert_called_once_with( - pod, project_id, subnets, security_groups, unbound=True) - os_net.create_ports.assert_called_once_with(bulk_rq) - m_to_vif.assert_not_called() - - def test_release_vif(self): - cls = neutron_vif.NeutronPodVIFDriver - m_driver = mock.Mock(spec=cls) - os_net = self.useFixture(k_fix.MockNetworkClient()).client - - pod = mock.sentinel.pod - vif = mock.Mock() - - cls.release_vif(m_driver, pod, vif) - - os_net.delete_port.assert_called_once_with(vif.id) - - def test_release_vif_not_found(self): - cls = neutron_vif.NeutronPodVIFDriver - m_driver = mock.Mock(spec=cls) - os_net = self.useFixture(k_fix.MockNetworkClient()).client - - pod = mock.sentinel.pod - vif = mock.Mock() - - cls.release_vif(m_driver, pod, vif) - - os_net.delete_port.assert_called_once_with(vif.id) - - def test_activate_vif(self): - cls = neutron_vif.NeutronPodVIFDriver - m_driver = mock.Mock(spec=cls) - os_net = self.useFixture(k_fix.MockNetworkClient()).client - - vif = mock.Mock() - vif.active = False - port = mock.MagicMock() - - port.__getitem__.return_value = kl_const.PORT_STATUS_ACTIVE - os_net.get_port.return_value = port - - cls.activate_vif(m_driver, vif) - - os_net.get_port.assert_called_once_with(vif.id) - self.assertTrue(vif.active) - - def test_activate_vif_active(self): - cls = neutron_vif.NeutronPodVIFDriver - m_driver = mock.Mock(spec=cls) - os_net = self.useFixture(k_fix.MockNetworkClient()).client - - vif = mock.Mock() - vif.active = True - - cls.activate_vif(m_driver, vif) - - 
os_net.get_port.assert_not_called() - - def test_activate_vif_not_ready(self): - cls = neutron_vif.NeutronPodVIFDriver - m_driver = mock.Mock(spec=cls) - os_net = self.useFixture(k_fix.MockNetworkClient()).client - - vif = mock.Mock() - vif.active = False - port = mock.MagicMock() - - port.__getitem__.return_value = kl_const.PORT_STATUS_DOWN - os_net.get_port.return_value = port - - self.assertRaises(k_exc.ResourceNotReady, cls.activate_vif, - m_driver, vif) - - def _test_get_port_request(self, m_to_fips, security_groups, - m_get_device_id, m_get_port_name, m_get_host_id, - m_get_network_id, unbound=False): - cls = neutron_vif.NeutronPodVIFDriver - cls._tag_on_creation = True - m_driver = mock.Mock(spec=cls) - - pod = mock.sentinel.pod - project_id = mock.sentinel.project_id - subnets = mock.sentinel.subnets - port_name = mock.sentinel.port_name - network_id = mock.sentinel.network_id - fixed_ips = mock.sentinel.fixed_ips - device_id = mock.sentinel.device_id - host_id = mock.sentinel.host_id - - m_get_port_name.return_value = port_name - m_get_network_id.return_value = network_id - m_to_fips.return_value = fixed_ips - m_get_device_id.return_value = device_id - m_get_host_id.return_value = host_id - - oslo_cfg.CONF.set_override('port_debug', - True, - group='kubernetes') - - expected = {'project_id': project_id, - 'name': port_name, - 'network_id': network_id, - 'fixed_ips': fixed_ips, - 'device_owner': kl_const.DEVICE_OWNER, - 'admin_state_up': True, - 'binding_host_id': host_id} - - if security_groups: - expected['security_groups'] = security_groups - - tags = oslo_cfg.CONF.neutron_defaults.resource_tags - if cls._tag_on_creation and tags: - expected['tags'] = tags - - if unbound: - expected['name'] = constants.KURYR_PORT_NAME - else: - expected['device_id'] = device_id - - ret = cls._get_port_request(m_driver, pod, project_id, subnets, - security_groups, unbound) - - self.assertEqual(expected, ret) - m_get_network_id.assert_called_once_with(subnets) - m_to_fips.assert_called_once_with(subnets) - if not unbound: - m_get_port_name.assert_called_once_with(pod) - m_get_device_id.assert_called_once_with(pod) - m_get_host_id.assert_called_once_with(pod) - - @mock.patch('kuryr_kubernetes.os_vif_util.osvif_to_neutron_fixed_ips') - @mock.patch('kuryr_kubernetes.controller.drivers.utils.get_device_id') - @mock.patch('kuryr_kubernetes.controller.drivers.utils.get_port_name') - @mock.patch('kuryr_kubernetes.controller.drivers.utils.get_host_id') - @mock.patch('kuryr_kubernetes.controller.drivers.utils.get_network_id') - def test_get_port_request(self, m_get_network_id, m_get_host_id, - m_get_port_name, m_get_dev_id, m_to_fips): - security_groups = mock.sentinel.security_groups - self._test_get_port_request(m_to_fips, security_groups, m_get_dev_id, - m_get_port_name, m_get_host_id, - m_get_network_id) - - @mock.patch('kuryr_kubernetes.os_vif_util.osvif_to_neutron_fixed_ips') - @mock.patch('kuryr_kubernetes.controller.drivers.utils.get_device_id') - @mock.patch('kuryr_kubernetes.controller.drivers.utils.get_port_name') - @mock.patch('kuryr_kubernetes.controller.drivers.utils.get_host_id') - @mock.patch('kuryr_kubernetes.controller.drivers.utils.get_network_id') - def test_get_port_request_no_sg(self, m_get_network_id, m_get_host_id, - m_get_port_name, m_get_dev_id, m_to_fips): - security_groups = [] - self._test_get_port_request(m_to_fips, security_groups, m_get_dev_id, - m_get_port_name, m_get_host_id, - m_get_network_id) - - @mock.patch('kuryr_kubernetes.os_vif_util.osvif_to_neutron_fixed_ips') - 
@mock.patch('kuryr_kubernetes.controller.drivers.utils.get_device_id') - @mock.patch('kuryr_kubernetes.controller.drivers.utils.get_port_name') - @mock.patch('kuryr_kubernetes.controller.drivers.utils.get_host_id') - @mock.patch('kuryr_kubernetes.controller.drivers.utils.get_network_id') - def test_get_port_request_unbound(self, m_get_network_id, m_get_host_id, - m_get_port_name, m_get_dev_id, - m_to_fips): - security_groups = mock.sentinel.security_groups - self._test_get_port_request(m_to_fips, security_groups, m_get_dev_id, - m_get_port_name, m_get_host_id, - m_get_network_id, unbound=True) - - def test_get_port_name(self): - pod_name = mock.sentinel.pod_name - port_name = 'default/' + str(pod_name) - pod = {'metadata': {'name': pod_name, 'namespace': 'default'}} - - self.assertEqual(port_name, utils.get_port_name(pod)) - - def test_get_device_id(self): - pod_uid = mock.sentinel.pod_uid - pod = {'metadata': {'uid': pod_uid}} - - self.assertEqual(pod_uid, utils.get_device_id(pod)) - - def test_get_host_id(self): - node = mock.sentinel.pod_uid - pod = {'spec': {'nodeName': node}} - - self.assertEqual(node, utils.get_host_id(pod)) diff --git a/kuryr_kubernetes/tests/unit/controller/drivers/test_node_subnets.py b/kuryr_kubernetes/tests/unit/controller/drivers/test_node_subnets.py deleted file mode 100644 index 2b6fb567a..000000000 --- a/kuryr_kubernetes/tests/unit/controller/drivers/test_node_subnets.py +++ /dev/null @@ -1,418 +0,0 @@ -# Copyright 2020 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
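
# The expected dict asserted in _test_get_port_request above pins down the
# port-create request NeutronPodVIFDriver builds. A condensed sketch of that
# shape under stated assumptions: the two constants below are placeholders for
# kl_const.DEVICE_OWNER and constants.KURYR_PORT_NAME, whose exact values are
# not shown in the deleted tests.

DEVICE_OWNER = 'compute:kuryr'   # placeholder for kl_const.DEVICE_OWNER
KURYR_PORT_NAME = 'kuryr-port'   # placeholder for constants.KURYR_PORT_NAME

def build_port_request(project_id, name, network_id, fixed_ips, host_id,
                       security_groups=None, device_id=None, unbound=False):
    req = {
        'project_id': project_id,
        # Unbound (pooled) ports get a generic name instead of namespace/pod.
        'name': KURYR_PORT_NAME if unbound else name,
        'network_id': network_id,
        'fixed_ips': fixed_ips,
        'device_owner': DEVICE_OWNER,
        'admin_state_up': True,
        'binding_host_id': host_id,
    }
    if security_groups:
        req['security_groups'] = security_groups
    if not unbound:
        req['device_id'] = device_id  # bound ports track the pod's device id
    return req
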
- -from unittest import mock - -from openstack import exceptions as os_exc -from openstack.network.v2 import subnet as os_subnet -from oslo_config import cfg - -from kuryr_kubernetes.controller.drivers import node_subnets -from kuryr_kubernetes import exceptions -from kuryr_kubernetes.tests import base as test_base - - -class TestConfigNodesSubnetsDriver(test_base.TestCase): - - def test_get_nodes_subnets(self): - subnets = ['subnet1', 'subnet2'] - cfg.CONF.set_override('worker_nodes_subnets', subnets, - group='pod_vif_nested') - driver = node_subnets.ConfigNodesSubnets() - - self.assertEqual(subnets, driver.get_nodes_subnets()) - - def test_get_project_not_set_raise(self): - cfg.CONF.set_override('worker_nodes_subnets', None, - group='pod_vif_nested') - driver = node_subnets.ConfigNodesSubnets() - - self.assertRaises(cfg.RequiredOptError, driver.get_nodes_subnets, - raise_on_empty=True) - - def test_get_project_not_set(self): - cfg.CONF.set_override('worker_nodes_subnets', None, - group='pod_vif_nested') - driver = node_subnets.ConfigNodesSubnets() - - self.assertEqual([], driver.get_nodes_subnets()) - - def test_add_node(self): - driver = node_subnets.ConfigNodesSubnets() - self.assertFalse(driver.add_node('node')) - - def test_delete_node(self): - driver = node_subnets.ConfigNodesSubnets() - self.assertFalse(driver.delete_node('node')) - - -class TestOpenShiftNodesSubnetsDriver(test_base.TestCase): - def setUp(self): - super().setUp() - self.machine = { - "apiVersion": "machine.openshift.io/v1beta1", - "kind": "Machine", - "metadata": { - "name": "foo-tv22d-master-2", - "namespace": "openshift-machine-api", - }, - "spec": { - "metadata": {}, - "providerSpec": { - "value": { - "cloudName": "openstack", - "cloudsSecret": { - "name": "openstack-cloud-credentials", - "namespace": "openshift-machine-api" - }, - "kind": "OpenstackProviderSpec", - "networks": [ - { - "filter": {}, - "subnets": [{ - "filter": { - "name": "foo-tv22d-nodes", - "tags": "openshiftClusterID=foo-tv22d" - }} - ] - } - ], - "trunk": True - } - } - }, - "status": {} - } - cfg.CONF.set_override('worker_nodes_subnets', [], - group='pod_vif_nested') - - def test_get_nodes_subnets(self): - subnets = ['subnet1', 'subnet2'] - driver = node_subnets.OpenShiftNodesSubnets() - for subnet in subnets: - driver.subnets.add(subnet) - self.assertCountEqual(subnets, driver.get_nodes_subnets()) - - def test_get_nodes_subnets_with_config(self): - subnets = ['subnet1', 'subnet2'] - cfg.CONF.set_override('worker_nodes_subnets', ['subnet3', 'subnet2'], - group='pod_vif_nested') - driver = node_subnets.OpenShiftNodesSubnets() - for subnet in subnets: - driver.subnets.add(subnet) - self.assertCountEqual(['subnet1', 'subnet2', 'subnet3'], - driver.get_nodes_subnets()) - - def test_get_nodes_subnets_not_raise(self): - driver = node_subnets.OpenShiftNodesSubnets() - self.assertEqual([], driver.get_nodes_subnets()) - - def test_get_nodes_subnets_raise(self): - driver = node_subnets.OpenShiftNodesSubnets() - self.assertRaises(exceptions.ResourceNotReady, - driver.get_nodes_subnets, raise_on_empty=True) - - @mock.patch('kuryr_kubernetes.clients.get_kubernetes_client') - @mock.patch('kuryr_kubernetes.utils.get_subnet_id') - def test_add_node(self, m_get_subnet_id, m_get_k8s): - driver = node_subnets.OpenShiftNodesSubnets() - m_get_subnet_id.return_value = 'foobar' - self.assertTrue(driver.add_node(self.machine)) - m_get_subnet_id.assert_called_once_with( - name='foo-tv22d-nodes', tags='openshiftClusterID=foo-tv22d') - self.assertEqual(['foobar'], 
driver.get_nodes_subnets()) - - @mock.patch('kuryr_kubernetes.clients.get_kubernetes_client') - @mock.patch('kuryr_kubernetes.utils.get_subnet_id') - def test_add_node_exists(self, m_get_subnet_id, m_get_k8s): - driver = node_subnets.OpenShiftNodesSubnets() - m_get_subnet_id.return_value = 'foobar' - driver.subnets.add('foobar') - self.assertFalse(driver.add_node(self.machine)) - m_get_subnet_id.assert_called_once_with( - name='foo-tv22d-nodes', tags='openshiftClusterID=foo-tv22d') - self.assertEqual(['foobar'], driver.get_nodes_subnets()) - - @mock.patch('kuryr_kubernetes.clients.get_kubernetes_client') - @mock.patch('kuryr_kubernetes.utils.get_subnet_id') - def test_add_node_uuid(self, m_get_subnet_id, m_get_k8s): - driver = node_subnets.OpenShiftNodesSubnets() - net = self.machine['spec']['providerSpec']['value']['networks'][0] - del net['subnets'][0]['filter'] - net['subnets'][0]['uuid'] = 'barfoo' - self.assertTrue(driver.add_node(self.machine)) - m_get_subnet_id.assert_not_called() - self.assertEqual(['barfoo'], driver.get_nodes_subnets()) - - @mock.patch('kuryr_kubernetes.clients.get_kubernetes_client') - @mock.patch('kuryr_kubernetes.utils.get_subnet_id') - def test_add_node_cannot(self, m_get_subnet_id, m_get_k8s): - driver = node_subnets.OpenShiftNodesSubnets() - net = self.machine['spec']['providerSpec']['value']['networks'][0] - del net['subnets'] - self.assertFalse(driver.add_node(self.machine)) - m_get_subnet_id.assert_not_called() - self.assertEqual([], driver.get_nodes_subnets()) - - @mock.patch('kuryr_kubernetes.utils.get_subnet_id') - @mock.patch('kuryr_kubernetes.clients.get_kubernetes_client') - def test_delete_node_cannot(self, m_get_k8s, m_get_subnet_id): - m_k8s = mock.Mock() - m_get_k8s.return_value = m_k8s - driver = node_subnets.OpenShiftNodesSubnets() - net = self.machine['spec']['providerSpec']['value']['networks'][0] - del net['subnets'] - self.assertFalse(driver.delete_node(self.machine)) - m_get_subnet_id.assert_not_called() - self.assertEqual([], driver.get_nodes_subnets()) - - @mock.patch('kuryr_kubernetes.utils.get_subnet_id') - @mock.patch('kuryr_kubernetes.clients.get_kubernetes_client') - def test_delete_node(self, m_get_k8s, m_get_subnet_id): - m_k8s = mock.Mock() - m_get_k8s.return_value = m_k8s - m_k8s.get.return_value = {'items': []} - - driver = node_subnets.OpenShiftNodesSubnets() - driver.subnets.add('foobar') - m_get_subnet_id.return_value = 'foobar' - self.assertTrue(driver.delete_node(self.machine)) - m_get_subnet_id.assert_called_once_with( - name='foo-tv22d-nodes', tags='openshiftClusterID=foo-tv22d') - self.assertEqual([], driver.get_nodes_subnets()) - - @mock.patch('kuryr_kubernetes.utils.get_subnet_id') - @mock.patch('kuryr_kubernetes.clients.get_kubernetes_client') - def test_delete_node_still_exists(self, m_get_k8s, m_get_subnet_id): - m_k8s = mock.Mock() - m_get_k8s.return_value = m_k8s - m_k8s.get.return_value = {'items': [self.machine]} - - driver = node_subnets.OpenShiftNodesSubnets() - driver.subnets.add('foobar') - m_get_subnet_id.return_value = 'foobar' - self.assertFalse(driver.delete_node(self.machine)) - m_get_subnet_id.assert_called_with( - name='foo-tv22d-nodes', tags='openshiftClusterID=foo-tv22d') - self.assertEqual(['foobar'], driver.get_nodes_subnets()) - - @mock.patch('kuryr_kubernetes.clients.get_kubernetes_client') - def test_get_subnet_from_machine_no_networks(self, m_get_k8s): - driver = node_subnets.OpenShiftNodesSubnets() - del self.machine['spec']['providerSpec']['value']['networks'] - - 
self.assertIsNone(driver._get_subnet_from_machine(self.machine)) - - @mock.patch('kuryr_kubernetes.clients.get_kubernetes_client') - @mock.patch('kuryr_kubernetes.utils.get_subnet_id') - def test_get_subnet_from_machine_networks_subnets(self, m_get_subnet_id, - m_get_k8s): - subnetid = 'd467451b-ab28-4578-882f-347f0dff4c9a' - m_get_subnet_id.return_value = subnetid - driver = node_subnets.OpenShiftNodesSubnets() - - self.assertEqual(subnetid, - driver._get_subnet_from_machine(self.machine)) - - @mock.patch('kuryr_kubernetes.clients.get_kubernetes_client') - def test_get_subnet_from_machine_networks_wo_filters(self, m_get_k8s): - driver = node_subnets.OpenShiftNodesSubnets() - nets = self.machine['spec']['providerSpec']['value']['networks'] - nets[0]['subnets'] = [{'uuid': 'f8a458e5-c280-47b7-9c8a-dbd4ecd65545'}] - self.machine['spec']['providerSpec']['value']['networks'] = nets - - result = driver._get_subnet_from_machine(self.machine) - - self.assertEqual(nets[0]['subnets'][0]['uuid'], result) - - @mock.patch('kuryr_kubernetes.clients.get_kubernetes_client') - @mock.patch('kuryr_kubernetes.clients.get_network_client') - def test_get_subnet_from_machine_primary_subnet(self, m_get_net, - m_get_k8s): - driver = node_subnets.OpenShiftNodesSubnets() - psub = '622c5fd4-804c-40e8-95ab-ecd1565ac8e2' - m_net = mock.Mock() - m_net.find_subnet.return_value = os_subnet.Subnet(id=psub) - m_get_net.return_value = m_net - self.machine['spec']['providerSpec']['value']['primarySubnet'] = psub - - result = driver._get_subnet_from_machine(self.machine) - - self.assertEqual(psub, result) - - @mock.patch('kuryr_kubernetes.clients.get_kubernetes_client') - def test_get_subnet_from_machine_ports(self, m_get_k8s): - driver = node_subnets.OpenShiftNodesSubnets() - subnet_id = '0530f763-899b-4acb-a2ca-deeedd760409' - ports = [{'fixedIPs': [{'subnetID': subnet_id}]}] - self.machine['spec']['providerSpec']['value']['ports'] = ports - del self.machine['spec']['providerSpec']['value']['networks'] - - result = driver._get_subnet_from_machine(self.machine) - - self.assertEqual(subnet_id, result) - - @mock.patch('kuryr_kubernetes.clients.get_kubernetes_client') - @mock.patch('kuryr_kubernetes.utils.get_subnet_id') - def test_get_subnet_from_machine_networks_and_ports(self, m_get_subnet_id, - m_get_k8s): - """Test both: networks and ports presence, but no primarySubnet. - - Precedence would have networks over ports. - """ - subnet_id = '7607a620-b706-478f-9481-7fdf11deeab2' - m_get_subnet_id.return_value = subnet_id - port_subnet_id = 'ec4c50ac-e3f6-426e-ad91-6ddc10b5c391' - ports = [{'fixedIPs': [{'subnetID': port_subnet_id}]}] - self.machine['spec']['providerSpec']['value']['ports'] = ports - driver = node_subnets.OpenShiftNodesSubnets() - - result = driver._get_subnet_from_machine(self.machine) - - self.assertEqual(subnet_id, result) - - @mock.patch('kuryr_kubernetes.clients.get_kubernetes_client') - def test_get_subnet_from_machine_empty_networks(self, m_get_k8s): - """Test both: networks and ports presence, but no primarySubnet. - - Precedence would have networks over ports. - """ - self.machine['spec']['providerSpec']['value']['networks'] = [] - driver = node_subnets.OpenShiftNodesSubnets() - - result = driver._get_subnet_from_machine(self.machine) - - self.assertIsNone(result) - - @mock.patch('kuryr_kubernetes.clients.get_kubernetes_client') - def test_get_subnet_from_machine_empty_ports(self, m_get_k8s): - """Test both: networks and ports presence, but no primarySubnet. 
- - Precedence would have networks over ports. - """ - del self.machine['spec']['providerSpec']['value']['networks'] - self.machine['spec']['providerSpec']['value']['ports'] = [] - driver = node_subnets.OpenShiftNodesSubnets() - - result = driver._get_subnet_from_machine(self.machine) - - self.assertIsNone(result) - - @mock.patch('kuryr_kubernetes.clients.get_kubernetes_client') - def test_get_subnet_from_machine_networks_no_trunk(self, m_get_k8s): - del self.machine['spec']['providerSpec']['value']['trunk'] - driver = node_subnets.OpenShiftNodesSubnets() - - self.assertIsNone(driver._get_subnet_from_machine(self.machine)) - - @mock.patch('kuryr_kubernetes.clients.get_kubernetes_client') - def test_get_subnet_from_machine_ports_no_trunk(self, m_get_k8s): - del self.machine['spec']['providerSpec']['value']['trunk'] - del self.machine['spec']['providerSpec']['value']['networks'] - subnet_id = '0530f763-899b-4acb-a2ca-deeedd760409' - ports = [{'fixedIPs': [{'subnetID': subnet_id}]}] - self.machine['spec']['providerSpec']['value']['ports'] = ports - driver = node_subnets.OpenShiftNodesSubnets() - - result = driver._get_subnet_from_machine(self.machine) - - self.assertIsNone(result) - - @mock.patch('kuryr_kubernetes.clients.get_kubernetes_client') - def test_get_subnet_from_machine_ports_no_trunk_one_with_trunk(self, - m_get_k8s): - del self.machine['spec']['providerSpec']['value']['trunk'] - del self.machine['spec']['providerSpec']['value']['networks'] - subnet_id = '0530f763-899b-4acb-a2ca-deeedd760409' - ports = [{'fixedIPs': [{'subnetID': 'foo'}]}, - {'fixedIPs': [{'subnetID': subnet_id}], 'trunk': True}] - self.machine['spec']['providerSpec']['value']['ports'] = ports - driver = node_subnets.OpenShiftNodesSubnets() - - result = driver._get_subnet_from_machine(self.machine) - - self.assertEqual(subnet_id, result) - - @mock.patch('kuryr_kubernetes.clients.get_kubernetes_client') - def test_get_subnet_from_machine_ports_both_with_trunk(self, m_get_k8s): - del self.machine['spec']['providerSpec']['value']['networks'] - subnet_id1 = '0530f763-899b-4acb-a2ca-deeedd760409' - subnet_id2 = 'ccfe75a8-c15e-4504-9596-02e397362abf' - ports = [{'fixedIPs': [{'subnetID': subnet_id1}], 'trunk': False}, - {'fixedIPs': [{'subnetID': subnet_id2}], 'trunk': True}] - self.machine['spec']['providerSpec']['value']['ports'] = ports - driver = node_subnets.OpenShiftNodesSubnets() - - result = driver._get_subnet_from_machine(self.machine) - - self.assertEqual(subnet_id2, result) - - @mock.patch('kuryr_kubernetes.clients.get_kubernetes_client') - def test_get_subnet_from_machine_ports_both_wrong(self, m_get_k8s): - del self.machine['spec']['providerSpec']['value']['networks'] - ports = [{'trunk': True}, - {'fixedIPs': [{'foo': 'bar'}], 'trunk': True}] - self.machine['spec']['providerSpec']['value']['ports'] = ports - driver = node_subnets.OpenShiftNodesSubnets() - - result = driver._get_subnet_from_machine(self.machine) - - self.assertIsNone(result) - - @mock.patch('kuryr_kubernetes.clients.get_kubernetes_client') - @mock.patch('kuryr_kubernetes.clients.get_network_client') - def test_get_subnet_from_machine_two_primary_subnet(self, m_get_net, - m_get_k8s): - driver = node_subnets.OpenShiftNodesSubnets() - sname = 'multiple subnets with the same name' - m_net = mock.Mock() - m_net.find_subnet.side_effect = os_exc.DuplicateResource - m_get_net.return_value = m_net - self.machine['spec']['providerSpec']['value']['primarySubnet'] = sname - - result = driver._get_subnet_from_machine(self.machine) - - 
self.assertIsNone(result) - - @mock.patch('kuryr_kubernetes.clients.get_kubernetes_client') - @mock.patch('kuryr_kubernetes.clients.get_network_client') - def test_get_subnet_from_machine_single_named_primary_subnet(self, - m_get_net, - m_get_k8s): - driver = node_subnets.OpenShiftNodesSubnets() - sname = 'single named subnet' - subnet_id = '9bcf85c8-1f15-4e3d-8e1e-0e2270ffd2b9' - m_net = mock.Mock() - m_net.find_subnet.return_value = os_subnet.Subnet(id=subnet_id) - m_get_net.return_value = m_net - self.machine['spec']['providerSpec']['value']['primarySubnet'] = sname - - result = driver._get_subnet_from_machine(self.machine) - - self.assertEqual(subnet_id, result) - - @mock.patch('kuryr_kubernetes.clients.get_kubernetes_client') - @mock.patch('kuryr_kubernetes.clients.get_network_client') - def test_get_subnet_from_machine_primary_subnet_exc(self, m_get_net, - m_get_k8s): - driver = node_subnets.OpenShiftNodesSubnets() - subnet = 'e621f2f5-38a4-4a9c-873f-1d447290939c' - m_net = mock.Mock() - m_net.find_subnet.side_effect = os_exc.SDKException - m_get_net.return_value = m_net - self.machine['spec']['providerSpec']['value']['primarySubnet'] = subnet - - self.assertRaises(exceptions.ResourceNotReady, - driver._get_subnet_from_machine, self.machine) diff --git a/kuryr_kubernetes/tests/unit/controller/drivers/test_public_ip.py b/kuryr_kubernetes/tests/unit/controller/drivers/test_public_ip.py deleted file mode 100644 index 350dd43aa..000000000 --- a/kuryr_kubernetes/tests/unit/controller/drivers/test_public_ip.py +++ /dev/null @@ -1,170 +0,0 @@ -# Copyright (c) 2017 RedHat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
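
# Read together, the OpenShiftNodesSubnets tests deleted above fix a lookup
# order for _get_subnet_from_machine: an explicit primarySubnet wins; then
# networks[].subnets (uuid used directly, filter resolved by name/tags), but
# only when the machine is trunk-enabled; then ports[].fixedIPs for ports that
# are themselves trunked. A simplified sketch of that precedence, reconstructed
# from the tests rather than taken from the driver; find_subnet_id stands in
# for the mocked kuryr_kubernetes.utils.get_subnet_id / find_subnet calls.

def subnet_from_machine(provider_value, find_subnet_id):
    """Return the nodes-subnet id for one Machine's providerSpec value."""
    trunk = provider_value.get('trunk', False)
    primary = provider_value.get('primarySubnet')
    if primary:
        # Name (or id) lookup; per the tests, the real driver returns None on
        # duplicate names and raises ResourceNotReady on SDK errors.
        return find_subnet_id(name=primary)
    if trunk:
        for net in provider_value.get('networks', []):
            for subnet in net.get('subnets', []):
                if 'uuid' in subnet:
                    return subnet['uuid']
                if 'filter' in subnet:
                    return find_subnet_id(**subnet['filter'])
    for port in provider_value.get('ports', []):
        # A per-port trunk flag overrides the machine-wide one.
        if port.get('trunk', trunk):
            for fixed_ip in port.get('fixedIPs', []):
                if 'subnetID' in fixed_ip:
                    return fixed_ip['subnetID']
    return None
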
- -from openstack import exceptions as os_exc -from openstack.network.v2 import floating_ip as os_fip -from unittest import mock - -from kuryr_kubernetes.controller.drivers import public_ip\ - as d_public_ip -from kuryr_kubernetes.tests import base as test_base -from kuryr_kubernetes.tests.unit import kuryr_fixtures as k_fix - - -class TestFipPubIpDriver(test_base.TestCase): - def setUp(self): - super(TestFipPubIpDriver, self).setUp() - self.driver = d_public_ip.FipPubIpDriver() - self.os_net = self.useFixture(k_fix.MockNetworkClient()).client - - def test_is_ip_available_none_param(self): - fip_id = self.driver.is_ip_available(None) - self.assertIsNone(fip_id) - - def test_is_ip_available_ip_not_exist(self): - fip = os_fip.FloatingIP( - floating_ip_address='1.2.3.4', - port_id=None, - id='a2a62ea7-e3bf-40df-8c09-aa0c29876a6b', - ) - self.os_net.ips.return_value = (ip for ip in [fip]) - - fip_ip_addr = '1.1.1.1' - fip_id = self.driver.is_ip_available(fip_ip_addr) - self.assertIsNone(fip_id) - - def test_is_ip_available_empty_fip_list(self): - self.os_net.ips.return_value = (ip for ip in []) - - fip_ip_addr = '1.1.1.1' - fip_id = self.driver.is_ip_available(fip_ip_addr) - self.assertIsNone(fip_id) - - def test_is_ip_available_occupied_fip(self): - fip = os_fip.FloatingIP( - floating_ip_address='1.2.3.4', - port_id='ec29d641-fec4-4f67-928a-124a76b3a8e6', - ) - self.os_net.ips.return_value = (ip for ip in [fip]) - fip_ip_addr = '1.2.3.4' - fip_id = self.driver.is_ip_available(fip_ip_addr) - self.assertIsNone(fip_id) - - def test_is_ip_available_ip_exist_and_available(self): - fip = os_fip.FloatingIP( - floating_ip_address='1.2.3.4', - port_id=None, - id='a2a62ea7-e3bf-40df-8c09-aa0c29876a6b', - ) - self.os_net.ips.return_value = (ip for ip in [fip]) - - fip_ip_addr = '1.2.3.4' - fip_id = self.driver.is_ip_available(fip_ip_addr) - self.assertEqual(fip_id, 'a2a62ea7-e3bf-40df-8c09-aa0c29876a6b') - - def test_allocate_ip_all_green(self): - pub_net_id = mock.sentinel.pub_net_id - pub_subnet_id = mock.sentinel.pub_subnet_id - project_id = mock.sentinel.project_id - description = mock.sentinel.description - - fip = os_fip.FloatingIP( - floating_ip_address='1.2.3.5', - id='ec29d641-fec4-4f67-928a-124a76b3a888', - ) - self.os_net.create_ip.return_value = fip - - fip_id, fip_addr = self.driver.allocate_ip(pub_net_id, project_id, - pub_subnet_id, description) - self.assertEqual(fip_id, fip.id) - self.assertEqual(fip_addr, fip.floating_ip_address) - - def test_allocate_ip_neutron_exception(self): - pub_net_id = mock.sentinel.pub_net_id - pub_subnet_id = mock.sentinel.pub_subnet_id - project_id = mock.sentinel.project_id - description = mock.sentinel.description - - self.os_net.create_ip.side_effect = os_exc.SDKException - - self.assertRaises(os_exc.SDKException, self.driver.allocate_ip, - pub_net_id, project_id, pub_subnet_id, description) - - def test_free_ip_neutron_exception(self): - res_id = mock.sentinel.res_id - - self.os_net.delete_ip.side_effect = os_exc.SDKException - rc = self.driver.free_ip(res_id) - self.assertIs(rc, False) - - def test_free_ip_succeeded(self): - res_id = mock.sentinel.res_id - - rc = self.driver.free_ip(res_id) - self.assertIs(rc, True) - - def test_associate_neutron_exception(self): - res_id = mock.sentinel.res_id - vip_port_id = mock.sentinel.vip_port_id - - uf = self.os_net.update_ip - uf.side_effect = os_exc.SDKException - self.assertRaises(os_exc.SDKException, self.driver.associate, - res_id, vip_port_id) - - def test_associate_conflict_correct(self): - driver = 
d_public_ip.FipPubIpDriver() - res_id = mock.sentinel.res_id - vip_port_id = mock.sentinel.vip_port_id - - os_net = self.useFixture(k_fix.MockNetworkClient()).client - os_net.update_ip.side_effect = os_exc.ConflictException - os_net.get_ip.return_value = os_fip.FloatingIP( - id=res_id, port_id=vip_port_id, - ) - self.assertIsNone(driver.associate(res_id, vip_port_id)) - - def test_associate_conflict_incorrect(self): - driver = d_public_ip.FipPubIpDriver() - res_id = mock.sentinel.res_id - vip_port_id = mock.sentinel.vip_port_id - - os_net = self.useFixture(k_fix.MockNetworkClient()).client - os_net.update_ip.side_effect = os_exc.ConflictException - os_net.get_ip.return_value = os_fip.FloatingIP( - id=res_id, port_id='foo', - ) - self.assertRaises(os_exc.ConflictException, driver.associate, res_id, - vip_port_id) - - def test_associate_succeeded(self): - res_id = mock.sentinel.res_id - vip_port_id = mock.sentinel.vip_port_id - - retcode = self.driver.associate(res_id, vip_port_id) - self.assertIsNone(retcode) - - def test_disassociate_neutron_exception(self): - res_id = mock.sentinel.res_id - - uf = self.os_net.update_ip - uf.side_effect = os_exc.SDKException - self.assertRaises(os_exc.SDKException, - self.driver.disassociate, res_id) - - def test_disassociate_succeeded(self): - res_id = mock.sentinel.res_id - - self.assertIsNone(self.driver.disassociate(res_id)) diff --git a/kuryr_kubernetes/tests/unit/controller/drivers/test_utils.py b/kuryr_kubernetes/tests/unit/controller/drivers/test_utils.py deleted file mode 100644 index bc7091d43..000000000 --- a/kuryr_kubernetes/tests/unit/controller/drivers/test_utils.py +++ /dev/null @@ -1,172 +0,0 @@ -# Copyright 2019 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
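
# The FipPubIpDriver tests deleted above define the contract of
# is_ip_available(): it yields the floating IP's Neutron id only when that
# exact address exists and is not attached to any port; otherwise None. A
# sketch of the check, assuming an openstacksdk-style network proxy whose
# ips() listing is what the tests mock via os_net.ips.

def find_available_fip_id(os_net, address):
    """Return the id of an existing, unattached floating IP, else None."""
    if not address:
        return None
    for fip in os_net.ips():
        if fip.floating_ip_address == address and not fip.port_id:
            return fip.id
    return None
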
-from unittest import mock - -from oslo_config import cfg - -from kuryr_kubernetes import constants -from kuryr_kubernetes.controller.drivers import utils -from kuryr_kubernetes import exceptions -from kuryr_kubernetes.tests import base as test_base -from kuryr_kubernetes.tests.unit import kuryr_fixtures as k_fix - -CONF = cfg.CONF - - -class TestUtils(test_base.TestCase): - - def test_get_namespace_not_found(self): - namespace_name = mock.sentinel.namespace_name - kubernetes = self.useFixture(k_fix.MockK8sClient()).client - kubernetes.get.side_effect = exceptions.K8sResourceNotFound( - mock.sentinel.resource) - - resp = utils.get_namespace(namespace_name) - - self.assertIsNone(resp) - kubernetes.get.assert_called_once_with('{}/namespaces/{}'.format( - constants.K8S_API_BASE, namespace_name)) - - def test_get_network_id(self): - id_a = mock.sentinel.id_a - net1 = mock.Mock() - net1.id = id_a - net2 = mock.Mock() - net2.id = id_a - subnets = {1: net1, 2: net2} - - ret = utils.get_network_id(subnets) - - self.assertEqual(ret, id_a) - - def test_get_network_id_invalid(self): - id_a = mock.sentinel.id_a - id_b = mock.sentinel.id_b - net1 = mock.Mock() - net1.id = id_a - net2 = mock.Mock() - net2.id = id_b - net3 = mock.Mock() - net3.id = id_a - subnets = {1: net1, 2: net2, 3: net3} - - self.assertRaises(exceptions.IntegrityError, utils.get_network_id, - subnets) - - def test_get_network_id_empty(self): - self.assertRaises(exceptions.IntegrityError, utils.get_network_id, {}) - - def test_match_selector(self): - self.assertFalse( - utils.match_selector({'matchLabels': {'app': 'demo'}}, None)) - self.assertFalse( - utils.match_selector({'matchLabels': {'app': 'demo'}}, {})) - self.assertFalse( - utils.match_selector({'matchLabels': {'app': 'demo'}}, - {'app': 'foobar'})) - self.assertTrue( - utils.match_selector({'matchLabels': {'app': 'demo'}}, - {'app': 'demo'})) - self.assertTrue( - utils.match_selector({'matchLabels': {'app': 'demo'}}, - {'app': 'demo', 'foo': 'bar'})) - self.assertTrue( - utils.match_selector({'matchLabels': {'app': 'demo', - 'foo': 'bar'}}, - {'app': 'demo', 'foo': 'bar'})) - self.assertFalse( - utils.match_selector({'matchLabels': {'app': 'demo', - 'foo': 'bar'}}, - {'app': 'demo'})) - - def test_is_network_policy_enabled(self): - CONF.set_override('enabled_handlers', ['fake_handler'], - group='kubernetes') - CONF.set_override('service_security_groups_driver', 'foo', - group='kubernetes') - - self.assertFalse(utils.is_network_policy_enabled()) - - CONF.set_override('enabled_handlers', ['policy'], - group='kubernetes') - CONF.set_override('service_security_groups_driver', 'foo', - group='kubernetes') - - self.assertFalse(utils.is_network_policy_enabled()) - - CONF.set_override('enabled_handlers', ['policy'], - group='kubernetes') - self.addCleanup(CONF.clear_override, 'enabled_handlers', - group='kubernetes') - CONF.set_override('service_security_groups_driver', 'policy', - group='kubernetes') - self.addCleanup(CONF.clear_override, 'service_security_groups_driver', - group='kubernetes') - - self.assertTrue(utils.is_network_policy_enabled()) - - def test_get_resource_name_with_too_long_name(self): - name = 253 * "a" - prefix = 'ns/' - suffix = '-net' - - new_name = utils.get_resource_name(name, prefix=prefix, suffix=suffix) - - self.assertEqual(new_name, - prefix + 248 * 'a' + suffix) - self.assertEqual(len(new_name), 255) - - def test_get_resource_name_with_sane_name(self): - name = 'myns' - prefix = 'ns/' - suffix = '-foo' - - new_name = utils.get_resource_name(name, 
prefix=prefix, suffix=suffix) - - self.assertEqual(new_name, f'{prefix}{name}{suffix}') - - def test_get_resource_name_with_prefix(self): - name = 'fun_name' - prefix = 'something/' - - new_name = utils.get_resource_name(name, prefix=prefix) - - self.assertEqual(new_name, f'{prefix}{name}') - - def test_get_resource_name_with_sufix(self): - name = 'another' - suffix = '/something-else' - - new_name = utils.get_resource_name(name, suffix=suffix) - - self.assertEqual(new_name, f'{name}{suffix}') - - def test_get_resource_name_non_ascii(self): - name = 'Ру́сский вое́нный кора́бль, иди́ на хуй!' - prefix = 'bar:' - suffix = ':baz' - - new_name = utils.get_resource_name(name, prefix=prefix, suffix=suffix) - - self.assertEqual(new_name, f'{prefix}{name}{suffix}') - - def test_get_resource_name_uid(self): - name = 'ns name' - prefix = 'foo:' - suffix = ':bar' - uid = 'b0f21afa-6d7b-496e-b151-6d7f252b8c6c' - - new_name = utils.get_resource_name(name, uid, prefix, suffix) - - self.assertEqual(new_name, f'{prefix}{uid}/{name}{suffix}') diff --git a/kuryr_kubernetes/tests/unit/controller/drivers/test_vif_pool.py b/kuryr_kubernetes/tests/unit/controller/drivers/test_vif_pool.py deleted file mode 100644 index 2c7dc9b74..000000000 --- a/kuryr_kubernetes/tests/unit/controller/drivers/test_vif_pool.py +++ /dev/null @@ -1,1878 +0,0 @@ -# Copyright 2017 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
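
# test_get_resource_name_with_too_long_name above encodes the invariant that
# prefix + name + suffix is clipped to a 255-character limit by truncating the
# name, never the prefix or suffix (253 'a's with 'ns/' and '-net' become 248
# 'a's). A toy equivalent under that assumption; the real
# utils.get_resource_name also accepts a uid, prepended as '<uid>/'.

MAX_RESOURCE_NAME_LEN = 255  # assumed from the test's length assertion

def clipped_resource_name(name, prefix='', suffix=''):
    budget = MAX_RESOURCE_NAME_LEN - len(prefix) - len(suffix)
    return f"{prefix}{name[:budget]}{suffix}"

# e.g. clipped_resource_name(253 * 'a', 'ns/', '-net') has length 255.
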
- -import collections -import eventlet -import functools -import threading -from unittest import mock -import uuid - -import ddt -from openstack import exceptions as os_exc -from openstack.network.v2 import network as os_network -from openstack.network.v2 import port as os_port -from oslo_config import cfg as oslo_cfg - -from os_vif.objects import network as osv_network -from os_vif.objects import vif as osv_vif - -from kuryr_kubernetes import constants -from kuryr_kubernetes.controller.drivers import nested_vlan_vif -from kuryr_kubernetes.controller.drivers import neutron_vif -from kuryr_kubernetes.controller.drivers import utils -from kuryr_kubernetes.controller.drivers import vif_pool -from kuryr_kubernetes import exceptions -from kuryr_kubernetes import os_vif_util as ovu -from kuryr_kubernetes.tests import base as test_base -from kuryr_kubernetes.tests import fake -from kuryr_kubernetes.tests.unit import kuryr_fixtures as k_fix - - -def get_pod_obj(): - return { - 'status': { - 'qosClass': 'BestEffort', - 'hostIP': '192.168.1.2', - }, - 'kind': 'Pod', - 'spec': { - 'schedulerName': 'default-scheduler', - 'containers': [{ - 'name': 'busybox', - 'image': 'busybox', - 'resources': {} - }], - 'nodeName': 'kuryr-devstack' - }, - 'metadata': { - 'name': 'busybox-sleep1', - 'namespace': 'default', - 'resourceVersion': '53808', - 'uid': '452176db-4a85-11e7-80bd-fa163e29dbbb', - 'annotations': { - 'openstack.org/kuryr-vif': {} - } - }} - - -def get_pod_name(pod): - return "%(namespace)s/%(name)s" % pod['metadata'] - - -AVAILABLE_PORTS_TYPE = functools.partial(collections.defaultdict, - collections.OrderedDict) - - -@ddt.ddt -class BaseVIFPool(test_base.TestCase): - - def test_request_vif(self): - cls = vif_pool.BaseVIFPool - m_driver = mock.MagicMock(spec=cls) - - pod = get_pod_obj() - project_id = str(uuid.uuid4()) - subnet_id = str(uuid.uuid4()) - net_id = str(uuid.uuid4()) - _net = os_network.Network( - id=net_id, - name=None, - mtu=None, - provider_network_type=None, - ) - network = ovu.neutron_to_osvif_network(_net) - subnets = {subnet_id: network} - security_groups = [mock.sentinel.security_groups] - vif = mock.sentinel.vif - - m_driver._get_port_from_pool.return_value = vif - m_driver._recovered_pools = True - oslo_cfg.CONF.set_override('ports_pool_min', - 5, - group='vif_pool') - pool_length = 5 - m_driver._get_pool_size.return_value = pool_length - - self.assertEqual(vif, cls.request_vif(m_driver, pod, project_id, - subnets, security_groups)) - - def test_request_vif_empty_pool(self): - cls = vif_pool.BaseVIFPool - m_driver = mock.MagicMock(spec=cls) - - host_addr = mock.sentinel.host_addr - pod_status = mock.MagicMock() - pod_status.__getitem__.return_value = host_addr - pod = mock.MagicMock() - pod.__getitem__.return_value = pod_status - project_id = str(uuid.uuid4()) - subnet_id = str(uuid.uuid4()) - net_id = str(uuid.uuid4()) - _net = os_network.Network( - id=net_id, - name=None, - mtu=None, - provider_network_type=None, - ) - network = ovu.neutron_to_osvif_network(_net) - subnets = {subnet_id: network} - security_groups = [mock.sentinel.security_groups] - m_driver._recovered_pools = True - m_driver._get_port_from_pool.side_effect = ( - exceptions.ResourceNotReady(pod)) - - self.assertRaises(exceptions.ResourceNotReady, cls.request_vif, - m_driver, pod, project_id, subnets, security_groups) - m_driver._populate_pool.assert_called_once() - - def test_request_vif_pod_without_host(self): - cls = vif_pool.BaseVIFPool - m_driver = mock.MagicMock(spec=cls) - - pod = get_pod_obj() - 
project_id = str(uuid.uuid4()) - subnets = mock.sentinel.subnets - security_groups = [mock.sentinel.security_groups] - m_driver._get_host_addr.side_effect = KeyError - m_driver._recovered_pools = True - - resp = cls.request_vif(m_driver, pod, project_id, subnets, - security_groups) - self.assertIsNone(resp) - - def test_request_vif_multi_vif_pod_without_host(self): - cls = vif_pool.MultiVIFPool - m_driver = mock.MagicMock(spec=cls) - - pod = get_pod_obj().copy() - del pod['spec']['nodeName'] - project_id = str(uuid.uuid4()) - subnets = mock.sentinel.subnets - security_groups = [mock.sentinel.security_groups] - m_driver._vif_drvs = {} - m_driver._vif_drvs['nested-vlan'] = 'NestedVIFPool' - m_driver._get_pod_vif_type.side_effect = KeyError - - resp = cls.request_vif(m_driver, pod, project_id, subnets, - security_groups) - self.assertIsNone(resp) - - @mock.patch('kuryr_kubernetes.clients.get_kubernetes_client') - @mock.patch('time.time', return_value=50) - @ddt.data((neutron_vif.NeutronPodVIFDriver), - (nested_vlan_vif.NestedVlanPodVIFDriver)) - def test__populate_pool(self, m_vif_driver, m_time, - m_get_kubernetes_client): - cls = vif_pool.BaseVIFPool - m_driver = mock.MagicMock(spec=cls) - - cls_vif_driver = m_vif_driver - vif_driver = mock.MagicMock(spec=cls_vif_driver) - m_driver._drv_vif = vif_driver - - pod = mock.sentinel.pod - project_id = str(uuid.uuid4()) - subnets = mock.sentinel.subnets - security_groups = ['test-sg'] - pool_key = (mock.sentinel.host_addr, project_id) - vif = osv_vif.VIFOpenVSwitch(id='0fa0e837-d34e-4580-a6c4-04f5f607d93e') - vifs = [vif] - - m_driver._existing_vifs = {} - m_driver._available_ports_pools = AVAILABLE_PORTS_TYPE() - m_driver._recovered_pools = True - m_driver._lock = threading.Lock() - m_driver._populate_pool_lock = { - pool_key: mock.MagicMock(spec=threading.Lock())} - m_driver._create_ports_semaphore = mock.MagicMock( - spec=eventlet.semaphore.Semaphore(20)) - - oslo_cfg.CONF.set_override('ports_pool_min', - 5, - group='vif_pool') - oslo_cfg.CONF.set_override('ports_pool_update_frequency', - 15, - group='vif_pool') - m_driver._get_pool_size.return_value = 2 - vif_driver.request_vifs.return_value = vifs - - cls._populate_pool(m_driver, pool_key, pod, subnets, - tuple(security_groups)) - m_driver._get_pool_size.assert_called_once() - m_driver._drv_vif.request_vifs.assert_called_once() - - @mock.patch('kuryr_kubernetes.clients.get_kubernetes_client') - @ddt.data((neutron_vif.NeutronPodVIFDriver), - (nested_vlan_vif.NestedVlanPodVIFDriver)) - def test__populate_pool_not_ready(self, m_vif_driver, - m_get_kubernetes_client): - cls = vif_pool.BaseVIFPool - m_driver = mock.MagicMock(spec=cls) - - cls_vif_driver = m_vif_driver - vif_driver = mock.MagicMock(spec=cls_vif_driver) - m_driver._drv_vif = vif_driver - - pod = mock.sentinel.pod - project_id = str(uuid.uuid4()) - subnets = mock.sentinel.subnets - security_groups = 'test-sg' - pool_key = (mock.sentinel.host_addr, project_id) - m_driver._recovered_pools = False - - self.assertFalse(cls._populate_pool( - m_driver, pool_key, pod, subnets, - tuple(security_groups))) - m_driver._drv_vif.request_vifs.assert_not_called() - - @mock.patch('kuryr_kubernetes.clients.get_kubernetes_client') - @ddt.data((neutron_vif.NeutronPodVIFDriver), - (nested_vlan_vif.NestedVlanPodVIFDriver)) - def test__populate_pool_not_ready_dont_raise(self, m_vif_driver, - m_get_kubernetes_client): - cls = vif_pool.BaseVIFPool - m_driver = mock.MagicMock(spec=cls) - - cls_vif_driver = m_vif_driver - vif_driver = 
mock.MagicMock(spec=cls_vif_driver) - m_driver._drv_vif = vif_driver - - pod = mock.sentinel.pod - project_id = str(uuid.uuid4()) - subnets = mock.sentinel.subnets - security_groups = 'test-sg' - pool_key = (mock.sentinel.host_addr, project_id) - m_driver._recovered_pools = False - - cls._populate_pool(m_driver, pool_key, pod, subnets, - tuple(security_groups)) - m_driver._drv_vif.request_vifs.assert_not_called() - - @mock.patch('kuryr_kubernetes.clients.get_kubernetes_client') - @mock.patch('time.time', return_value=0) - @ddt.data((neutron_vif.NeutronPodVIFDriver), - (nested_vlan_vif.NestedVlanPodVIFDriver)) - def test__populate_pool_no_update(self, m_vif_driver, m_time, - m_get_kubernetes_client): - cls = vif_pool.BaseVIFPool - m_driver = mock.MagicMock(spec=cls) - - pod = mock.sentinel.pod - project_id = str(uuid.uuid4()) - subnets = mock.sentinel.subnets - security_groups = 'test-sg' - pool_key = (mock.sentinel.host_addr, project_id) - m_driver._get_pool_size.return_value = 4 - - cls_vif_driver = m_vif_driver - vif_driver = mock.MagicMock(spec=cls_vif_driver) - m_driver._drv_vif = vif_driver - - oslo_cfg.CONF.set_override('ports_pool_update_frequency', - 15, - group='vif_pool') - oslo_cfg.CONF.set_override('ports_pool_min', - 3, - group='vif_pool') - m_driver._get_pool_size.return_value = 10 - m_driver._recovered_pools = True - - cls._populate_pool(m_driver, pool_key, pod, subnets, - tuple(security_groups)) - m_driver._get_pool_size.assert_called() - m_driver._drv_vif.request_vifs.assert_not_called() - - def test_release_vif(self): - cls = vif_pool.BaseVIFPool - m_driver = mock.MagicMock(spec=cls) - m_driver._recyclable_ports = {} - m_driver._existing_vifs = {} - - pod = get_pod_obj() - project_id = mock.sentinel.project_id - security_groups = [mock.sentinel.security_groups] - net_id = str(uuid.uuid4()) - _net = os_network.Network( - id=net_id, - name=None, - mtu=None, - provider_network_type=None, - ) - network = ovu.neutron_to_osvif_network(_net) - vif = osv_vif.VIFOpenVSwitch(id='0fa0e837-d34e-4580-a6c4-04f5f607d93e', - network=network) - - m_driver._return_ports_to_pool.return_value = None - m_driver._recovered_pools = True - - cls.release_vif(m_driver, pod, vif, project_id, security_groups) - - m_driver._return_ports_to_pool.assert_not_called() - - @mock.patch('kuryr_kubernetes.controller.drivers.utils.get_vifs') - def test__get_in_use_ports(self, get_vifs): - cls = vif_pool.BaseVIFPool - m_driver = mock.MagicMock(spec=cls) - - kubernetes = self.useFixture(k_fix.MockK8sClient()).client - pod = get_pod_obj() - port_id = str(uuid.uuid4()) - port_network = osv_network.Network(id=str(uuid.uuid4())) - pod_vif = osv_vif.VIFBase(id=port_id, network=port_network) - get_vifs.return_value = {'eth0': pod_vif} - items = [pod] - kubernetes.get.return_value = {'items': items} - network = {} - - resp = cls._get_in_use_ports_info(m_driver) - network[port_network.id] = port_network - self.assertEqual(resp, ([port_id], network)) - - def test__get_in_use_ports_empty(self): - cls = vif_pool.BaseVIFPool - m_driver = mock.MagicMock(spec=cls) - - kubernetes = self.useFixture(k_fix.MockK8sClient()).client - items = [] - kubernetes.get.return_value = {'items': items} - - resp = cls._get_in_use_ports_info(m_driver) - - self.assertEqual(resp, ([], {})) - - def test_cleanup_leftover_ports(self): - cls = vif_pool.BaseVIFPool - m_driver = mock.MagicMock(spec=cls) - - os_net = self.useFixture(k_fix.MockNetworkClient()).client - - port_id = str(uuid.uuid4()) - port = fake.get_port_obj(port_id=port_id) - net_id = 
port.network_id - tags = 'clusterTest' - port.tags = [tags] - os_net.ports.return_value = [port] - oslo_cfg.CONF.set_override('resource_tags', - tags, - group='neutron_defaults') - self.addCleanup(oslo_cfg.CONF.clear_override, 'resource_tags', - group='neutron_defaults') - - os_net.networks.return_value = [os_network.Network(id=net_id)] - - cls._cleanup_leftover_ports(m_driver) - os_net.networks.assert_called() - os_net.delete_port.assert_not_called() - - def test_cleanup_leftover_ports_different_network(self): - cls = vif_pool.BaseVIFPool - m_driver = mock.MagicMock(spec=cls) - - os_net = self.useFixture(k_fix.MockNetworkClient()).client - - port_id = str(uuid.uuid4()) - port = fake.get_port_obj(port_id=port_id) - tags = 'clusterTest' - port.tags = [tags] - os_net.ports.return_value = [port] - oslo_cfg.CONF.set_override('resource_tags', - tags, - group='neutron_defaults') - self.addCleanup(oslo_cfg.CONF.clear_override, 'resource_tags', - group='neutron_defaults') - os_net.networks.return_value = [] - - cls._cleanup_leftover_ports(m_driver) - os_net.networks.assert_called() - os_net.delete_port.assert_not_called() - - def test_cleanup_leftover_ports_no_binding(self): - cls = vif_pool.BaseVIFPool - m_driver = mock.MagicMock(spec=cls) - - os_net = self.useFixture(k_fix.MockNetworkClient()).client - - port_id = str(uuid.uuid4()) - port = fake.get_port_obj(port_id=port_id) - net_id = port.network_id - tags = 'clusterTest' - port.tags = [tags] - port.binding_host_id = None - os_net.ports.return_value = [port] - oslo_cfg.CONF.set_override('resource_tags', - tags, - group='neutron_defaults') - self.addCleanup(oslo_cfg.CONF.clear_override, 'resource_tags', - group='neutron_defaults') - - os_net.networks.return_value = [os_network.Network(id=net_id)] - - cls._cleanup_leftover_ports(m_driver) - os_net.networks.assert_called() - os_net.delete_port.assert_called_once_with(port.id) - - def test_cleanup_leftover_ports_no_tags(self): - cls = vif_pool.BaseVIFPool - m_driver = mock.MagicMock(spec=cls) - - os_net = self.useFixture(k_fix.MockNetworkClient()).client - - port_id = str(uuid.uuid4()) - port = fake.get_port_obj(port_id=port_id) - net_id = port.network_id - tags = 'clusterTest' - os_net.ports.return_value = [port] - oslo_cfg.CONF.set_override('resource_tags', - tags, - group='neutron_defaults') - self.addCleanup(oslo_cfg.CONF.clear_override, 'resource_tags', - group='neutron_defaults') - - os_net.networks.return_value = [os_network.Network(id=net_id)] - - cls._cleanup_leftover_ports(m_driver) - os_net.networks.assert_called() - os_net.delete_port.assert_called_once_with(port.id) - - def test_cleanup_leftover_ports_no_tagging(self): - cls = vif_pool.BaseVIFPool - m_driver = mock.MagicMock(spec=cls) - - os_net = self.useFixture(k_fix.MockNetworkClient()).client - - port_id = str(uuid.uuid4()) - port = fake.get_port_obj(port_id=port_id) - os_net.ports.return_value = [port] - - cls._cleanup_leftover_ports(m_driver) - os_net.networks.assert_not_called() - os_net.delete_port.assert_not_called() - - @mock.patch('kuryr_kubernetes.controller.drivers.utils.delete_ports') - def test_cleanup_leftover_ports_no_tagging_no_binding(self, m_del_ports): - cls = vif_pool.BaseVIFPool - m_driver = mock.MagicMock(spec=cls) - - os_net = self.useFixture(k_fix.MockNetworkClient()).client - - port_id = str(uuid.uuid4()) - port = fake.get_port_obj(port_id=port_id) - port.binding_host_id = None - os_net.ports.return_value = [port] - - cls._cleanup_leftover_ports(m_driver) - os_net.networks.assert_not_called() - 
m_del_ports.assert_called_once_with([port])
-
-
-@ddt.ddt
-class NeutronVIFPool(test_base.TestCase):
-
-    @mock.patch('kuryr_kubernetes.controller.drivers.utils.get_port_name')
-    @mock.patch('eventlet.spawn')
-    def test__get_port_from_pool(self, m_eventlet, m_get_port_name):
-        m_driver = vif_pool.NeutronVIFPool()
-        os_net = self.useFixture(k_fix.MockNetworkClient()).client
-
-        pool_key = mock.sentinel.pool_key
-        port_id = str(uuid.uuid4())
-        port = mock.sentinel.port
-        subnets = mock.sentinel.subnets
-        sgs = ('test-sg',)
-
-        pod = get_pod_obj()
-
-        m_driver._available_ports_pools = AVAILABLE_PORTS_TYPE()
-        m_driver._available_ports_pools[pool_key][sgs] = [port_id]
-        m_driver._existing_vifs = {port_id: port}
-        m_get_port_name.return_value = get_pod_name(pod)
-
-        oslo_cfg.CONF.set_override('ports_pool_min', 5, group='vif_pool')
-        oslo_cfg.CONF.set_override('port_debug', True, group='kubernetes')
-        m_driver._get_pool_size = mock.Mock(return_value=5)
-
-        self.assertEqual(port, m_driver._get_port_from_pool(
-            pool_key, pod, subnets, sgs))
-
-        os_net.update_port.assert_called_once_with(
-            port_id, name=get_pod_name(pod), device_id=pod['metadata']['uid'])
-        # 2 calls come from the constructor, so 1 call in _get_port_from_pool()
-        self.assertEqual(3, m_eventlet.call_count)
-
-    @mock.patch('kuryr_kubernetes.controller.drivers.utils.get_port_name')
-    @mock.patch('eventlet.spawn')
-    def test__get_port_from_pool_pool_populate(self, m_eventlet,
-                                               m_get_port_name):
-        m_driver = vif_pool.NeutronVIFPool()
-        os_net = self.useFixture(k_fix.MockNetworkClient()).client
-
-        pool_key = mock.sentinel.pool_key
-        port_id = str(uuid.uuid4())
-        port = mock.sentinel.port
-        subnets = mock.sentinel.subnets
-        sgs = ('test-sg',)
-
-        pod = get_pod_obj()
-
-        m_driver._available_ports_pools = AVAILABLE_PORTS_TYPE()
-        m_driver._available_ports_pools[pool_key][sgs] = [port_id]
-        m_driver._existing_vifs = {port_id: port}
-        m_get_port_name.return_value = get_pod_name(pod)
-
-        oslo_cfg.CONF.set_override('ports_pool_min', 5, group='vif_pool')
-        oslo_cfg.CONF.set_override('port_debug', True, group='kubernetes')
-        m_driver._get_pool_size = mock.Mock(return_value=3)
-
-        self.assertEqual(port, m_driver._get_port_from_pool(
-            pool_key, pod, subnets, sgs))
-
-        os_net.update_port.assert_called_once_with(
-            port_id, name=get_pod_name(pod), device_id=pod['metadata']['uid'])
-        # 2 calls come from the constructor, so 1 call in _get_port_from_pool()
-        self.assertEqual(3, m_eventlet.call_count)
-
-    def test__get_port_from_pool_empty_pool(self):
-        cls = vif_pool.NeutronVIFPool
-        m_driver = mock.MagicMock(spec=cls)
-
-        os_net = self.useFixture(k_fix.MockNetworkClient()).client
-
-        pod = get_pod_obj()
-        pool_key = mock.sentinel.pool_key
-        subnets = mock.sentinel.subnets
-        sgs = ('test-sg',)
-
-        m_driver._available_ports_pools = AVAILABLE_PORTS_TYPE()
-        m_driver._available_ports_pools[pool_key][sgs] = []
-
-        self.assertRaises(exceptions.ResourceNotReady, cls._get_port_from_pool,
-                          m_driver, pool_key, pod, subnets, sgs)
-
-        os_net.update_port.assert_not_called()
-
-    @mock.patch('eventlet.spawn')
-    def test__get_port_from_pool_empty_pool_reuse(self, m_eventlet):
-        cls = vif_pool.NeutronVIFPool
-        m_driver = mock.MagicMock(spec=cls)
-
-        os_net = self.useFixture(k_fix.MockNetworkClient()).client
-
-        pod = get_pod_obj()
-        port_id = str(uuid.uuid4())
-        port_id_2 = str(uuid.uuid4())
-        port = mock.sentinel.port
-        port_2 =
mock.sentinel.port_2 - pool_key = mock.sentinel.pool_key - subnets = mock.sentinel.subnets - sgs = ('test-sg',) - sgs_2 = ('test-sg2',) - sgs_3 = ('test-sg3',) - - oslo_cfg.CONF.set_override('port_debug', False, group='kubernetes') - - m_driver._available_ports_pools = AVAILABLE_PORTS_TYPE() - m_driver._available_ports_pools[pool_key][sgs] = [] - m_driver._available_ports_pools[pool_key][sgs_2] = [port_id] - m_driver._available_ports_pools[pool_key][sgs_3] = [port_id_2] - m_driver._existing_vifs = {port_id: port, port_id_2: port_2} - - self.assertEqual(port, cls._get_port_from_pool( - m_driver, pool_key, pod, subnets, sgs)) - - os_net.update_port.assert_called_once_with( - port_id, security_groups=list(sgs)) - m_eventlet.assert_called() - - def test__get_port_from_pool_empty_pool_reuse_no_ports(self): - cls = vif_pool.NeutronVIFPool - m_driver = mock.MagicMock(spec=cls) - - os_net = self.useFixture(k_fix.MockNetworkClient()).client - - pod = get_pod_obj() - pool_key = mock.sentinel.pool_key - subnets = mock.sentinel.subnets - sgs = ('test-sg',) - sgs_2 = ('test-sg2',) - - oslo_cfg.CONF.set_override('port_debug', False, group='kubernetes') - pool_length = 5 - m_driver._get_pool_size.return_value = pool_length - - m_driver._available_ports_pools = AVAILABLE_PORTS_TYPE() - m_driver._available_ports_pools[pool_key][(sgs_2,)] = [] - m_driver._available_ports_pools[pool_key][(sgs,)] = [] - - self.assertRaises(exceptions.ResourceNotReady, cls._get_port_from_pool, - m_driver, pool_key, pod, subnets, sgs) - - os_net.update_port.assert_not_called() - - @ddt.data((0), (10)) - def test__trigger_return_to_pool(self, max_pool): - cls = vif_pool.NeutronVIFPool - m_driver = mock.MagicMock(spec=cls) - - os_net = self.useFixture(k_fix.MockNetworkClient()).client - - pool_key = ('node_ip', 'project_id') - port_id = str(uuid.uuid4()) - pool_length = 5 - - m_driver._recyclable_ports = {port_id: pool_key} - m_driver._available_ports_pools = AVAILABLE_PORTS_TYPE() - m_driver._recovered_pools = True - oslo_cfg.CONF.set_override('ports_pool_max', - max_pool, - group='vif_pool') - oslo_cfg.CONF.set_override('port_debug', - True, - group='kubernetes') - os_net.ports.return_value = [ - os_port.Port( - id=port_id, - security_group_ids=['security_group_modified'], - ), - ] - m_driver._get_pool_size.return_value = pool_length - - cls._trigger_return_to_pool(m_driver) - - os_net.update_port.assert_called_once_with( - port_id, name=constants.KURYR_PORT_NAME, device_id='') - os_net.delete_port.assert_not_called() - - @ddt.data((0), (10)) - def test__trigger_return_to_pool_no_update(self, max_pool): - cls = vif_pool.NeutronVIFPool - m_driver = mock.MagicMock(spec=cls) - - os_net = self.useFixture(k_fix.MockNetworkClient()).client - - pool_key = ('node_ip', 'project_id') - port_id = str(uuid.uuid4()) - pool_length = 5 - - m_driver._recyclable_ports = {port_id: pool_key} - m_driver._available_ports_pools = AVAILABLE_PORTS_TYPE() - oslo_cfg.CONF.set_override('ports_pool_max', - max_pool, - group='vif_pool') - oslo_cfg.CONF.set_override('port_debug', - False, - group='kubernetes') - - port = fake.get_port_obj(port_id=port_id) - port.security_group_ids = ['security_group'] - os_net.ports.return_value = (p for p in [port]) - m_driver._get_pool_size.return_value = pool_length - m_driver._recovered_pools = True - - cls._trigger_return_to_pool(m_driver) - - os_net.update_port.assert_not_called() - os_net.delete_port.assert_not_called() - - def test__trigger_return_to_pool_delete_port(self): - cls = vif_pool.NeutronVIFPool - m_driver 
= mock.MagicMock(spec=cls)
-
-        os_net = self.useFixture(k_fix.MockNetworkClient()).client
-
-        pool_key = ('node_ip', 'project_id')
-        port_id = str(uuid.uuid4())
-        pool_length = 10
-        vif = mock.sentinel.vif
-
-        m_driver._recyclable_ports = {port_id: pool_key}
-        m_driver._available_ports_pools = AVAILABLE_PORTS_TYPE()
-        m_driver._existing_vifs = {port_id: vif}
-        m_driver._recovered_pools = True
-        oslo_cfg.CONF.set_override('ports_pool_max',
-                                   10,
-                                   group='vif_pool')
-        os_net.ports.return_value = [
-            os_port.Port(
-                id=port_id,
-                security_group_ids=['security_group_modified'],
-            ),
-        ]
-        m_driver._get_pool_size.return_value = pool_length
-
-        cls._trigger_return_to_pool(m_driver)
-
-        os_net.update_port.assert_not_called()
-        os_net.delete_port.assert_called_once_with(port_id)
-
-    def test__trigger_return_to_pool_update_exception(self):
-        cls = vif_pool.NeutronVIFPool
-        m_driver = mock.MagicMock(spec=cls)
-
-        os_net = self.useFixture(k_fix.MockNetworkClient()).client
-
-        pool_key = ('node_ip', 'project_id')
-        port_id = str(uuid.uuid4())
-        pool_length = 5
-
-        m_driver._recyclable_ports = {port_id: pool_key}
-        m_driver._available_ports_pools = AVAILABLE_PORTS_TYPE()
-        m_driver._recovered_pools = True
-        oslo_cfg.CONF.set_override('ports_pool_max',
-                                   0,
-                                   group='vif_pool')
-        oslo_cfg.CONF.set_override('port_debug',
-                                   True,
-                                   group='kubernetes')
-        os_net.ports.return_value = [
-            os_port.Port(
-                id=port_id,
-                security_group_ids=['security_group_modified'],
-            ),
-        ]
-        m_driver._get_pool_size.return_value = pool_length
-        os_net.update_port.side_effect = os_exc.SDKException
-
-        cls._trigger_return_to_pool(m_driver)
-
-        os_net.update_port.assert_called_once_with(
-            port_id, name=constants.KURYR_PORT_NAME, device_id='')
-        os_net.delete_port.assert_not_called()
-
-    def test__trigger_return_to_pool_delete_exception(self):
-        cls = vif_pool.NeutronVIFPool
-        m_driver = mock.MagicMock(spec=cls)
-
-        os_net = self.useFixture(k_fix.MockNetworkClient()).client
-
-        pool_key = ('node_ip', 'project_id')
-        port_id = str(uuid.uuid4())
-        pool_length = 10
-        vif = mock.sentinel.vif
-
-        m_driver._recyclable_ports = {port_id: pool_key}
-        m_driver._available_ports_pools = AVAILABLE_PORTS_TYPE()
-        m_driver._existing_vifs = {port_id: vif}
-        m_driver._recovered_pools = True
-        oslo_cfg.CONF.set_override('ports_pool_max',
-                                   5,
-                                   group='vif_pool')
-        os_net.ports.return_value = [
-            os_port.Port(
-                id=port_id,
-                security_group_ids=['security_group_modified'],
-            ),
-        ]
-        m_driver._get_pool_size.return_value = pool_length
-
-        cls._trigger_return_to_pool(m_driver)
-
-        os_net.update_port.assert_not_called()
-        os_net.delete_port.assert_called_once_with(port_id)
-
-    def test__trigger_return_to_pool_delete_key_error(self):
-        cls = vif_pool.NeutronVIFPool
-        m_driver = mock.MagicMock(spec=cls)
-
-        os_net = self.useFixture(k_fix.MockNetworkClient()).client
-
-        pool_key = ('node_ip', 'project_id')
-        port_id = str(uuid.uuid4())
-        pool_length = 10
-
-        m_driver._recyclable_ports = {port_id: pool_key}
-        m_driver._available_ports_pools = AVAILABLE_PORTS_TYPE()
-        m_driver._existing_vifs = {}
-        m_driver._recovered_pools = True
-        oslo_cfg.CONF.set_override('ports_pool_max',
-                                   5,
-                                   group='vif_pool')
-        os_net.ports.return_value = [
-            os_port.Port(
-                id=port_id,
-                security_group_ids=['security_group_modified'],
-            ),
-        ]
-        m_driver._get_pool_size.return_value = pool_length
-
-        cls._trigger_return_to_pool(m_driver)
-
-        os_net.update_port.assert_not_called()
-
os_net.delete_port.assert_not_called() - - @mock.patch('kuryr_kubernetes.os_vif_util.neutron_to_osvif_vif') - @mock.patch('kuryr_kubernetes.utils.get_subnet') - def test__recover_precreated_ports(self, m_get_subnet, m_to_osvif): - cls = vif_pool.NeutronVIFPool - m_driver = mock.MagicMock(spec=cls) - os_net = self.useFixture(k_fix.MockNetworkClient()).client - - cls_vif_driver = neutron_vif.NeutronPodVIFDriver - vif_driver = mock.MagicMock(spec=cls_vif_driver) - m_driver._drv_vif = vif_driver - - m_driver._existing_vifs = {} - m_driver._available_ports_pools = AVAILABLE_PORTS_TYPE() - - port_id = str(uuid.uuid4()) - port = fake.get_port_obj(port_id=port_id) - filtered_ports = [port] - os_net.ports.return_value = filtered_ports - vif_plugin = mock.sentinel.plugin - port.binding_vif_type = vif_plugin - - oslo_cfg.CONF.set_override('port_debug', - False, - group='kubernetes') - - subnet_id = port.fixed_ips[0]['subnet_id'] - net_id = str(uuid.uuid4()) - _net = os_network.Network( - id=net_id, - name=None, - mtu=None, - provider_network_type=None, - ) - network = ovu.neutron_to_osvif_network(_net) - subnet = {subnet_id: network} - m_get_subnet.return_value = network - vif = mock.sentinel.vif - m_to_osvif.return_value = vif - m_driver._get_in_use_ports_info.return_value = [], {} - - pool_key = (port.binding_host_id, port.project_id, net_id) - m_driver._get_pool_key.return_value = pool_key - m_driver._get_trunks_info.return_value = ({}, {}, {}) - - cls._recover_precreated_ports(m_driver) - - os_net.ports.assert_called_once() - m_get_subnet.assert_called_with(subnet_id) - m_to_osvif.assert_called_once_with(vif_plugin, port, subnet) - - self.assertEqual(m_driver._existing_vifs[port_id], vif) - self.assertEqual(m_driver._available_ports_pools[pool_key], - {tuple(port.security_group_ids): [port_id]}) - - @mock.patch('kuryr_kubernetes.os_vif_util.neutron_to_osvif_vif') - @mock.patch('kuryr_kubernetes.utils.get_subnet') - def test__recover_precreated_ports_empty(self, m_get_subnet, m_to_osvif): - cls = vif_pool.NeutronVIFPool - m_driver = mock.MagicMock(spec=cls) - os_net = self.useFixture(k_fix.MockNetworkClient()).client - - filtered_ports = [] - os_net.ports.return_value = filtered_ports - m_driver._get_trunks_info.return_value = ({}, {}, {}) - m_driver._get_in_use_ports_info.return_value = [], {} - - oslo_cfg.CONF.set_override('port_debug', - False, - group='kubernetes') - - cls._recover_precreated_ports(m_driver) - - os_net.ports.assert_called_once() - m_get_subnet.assert_not_called() - m_to_osvif.assert_not_called() - - @mock.patch('eventlet.GreenPool') - def test_delete_network_pools(self, m_green_pool): - cls = vif_pool.NeutronVIFPool - m_driver = mock.MagicMock(spec=cls) - m_pool = mock.MagicMock() - m_green_pool.return_value = m_pool - - net_id = mock.sentinel.net_id - pool_key = ('node_ip', 'project_id') - port_id = str(uuid.uuid4()) - m_driver._available_ports_pools = AVAILABLE_PORTS_TYPE() - m_driver._available_ports_pools[pool_key][('sg',)] = [port_id] - m_driver._lock = threading.Lock() - m_driver._populate_pool_lock = { - pool_key: mock.MagicMock(spec=threading.Lock())} - m_driver._existing_vifs = {port_id: mock.sentinel.vif} - m_driver._recovered_pools = True - - m_driver._get_pool_key_net.return_value = net_id - - cls.delete_network_pools(m_driver, net_id) - - m_driver._trigger_return_to_pool.assert_called_once() - m_driver._get_pool_key_net.assert_called_once() - m_pool.imap.assert_called_once_with(utils.delete_neutron_port, - [port_id]) - - def 
test_delete_network_pools_not_ready(self): - cls = vif_pool.NeutronVIFPool - m_driver = mock.MagicMock(spec=cls) - os_net = self.useFixture(k_fix.MockNetworkClient()).client - - net_id = mock.sentinel.net_id - m_driver._recovered_pools = False - - self.assertRaises(exceptions.ResourceNotReady, - cls.delete_network_pools, m_driver, net_id) - - m_driver._trigger_return_to_pool.assert_not_called() - m_driver._get_pool_key_net.assert_not_called() - os_net.delete_port.assert_not_called() - - @mock.patch('eventlet.GreenPool') - def test_delete_network_pools_missing_port_id(self, m_green_pool): - cls = vif_pool.NeutronVIFPool - m_driver = mock.MagicMock(spec=cls) - m_pool = mock.MagicMock() - m_green_pool.return_value = m_pool - - net_id = mock.sentinel.net_id - pool_key = ('node_ip', 'project_id') - port_id = str(uuid.uuid4()) - m_driver._available_ports_pools = AVAILABLE_PORTS_TYPE() - m_driver._available_ports_pools[pool_key][('sgs',)] = [port_id] - m_driver._lock = threading.Lock() - m_driver._populate_pool_lock = { - pool_key: mock.MagicMock(spec=threading.Lock())} - m_driver._existing_vifs = {} - m_driver._recovered_pools = True - - m_driver._get_pool_key_net.return_value = net_id - - cls.delete_network_pools(m_driver, net_id) - - m_driver._trigger_return_to_pool.assert_called_once() - m_driver._get_pool_key_net.assert_called_once() - m_pool.imap.assert_called_once_with(utils.delete_neutron_port, - [port_id]) - - -@ddt.ddt -class NestedVIFPool(test_base.TestCase): - - def _get_trunk_obj(self, port_id=None, subport_id=None, trunk_id=None): - trunk_obj = { - 'status': 'ACTIVE', - 'name': 'trunk-01aa31ea-5adf-4776-9c5d-21b50dba0ccc', - 'admin_state_up': True, - 'tenant_id': '18fbc0e645d74e83931193ef99dfe5c5', - 'sub_ports': [{'port_id': '85104e7d-8597-4bf7-94e7-a447ef0b50f1', - 'segmentation_type': 'vlan', - 'segmentation_id': 4056}], - 'updated_at': '2017-06-09T13:25:01Z', - 'id': 'd1217757-848f-45dd-9ff2-3640f9b053dc', - 'revision_number': 2359, - 'project_id': '18fbc0e645d74e83931193ef99dfe5c5', - 'port_id': '01aa31ea-5adf-4776-9c5d-21b50dba0ccc', - 'created_at': '2017-05-19T16:43:22Z', - 'description': '' - } - - if port_id: - trunk_obj['port_id'] = port_id - if subport_id: - trunk_obj['sub_ports'][0]['port_id'] = subport_id - if trunk_id: - trunk_obj['id'] = trunk_id - - return trunk_obj - - def _get_parent_ports(self, trunk_objs): - parent_ports = {} - for trunk_obj in trunk_objs: - parent_ports[trunk_obj['id']] = { - 'ip': 'kuryr-devstack', - 'subports': trunk_obj['sub_ports']} - return parent_ports - - @mock.patch('kuryr_kubernetes.controller.drivers.utils.get_port_name') - @mock.patch('eventlet.spawn') - def test__get_port_from_pool(self, m_eventlet, m_get_port_name): - m_driver = vif_pool.NestedVIFPool() - os_net = self.useFixture(k_fix.MockNetworkClient()).client - - pool_key = mock.sentinel.pool_key - port_id = str(uuid.uuid4()) - port = mock.sentinel.port - subnets = mock.sentinel.subnets - sgs = ('test-sg',) - - pod = get_pod_obj() - - m_driver._available_ports_pools = AVAILABLE_PORTS_TYPE() - m_driver._available_ports_pools[pool_key][sgs] = [port_id] - m_driver._existing_vifs = {port_id: port} - m_get_port_name.return_value = get_pod_name(pod) - - oslo_cfg.CONF.set_override('ports_pool_min', 5, group='vif_pool') - oslo_cfg.CONF.set_override('port_debug', True, group='kubernetes') - m_driver._get_pool_size = mock.Mock(return_value=5) - - self.assertEqual(port, m_driver._get_port_from_pool( - pool_key, pod, subnets, sgs)) - - os_net.update_port.assert_called_once_with( - port_id, 
name=get_pod_name(pod)) - self.assertEqual(3, m_eventlet.call_count) - - @mock.patch('kuryr_kubernetes.controller.drivers.utils.get_port_name') - @mock.patch('eventlet.spawn') - def test__get_port_from_pool_pool_populate(self, m_eventlet, - m_get_port_name): - m_driver = vif_pool.NestedVIFPool() - os_net = self.useFixture(k_fix.MockNetworkClient()).client - - pool_key = mock.sentinel.pool_key - port_id = str(uuid.uuid4()) - port = mock.sentinel.port - subnets = mock.sentinel.subnets - sgs = ('test-sg',) - - pod = get_pod_obj() - - m_driver._available_ports_pools = AVAILABLE_PORTS_TYPE() - m_driver._available_ports_pools[pool_key][sgs] = [port_id] - m_driver._existing_vifs = {port_id: port} - m_get_port_name.return_value = get_pod_name(pod) - - oslo_cfg.CONF.set_override('ports_pool_min', 5, group='vif_pool') - oslo_cfg.CONF.set_override('port_debug', True, group='kubernetes') - m_driver._get_pool_size = mock.Mock(return_value=3) - - self.assertEqual(port, m_driver._get_port_from_pool( - pool_key, pod, subnets, sgs)) - - os_net.update_port.assert_called_once_with( - port_id, name=get_pod_name(pod)) - # 2 calls come from the constructor, so 1 call in _get_port_from_pool() - self.assertEqual(3, m_eventlet.call_count) - - def test__get_port_from_pool_empty_pool(self): - cls = vif_pool.NestedVIFPool - m_driver = mock.MagicMock(spec=cls) - - os_net = self.useFixture(k_fix.MockNetworkClient()).client - - pod = mock.sentinel.pod - pool_key = mock.sentinel.pool_key - subnets = mock.sentinel.subnets - sgs = ('test-sg',) - - m_driver._available_ports_pools = AVAILABLE_PORTS_TYPE() - m_driver._available_ports_pools[pool_key][sgs] = [] - - self.assertRaises(exceptions.ResourceNotReady, cls._get_port_from_pool, - m_driver, pool_key, pod, subnets, sgs) - - os_net.update_port.assert_not_called() - - @mock.patch('eventlet.spawn') - def test__get_port_from_pool_empty_pool_reuse(self, m_eventlet): - cls = vif_pool.NestedVIFPool - m_driver = mock.MagicMock(spec=cls) - - os_net = self.useFixture(k_fix.MockNetworkClient()).client - - pod = mock.sentinel.pod - port_id = str(uuid.uuid4()) - port_id_2 = str(uuid.uuid4()) - port = mock.sentinel.port - port_2 = mock.sentinel.port_2 - pool_key = mock.sentinel.pool_key - subnets = mock.sentinel.subnets - sgs = ('test-sg',) - sgs_2 = ('test-sg2',) - sgs_3 = ('test-sg3',) - - oslo_cfg.CONF.set_override('port_debug', False, group='kubernetes') - pool_length = 5 - m_driver._get_pool_size.return_value = pool_length - - m_driver._available_ports_pools = AVAILABLE_PORTS_TYPE() - m_driver._available_ports_pools[pool_key][sgs] = [] - m_driver._available_ports_pools[pool_key][sgs_2] = [port_id] - m_driver._available_ports_pools[pool_key][sgs_3] = [port_id_2] - m_driver._existing_vifs = {port_id: port, port_id_2: port_2} - - self.assertEqual(port, cls._get_port_from_pool( - m_driver, pool_key, pod, subnets, sgs)) - - os_net.update_port.assert_called_once_with( - port_id, security_groups=list(sgs)) - m_eventlet.assert_called() - - def test__get_port_from_pool_empty_pool_reuse_no_ports(self): - cls = vif_pool.NestedVIFPool - m_driver = mock.MagicMock(spec=cls) - - os_net = self.useFixture(k_fix.MockNetworkClient()).client - - pod = mock.sentinel.pod - pool_key = mock.sentinel.pool_key - subnets = mock.sentinel.subnets - sgs = ('test-sg',) - sgs_2 = ('test-sg2',) - - oslo_cfg.CONF.set_override('port_debug', False, group='kubernetes') - pool_length = 5 - m_driver._get_pool_size.return_value = pool_length - - m_driver._available_ports_pools = AVAILABLE_PORTS_TYPE() - 
m_driver._available_ports_pools[pool_key][sgs] = [] - m_driver._available_ports_pools[pool_key][sgs_2] = [] - - self.assertRaises(exceptions.ResourceNotReady, cls._get_port_from_pool, - m_driver, pool_key, pod, subnets, sgs) - - os_net.update_port.assert_not_called() - - @ddt.data((0), (10)) - def test__trigger_return_to_pool(self, max_pool): - cls = vif_pool.NestedVIFPool - m_driver = mock.MagicMock(spec=cls) - - os_net = self.useFixture(k_fix.MockNetworkClient()).client - - pool_key = ('node_ip', 'project_id') - port_id = str(uuid.uuid4()) - pool_length = 5 - - m_driver._recyclable_ports = {port_id: pool_key} - m_driver._available_ports_pools = AVAILABLE_PORTS_TYPE() - oslo_cfg.CONF.set_override('ports_pool_max', - max_pool, - group='vif_pool') - oslo_cfg.CONF.set_override('port_debug', - True, - group='kubernetes') - os_net.ports.return_value = [ - os_port.Port( - id=port_id, - security_group_ids=['security_group_modified'], - ), - ] - m_driver._get_pool_size.return_value = pool_length - m_driver._recovered_pools = True - - cls._trigger_return_to_pool(m_driver) - - (os_net.update_port - .assert_called_once_with(port_id, - name=constants.KURYR_PORT_NAME)) - os_net.delete_port.assert_not_called() - - @ddt.data((0), (10)) - def test__trigger_return_to_pool_no_update(self, max_pool): - cls = vif_pool.NestedVIFPool - m_driver = mock.MagicMock(spec=cls) - - os_net = self.useFixture(k_fix.MockNetworkClient()).client - - pool_key = ('node_ip', 'project_id') - port_id = str(uuid.uuid4()) - pool_length = 5 - - m_driver._recyclable_ports = {port_id: pool_key} - m_driver._available_ports_pools = AVAILABLE_PORTS_TYPE() - oslo_cfg.CONF.set_override('ports_pool_max', - max_pool, - group='vif_pool') - oslo_cfg.CONF.set_override('port_debug', - False, - group='kubernetes') - port = fake.get_port_obj(port_id=port_id) - port.security_group_ids = ['security_group'] - os_net.ports.return_value = [port] - m_driver._get_pool_size.return_value = pool_length - m_driver._recovered_pools = True - - cls._trigger_return_to_pool(m_driver) - - os_net.update_port.assert_not_called() - os_net.delete_port.assert_not_called() - - def test__trigger_return_to_pool_delete_port(self): - cls = vif_pool.NestedVIFPool - m_driver = mock.MagicMock(spec=cls) - - os_net = self.useFixture(k_fix.MockNetworkClient()).client - - cls_vif_driver = nested_vlan_vif.NestedVlanPodVIFDriver - vif_driver = mock.MagicMock(spec=cls_vif_driver) - m_driver._drv_vif = vif_driver - - pool_key = ('node_ip', 'project_id') - port_id = str(uuid.uuid4()) - pool_length = 10 - vif = mock.MagicMock() - vif.vlan_id = mock.sentinel.vlan_id - trunk_id = str(uuid.uuid4()) - - m_driver._recyclable_ports = {port_id: pool_key} - m_driver._available_ports_pools = AVAILABLE_PORTS_TYPE() - m_driver._existing_vifs = {port_id: vif} - oslo_cfg.CONF.set_override('ports_pool_max', - 10, - group='vif_pool') - port = fake.get_port_obj(port_id=port_id) - port.security_group_ids = ['security_group_modified'] - os_net.ports.return_value = [port] - m_driver._get_pool_size.return_value = pool_length - m_driver._get_trunk_id.return_value = trunk_id - m_driver._known_trunk_ids = {} - m_driver._recovered_pools = True - - cls._trigger_return_to_pool(m_driver) - - os_net.update_port.assert_not_called() - os_net.delete_port.assert_called_once_with(port_id) - m_driver._get_trunk_id.assert_called_once() - m_driver._drv_vif._remove_subport.assert_called_once_with(trunk_id, - port_id) - - def test__trigger_return_to_pool_update_exception(self): - cls = vif_pool.NestedVIFPool - m_driver = 
mock.MagicMock(spec=cls) - - os_net = self.useFixture(k_fix.MockNetworkClient()).client - - pool_key = ('node_ip', 'project_id') - port_id = str(uuid.uuid4()) - pool_length = 5 - - m_driver._recyclable_ports = {port_id: pool_key} - m_driver._available_ports_pools = AVAILABLE_PORTS_TYPE() - oslo_cfg.CONF.set_override('ports_pool_max', - 0, - group='vif_pool') - oslo_cfg.CONF.set_override('port_debug', - True, - group='kubernetes') - port = fake.get_port_obj(port_id=port_id) - port.security_group_ids = ['security_group_modified'] - os_net.ports.return_value = [port] - m_driver._get_pool_size.return_value = pool_length - os_net.update_port.side_effect = os_exc.SDKException - m_driver._recovered_pools = True - - cls._trigger_return_to_pool(m_driver) - - os_net.update_port.assert_called_once_with( - port_id, name=constants.KURYR_PORT_NAME) - os_net.delete_port.assert_not_called() - - def test__trigger_return_to_pool_delete_exception(self): - cls = vif_pool.NestedVIFPool - m_driver = mock.MagicMock(spec=cls) - os_net = self.useFixture(k_fix.MockNetworkClient()).client - cls_vif_driver = nested_vlan_vif.NestedVlanPodVIFDriver - vif_driver = mock.MagicMock(spec=cls_vif_driver) - m_driver._drv_vif = vif_driver - - pool_key = ('node_ip', 'project_id') - port_id = str(uuid.uuid4()) - pool_length = 10 - vif = mock.MagicMock() - vif.vlan_id = mock.sentinel.vlan_id - trunk_id = str(uuid.uuid4()) - - m_driver._recyclable_ports = {port_id: pool_key} - m_driver._available_ports_pools = AVAILABLE_PORTS_TYPE() - m_driver._existing_vifs = {port_id: vif} - oslo_cfg.CONF.set_override('ports_pool_max', - 5, - group='vif_pool') - port = fake.get_port_obj(port_id=port_id) - port.security_group_ids = ['security_group_modified'] - os_net.ports.return_value = [port] - m_driver._get_pool_size.return_value = pool_length - m_driver._get_trunk_id.return_value = trunk_id - m_driver._known_trunk_ids = {} - m_driver._recovered_pools = True - - cls._trigger_return_to_pool(m_driver) - - os_net.update_port.assert_not_called() - m_driver._get_trunk_id.assert_called_once() - m_driver._drv_vif._remove_subport.assert_called_once_with(trunk_id, - port_id) - os_net.delete_port.assert_called_once_with(port_id) - - def test__trigger_return_to_pool_delete_key_error(self): - cls = vif_pool.NestedVIFPool - m_driver = mock.MagicMock(spec=cls) - os_net = self.useFixture(k_fix.MockNetworkClient()).client - cls_vif_driver = nested_vlan_vif.NestedVlanPodVIFDriver - vif_driver = mock.MagicMock(spec=cls_vif_driver) - m_driver._drv_vif = vif_driver - - pool_key = ('node_ip', 'project_id') - port_id = str(uuid.uuid4()) - pool_length = 10 - trunk_id = str(uuid.uuid4()) - - m_driver._recyclable_ports = {port_id: pool_key} - m_driver._available_ports_pools = AVAILABLE_PORTS_TYPE() - m_driver._existing_vifs = {} - oslo_cfg.CONF.set_override('ports_pool_max', - 5, - group='vif_pool') - port = fake.get_port_obj(port_id=port_id) - port.security_group_ids = ['security_group_modified'] - os_net.ports.return_value = [port] - m_driver._get_pool_size.return_value = pool_length - m_driver._known_trunk_ids = {} - m_driver._get_trunk_id.return_value = trunk_id - m_driver._recovered_pools = True - - cls._trigger_return_to_pool(m_driver) - - os_net.update_port.assert_not_called() - m_driver._get_trunk_id.assert_called_once() - m_driver._drv_vif._remove_subport.assert_called_once_with(trunk_id, - port_id) - os_net.delete_port.assert_not_called() - - @mock.patch('kuryr_kubernetes.utils.get_subnet') - def test__get_trunk_info(self, m_get_subnet): - cls = 
vif_pool.NestedVIFPool - m_driver = mock.MagicMock(spec=cls) - os_net = self.useFixture(k_fix.MockNetworkClient()).client - - port_id = str(uuid.uuid4()) - trunk_port = fake.get_port_obj(port_id=port_id) - trunk_id = str(uuid.uuid4()) - trunk_details = { - 'trunk_id': trunk_id, - 'sub_ports': [{ - 'port_id': '85104e7d-8597-4bf7-94e7-a447ef0b50f1', - 'segmentation_type': 'vlan', - 'segmentation_id': 4056}]} - trunk_port.trunk_details = trunk_details - - subport_id = str(uuid.uuid4()) - subport = fake.get_port_obj(port_id=subport_id, - device_owner='trunk:subport') - os_net.ports.return_value = [trunk_port, subport] - m_driver._get_in_use_ports_info.return_value = [], {} - subnet = mock.sentinel.subnet - m_get_subnet.return_value = subnet - - exp_p_ports = {trunk_id: { - 'ip': trunk_port.fixed_ips[0]['ip_address'], - 'subports': trunk_details['sub_ports']}} - exp_subnets = {subport.fixed_ips[0]['subnet_id']: - {subport.fixed_ips[0]['subnet_id']: subnet}} - - r_p_ports, r_subports, r_subnets = cls._get_trunks_info(m_driver) - - self.assertEqual(r_p_ports, exp_p_ports) - self.assertDictEqual(r_subports[subport_id].to_dict(), - subport.to_dict()) - self.assertEqual(r_subnets, exp_subnets) - - def test__get_trunk_info_empty(self): - cls = vif_pool.NestedVIFPool - m_driver = mock.MagicMock(spec=cls) - os_net = self.useFixture(k_fix.MockNetworkClient()).client - - os_net.ports.return_value = [] - m_driver._get_in_use_ports_info.return_value = [], {} - - r_p_ports, r_subports, r_subnets = cls._get_trunks_info(m_driver) - - self.assertEqual(r_p_ports, {}) - self.assertEqual(r_subports, {}) - self.assertEqual(r_subnets, {}) - - def test__get_trunk_info_no_trunk_details(self): - cls = vif_pool.NestedVIFPool - m_driver = mock.MagicMock(spec=cls) - os_net = self.useFixture(k_fix.MockNetworkClient()).client - - port_id = str(uuid.uuid4()) - port = fake.get_port_obj(port_id=port_id, device_owner='compute:nova') - os_net.ports.return_value = [port] - m_driver._get_in_use_ports_info.return_value = [], {} - - r_p_ports, r_subports, r_subnets = cls._get_trunks_info(m_driver) - - self.assertEqual(r_p_ports, {}) - self.assertEqual(r_subports, {}) - self.assertEqual(r_subnets, {}) - - @mock.patch('kuryr_kubernetes.os_vif_util.' 
- 'neutron_to_osvif_vif_nested_vlan') - def test__precreated_ports_recover(self, m_to_osvif): - cls = vif_pool.NestedVIFPool - m_driver = mock.MagicMock(spec=cls) - - os_net = self.useFixture(k_fix.MockNetworkClient()).client - - m_driver._available_ports_pools = AVAILABLE_PORTS_TYPE() - m_driver._existing_vifs = {} - - oslo_cfg.CONF.set_override('port_debug', - True, - group='kubernetes') - port_id = str(uuid.uuid4()) - trunk_id = str(uuid.uuid4()) - trunk_obj = self._get_trunk_obj(port_id=trunk_id, subport_id=port_id) - port = fake.get_port_obj(port_id=port_id, device_owner='trunk:subport') - - p_ports = self._get_parent_ports([trunk_obj]) - a_subports = {port_id: port} - subnet_id = port.fixed_ips[0]['subnet_id'] - net_id = str(uuid.uuid4()) - _net = os_network.Network( - id=net_id, - name=None, - mtu=None, - provider_network_type=None, - ) - network = ovu.neutron_to_osvif_network(_net) - subnets = {subnet_id: {subnet_id: network}} - m_driver._get_trunks_info.return_value = (p_ports, a_subports, - subnets) - - vif = mock.sentinel.vif - m_to_osvif.return_value = vif - - pool_key = (port.binding_host_id, port.project_id, net_id) - m_driver._get_pool_key.return_value = pool_key - - cls._precreated_ports(m_driver, 'recover') - - m_driver._get_trunks_info.assert_called_once() - self.assertEqual(m_driver._existing_vifs[port_id], vif) - self.assertEqual(m_driver._available_ports_pools[pool_key], - {tuple(port.security_group_ids): [port_id]}) - os_net.delete_port.assert_not_called() - - @mock.patch('kuryr_kubernetes.os_vif_util.' - 'neutron_to_osvif_vif_nested_vlan') - def test__precreated_ports_recover_plus_port_cleanup(self, m_to_osvif): - cls = vif_pool.NestedVIFPool - m_driver = mock.MagicMock(spec=cls) - os_net = self.useFixture(k_fix.MockNetworkClient()).client - - m_driver._available_ports_pools = AVAILABLE_PORTS_TYPE() - m_driver._existing_vifs = {} - - oslo_cfg.CONF.set_override('port_debug', - True, - group='kubernetes') - - port_id = str(uuid.uuid4()) - trunk_id = str(uuid.uuid4()) - trunk_obj = self._get_trunk_obj(port_id=trunk_id, subport_id=port_id) - port = fake.get_port_obj(port_id=port_id, device_owner='trunk:subport') - port_to_delete_id = str(uuid.uuid4()) - port_to_delete = fake.get_port_obj(port_id=port_to_delete_id, - device_owner='trunk:subport') - - p_ports = self._get_parent_ports([trunk_obj]) - a_subports = {port_id: port, port_to_delete_id: port_to_delete} - subnet_id = port.fixed_ips[0]['subnet_id'] - net_id = str(uuid.uuid4()) - _net = os_network.Network( - id=net_id, - name=None, - mtu=None, - provider_network_type=None, - ) - network = ovu.neutron_to_osvif_network(_net) - subnets = {subnet_id: {subnet_id: network}} - m_driver._get_trunks_info.return_value = (p_ports, a_subports, - subnets) - - vif = mock.sentinel.vif - m_to_osvif.return_value = vif - - pool_key = (port.binding_host_id, port.project_id, net_id) - m_driver._get_pool_key.return_value = pool_key - - cls._precreated_ports(m_driver, 'recover') - - m_driver._get_trunks_info.assert_called_once() - self.assertEqual(m_driver._existing_vifs[port_id], vif) - self.assertEqual(m_driver._available_ports_pools[pool_key], - {tuple(port.security_group_ids): [port_id]}) - os_net.delete_port.assert_called_with(port_to_delete_id) - - def test__precreated_ports_free(self): - cls = vif_pool.NestedVIFPool - m_driver = mock.MagicMock(spec=cls) - os_net = self.useFixture(k_fix.MockNetworkClient()).client - cls_vif_driver = nested_vlan_vif.NestedVlanPodVIFDriver - vif_driver = mock.MagicMock(spec=cls_vif_driver) - 
m_driver._drv_vif = vif_driver - - oslo_cfg.CONF.set_override('port_debug', - True, - group='kubernetes') - - port_id = str(uuid.uuid4()) - trunk_id = str(uuid.uuid4()) - trunk_obj = self._get_trunk_obj(port_id=trunk_id, subport_id=port_id) - port = fake.get_port_obj(port_id=port_id, - device_owner='trunk:subport') - - p_ports = self._get_parent_ports([trunk_obj]) - a_subports = {port_id: port} - subnet_id = port.fixed_ips[0]['subnet_id'] - net_id = str(uuid.uuid4()) - _net = os_network.Network( - id=net_id, - name=None, - mtu=None, - provider_network_type=None, - ) - network = ovu.neutron_to_osvif_network(_net) - subnets = {subnet_id: {subnet_id: network}} - m_driver._get_trunks_info.return_value = (p_ports, a_subports, - subnets) - - pool_key = (port.binding_host_id, port.project_id, net_id) - m_driver._get_pool_key.return_value = pool_key - m_driver._available_ports_pools = AVAILABLE_PORTS_TYPE() - m_driver._available_ports_pools[pool_key][ - tuple(port.security_group_ids)] = [port_id] - m_driver._existing_vifs = {port_id: mock.sentinel.vif} - - cls._precreated_ports(m_driver, 'free') - - m_driver._get_trunks_info.assert_called_once() - m_driver._drv_vif._remove_subport.assert_called_once() - os_net.delete_port.assert_called_once() - m_driver._drv_vif._release_vlan_id.assert_called_once() - - self.assertEqual(m_driver._existing_vifs, {}) - self.assertEqual(m_driver._available_ports_pools[pool_key][tuple( - port.security_group_ids)], []) - - @mock.patch('kuryr_kubernetes.os_vif_util.' - 'neutron_to_osvif_vif_nested_vlan') - def test__precreated_ports_recover_several_trunks(self, m_to_osvif): - cls = vif_pool.NestedVIFPool - m_driver = mock.MagicMock(spec=cls) - - os_net = self.useFixture(k_fix.MockNetworkClient()).client - - m_driver._available_ports_pools = AVAILABLE_PORTS_TYPE() - m_driver._existing_vifs = {} - - oslo_cfg.CONF.set_override('port_debug', - True, - group='kubernetes') - - port_id1 = str(uuid.uuid4()) - trunk_id1 = str(uuid.uuid4()) - - port_id2 = str(uuid.uuid4()) - trunk_id2 = str(uuid.uuid4()) - - trunk_obj1 = self._get_trunk_obj(port_id=trunk_id1, - subport_id=port_id1) - trunk_obj2 = self._get_trunk_obj(port_id=trunk_id2, - subport_id=port_id2, - trunk_id=str(uuid.uuid4())) - - port1 = fake.get_port_obj(port_id=port_id1, - device_owner='trunk:subport') - port2 = fake.get_port_obj(port_id=port_id2, - device_owner='trunk:subport') - - p_ports = self._get_parent_ports([trunk_obj1, trunk_obj2]) - a_subports = {port_id1: port1, port_id2: port2} - subnet_id = port1.fixed_ips[0]['subnet_id'] - net_id = str(uuid.uuid4()) - _net = os_network.Network( - id=net_id, - name=None, - mtu=None, - provider_network_type=None, - ) - network = ovu.neutron_to_osvif_network(_net) - subnets = {subnet_id: {subnet_id: network}} - - m_driver._get_trunks_info.return_value = (p_ports, a_subports, - subnets) - vif = mock.sentinel.vif - m_to_osvif.return_value = vif - - cls._precreated_ports(m_driver, 'recover') - - m_driver._get_trunks_info.assert_called_once() - self.assertEqual(m_driver._existing_vifs, {port_id1: vif, - port_id2: vif}) - os_net.delete_port.assert_not_called() - - @mock.patch('kuryr_kubernetes.os_vif_util.' 
- 'neutron_to_osvif_vif_nested_vlan') - def test__precreated_ports_recover_several_subports(self, m_to_osvif): - cls = vif_pool.NestedVIFPool - m_driver = mock.MagicMock(spec=cls) - - os_net = self.useFixture(k_fix.MockNetworkClient()).client - - m_driver._available_ports_pools = AVAILABLE_PORTS_TYPE() - m_driver._existing_vifs = {} - - oslo_cfg.CONF.set_override('port_debug', - True, - group='kubernetes') - - port_id1 = str(uuid.uuid4()) - port_id2 = str(uuid.uuid4()) - trunk_id = str(uuid.uuid4()) - trunk_obj = self._get_trunk_obj(port_id=trunk_id, - subport_id=port_id1) - trunk_obj['sub_ports'].append({'port_id': port_id2, - 'segmentation_type': 'vlan', - 'segmentation_id': 101}) - port1 = fake.get_port_obj(port_id=port_id1, - device_owner='trunk:subport') - port2 = fake.get_port_obj(port_id=port_id2, - device_owner='trunk:subport') - - p_ports = self._get_parent_ports([trunk_obj]) - a_subports = {port_id1: port1, port_id2: port2} - subnet_id = port1.fixed_ips[0]['subnet_id'] - net_id = str(uuid.uuid4()) - _net = os_network.Network( - id=net_id, - name=None, - mtu=None, - provider_network_type=None, - ) - network = ovu.neutron_to_osvif_network(_net) - subnets = {subnet_id: {subnet_id: network}} - - m_driver._get_trunks_info.return_value = (p_ports, a_subports, - subnets) - - vif = mock.sentinel.vif - m_to_osvif.return_value = vif - - pool_key = (port1.binding_host_id, port1.project_id, net_id) - m_driver._get_pool_key.return_value = pool_key - cls._precreated_ports(m_driver, 'recover') - - m_driver._get_trunks_info.assert_called_once() - self.assertEqual(m_driver._existing_vifs, {port_id1: vif, - port_id2: vif}) - self.assertEqual(m_driver._available_ports_pools[pool_key], - {tuple(port1.security_group_ids): [port_id1, - port_id2]}) - os_net.delete_port.assert_not_called() - - @ddt.data(('recover'), ('free')) - def test__precreated_ports_no_ports(self, m_action): - cls = vif_pool.NestedVIFPool - m_driver = mock.MagicMock(spec=cls) - - os_net = self.useFixture(k_fix.MockNetworkClient()).client - - oslo_cfg.CONF.set_override('port_debug', - True, - group='kubernetes') - m_driver._available_ports_pools = AVAILABLE_PORTS_TYPE() - m_driver._existing_vifs = {} - - port_id = mock.sentinel.port_id - trunk_id = mock.sentinel.trunk_id - trunk_obj = self._get_trunk_obj(port_id=trunk_id, subport_id=port_id) - - p_ports = self._get_parent_ports([trunk_obj]) - a_subports = {} - subnets = {} - m_driver._get_trunks_info.return_value = (p_ports, a_subports, - subnets) - - cls._precreated_ports(m_driver, m_action) - - m_driver._get_trunks_info.assert_called_once() - self.assertEqual(m_driver._existing_vifs, {}) - self.assertEqual(m_driver._available_ports_pools, {}) - os_net.delete_port.assert_not_called() - - @ddt.data(('recover'), ('free')) - def test__precreated_ports_no_trunks(self, m_action): - cls = vif_pool.NestedVIFPool - m_driver = mock.MagicMock(spec=cls) - - os_net = self.useFixture(k_fix.MockNetworkClient()).client - - m_driver._available_ports_pools = AVAILABLE_PORTS_TYPE() - m_driver._existing_vifs = {} - oslo_cfg.CONF.set_override('port_debug', - True, - group='kubernetes') - - port_id = str(uuid.uuid4()) - port = fake.get_port_obj(port_id=port_id, - device_owner='trunk:subport') - - p_ports = {} - a_subports = {} - subnet_id = port.fixed_ips[0]['subnet_id'] - subnet = mock.sentinel.subnet - subnets = {subnet_id: {subnet_id: subnet}} - m_driver._get_trunks_info.return_value = (p_ports, a_subports, - subnets) - cls._precreated_ports(m_driver, m_action) - 
m_driver._get_trunks_info.assert_called_once() - self.assertEqual(m_driver._existing_vifs, {}) - self.assertEqual(m_driver._available_ports_pools, {}) - os_net.delete_port.assert_not_called() - - @mock.patch('eventlet.GreenPool') - def test_delete_network_pools(self, m_green_pool): - cls = vif_pool.NestedVIFPool - m_driver = mock.MagicMock(spec=cls) - cls_vif_driver = nested_vlan_vif.NestedVlanPodVIFDriver - vif_driver = mock.MagicMock(spec=cls_vif_driver) - m_driver._drv_vif = vif_driver - m_pool = mock.MagicMock() - m_green_pool.return_value = m_pool - - net_id = mock.sentinel.net_id - pool_key = ('node_ip', 'project_id') - port_id = str(uuid.uuid4()) - trunk_id = str(uuid.uuid4()) - vif = mock.MagicMock() - vlan_id = mock.sentinel.vlan_id - vif.vlan_id = vlan_id - m_driver._available_ports_pools = AVAILABLE_PORTS_TYPE() - m_driver._available_ports_pools[pool_key][('sg',)] = [port_id] - m_driver._lock = threading.Lock() - m_driver._populate_pool_lock = { - pool_key: mock.MagicMock(spec=threading.Lock())} - m_driver._existing_vifs = {port_id: vif} - m_driver._recovered_pools = True - - m_driver._get_trunk_id.return_value = trunk_id - m_driver._get_pool_key_net.return_value = net_id - - cls.delete_network_pools(m_driver, net_id) - - m_driver._trigger_return_to_pool.assert_called_once() - m_driver._get_pool_key_net.assert_called_once() - m_driver._get_trunk_id.assert_called_once_with(pool_key) - m_driver._drv_vif._remove_subports.assert_called_once_with(trunk_id, - [port_id]) - m_driver._drv_vif._release_vlan_id.assert_called_once_with(vlan_id) - m_pool.imap.assert_called_once_with(utils.delete_neutron_port, - [port_id]) - - def test_delete_network_pools_not_ready(self): - cls = vif_pool.NestedVIFPool - m_driver = mock.MagicMock(spec=cls) - cls_vif_driver = nested_vlan_vif.NestedVlanPodVIFDriver - vif_driver = mock.MagicMock(spec=cls_vif_driver) - m_driver._drv_vif = vif_driver - os_net = self.useFixture(k_fix.MockNetworkClient()).client - - net_id = mock.sentinel.net_id - m_driver._recovered_pools = False - - self.assertRaises(exceptions.ResourceNotReady, - cls.delete_network_pools, m_driver, net_id) - - m_driver._trigger_return_to_pool.assert_not_called() - m_driver._get_pool_key_net.assert_not_called() - m_driver._get_trunk_id.assert_not_called() - m_driver._drv_vif._remove_subports.assert_not_called() - os_net.delete_port.assert_not_called() - - def test_delete_network_pools_exception(self): - cls = vif_pool.NestedVIFPool - m_driver = mock.MagicMock(spec=cls) - cls_vif_driver = nested_vlan_vif.NestedVlanPodVIFDriver - vif_driver = mock.MagicMock(spec=cls_vif_driver) - m_driver._drv_vif = vif_driver - - os_net = self.useFixture(k_fix.MockNetworkClient()).client - - net_id = mock.sentinel.net_id - pool_key = ('node_ip', 'project_id') - port_id = str(uuid.uuid4()) - trunk_id = str(uuid.uuid4()) - vif = mock.MagicMock() - vlan_id = mock.sentinel.vlan_id - vif.vlan_id = vlan_id - m_driver._available_ports_pools = AVAILABLE_PORTS_TYPE() - m_driver._available_ports_pools[pool_key][('sg',)] = [port_id] - m_driver._existing_vifs = {port_id: vif} - m_driver._recovered_pools = True - - m_driver._get_trunk_id.return_value = trunk_id - m_driver._get_pool_key_net.return_value = net_id - m_driver._drv_vif._remove_subports.side_effect = os_exc.SDKException - - self.assertRaises(exceptions.ResourceNotReady, - cls.delete_network_pools, m_driver, net_id) - - m_driver._trigger_return_to_pool.assert_called_once() - m_driver._get_pool_key_net.assert_called_once() - 
m_driver._get_trunk_id.assert_called_once_with(pool_key) - m_driver._drv_vif._remove_subports.assert_called_once_with(trunk_id, - [port_id]) - m_driver._drv_vif._release_vlan_id.assert_not_called() - os_net.delete_port.assert_not_called() - - @mock.patch('eventlet.GreenPool') - def test_delete_network_pools_missing_port(self, m_green_pool): - cls = vif_pool.NestedVIFPool - m_driver = mock.MagicMock(spec=cls) - cls_vif_driver = nested_vlan_vif.NestedVlanPodVIFDriver - vif_driver = mock.MagicMock(spec=cls_vif_driver) - m_driver._drv_vif = vif_driver - m_pool = mock.MagicMock() - m_green_pool.return_value = m_pool - - net_id = mock.sentinel.net_id - pool_key = ('node_ip', 'project_id') - port_id = str(uuid.uuid4()) - trunk_id = str(uuid.uuid4()) - vif = mock.MagicMock() - vlan_id = mock.sentinel.vlan_id - vif.vlan_id = vlan_id - m_driver._available_ports_pools = AVAILABLE_PORTS_TYPE() - m_driver._available_ports_pools[pool_key][('sg',)] = [port_id] - m_driver._lock = threading.Lock() - m_driver._populate_pool_lock = { - pool_key: mock.MagicMock(spec=threading.Lock())} - m_driver._existing_vifs = {} - m_driver._recovered_pools = True - - m_driver._get_trunk_id.return_value = trunk_id - m_driver._get_pool_key_net.return_value = net_id - - cls.delete_network_pools(m_driver, net_id) - - m_driver._trigger_return_to_pool.assert_called_once() - m_driver._get_pool_key_net.assert_called_once() - m_driver._get_trunk_id.assert_called_once_with(pool_key) - m_driver._drv_vif._remove_subports.assert_called_once_with(trunk_id, - [port_id]) - m_driver._drv_vif._release_vlan_id.assert_not_called() - m_pool.imap.assert_called_once_with(utils.delete_neutron_port, - [port_id]) diff --git a/kuryr_kubernetes/tests/unit/controller/handlers/__init__.py b/kuryr_kubernetes/tests/unit/controller/handlers/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/kuryr_kubernetes/tests/unit/controller/handlers/test_fake_handler.py b/kuryr_kubernetes/tests/unit/controller/handlers/test_fake_handler.py deleted file mode 100644 index 94bae4811..000000000 --- a/kuryr_kubernetes/tests/unit/controller/handlers/test_fake_handler.py +++ /dev/null @@ -1,24 +0,0 @@ -# Copyright (c) 2018 RedHat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -from kuryr_kubernetes.handlers import k8s_base - - -class TestHandler(k8s_base.ResourceEventHandler): - - OBJECT_KIND = 'DUMMY' - OBJECT_WATCH_PATH = 'DUMMY_PATH' - - def __init__(self): - super(TestHandler, self).__init__() diff --git a/kuryr_kubernetes/tests/unit/controller/handlers/test_kuryrnetwork.py b/kuryr_kubernetes/tests/unit/controller/handlers/test_kuryrnetwork.py deleted file mode 100644 index c67bf6c91..000000000 --- a/kuryr_kubernetes/tests/unit/controller/handlers/test_kuryrnetwork.py +++ /dev/null @@ -1,395 +0,0 @@ -# Copyright 2020 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from unittest import mock
-
-from openstack.network.v2 import network as os_network
-from openstack.network.v2 import subnet as os_subnet
-from oslo_config import cfg as oslo_cfg
-
-from kuryr_kubernetes.controller.drivers import base as drivers
-from kuryr_kubernetes.controller.drivers import namespace_subnet as subnet_drv
-from kuryr_kubernetes.controller.drivers import utils as driver_utils
-from kuryr_kubernetes.controller.drivers import vif_pool
-from kuryr_kubernetes.controller.handlers import kuryrnetwork
-from kuryr_kubernetes import exceptions as k_exc
-from kuryr_kubernetes.tests import base as test_base
-from kuryr_kubernetes.tests.unit import kuryr_fixtures as k_fix
-
-
-@mock.patch('kuryr_kubernetes.controller.drivers.utils.'
-            'is_network_policy_enabled', mock.Mock(return_value=True))
-class TestKuryrNetworkHandler(test_base.TestCase):
-
-    def setUp(self):
-        super(TestKuryrNetworkHandler, self).setUp()
-
-        self._project_id = mock.sentinel.project_id
-        self._subnets = mock.sentinel.subnets
-        self._kuryrnet_crd = {
-            'metadata': {
-                'name': 'ns-test-namespace',
-            },
-            'spec': {
-                'nsName': 'test-namespace',
-                'projectId': 'test-project',
-                'nsLabels': {},
-            },
-            'status': {
-            }
-        }
-
-        self._handler = mock.MagicMock(
-            spec=kuryrnetwork.KuryrNetworkHandler)
-        self._handler._drv_project = mock.Mock(spec=drivers.PodProjectDriver)
-        # NOTE(ltomasbo): The KuryrNetwork handler is tied to the use of the
-        # namespace subnet driver.
-        self._handler._drv_subnets = mock.Mock(
-            spec=subnet_drv.NamespacePodSubnetDriver)
-        self._handler._drv_sg = mock.Mock(spec=drivers.PodSecurityGroupsDriver)
-        self._handler._drv_vif_pool = mock.MagicMock(
-            spec=vif_pool.MultiVIFPool)
-        self._handler.k8s = self.useFixture(k_fix.MockK8sClient()).client
-
-        self._get_project = self._handler._drv_project.get_project
-        self._set_vif_driver = self._handler._drv_vif_pool.set_vif_driver
-        self._create_network = self._handler._drv_subnets.create_network
-        self._create_subnet = self._handler._drv_subnets.create_subnet
-        self._delete_namespace_subnet = (
-            self._handler._drv_subnets.delete_namespace_subnet)
-        self._add_subnet_to_router = (
-            self._handler._drv_subnets.add_subnet_to_router)
-        self._delete_ns_sg_rules = (
-            self._handler._drv_sg.delete_namespace_sg_rules)
-        self._update_ns_sg_rules = (
-            self._handler._drv_sg.update_namespace_sg_rules)
-        self._delete_network_pools = (
-            self._handler._drv_vif_pool.delete_network_pools)
-
-        self._get_project.return_value = self._project_id
-
-    @mock.patch.object(drivers.LBaaSDriver, 'get_instance')
-    @mock.patch.object(drivers.VIFPoolDriver, 'get_instance')
-    @mock.patch.object(drivers.PodSecurityGroupsDriver, 'get_instance')
-    @mock.patch.object(drivers.PodSubnetsDriver, 'get_instance')
-    @mock.patch.object(drivers.NamespaceProjectDriver, 'get_instance')
-    def test_init(self, m_get_project_driver, m_get_subnet_driver,
-                  m_get_sg_driver, m_get_vif_pool_driver, m_get_lbaas_driver):
-        project_driver = mock.sentinel.project_driver
-        subnet_driver = mock.sentinel.subnet_driver
-        sg_driver = mock.sentinel.sg_driver
-        vif_pool_driver =
mock.Mock(spec=vif_pool.MultiVIFPool) - lbaas_driver = mock.sentinel.lbaas_driver - - m_get_project_driver.return_value = project_driver - m_get_subnet_driver.return_value = subnet_driver - m_get_sg_driver.return_value = sg_driver - m_get_vif_pool_driver.return_value = vif_pool_driver - m_get_lbaas_driver.return_value = lbaas_driver - - handler = kuryrnetwork.KuryrNetworkHandler() - - self.assertEqual(project_driver, handler._drv_project) - self.assertEqual(subnet_driver, handler._drv_subnets) - self.assertEqual(sg_driver, handler._drv_sg) - self.assertEqual(vif_pool_driver, handler._drv_vif_pool) - - @mock.patch.object(driver_utils, 'get_services') - @mock.patch.object(driver_utils, 'get_namespace') - def test_on_present(self, m_get_ns, m_get_svc): - net_id = mock.sentinel.net_id - subnet_id = mock.sentinel.subnet_id - subnet_cidr = mock.sentinel.subnet_cidr - router_id = mock.sentinel.router_id - ns = {'metadata': {'uid': 'e28127f4-bc41-450a-97cf-56d9bc76d53e', - 'name': 'test'}} - - self._create_network.return_value = net_id - self._create_subnet.return_value = (subnet_id, subnet_cidr) - self._add_subnet_to_router.return_value = router_id - m_get_ns.return_value = ns - m_get_svc.return_value = [] - - kuryrnetwork.KuryrNetworkHandler.on_present(self._handler, - self._kuryrnet_crd) - - self._handler._patch_kuryrnetwork_crd.assert_called() - self._create_network.assert_called_once_with( - ns, - self._kuryrnet_crd['spec']['projectId']) - self._create_subnet.assert_called_once_with( - ns, - self._kuryrnet_crd['spec']['projectId'], - net_id) - self._add_subnet_to_router.assert_called_once_with(subnet_id) - m_get_ns.assert_called_once_with(self._kuryrnet_crd['spec']['nsName']) - self._update_ns_sg_rules.assert_called_once_with(ns) - m_get_svc.assert_called_once() - self._handler._update_services.assert_called_once() - self.assertEqual(self._handler.k8s.add_event.call_count, 4) - - @mock.patch.object(driver_utils, 'get_services') - @mock.patch.object(driver_utils, 'get_namespace') - def test_on_present_no_sg_enforce(self, m_get_ns, m_get_svc): - net_id = mock.sentinel.net_id - subnet_id = mock.sentinel.subnet_id - subnet_cidr = mock.sentinel.subnet_cidr - router_id = mock.sentinel.router_id - ns = {'metadata': {'uid': '843645f7-d255-4fd8-86fb-09140d57c392', - 'name': 'test'}} - - self._create_network.return_value = net_id - self._create_subnet.return_value = (subnet_id, subnet_cidr) - self._add_subnet_to_router.return_value = router_id - m_get_ns.return_value = ns - - oslo_cfg.CONF.set_override('enforce_sg_rules', - False, - group='octavia_defaults') - self.addCleanup(oslo_cfg.CONF.clear_override, 'enforce_sg_rules', - group='octavia_defaults') - - kuryrnetwork.KuryrNetworkHandler.on_present(self._handler, - self._kuryrnet_crd) - - self._handler._patch_kuryrnetwork_crd.assert_called() - self._create_network.assert_called_once_with( - ns, - self._kuryrnet_crd['spec']['projectId']) - self._create_subnet.assert_called_once_with( - ns, - self._kuryrnet_crd['spec']['projectId'], - net_id) - self._add_subnet_to_router.assert_called_once_with(subnet_id) - m_get_ns.assert_called_once_with(self._kuryrnet_crd['spec']['nsName']) - self._update_ns_sg_rules.assert_called_once_with(ns) - m_get_svc.assert_not_called() - self._handler._update_services.assert_not_called() - self.assertEqual(self._handler.k8s.add_event.call_count, 4) - - @mock.patch.object(driver_utils, 'get_namespace') - def test_on_present_existing(self, m_get_ns): - net_id = mock.sentinel.net_id - subnet_id = mock.sentinel.subnet_id - 
subnet_cidr = mock.sentinel.subnet_cidr - router_id = mock.sentinel.router_id - - kns_crd = self._kuryrnet_crd.copy() - kns_crd['status'] = { - 'netId': net_id, - 'subnetId': subnet_id, - 'subnetCIDR': subnet_cidr, - 'routerId': router_id} - - kuryrnetwork.KuryrNetworkHandler.on_present(self._handler, kns_crd) - - self._handler._patch_kuryrnetwork_crd.assert_not_called() - self._create_network.assert_not_called() - self._create_subnet.assert_not_called() - self._add_subnet_to_router.assert_not_called() - self._handler.k8s.add_event.assert_not_called() - - @mock.patch.object(driver_utils, 'get_services') - def test_on_finalize(self, m_get_svc): - net_id = mock.sentinel.net_id - kns_crd = self._kuryrnet_crd.copy() - kns_crd['status'] = {'netId': net_id} - crd_selector = mock.sentinel.crd_selector - - self._delete_ns_sg_rules.return_value = [crd_selector] - m_get_svc.return_value = [] - - kuryrnetwork.KuryrNetworkHandler.on_finalize(self._handler, kns_crd) - - self._delete_network_pools.assert_called_once_with(net_id) - self._delete_namespace_subnet.assert_called_once_with(kns_crd) - self._delete_ns_sg_rules.assert_called_once() - m_get_svc.assert_called_once() - self._handler._update_services.assert_called_once() - self._handler.k8s.remove_finalizer.assert_called_once() - self._handler.k8s.add_event.assert_not_called() - - @mock.patch('kuryr_kubernetes.clients.get_network_client') - @mock.patch.object(driver_utils, 'get_services') - def test_on_finalize_no_network(self, m_get_svc, m_get_net_client): - crd_selector = mock.sentinel.crd_selector - self._delete_ns_sg_rules.return_value = [crd_selector] - m_get_svc.return_value = [] - net_mock = mock.MagicMock() - net_mock.return_value = [] - m_get_net_client.return_value = net_mock - self._handler.k8s.get.return_value = ( - {'metadata': {'name': 'test-namespace', - 'uid': 'e237acaf-ea5f-4d96-b604-292268a938a1'}}) - - kuryrnetwork.KuryrNetworkHandler.on_finalize(self._handler, - self._kuryrnet_crd) - - self._delete_network_pools.assert_not_called() - self._delete_namespace_subnet.assert_not_called() - self._delete_ns_sg_rules.assert_called_once() - m_get_svc.assert_called_once() - self._handler._update_services.assert_called_once() - self._handler.k8s.remove_finalizer.assert_called_once() - self._handler.k8s.add_event.assert_not_called() - - @mock.patch('kuryr_kubernetes.clients.get_network_client') - @mock.patch.object(driver_utils, 'get_services') - def test_on_finalize_no_network_in_kn_no_desc(self, m_get_svc, - m_get_net_client): - crd_selector = mock.sentinel.crd_selector - self._delete_ns_sg_rules.return_value = [crd_selector] - m_get_svc.return_value = [] - net_mock = mock.MagicMock() - net_id = '1612ffb1-ff7d-4590-bd9c-95aeb043705a' - net_mock.networks.return_value = [ - os_network.Network(id=net_id, description=''), - ] - m_get_net_client.return_value = net_mock - self._handler.k8s.get.return_value = ( - {'metadata': {'name': 'test-namespace', 'uid': net_id}}) - - kuryrnetwork.KuryrNetworkHandler.on_finalize(self._handler, - self._kuryrnet_crd) - - self._delete_network_pools.assert_not_called() - self._delete_ns_sg_rules.assert_called_once() - m_get_svc.assert_called_once() - self._handler._update_services.assert_called_once() - self._handler.k8s.remove_finalizer.assert_called_once() - self._handler.k8s.add_event.assert_not_called() - - @mock.patch('kuryr_kubernetes.clients.get_network_client') - @mock.patch.object(driver_utils, 'get_services') - def test_on_finalize_no_network_in_kn_with_subnet(self, m_get_svc, - m_get_net_client): - 
crd_selector = mock.sentinel.crd_selector - self._delete_ns_sg_rules.return_value = [crd_selector] - m_get_svc.return_value = [] - net_id = 'db0b2c83-dae3-47ff-b4b0-9a19b7c15589' - ns_id = '0b6d6f0b-4e44-4a1b-a711-71ab6c79bee8' - subnet_id = 'a595fc4b-6885-48ff-b90c-d3f7aefd6d1a' - net_mock = mock.MagicMock() - net_mock.networks.return_value = [ - os_network.Network(id=net_id, description=ns_id), - ] - net_mock.subnets.return_value = [os_subnet.Subnet(id=subnet_id)] - m_get_net_client.return_value = net_mock - self._handler.k8s.get.return_value = ( - {'metadata': {'name': 'test-namespace', 'uid': ns_id}}) - - kuryrnetwork.KuryrNetworkHandler.on_finalize(self._handler, - self._kuryrnet_crd) - - self._delete_network_pools.assert_called_once() - self._delete_ns_sg_rules.assert_called_once() - m_get_svc.assert_called_once() - self._handler._update_services.assert_called_once() - self._handler.k8s.remove_finalizer.assert_called_once() - self._handler.k8s.add_event.assert_not_called() - - @mock.patch('kuryr_kubernetes.clients.get_network_client') - @mock.patch.object(driver_utils, 'get_services') - def test_on_finalize_no_network_in_kn_with_no_ns_match(self, m_get_svc, - m_get_net_client): - crd_selector = mock.sentinel.crd_selector - self._delete_ns_sg_rules.return_value = [crd_selector] - m_get_svc.return_value = [] - net_id = 'db0b2c83-dae3-47ff-b4b0-9a19b7c15589' - ns_id = '0b6d6f0b-4e44-4a1b-a711-71ab6c79bee8' - subnet_id = 'a595fc4b-6885-48ff-b90c-d3f7aefd6d1a' - net_mock = mock.MagicMock() - net_mock.networks.return_value = [ - os_network.Network(id=net_id, description=ns_id), - ] - net_mock.subnets.return_value = [os_subnet.Subnet(id=subnet_id)] - m_get_net_client.return_value = net_mock - self._handler.k8s.get.return_value = ( - {'metadata': {'name': 'test-namespace', 'uid': net_id}}) - - kuryrnetwork.KuryrNetworkHandler.on_finalize(self._handler, - self._kuryrnet_crd) - - self._delete_network_pools.assert_not_called() - m_get_svc.assert_called_once() - self._handler._update_services.assert_called_once() - self._handler.k8s.remove_finalizer.assert_called_once() - self._handler.k8s.add_event.assert_not_called() - - @mock.patch.object(driver_utils, 'get_services') - def test_on_finalize_no_sg_enforce(self, m_get_svc): - net_id = mock.sentinel.net_id - kns_crd = self._kuryrnet_crd.copy() - kns_crd['status'] = {'netId': net_id} - crd_selector = mock.sentinel.crd_selector - - self._delete_ns_sg_rules.return_value = [crd_selector] - m_get_svc.return_value = [] - oslo_cfg.CONF.set_override('enforce_sg_rules', - False, - group='octavia_defaults') - self.addCleanup(oslo_cfg.CONF.clear_override, 'enforce_sg_rules', - group='octavia_defaults') - - kuryrnetwork.KuryrNetworkHandler.on_finalize( - self._handler, kns_crd) - - self._delete_network_pools.assert_called_once_with(net_id) - self._delete_namespace_subnet.assert_called_once_with(kns_crd) - self._delete_ns_sg_rules.assert_called_once() - m_get_svc.assert_not_called() - self._handler._update_services.assert_not_called() - self._handler.k8s.remove_finalizer.assert_called_once() - self._handler.k8s.add_event.assert_not_called() - - def test_on_finalize_finalizer_delete_ns_subnet_exception(self): - net_id = mock.sentinel.net_id - kns_crd = self._kuryrnet_crd.copy() - kns_crd['status'] = {'netId': net_id} - crd_selector = mock.sentinel.crd_selector - - self._delete_ns_sg_rules.return_value = [crd_selector] - self._delete_namespace_subnet.side_effect = (k_exc. 
-            ResourceNotReady(kns_crd))
-
-        self.assertRaises(k_exc.ResourceNotReady,
-                          kuryrnetwork.KuryrNetworkHandler.on_finalize,
-                          self._handler, kns_crd)
-
-        self._delete_network_pools.assert_called_once_with(net_id)
-        self._delete_namespace_subnet.assert_called_once_with(kns_crd)
-
-    @mock.patch.object(driver_utils, 'get_services')
-    def test_on_finalize_finalizer_exception(self, m_get_svc):
-        net_id = mock.sentinel.net_id
-        kns_crd = self._kuryrnet_crd.copy()
-        kns_crd['status'] = {'netId': net_id}
-        crd_selector = mock.sentinel.crd_selector
-
-        self._delete_ns_sg_rules.return_value = [crd_selector]
-        m_get_svc.return_value = []
-        k8s = self._handler.k8s
-        k8s.remove_finalizer.side_effect = k_exc.K8sClientException
-
-        self.assertRaises(
-            k_exc.K8sClientException,
-            kuryrnetwork.KuryrNetworkHandler.on_finalize,
-            self._handler, kns_crd)
-
-        self._delete_network_pools.assert_called_once_with(net_id)
-        self._delete_namespace_subnet.assert_called_once_with(kns_crd)
-        self._delete_ns_sg_rules.assert_called_once()
-        m_get_svc.assert_called_once()
-        self._handler._update_services.assert_called_once()
-        k8s.remove_finalizer.assert_called_once()
diff --git a/kuryr_kubernetes/tests/unit/controller/handlers/test_kuryrnetwork_population.py b/kuryr_kubernetes/tests/unit/controller/handlers/test_kuryrnetwork_population.py
deleted file mode 100644
index 84492c0be..000000000
--- a/kuryr_kubernetes/tests/unit/controller/handlers/test_kuryrnetwork_population.py
+++ /dev/null
@@ -1,112 +0,0 @@
-# Copyright 2020 Red Hat, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from unittest import mock
-
-from kuryr_kubernetes.controller.drivers import base as drivers
-from kuryr_kubernetes.controller.drivers import namespace_subnet as subnet_drv
-from kuryr_kubernetes.controller.drivers import node_subnets
-from kuryr_kubernetes.controller.drivers import utils as driver_utils
-from kuryr_kubernetes.controller.drivers import vif_pool
-from kuryr_kubernetes.controller.handlers import kuryrnetwork_population
-from kuryr_kubernetes.tests import base as test_base
-from kuryr_kubernetes import utils
-
-
-class TestKuryrNetworkPopulationHandler(test_base.TestCase):
-
-    def setUp(self):
-        super(TestKuryrNetworkPopulationHandler, self).setUp()
-
-        self._project_id = mock.sentinel.project_id
-        self._subnets = mock.sentinel.subnets
-        self._kuryrnet_crd = {
-            'metadata': {
-                'name': 'test-namespace',
-            },
-            'spec': {
-                'nsName': 'test-namespace',
-                'projectId': 'test-project',
-                'nsLabels': {},
-            },
-            'status': {
-                'subnetId': 'test-subnet'
-            }
-        }
-
-        self._handler = mock.MagicMock(
-            spec=kuryrnetwork_population.KuryrNetworkPopulationHandler)
-        # NOTE(ltomasbo): The KuryrNetworkPopulation handler relies on the
-        # namespace subnet driver.
-        self._handler._drv_subnets = mock.Mock(
-            spec=subnet_drv.NamespacePodSubnetDriver)
-        self._handler._drv_vif_pool = mock.MagicMock(
-            spec=vif_pool.MultiVIFPool)
-        self._handler._drv_nodes_subnets = mock.MagicMock(
-            spec=node_subnets.ConfigNodesSubnets)
-
-        self._get_namespace_subnet = (
-            self._handler._drv_subnets.get_namespace_subnet)
-        self._set_vif_driver = self._handler._drv_vif_pool.set_vif_driver
-        self._populate_pool = self._handler._drv_vif_pool.populate_pool
-        self._patch_kuryrnetwork_crd = self._handler._patch_kuryrnetwork_crd
-
-        self._get_namespace_subnet.return_value = self._subnets
-
-    @mock.patch.object(drivers.VIFPoolDriver, 'get_instance')
-    @mock.patch.object(drivers.PodSubnetsDriver, 'get_instance')
-    def test_init(self, m_get_subnet_driver, m_get_vif_pool_driver):
-        subnet_driver = mock.sentinel.subnet_driver
-        vif_pool_driver = mock.Mock(spec=vif_pool.MultiVIFPool)
-
-        m_get_subnet_driver.return_value = subnet_driver
-        m_get_vif_pool_driver.return_value = vif_pool_driver
-
-        handler = kuryrnetwork_population.KuryrNetworkPopulationHandler()
-
-        self.assertEqual(subnet_driver, handler._drv_subnets)
-        self.assertEqual(vif_pool_driver, handler._drv_vif_pool)
-
-    @mock.patch.object(driver_utils, 'get_annotations')
-    @mock.patch.object(driver_utils, 'get_namespace')
-    @mock.patch.object(utils, 'get_nodes_ips')
-    def test_on_present(self, m_get_nodes_ips, m_get_ns, m_get_ann):
-        m_get_nodes_ips.return_value = ['node-ip']
-        m_get_ns.return_value = mock.sentinel.ns
-        m_get_ann.return_value = self._kuryrnet_crd['metadata']['name']
-
-        kuryrnetwork_population.KuryrNetworkPopulationHandler.on_present(
-            self._handler, self._kuryrnet_crd)
-
-        self._get_namespace_subnet.assert_called_once_with(
-            self._kuryrnet_crd['spec']['nsName'],
-            self._kuryrnet_crd['status']['subnetId'])
-        self._populate_pool.assert_called_once_with(
-            'node-ip', self._kuryrnet_crd['spec']['projectId'], self._subnets,
-            [])
-        self._patch_kuryrnetwork_crd.assert_called_once()
-
-    def test_on_added_no_subnet(self):
-        kns = self._kuryrnet_crd.copy()
-        del kns['status']
-        kuryrnetwork_population.KuryrNetworkPopulationHandler.on_added(
-            self._handler, kns)
-        self._get_namespace_subnet.assert_not_called()
-
-    def test_on_added_populated(self):
-        kns = self._kuryrnet_crd.copy()
-        kns['status'] = {'populated': True}
-
kuryrnetwork_population.KuryrNetworkPopulationHandler.on_added( - self._handler, kns) - self._get_namespace_subnet.assert_not_called() diff --git a/kuryr_kubernetes/tests/unit/controller/handlers/test_kuryrnetworkpolicy.py b/kuryr_kubernetes/tests/unit/controller/handlers/test_kuryrnetworkpolicy.py deleted file mode 100644 index 5f3f84c29..000000000 --- a/kuryr_kubernetes/tests/unit/controller/handlers/test_kuryrnetworkpolicy.py +++ /dev/null @@ -1,97 +0,0 @@ -# Copyright 2020 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from unittest import mock - -from kuryr_kubernetes.controller.drivers import base as drivers -from kuryr_kubernetes.controller.handlers import kuryrnetworkpolicy -from kuryr_kubernetes.tests import base as test_base - - -class TestPolicyHandler(test_base.TestCase): - - @mock.patch.object(drivers.LBaaSDriver, 'get_instance') - @mock.patch.object(drivers.NetworkPolicyDriver, 'get_instance') - @mock.patch('kuryr_kubernetes.clients.get_kubernetes_client') - @mock.patch('kuryr_kubernetes.clients.get_network_client') - @mock.patch('kuryr_kubernetes.clients.get_loadbalancer_client') - def setUp(self, m_get_os_lb, m_get_os_net, m_get_k8s, m_get_np, - m_get_lbaas): - super(TestPolicyHandler, self).setUp() - - self._project_id = mock.sentinel.project_id - self._policy_name = 'np-test' - self._policy_uid = mock.sentinel.policy_uid - self._policy_link = mock.sentinel.policy_link - - self._policy = { - 'apiVersion': 'networking.k8s.io/v1', - 'kind': 'NetworkPolicy', - 'metadata': { - 'name': self._policy_name, - 'resourceVersion': '2259309', - 'generation': 1, - 'creationTimestamp': '2018-09-18T14:09:51Z', - 'namespace': 'default', - 'annotations': {}, - 'uid': self._policy_uid - }, - 'spec': { - 'egress': [{'ports': [{'port': 5978, 'protocol': 'TCP'}]}], - 'ingress': [{'ports': [{'port': 6379, 'protocol': 'TCP'}]}], - 'policyTypes': ['Ingress', 'Egress'] - } - } - - self.k8s = mock.Mock() - m_get_k8s.return_value = self.k8s - self.m_get_k8s = m_get_k8s - - self.os_net = mock.Mock() - m_get_os_net.return_value = self.os_net - self.m_get_os_net = m_get_os_net - - self.np_driver = mock.Mock() - m_get_np.return_value = self.np_driver - self.m_get_np = m_get_np - - self.lbaas_driver = mock.Mock() - m_get_lbaas.return_value = self.lbaas_driver - self.m_get_lbaas = m_get_lbaas - - self.k8s.get.return_value = {} - self.handler = kuryrnetworkpolicy.KuryrNetworkPolicyHandler() - - def _get_knp_obj(self): - knp_obj = { - 'apiVersion': 'openstack.org/v1', - 'kind': 'KuryrNetworkPolicy', - 'metadata': { - 'name': 'np-test-network-policy', - 'namespace': 'test-1', - }, - 'spec': { - 'securityGroupId': 'c1ac16f5-e198-4628-9d84-253c6001be8e', - 'securityGroupName': 'sg-test-network-policy' - }} - return knp_obj - - def test_init(self): - self.m_get_k8s.assert_called_once() - self.m_get_np.assert_called_once() - - self.assertEqual(self.np_driver, self.handler._drv_policy) - self.assertEqual(self.k8s, self.handler.k8s) - self.assertEqual(self.os_net, self.handler.os_net) - 
self.assertEqual(self.lbaas_driver, self.handler._drv_lbaas) diff --git a/kuryr_kubernetes/tests/unit/controller/handlers/test_kuryrport.py b/kuryr_kubernetes/tests/unit/controller/handlers/test_kuryrport.py deleted file mode 100644 index 703f9e3e8..000000000 --- a/kuryr_kubernetes/tests/unit/controller/handlers/test_kuryrport.py +++ /dev/null @@ -1,681 +0,0 @@ -# Copyright (c) 2020 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from unittest import mock - -from openstack import exceptions as os_exc -from os_vif import objects as os_obj -from oslo_config import cfg - -from kuryr_kubernetes import constants -from kuryr_kubernetes.controller.drivers import multi_vif -from kuryr_kubernetes.controller.handlers import kuryrport -from kuryr_kubernetes import exceptions as k_exc -from kuryr_kubernetes.tests import base as test_base -from kuryr_kubernetes.tests.unit import kuryr_fixtures as k_fix -from kuryr_kubernetes import utils - - -CONF = cfg.CONF - - -class TestKuryrPortHandler(test_base.TestCase): - - def setUp(self): - super().setUp() - self._project_id = mock.sentinel.project_id - self._subnets = mock.sentinel.subnets - self._security_groups = mock.sentinel.security_groups - self._host = mock.sentinel.hostname - self._pod_version = mock.sentinel.pod_version - self._pod_link = mock.sentinel.pod_link - self._kp_version = mock.sentinel.kp_version - self._kp_namespace = mock.sentinel.namespace - self._kp_uid = mock.sentinel.kp_uid - self._kp_name = 'pod1' - self._pod_uid = 'deadbeef' - - self._pod = {'apiVersion': 'v1', - 'kind': 'Pod', - 'metadata': { - 'resourceVersion': self._pod_version, - 'name': self._kp_name, - 'deletionTimestamp': mock.sentinel.date, - 'namespace': self._kp_namespace, - 'uid': self._pod_uid, - }, - 'spec': {'nodeName': self._host}} - - self._kp = { - 'apiVersion': 'openstack.org/v1', - 'kind': 'KuryrPort', - 'metadata': { - 'resourceVersion': self._kp_version, - 'name': self._kp_name, - 'namespace': self._kp_namespace, - 'labels': { - constants.KURYRPORT_LABEL: self._host - }, - 'finalizers': [], - }, - 'spec': { - 'podUid': self._pod_uid, - 'podNodeName': self._host - }, - 'status': {'vifs': {}} - } - - self._vif1 = os_obj.vif.VIFBase() - self._vif2 = os_obj.vif.VIFBase() - self._vif1.active = False - self._vif2.active = False - self._vif1.plugin = 'object' - self._vif2.plugin = 'object' - self._vif1_primitive = self._vif1.obj_to_primitive() - self._vif2_primitive = self._vif2.obj_to_primitive() - self._vifs_primitive = {'eth0': {'default': True, - 'vif': self._vif1_primitive}, - 'eth1': {'default': False, - 'vif': self._vif2_primitive}} - self._vifs = {'eth0': {'default': True, - 'vif': self._vif1}, - 'eth1': {'default': False, - 'vif': self._vif2}} - self._pod_uri = (f"{constants.K8S_API_NAMESPACES}" - f"/{self._kp['metadata']['namespace']}/pods/" - f"{self._kp['metadata']['name']}") - self._kp_uri = utils.get_res_link(self._kp) - self._node_uri = f"{constants.K8S_API_BASE}/nodes/{self._host}" - 
self.useFixture(k_fix.MockNetworkClient()) - self._driver = multi_vif.NoopMultiVIFDriver() - - @mock.patch('kuryr_kubernetes.controller.handlers.kuryrport.' - 'KuryrPortHandler.get_vifs') - @mock.patch('kuryr_kubernetes.clients.get_kubernetes_client') - @mock.patch('kuryr_kubernetes.controller.drivers.base.MultiVIFDriver.' - 'get_enabled_drivers') - def test_on_present_no_vifs_create(self, ged, get_k8s_client, get_vifs): - ged.return_value = [self._driver] - kp = kuryrport.KuryrPortHandler() - get_vifs.return_value = True - - kp.on_present(self._kp) - - get_vifs.assert_called_once_with(self._kp) - - @mock.patch('kuryr_kubernetes.controller.handlers.kuryrport.' - 'KuryrPortHandler.get_vifs') - @mock.patch('kuryr_kubernetes.clients.get_kubernetes_client') - @mock.patch('kuryr_kubernetes.controller.drivers.base.MultiVIFDriver.' - 'get_enabled_drivers') - def test_on_present_getting_vifs_failed(self, ged, get_k8s_client, - get_vifs): - ged.return_value = [self._driver] - kp = kuryrport.KuryrPortHandler() - get_vifs.return_value = False - - self.assertFalse(kp.on_present(self._kp)) - - get_vifs.assert_called_once_with(self._kp) - - @mock.patch('kuryr_kubernetes.controller.drivers.default_project.' - 'DefaultPodProjectDriver.get_project') - @mock.patch('kuryr_kubernetes.controller.handlers.kuryrport.' - 'KuryrPortHandler._update_kuryrport_crd') - @mock.patch('kuryr_kubernetes.controller.drivers.vif_pool.MultiVIFPool.' - 'activate_vif') - @mock.patch('kuryr_kubernetes.clients.get_kubernetes_client') - @mock.patch('kuryr_kubernetes.controller.drivers.base.MultiVIFDriver.' - 'get_enabled_drivers') - def test_on_present(self, ged, get_k8s_client, activate_vif, - update_crd, get_project): - ged.return_value = [mock.MagicMock] - kp = kuryrport.KuryrPortHandler() - self._kp['status']['vifs'] = self._vifs_primitive - get_project.return_value = self._project_id - - with mock.patch.object(kp, 'k8s') as k8s: - k8s.get.return_value = self._pod - - kp.on_present(self._kp) - - k8s.get.assert_called_once_with(self._pod_uri) - - activate_vif.assert_has_calls([mock.call(self._vif1, pod=self._pod, - retry_info=mock.ANY), - mock.call(self._vif2, pod=self._pod, - retry_info=mock.ANY)]) - update_crd.assert_called_once_with(self._kp, self._vifs) - - @mock.patch('kuryr_kubernetes.clients.get_kubernetes_client') - @mock.patch('kuryr_kubernetes.controller.drivers.base.MultiVIFDriver.' - 'get_enabled_drivers') - def test_on_present_active(self, ged, get_k8s_client): - ged.return_value = [self._driver] - kp = kuryrport.KuryrPortHandler() - self._vif1.active = True - self._vif2.active = True - self._kp['status']['vifs'] = { - 'eth0': {'default': True, - 'vif': self._vif1.obj_to_primitive()}, - 'eth1': {'default': False, - 'vif': self._vif2.obj_to_primitive()}} - - kp.on_present(self._kp) - - @mock.patch('kuryr_kubernetes.controller.handlers.kuryrport.' - 'KuryrPortHandler._update_kuryrport_crd') - @mock.patch('kuryr_kubernetes.controller.drivers.vif_pool.MultiVIFPool.' - 'activate_vif') - @mock.patch('kuryr_kubernetes.clients.get_kubernetes_client') - @mock.patch('kuryr_kubernetes.controller.drivers.base.MultiVIFDriver.' 
- 'get_enabled_drivers') - def test_on_present_port_not_found(self, ged, get_k8s_client, activate_vif, - update_crd): - ged.return_value = [self._driver] - kp = kuryrport.KuryrPortHandler() - self._kp['status']['vifs'] = self._vifs_primitive - activate_vif.side_effect = os_exc.ResourceNotFound() - - kp.on_present(self._kp) - - activate_vif.assert_has_calls([mock.call(self._vif1, pod=mock.ANY, - retry_info=mock.ANY), - mock.call(self._vif2, pod=mock.ANY, - retry_info=mock.ANY)]) - - @mock.patch('kuryr_kubernetes.controller.drivers.vif_pool.MultiVIFPool.' - 'activate_vif') - @mock.patch('kuryr_kubernetes.clients.get_kubernetes_client') - @mock.patch('kuryr_kubernetes.controller.drivers.base.MultiVIFDriver.' - 'get_enabled_drivers') - def test_on_present_pod_not_found(self, ged, get_k8s_client, activate_vif): - ged.return_value = [self._driver] - kp = kuryrport.KuryrPortHandler() - self._kp['status']['vifs'] = self._vifs_primitive - - with mock.patch.object(kp, 'k8s') as k8s: - k8s.get.side_effect = k_exc.K8sResourceNotFound(self._pod) - - self.assertRaises(k_exc.K8sResourceNotFound, kp.on_present, - self._kp) - - k8s.get.assert_called_once_with(self._pod_uri) - - @mock.patch('kuryr_kubernetes.controller.drivers.vif_pool.MultiVIFPool.' - 'release_vif') - @mock.patch('kuryr_kubernetes.controller.drivers.default_security_groups.' - 'DefaultPodSecurityGroupsDriver.get_security_groups') - @mock.patch('kuryr_kubernetes.controller.drivers.default_project.' - 'DefaultPodProjectDriver.get_project') - @mock.patch('kuryr_kubernetes.controller.handlers.kuryrport.' - 'KuryrPortHandler._update_kuryrport_crd') - @mock.patch('kuryr_kubernetes.controller.drivers.vif_pool.MultiVIFPool.' - 'activate_vif') - @mock.patch('kuryr_kubernetes.clients.get_kubernetes_client') - @mock.patch('kuryr_kubernetes.controller.drivers.base.MultiVIFDriver.' - 'get_enabled_drivers') - def test_on_present_fail_update_crd(self, ged, get_k8s_client, - activate_vif, update_crd, get_project, - get_sg, release_vif): - ged.return_value = [self._driver] - kp = kuryrport.KuryrPortHandler() - self._kp['status']['vifs'] = self._vifs_primitive - update_crd.side_effect = k_exc.K8sResourceNotFound(self._kp) - get_project.return_value = self._project_id - get_sg.return_value = self._security_groups - - with mock.patch.object(kp, 'k8s') as k8s: - k8s.get.return_value = self._pod - - kp.on_present(self._kp) - - k8s.get.assert_called_once_with(self._pod_uri) - - @mock.patch('kuryr_kubernetes.controller.drivers.vif_pool.MultiVIFPool.' - 'release_vif') - @mock.patch('kuryr_kubernetes.controller.drivers.default_security_groups.' - 'DefaultPodSecurityGroupsDriver.get_security_groups') - @mock.patch('kuryr_kubernetes.controller.drivers.default_project.' - 'DefaultPodProjectDriver.get_project') - @mock.patch('kuryr_kubernetes.controller.handlers.kuryrport.' - 'KuryrPortHandler._update_kuryrport_crd') - @mock.patch('kuryr_kubernetes.controller.drivers.vif_pool.MultiVIFPool.' - 'activate_vif') - @mock.patch('kuryr_kubernetes.clients.get_kubernetes_client') - @mock.patch('kuryr_kubernetes.controller.drivers.base.MultiVIFDriver.' 
- 'get_enabled_drivers') - def test_on_present_exception_during_update_crd(self, ged, get_k8s_client, - activate_vif, - update_crd, get_project, - get_sg, release_vif): - ged.return_value = [self._driver] - kp = kuryrport.KuryrPortHandler() - self._kp['status']['vifs'] = self._vifs_primitive - update_crd.side_effect = k_exc.K8sClientException() - get_project.return_value = self._project_id - get_sg.return_value = self._security_groups - - with mock.patch.object(kp, 'k8s') as k8s: - k8s.get.return_value = self._pod - - self.assertRaises(k_exc.ResourceNotReady, kp.on_present, self._kp) - - k8s.get.assert_called_once_with(self._pod_uri) - - update_crd.assert_called_once_with(self._kp, self._vifs) - - @mock.patch('kuryr_kubernetes.controller.drivers.default_project.' - 'DefaultPodProjectDriver.get_project') - @mock.patch('kuryr_kubernetes.controller.drivers.utils.get_services') - @mock.patch('kuryr_kubernetes.controller.handlers.kuryrport.' - 'KuryrPortHandler._update_services') - @mock.patch('kuryr_kubernetes.controller.drivers.default_security_groups.' - 'DefaultPodSecurityGroupsDriver.create_sg_rules') - @mock.patch('kuryr_kubernetes.controller.drivers.base.' - 'ServiceSecurityGroupsDriver.get_instance') - @mock.patch('kuryr_kubernetes.controller.drivers.base.LBaaSDriver.' - 'get_instance') - @mock.patch('kuryr_kubernetes.controller.handlers.kuryrport.' - 'KuryrPortHandler._update_kuryrport_crd') - @mock.patch('kuryr_kubernetes.controller.drivers.vif_pool.MultiVIFPool.' - 'activate_vif') - @mock.patch('kuryr_kubernetes.clients.get_kubernetes_client') - @mock.patch('kuryr_kubernetes.controller.drivers.utils.' - 'is_network_policy_enabled') - @mock.patch('kuryr_kubernetes.controller.drivers.base.MultiVIFDriver.' - 'get_enabled_drivers') - def test_on_present_np(self, ged, is_np_enabled, get_k8s_client, - activate_vif, update_crd, get_lb_instance, - get_sg_instance, create_sgr, update_services, - get_services, get_project): - ged.return_value = [self._driver] - kp = kuryrport.KuryrPortHandler() - self._kp['status']['vifs'] = self._vifs_primitive - - with mock.patch.object(kp, 'k8s') as k8s: - k8s.get.return_value = self._pod - - kp.on_present(self._kp) - - k8s.get.assert_called_once_with(self._pod_uri) - - activate_vif.assert_has_calls([mock.call(self._vif1, pod=self._pod, - retry_info=mock.ANY), - mock.call(self._vif2, pod=self._pod, - retry_info=mock.ANY)]) - update_crd.assert_called_once_with(self._kp, self._vifs) - create_sgr.assert_called_once_with(self._pod) - - @mock.patch('kuryr_kubernetes.controller.drivers.default_project.' - 'DefaultPodProjectDriver.get_project') - @mock.patch('kuryr_kubernetes.utils.get_parent_port_id') - @mock.patch('kuryr_kubernetes.utils.get_parent_port_ip') - @mock.patch('kuryr_kubernetes.clients.get_kubernetes_client') - @mock.patch('kuryr_kubernetes.controller.drivers.base.MultiVIFDriver.' 
- 'get_enabled_drivers') - def test_on_finalize_exception_on_pod(self, ged, k8s, gppip, gppid, - project_driver): - ged.return_value = [self._driver] - kp = kuryrport.KuryrPortHandler() - self._kp['metadata']['deletionTimestamp'] = 'foobar' - self._kp['status']['vifs'] = self._vifs_primitive - - with mock.patch.object(kp, 'k8s') as k8s: - k8s.get.side_effect = k_exc.K8sResourceNotFound(self._pod) - - self.assertIsNone(kp.on_finalize(self._kp)) - - k8s.get.assert_has_calls([mock.call(self._pod_uri), - mock.call(self._node_uri)]) - k8s.remove_finalizer.assert_has_calls( - (mock.call(mock.ANY, constants.POD_FINALIZER), - mock.call(self._kp, constants.KURYRPORT_FINALIZER))) - - @mock.patch('kuryr_kubernetes.controller.handlers.kuryrport.' - 'KuryrPortHandler._update_services') - @mock.patch('kuryr_kubernetes.controller.drivers.utils.get_services') - @mock.patch('kuryr_kubernetes.controller.drivers.base.' - 'ServiceSecurityGroupsDriver.get_instance') - @mock.patch('kuryr_kubernetes.controller.drivers.base.LBaaSDriver.' - 'get_instance') - @mock.patch('kuryr_kubernetes.controller.drivers.utils.' - 'is_network_policy_enabled') - @mock.patch('kuryr_kubernetes.controller.drivers.vif_pool.MultiVIFPool.' - 'release_vif') - @mock.patch('kuryr_kubernetes.controller.drivers.default_security_groups.' - 'DefaultPodSecurityGroupsDriver.delete_sg_rules') - @mock.patch('kuryr_kubernetes.controller.drivers.default_project.' - 'DefaultPodProjectDriver.get_project') - @mock.patch('kuryr_kubernetes.clients.get_kubernetes_client') - @mock.patch('kuryr_kubernetes.controller.drivers.base.MultiVIFDriver.' - 'get_enabled_drivers') - def test_on_finalize_np(self, ged, k8s, get_project, delete_sg_rules, - release_vif, is_np_enabled, get_lb_instance, - get_sg_instance, get_services, update_services): - ged.return_value = [self._driver] - CONF.set_override('enforce_sg_rules', True, group='octavia_defaults') - self.addCleanup(CONF.clear_override, 'enforce_sg_rules', - group='octavia_defaults') - kp = kuryrport.KuryrPortHandler() - self._kp['status']['vifs'] = self._vifs_primitive - get_project.return_value = self._project_id - selector = mock.sentinel.selector - delete_sg_rules.return_value = selector - get_services.return_value = mock.sentinel.services - - with mock.patch.object(kp, 'k8s') as k8s: - k8s.get.return_value = self._pod - - kp.on_finalize(self._kp) - - k8s.get.assert_called_once_with(self._pod_uri) - k8s.remove_finalizer.assert_has_calls( - [mock.call(self._pod, constants.POD_FINALIZER), - mock.call(self._kp, constants.KURYRPORT_FINALIZER)]) - - delete_sg_rules.assert_called_once_with(self._pod) - release_vif.assert_has_calls([mock.call(self._pod, self._vif1, - self._project_id), - mock.call(self._pod, self._vif2, - self._project_id)]) - - get_services.assert_called_once() - update_services.assert_called_once_with(mock.sentinel.services, - selector, self._project_id) - - @mock.patch('kuryr_kubernetes.clients.get_kubernetes_client') - @mock.patch('kuryr_kubernetes.controller.drivers.base.MultiVIFDriver.' - 'get_enabled_drivers') - def test_on_finalize_pod_running(self, ged, k8s): - ged.return_value = [self._driver] - # copy, so it will not be affected by other tests run in parallel. 
-        # NOTE: dict() alone is a shallow copy, so the nested 'metadata'
-        # dict must be copied too before deleting the key, otherwise
-        # self._pod would still be mutated.
-        pod = dict(self._pod)
-        pod['metadata'] = dict(self._pod['metadata'])
-        del pod['metadata']['deletionTimestamp']
-
-        kp = kuryrport.KuryrPortHandler()
-
-        with mock.patch.object(kp, 'k8s') as k8s:
-            k8s.get.return_value = pod
-            self.assertIsNone(kp.on_finalize(self._kp))
-            k8s.get.assert_called_once_with(self._pod_uri)
-
-    @mock.patch('kuryr_kubernetes.controller.handlers.kuryrport.'
-                'KuryrPortHandler._update_kuryrport_crd')
-    @mock.patch('kuryr_kubernetes.controller.drivers.vif_pool.MultiVIFPool.'
-                'request_vif')
-    @mock.patch('kuryr_kubernetes.controller.drivers.default_subnet.'
-                'DefaultPodSubnetDriver.get_subnets')
-    @mock.patch('kuryr_kubernetes.controller.drivers.default_security_groups.'
-                'DefaultPodSecurityGroupsDriver.get_security_groups')
-    @mock.patch('kuryr_kubernetes.controller.drivers.default_project.'
-                'DefaultPodProjectDriver.get_project')
-    @mock.patch('kuryr_kubernetes.clients.get_kubernetes_client')
-    @mock.patch('kuryr_kubernetes.controller.drivers.base.MultiVIFDriver.'
-                'get_enabled_drivers')
-    def test_get_vifs(self, ged, k8s, get_project, get_sg, get_subnets,
-                      request_vif, update_crd):
-        ged.return_value = [self._driver]
-        kp = kuryrport.KuryrPortHandler()
-        kp.k8s.get.return_value = self._pod
-        get_sg.return_value = self._security_groups
-        get_project.return_value = self._project_id
-        get_subnets.return_value = mock.sentinel.subnets
-        request_vif.return_value = self._vif1
-
-        self.assertTrue(kp.get_vifs(self._kp))
-
-        kp.k8s.get.assert_called_once_with(self._pod_uri)
-        get_project.assert_called_once_with(self._pod)
-        get_sg.assert_called_once_with(self._pod, self._project_id)
-        get_subnets.assert_called_once_with(self._pod, self._project_id)
-        request_vif.assert_called_once_with(self._pod, self._project_id,
-                                            mock.sentinel.subnets,
-                                            self._security_groups)
-        update_crd.assert_called_once_with(self._kp,
-                                           {constants.DEFAULT_IFNAME:
                                            {'default': True,
-                                             'vif': self._vif1}})
-
-    @mock.patch('kuryr_kubernetes.clients.get_kubernetes_client')
-    @mock.patch('kuryr_kubernetes.controller.drivers.base.MultiVIFDriver.'
-                'get_enabled_drivers')
-    def test_get_vifs_pod_not_found(self, ged, k8s):
-        ged.return_value = [self._driver]
-        kp = kuryrport.KuryrPortHandler()
-        kp.k8s.get.side_effect = k_exc.K8sResourceNotFound(self._pod)
-
-        self.assertFalse(kp.get_vifs(self._kp))
-
-        kp.k8s.get.assert_called_once_with(self._pod_uri)
-        kp.k8s.delete.assert_called_once_with(self._kp_uri)
-
-    @mock.patch('kuryr_kubernetes.controller.drivers.default_subnet.'
-                'DefaultPodSubnetDriver.get_subnets')
-    @mock.patch('kuryr_kubernetes.controller.drivers.default_security_groups.'
-                'DefaultPodSecurityGroupsDriver.get_security_groups')
-    @mock.patch('kuryr_kubernetes.controller.drivers.default_project.'
-                'DefaultPodProjectDriver.get_project')
-    @mock.patch('kuryr_kubernetes.clients.get_kubernetes_client')
-    @mock.patch('kuryr_kubernetes.controller.drivers.base.MultiVIFDriver.'
- 'get_enabled_drivers') - def test_get_vifs_subnet_error(self, ged, k8s, get_project, get_sg, - get_subnets): - ged.return_value = [self._driver] - kp = kuryrport.KuryrPortHandler() - kp.k8s.get.return_value = self._pod - get_sg.return_value = self._security_groups - get_project.return_value = self._project_id - get_subnets.side_effect = os_exc.ResourceNotFound() - - self.assertFalse(kp.get_vifs(self._kp)) - - kp.k8s.get.assert_called_once_with(self._pod_uri) - get_project.assert_called_once_with(self._pod) - get_sg.assert_called_once_with(self._pod, self._project_id) - get_subnets.assert_called_once_with(self._pod, self._project_id) - - @mock.patch('kuryr_kubernetes.controller.drivers.vif_pool.MultiVIFPool.' - 'request_vif') - @mock.patch('kuryr_kubernetes.controller.drivers.default_subnet.' - 'DefaultPodSubnetDriver.get_subnets') - @mock.patch('kuryr_kubernetes.controller.drivers.default_security_groups.' - 'DefaultPodSecurityGroupsDriver.get_security_groups') - @mock.patch('kuryr_kubernetes.controller.drivers.default_project.' - 'DefaultPodProjectDriver.get_project') - @mock.patch('kuryr_kubernetes.clients.get_kubernetes_client') - @mock.patch('kuryr_kubernetes.controller.drivers.base.MultiVIFDriver.' - 'get_enabled_drivers') - def test_get_vifs_no_vif(self, ged, k8s, get_project, get_sg, get_subnets, - request_vif): - ged.return_value = [self._driver] - kp = kuryrport.KuryrPortHandler() - kp.k8s.get.return_value = self._pod - get_sg.return_value = self._security_groups - get_project.return_value = self._project_id - get_subnets.return_value = mock.sentinel.subnets - request_vif.return_value = None - - self.assertFalse(kp.get_vifs(self._kp)) - - kp.k8s.get.assert_called_once_with(self._pod_uri) - get_project.assert_called_once_with(self._pod) - get_sg.assert_called_once_with(self._pod, self._project_id) - get_subnets.assert_called_once_with(self._pod, self._project_id) - request_vif.assert_called_once_with(self._pod, self._project_id, - mock.sentinel.subnets, - self._security_groups) - - @mock.patch('kuryr_kubernetes.controller.drivers.vif_pool.MultiVIFPool.' - 'request_vif') - @mock.patch('kuryr_kubernetes.controller.drivers.default_subnet.' - 'DefaultPodSubnetDriver.get_subnets') - @mock.patch('kuryr_kubernetes.controller.drivers.default_security_groups.' - 'DefaultPodSecurityGroupsDriver.get_security_groups') - @mock.patch('kuryr_kubernetes.controller.drivers.default_project.' - 'DefaultPodProjectDriver.get_project') - @mock.patch('kuryr_kubernetes.clients.get_kubernetes_client') - @mock.patch('kuryr_kubernetes.controller.drivers.base.MultiVIFDriver.' - 'get_enabled_drivers') - def test_get_vifs_resource_not_found(self, ged, k8s, get_project, get_sg, - get_subnets, request_vif): - ged.return_value = [self._driver] - kp = kuryrport.KuryrPortHandler() - kp.k8s.get.return_value = self._pod - get_sg.return_value = self._security_groups - get_project.return_value = self._project_id - get_subnets.return_value = mock.sentinel.subnets - request_vif.side_effect = os_exc.ResourceNotFound() - - self.assertRaises(k_exc.ResourceNotReady, kp.get_vifs, self._kp) - - kp.k8s.get.assert_called_once_with(self._pod_uri) - get_project.assert_called_once_with(self._pod) - get_sg.assert_called_once_with(self._pod, self._project_id) - get_subnets.assert_called_once_with(self._pod, self._project_id) - request_vif.assert_called_once_with(self._pod, self._project_id, - mock.sentinel.subnets, - self._security_groups) - - @mock.patch('kuryr_kubernetes.controller.handlers.kuryrport.' 
- 'KuryrPortHandler._update_kuryrport_crd') - @mock.patch('kuryr_kubernetes.controller.drivers.vif_pool.MultiVIFPool.' - 'request_vif') - @mock.patch('kuryr_kubernetes.controller.drivers.default_subnet.' - 'DefaultPodSubnetDriver.get_subnets') - @mock.patch('kuryr_kubernetes.controller.drivers.default_security_groups.' - 'DefaultPodSecurityGroupsDriver.get_security_groups') - @mock.patch('kuryr_kubernetes.controller.drivers.default_project.' - 'DefaultPodProjectDriver.get_project') - @mock.patch('kuryr_kubernetes.clients.get_kubernetes_client') - @mock.patch('kuryr_kubernetes.controller.drivers.base.MultiVIFDriver.' - 'get_enabled_drivers') - def test_get_vifs_with_additional_vif(self, ged, k8s, get_project, get_sg, - get_subnets, request_vif, - update_crd): - ged.return_value = [self._driver] - kp = kuryrport.KuryrPortHandler() - kp.k8s.get.return_value = self._pod - fake_driver = mock.MagicMock() - fake_driver.request_additional_vifs.return_value = [self._vif2] - kp._drv_multi_vif.append(fake_driver) - get_sg.return_value = self._security_groups - get_project.return_value = self._project_id - get_subnets.return_value = mock.sentinel.subnets - request_vif.return_value = self._vif1 - - self.assertTrue(kp.get_vifs(self._kp)) - - kp.k8s.get.assert_called_once_with(self._pod_uri) - get_project.assert_called_once_with(self._pod) - get_sg.assert_called_once_with(self._pod, self._project_id) - get_subnets.assert_called_once_with(self._pod, self._project_id) - request_vif.assert_called_once_with(self._pod, self._project_id, - mock.sentinel.subnets, - self._security_groups) - update_crd.assert_called_once_with(self._kp, - {'eth0': {'default': True, - 'vif': self._vif1}, - 'eth1': {'default': False, - 'vif': self._vif2}}) - - @mock.patch('kuryr_kubernetes.controller.drivers.vif_pool.MultiVIFPool.' - 'release_vif') - @mock.patch('kuryr_kubernetes.controller.handlers.kuryrport.' - 'KuryrPortHandler._update_kuryrport_crd') - @mock.patch('kuryr_kubernetes.controller.drivers.vif_pool.MultiVIFPool.' - 'request_vif') - @mock.patch('kuryr_kubernetes.controller.drivers.default_subnet.' - 'DefaultPodSubnetDriver.get_subnets') - @mock.patch('kuryr_kubernetes.controller.drivers.default_security_groups.' - 'DefaultPodSecurityGroupsDriver.get_security_groups') - @mock.patch('kuryr_kubernetes.controller.drivers.default_project.' - 'DefaultPodProjectDriver.get_project') - @mock.patch('kuryr_kubernetes.clients.get_kubernetes_client') - @mock.patch('kuryr_kubernetes.controller.drivers.base.MultiVIFDriver.' 
- 'get_enabled_drivers') - def test_get_exception_on_update_crd(self, ged, k8s, get_project, get_sg, - get_subnets, request_vif, update_crd, - release_vif): - ged.return_value = [self._driver] - kp = kuryrport.KuryrPortHandler() - kp.k8s.get.return_value = self._pod - get_sg.return_value = self._security_groups - get_project.return_value = self._project_id - get_subnets.return_value = mock.sentinel.subnets - request_vif.return_value = self._vif1 - update_crd.side_effect = k_exc.K8sClientException() - - self.assertTrue(kp.get_vifs(self._kp)) - - kp.k8s.get.assert_called_once_with(self._pod_uri) - get_project.assert_called_once_with(self._pod) - get_sg.assert_called_once_with(self._pod, self._project_id) - get_subnets.assert_called_once_with(self._pod, self._project_id) - request_vif.assert_called_once_with(self._pod, self._project_id, - mock.sentinel.subnets, - self._security_groups) - update_crd.assert_called_once_with(self._kp, - {constants.DEFAULT_IFNAME: - {'default': True, - 'vif': self._vif1}}) - release_vif.assert_called_once_with(self._pod, self._vif1, - self._project_id) - - @mock.patch('kuryr_kubernetes.clients.get_kubernetes_client') - @mock.patch('kuryr_kubernetes.controller.drivers.base.MultiVIFDriver.' - 'get_enabled_drivers') - def test_update_kuryrport_crd(self, ged, k8s): - ged.return_value = [self._driver] - kp = kuryrport.KuryrPortHandler() - - kp._update_kuryrport_crd(self._kp, self._vifs) - self._vif1.obj_reset_changes() - self._vif2.obj_reset_changes() - vif1 = self._vif1.obj_to_primitive() - vif2 = self._vif2.obj_to_primitive() - - arg = {'vifs': {'eth0': {'default': True, 'vif': vif1}, - 'eth1': {'default': False, 'vif': vif2}}} - kp.k8s.patch_crd.assert_called_once_with('status', - utils.get_res_link(self._kp), - arg) - - @mock.patch('kuryr_kubernetes.controller.drivers.utils.' - 'service_matches_affected_pods') - @mock.patch('kuryr_kubernetes.clients.get_kubernetes_client') - @mock.patch('kuryr_kubernetes.controller.drivers.base.MultiVIFDriver.' - 'get_enabled_drivers') - def test_update_services(self, ged, k8s, smap): - ged.return_value = [self._driver] - kp = kuryrport.KuryrPortHandler() - kp._drv_lbaas = mock.MagicMock() - kp._drv_svc_sg = mock.MagicMock() - kp._drv_svc_sg.get_security_groups.return_value = self._security_groups - - smap.side_effect = [True, False] - services = {'items': ['service1', 'service2']} - - kp._update_services(services, mock.sentinel.crd_pod_selectors, - self._project_id) - - smap.assert_has_calls([mock.call('service1', - mock.sentinel.crd_pod_selectors), - mock.call('service2', - mock.sentinel.crd_pod_selectors)]) - kp._drv_svc_sg.get_security_groups.assert_called_once_with( - 'service1', self._project_id) - kp._drv_lbaas.update_lbaas_sg.assert_called_once_with( - 'service1', self._security_groups) diff --git a/kuryr_kubernetes/tests/unit/controller/handlers/test_lbaas.py b/kuryr_kubernetes/tests/unit/controller/handlers/test_lbaas.py deleted file mode 100644 index e1e6c39bc..000000000 --- a/kuryr_kubernetes/tests/unit/controller/handlers/test_lbaas.py +++ /dev/null @@ -1,448 +0,0 @@ -# Copyright (c) 2016 Mirantis, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_log import log as logging -from unittest import mock - -import os_vif.objects.network as osv_network -import os_vif.objects.subnet as osv_subnet - -from kuryr_kubernetes.controller.handlers import lbaas as h_lbaas -from kuryr_kubernetes import exceptions as k_exc -from kuryr_kubernetes.tests import base as test_base - -_SUPPORTED_LISTENER_PROT = ('HTTP', 'HTTPS', 'TCP') - - -@mock.patch('kuryr_kubernetes.controller.drivers.base.LBaaSDriver.' - 'get_instance', mock.Mock()) -@mock.patch('kuryr_kubernetes.clients.get_kubernetes_client', mock.Mock()) -class TestServiceHandler(test_base.TestCase): - @mock.patch('kuryr_kubernetes.clients.get_kubernetes_client') - def test_on_present(self, get_k8s_client): - svc_event = { - 'apiVersion': 'v1', - 'kind': 'Service', - "metadata": { - "creationTimestamp": "2020-07-25T18:15:12Z", - "finalizers": [ - "openstack.org/service" - ], - "labels": { - "run": "test" - }, - "name": "test", - "namespace": "test", - "resourceVersion": "413753", - "uid": "a026ae48-6141-4029-b743-bac48dae7f06" - }, - "spec": { - "clusterIP": "2.2.2.2", - "ports": [ - { - "port": 1, - "protocol": "TCP", - "targetPort": 1 - } - ], - "selector": { - "run": "test" - }, - "sessionAffinity": "None", - "type": "ClusterIP" - }, - "status": { - "loadBalancer": {} - } - } - - old_spec = { - 'apiVersion': 'openstack.org/v1', - 'kind': 'KuryrLoadBalancer', - 'metadata': { - 'name': 'test', - 'finalizers': [''], - }, - 'spec': { - 'ip': '1.1.1.1' - } - } - new_spec = { - 'apiVersion': 'openstack.org/v1', - 'kind': 'KuryrLoadBalancer', - 'metadata': { - 'name': 'test', - 'finalizers': [''], - }, - 'spec': { - 'ip': '2.2.2.2' - } - } - - project_id = mock.sentinel.project_id - m_drv_project = mock.Mock() - m_drv_project.get_project.return_value = project_id - - m_handler = mock.Mock(spec=h_lbaas.ServiceHandler) - m_handler._has_lbaas_spec_changes.return_value = True - m_handler.create_crd_spec.return_value = new_spec - m_handler._should_ignore.return_value = False - m_handler._drv_project = m_drv_project - m_handler.k8s = mock.Mock() - - h_lbaas.ServiceHandler.on_present(m_handler, svc_event) - m_handler.create_crd_spec(svc_event) - m_handler._has_lbaas_spec_changes.return_value = True - m_handler._update_crd_spec(old_spec, svc_event) - - @mock.patch('kuryr_kubernetes.clients.get_kubernetes_client') - def test_on_present_no_changes(self, get_k8s_client): - svc_event = { - 'apiVersion': 'v1', - 'kind': 'Service', - "metadata": { - "creationTimestamp": "2020-07-25T18:15:12Z", - "finalizers": [ - "openstack.org/service" - ], - "labels": { - "run": "test" - }, - "name": "test", - "namespace": "test", - "resourceVersion": "413753", - "uid": "a026ae48-6141-4029-b743-bac48dae7f06" - }, - "spec": { - "clusterIP": "2.2.2.2", - "ports": [ - { - "port": 1, - "protocol": "TCP", - "targetPort": 1 - } - ], - "selector": { - "run": "test" - }, - "sessionAffinity": "None", - "type": "ClusterIP" - }, - "status": { - "loadBalancer": {} - } - } - - old_spec = { - 'apiVersion': 'openstack.org/v1', - 'kind': 'KuryrLoadBalancer', - 'metadata': { - 'name': 'test', - 'finalizers': [''], - }, - 
'spec': { - 'ip': '1.1.1.1' - } - } - - project_id = mock.sentinel.project_id - m_drv_project = mock.Mock() - m_drv_project.get_project.return_value = project_id - - m_handler = mock.Mock(spec=h_lbaas.ServiceHandler) - m_handler._has_lbaas_spec_changes.return_value = True - m_handler.create_crd_spec.return_value = old_spec - m_handler._should_ignore.return_value = False - m_handler._drv_project = m_drv_project - m_handler.k8s = mock.Mock() - - h_lbaas.ServiceHandler.on_present(m_handler, svc_event) - m_handler.create_crd_spec(svc_event) - m_handler._has_lbaas_spec_changes.return_value = False - - def test_get_service_ip(self): - svc_body = {'spec': {'type': 'ClusterIP', - 'clusterIP': '192.168.0.11'}} - handler = h_lbaas.ServiceHandler() - ret = handler._get_service_ip(svc_body) - self.assertEqual('192.168.0.11', ret) - - svc_body = {'spec': {'type': 'LoadBalancer', - 'clusterIP': '192.168.0.11'}} - ret = handler._get_service_ip(svc_body) - self.assertEqual('192.168.0.11', ret) - - def test_get_service_ip_funny(self): - svc_body = {'spec': {'type': 'ClusterIP', - 'clusterIP': '172.30.0.011'}} - handler = h_lbaas.ServiceHandler() - - ret = handler._get_service_ip(svc_body) - self.assertEqual('172.30.0.11', ret) - - def test_is_supported_type_clusterip(self): - m_handler = mock.Mock(spec=h_lbaas.ServiceHandler) - svc_body = {'spec': {'type': 'ClusterIP', - 'clusterIP': mock.sentinel.cluster_ip}} - - ret = h_lbaas.ServiceHandler._is_supported_type(m_handler, svc_body) - self.assertEqual(ret, True) - - def test_is_supported_type_loadbalancer(self): - m_handler = mock.Mock(spec=h_lbaas.ServiceHandler) - svc_body = {'spec': {'type': 'LoadBalancer', - 'clusterIP': mock.sentinel.cluster_ip}} - - ret = h_lbaas.ServiceHandler._is_supported_type(m_handler, svc_body) - self.assertEqual(ret, True) - - def _make_test_net_obj(self, cidr_list): - subnets = [osv_subnet.Subnet(cidr=cidr) for cidr in cidr_list] - subnets_list = osv_subnet.SubnetList(objects=subnets) - return osv_network.Network(subnets=subnets_list) - - @mock.patch('kuryr_kubernetes.utils.has_port_changes') - def test_has_lbaas_spec_changes(self, m_port_changes): - m_handler = mock.Mock(spec=h_lbaas.ServiceHandler) - service = mock.sentinel.service - lbaas_spec = mock.sentinel.lbaas_spec - - for has_ip_changes in (True, False): - for has_port_changes in (True, False): - for timeout in (True, False): - for provider in (True, False): - m_handler._has_ip_changes.return_value = has_ip_changes - m_port_changes.return_value = has_port_changes - m_handler._has_timeout_changes.return_value = timeout - m_handler._has_provider_changes.return_value = provider - ret = h_lbaas.ServiceHandler._has_lbaas_spec_changes( - m_handler, service, lbaas_spec) - self.assertEqual( - has_ip_changes or has_port_changes or timeout - or provider, ret) - - def test_has_ip_changes(self): - m_handler = mock.Mock(spec=h_lbaas.ServiceHandler) - m_service = {'apiVersion': 'v1', - 'kind': 'Service', - "metadata": {"name": "test", - "namespace": "test"}} - m_handler._get_service_ip.return_value = '1.1.1.1' - m_lbaas_spec = mock.MagicMock() - m_lbaas_spec.ip.__str__.return_value = '2.2.2.2' - - ret = h_lbaas.ServiceHandler._has_ip_changes( - m_handler, m_service, m_lbaas_spec) - self.assertTrue(ret) - - def test_has_ip_changes__no_changes(self): - service = { - 'apiVersion': 'v1', - 'kind': 'Service', - "metadata": { - "creationTimestamp": "2020-07-25T18:15:12Z", - "finalizers": [ - "openstack.org/service" - ], - "labels": { - "run": "test" - }, - "name": "test", - "namespace": 
"test", - "resourceVersion": "413753", - "uid": "a026ae48-6141-4029-b743-bac48dae7f06" - }, - "spec": { - "clusterIP": "1.1.1.1" - } - } - m_handler = mock.Mock(spec=h_lbaas.ServiceHandler) - m_handler._get_service_ip.return_value = '1.1.1.1' - lb_crd = { - 'apiVersion': 'openstack.org/v1', - 'kind': 'KuryrLoadBalancer', - 'metadata': { - 'name': 'test', - 'finalizers': [''], - }, - 'spec': { - 'ip': '1.1.1.1' - } - } - - ret = h_lbaas.ServiceHandler._has_ip_changes( - m_handler, service, lb_crd) - self.assertFalse(ret) - - def test_has_ip_changes__no_spec(self): - m_handler = mock.Mock(spec=h_lbaas.ServiceHandler) - m_handler._get_service_ip.return_value = '1.1.1.1' - service = { - 'apiVersion': 'v1', - 'kind': 'Service', - "metadata": { - "creationTimestamp": "2020-07-25T18:15:12Z", - "finalizers": [ - "openstack.org/service" - ], - "labels": { - "run": "test" - }, - "name": "test", - "namespace": "test", - "resourceVersion": "413753", - "uid": "a026ae48-6141-4029-b743-bac48dae7f06" - }, - "spec": { - "clusterIP": "1.1.1.1" - } - } - lb_crd = { - "spec": { - "ip": None - } - } - - ret = h_lbaas.ServiceHandler._has_ip_changes( - m_handler, service, lb_crd) - self.assertTrue(ret) - - def test_has_ip_changes__no_nothing(self): - m_handler = mock.Mock(spec=h_lbaas.ServiceHandler) - service = { - 'apiVersion': 'v1', - 'kind': 'Service', - "metadata": { - "creationTimestamp": "2020-07-25T18:15:12Z", - "finalizers": [ - "openstack.org/service" - ], - "labels": { - "run": "test" - }, - "name": "test", - "namespace": "test", - "resourceVersion": "413753", - "uid": "a026ae48-6141-4029-b743-bac48dae7f06" - }, - "spec": { - "clusterIP": "1.1.1.1" - } - } - lb_crd = { - "spec": { - "ip": None - } - } - m_handler._get_service_ip.return_value = None - - ret = h_lbaas.ServiceHandler._has_ip_changes( - m_handler, service, lb_crd) - self.assertFalse(ret) - - def test_set_lbaas_spec(self): - self.skipTest("skipping until generalised annotation handling is " - "implemented") - - def test_get_lbaas_spec(self): - self.skipTest("skipping until generalised annotation handling is " - "implemented") - - -class TestEndpointsHandler(test_base.TestCase): - - def setUp(self): - super().setUp() - self._ep_name = 'my-service' - self._ep_namespace = mock.sentinel.namespace - self._ep_ip = '1.2.3.4' - - self._ep = { - "kind": "Endpoints", - "apiVersion": "v1", - "metadata": { - "name": self._ep_name, - "namespace": self._ep_namespace, - }, - "subsets": [ - { - "addresses": [ - { - "ip": self._ep_ip - }, - ], - "ports": [ - { - "port": 8080, - "protocol": "TCP" - } - ] - } - ] - } - - self._klb_name = 'my-service' - self._klb_ip = '1.1.1.1' - - self._klb = { - 'apiVersion': 'openstack.org/v1', - 'kind': 'KuryrLoadBalancer', - 'metadata': { - 'name': self._klb_name, - 'finalizers': [''], - }, - 'spec': { - 'ip': self._klb_ip - } - } - - def test_on_deleted(self): - m_handler = mock.Mock(spec=h_lbaas.EndpointsHandler) - h_lbaas.EndpointsHandler.on_deleted(m_handler, self._ep) - m_handler._remove_endpoints.assert_called_once_with(self._ep) - - @mock.patch('kuryr_kubernetes.utils.get_klb_crd_path') - def test__remove_endpoints(self, get_klb_crd_path): - m_handler = mock.Mock() - h_lbaas.EndpointsHandler._remove_endpoints(m_handler, self._ep) - m_handler.k8s.patch_crd.assert_called_once_with( - 'spec', get_klb_crd_path(self._ep), 'endpointSlices', - action='remove') - - @mock.patch.object(logging.getLogger( - 'kuryr_kubernetes.controller.handlers.lbaas'), - 'debug') - def test__remove_endpoints_not_found(self, log): - 
m_handler = mock.Mock() - m_handler.k8s.patch_crd.side_effect = k_exc.K8sResourceNotFound('foo') - h_lbaas.EndpointsHandler._remove_endpoints(m_handler, self._ep) - log.assert_called_once() - - def test__remove_endpoints_client_exception(self): - m_handler = mock.Mock() - m_handler.k8s.patch_crd.side_effect = k_exc.K8sClientException() - self.assertRaises(k_exc.K8sClientException, - h_lbaas.EndpointsHandler._remove_endpoints, - m_handler, self._ep) - - @mock.patch.object(logging.getLogger( - 'kuryr_kubernetes.controller.handlers.lbaas'), - 'warning') - def test__remove_endpoints_unprocessable_entity(self, log): - m_handler = mock.Mock() - m_handler.k8s.patch_crd.side_effect = k_exc.K8sUnprocessableEntity( - 'bar') - h_lbaas.EndpointsHandler._remove_endpoints(m_handler, self._ep) - log.assert_not_called() diff --git a/kuryr_kubernetes/tests/unit/controller/handlers/test_loadbalancer.py b/kuryr_kubernetes/tests/unit/controller/handlers/test_loadbalancer.py deleted file mode 100755 index 58987b1e6..000000000 --- a/kuryr_kubernetes/tests/unit/controller/handlers/test_loadbalancer.py +++ /dev/null @@ -1,696 +0,0 @@ -# Copyright (c) 2016 Mirantis, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from unittest import mock -import uuid - -import os_vif.objects.network as osv_network -import os_vif.objects.subnet as osv_subnet - -from kuryr_kubernetes import constants as k_const -from kuryr_kubernetes.controller.drivers import base as drv_base -from kuryr_kubernetes.controller.handlers import loadbalancer as h_lb -from kuryr_kubernetes.tests import base as test_base -from kuryr_kubernetes.tests.unit import kuryr_fixtures as k_fix - -_SUPPORTED_LISTENER_PROT = ('HTTP', 'HTTPS', 'TCP') - - -def get_lb_crd(): - return { - 'apiVersion': 'openstack.org/v1', - 'kind': 'KuryrLoadBalancer', - "metadata": { - "creationTimestamp": "2020-07-28T13:13:30Z", - "finalizers": [ - "" - ], - "generation": 6, - "name": "test", - "namespace": "default", - "resourceVersion": "111871", - "uid": "584fe3ea-04dd-43f7-be2f-713e861694ec" - }, - "spec": { - "ip": "1.2.3.4", - "ports": [ - { - "port": 1, - "protocol": "TCP", - "targetPort": "1" - } - ], - "project_id": "1023456789120", - "security_groups_ids": [ - "1d134e68-5653-4192-bda2-4214319af799", - "31d7b8c2-75f1-4125-9565-8c15c5cf046c" - ], - "subnet_id": "123456789120", - "endpointSlices": [ - { - "endpoints": [ - { - "addresses": ["1.1.1.1"], - "targetRef": { - "kind": "Pod", - "name": "test-f87976f9c-thjbk", - "namespace": "default", - "resourceVersion": "111701", - "uid": "10234567800" - } - } - ], - "ports": [ - { - "port": 2, - "protocol": "TCP" - } - ] - } - ], - "type": "LoadBalancer", - "provider": "ovn" - }, - "status": { - "listeners": [ - { - "id": "012345678912", - "loadbalancer_id": "01234567890", - "name": "default/test:TCP:80", - "port": 1, - "project_id": "12345678912", - "protocol": "TCP" - } - ], - "loadbalancer": { - "id": "01234567890", - "ip": "1.2.3.4", - "name": "default/test", - "port_id": "1023456789120", - 
"project_id": "12345678912", - "provider": "amphora", - "security_groups": [ - "1d134e68-5653-4192-bda2-4214319af799", - "31d7b8c2-75f1-4125-9565-8c15c5cf046c" - ], - "subnet_id": "123456789120" - }, - "members": [ - { - "id": "0123456789", - "ip": "1.1.1.1", - "name": "default/test-f87976f9c-thjbk:8080", - "pool_id": "1234567890", - "port": 2, - "project_id": "12345678912", - "subnet_id": "123456789120" - } - ], - "pools": [ - { - "id": "1234567890", - "listener_id": "012345678912", - "loadbalancer_id": "01234567890", - "name": "default/test:TCP:80", - "project_id": "12345678912", - "protocol": "TCP" - } - ], - 'service_pub_ip_info': { - 'ip_id': '1.2.3.5', - 'ip_addr': 'ec29d641-fec4-4f67-928a-124a76b3a888', - 'alloc_method': 'kk' - } - } - } - - -def get_lb_crds(): - return [ - { - 'apiVersion': 'openstack.org/v1', - 'kind': 'KuryrLoadBalancer', - "metadata": { - "creationTimestamp": "2020-07-28T13:13:30Z", - "finalizers": [ - "" - ], - "generation": 6, - "name": "test", - "namespace": "default", - "resourceVersion": "111871", - "uid": "584fe3ea-04dd-43f7-be2f-713e861694ec" - }, - "status": { - "listeners": [ - { - "id": "012345678912", - "loadbalancer_id": "01234567890", - "name": "default/test:TCP:80", - "port": 1, - "project_id": "12345678912", - "protocol": "TCP" - } - ], - "loadbalancer": { - "id": "01234567890", - "ip": "1.2.3.4", - "name": "default/test", - "port_id": "1023456789120", - "project_id": "12345678912", - "provider": "amphora", - "security_groups": [ - "1d134e68-5653-4192-bda2-4214319af799", - "31d7b8c2-75f1-4125-9565-8c15c5cf046c" - ], - "subnet_id": "123456789120" - }, - "pools": [ - { - "id": "1234567890", - "listener_id": "012345678912", - "loadbalancer_id": "01234567890", - "name": "default/test:TCP:80", - "project_id": "12345678912", - "protocol": "TCP" - } - ], - "members": [ - { - "id": "0123456789a", - "ip": "1.1.1.1", - "name": "default/test-f87976f9c-thjbk:8080", - "pool_id": "1234567890", - "port": 2, - "project_id": "12345678912", - "subnet_id": "123456789120" - } - ], - } - }, - { - 'apiVersion': 'openstack.org/v1', - 'kind': 'KuryrLoadBalancer', - "metadata": { - "creationTimestamp": "2020-07-28T13:13:30Z", - "finalizers": [ - "" - ], - "generation": 6, - "name": "demo", - "namespace": "default", - "resourceVersion": "111871", - "uid": "584fe3ea-04dd-43f7-be2f-713e861694ec" - }, - "status": { - "listeners": [ - { - "id": "012345678913", - "loadbalancer_id": "01234567891", - "name": "default/demo:TCP:80", - "port": 1, - "project_id": "12345678912", - "protocol": "TCP" - } - ], - "loadbalancer": { - "id": "01234567891", - "ip": "1.2.3.4", - "name": "default/demo", - "port_id": "1023456789120", - "project_id": "12345678912", - "provider": "amphora", - "security_groups": [ - "1d134e68-5653-4192-bda2-4214319af799", - "31d7b8c2-75f1-4125-9565-8c15c5cf046c" - ], - "subnet_id": "123456789120" - }, - "pools": [ - { - "id": "1234567891", - "listener_id": "012345678913", - "loadbalancer_id": "01234567891", - "name": "default/test:TCP:80", - "project_id": "12345678912", - "protocol": "TCP" - } - ], - "members": [ - { - "id": "0123456789b", - "ip": "1.1.1.1", - "name": "default/test_1-f87976f9c-thjbk:8080", - "pool_id": "1234567891", - "port": 2, - "project_id": "12345678913", - "subnet_id": "123456789121" - } - ], - } - } - ] - - -class FakeLBaaSDriver(drv_base.LBaaSDriver): - - def ensure_loadbalancer(self, name, project_id, subnet_id, ip, - security_groups_ids, service_type, provider=None): - - return { - 'name': name, - 'project_id': project_id, - 'subnet_id': 
subnet_id, - 'ip': ip, - 'id': str(uuid.uuid4()), - 'provider': provider - } - - def ensure_listener(self, loadbalancer, protocol, port, - service_type='ClusterIP'): - if protocol not in _SUPPORTED_LISTENER_PROT: - return None - - name = "%s:%s:%s" % (loadbalancer['name'], protocol, port) - return { - 'name': name, - 'project_id': loadbalancer['project_id'], - 'loadbalancer_id': loadbalancer['id'], - 'protocol': protocol, - 'port': port, - 'id': str(uuid.uuid4()) - } - - def ensure_pool(self, loadbalancer, listener): - return { - 'name': listener['name'], - 'project_id': loadbalancer['project_id'], - 'loadbalancer_id': loadbalancer['id'], - 'listener_id': listener['id'], - 'protocol': listener['protocol'], - 'id': str(uuid.uuid4()) - } - - def ensure_member(self, loadbalancer, pool, subnet_id, ip, port, - target_ref_namespace, target_ref_name, listener_port=None - ): - name = "%s:%s:%s" % (loadbalancer['name'], ip, port) - return { - 'name': name, - 'project_id': pool['project_id'], - 'pool_id': pool['id'], - 'subnet_id': subnet_id, - 'ip': ip, - 'port': port, - 'id': str(uuid.uuid4()) - } - - -@mock.patch('kuryr_kubernetes.utils.get_subnets_id_cidrs', - mock.Mock(return_value=[('id', 'cidr')])) -class TestKuryrLoadBalancerHandler(test_base.TestCase): - def test_on_present(self): - m_drv_service_pub_ip = mock.Mock() - m_drv_service_pub_ip.acquire_service_pub_ip_info.return_value = None - m_drv_service_pub_ip.associate_pub_ip.return_value = True - - m_handler = mock.Mock(spec=h_lb.KuryrLoadBalancerHandler) - - m_handler._should_ignore.return_value = False - m_handler._sync_lbaas_members.return_value = True - m_handler._drv_service_pub_ip = m_drv_service_pub_ip - - h_lb.KuryrLoadBalancerHandler.on_present(m_handler, get_lb_crd()) - - m_handler._should_ignore.assert_called_once_with(get_lb_crd()) - m_handler._sync_lbaas_members.assert_called_once_with( - get_lb_crd()) - - def _fake_sync_lbaas_members(self, crd): - loadbalancer = { - "id": "01234567890", - "ip": "1.2.3.4", - "name": "default/test", - "port_id": "1023456789120", - "project_id": "12345678912", - "provider": "amphora", - "security_groups": [ - "1d134e68-5653-4192-bda2-4214319af799", - "31d7b8c2-75f1-4125-9565-8c15c5cf046c" - ], - "subnet_id": "123456789120" - } - loadbalancer['port_id'] = 12345678 - crd['status']['loadbalancer'] = loadbalancer - crd['status']['service_pub_ip_info'] = None - return True - - def test_on_present_loadbalancer_service(self): - floating_ip = {'floating_ip_address': '1.2.3.5', - 'id': 'ec29d641-fec4-4f67-928a-124a76b3a888'} - - service_pub_ip_info = { - 'ip_id': floating_ip['id'], - 'ip_addr': floating_ip['floating_ip_address'], - 'alloc_method': 'kk' - } - crd = get_lb_crd() - m_drv_service_pub_ip = mock.Mock() - m_drv_service_pub_ip.acquire_service_pub_ip_info.return_value = ( - service_pub_ip_info) - m_drv_service_pub_ip.associate_pub_ip.return_value = True - - h = mock.Mock(spec=h_lb.KuryrLoadBalancerHandler) - h._should_ignore.return_value = False - h._sync_lbaas_members.return_value = self._fake_sync_lbaas_members(crd) - h._drv_service_pub_ip = m_drv_service_pub_ip - kubernetes = self.useFixture(k_fix.MockK8sClient()).client - kubernetes.get_kubernetes_client = mock.Mock() - kubernetes.get_kubernetes_client() - h_lb.KuryrLoadBalancerHandler.on_present(h, crd) - h._should_ignore.assert_called_once_with(crd) - h._update_lb_status.assert_called() - - @mock.patch('kuryr_kubernetes.utils.get_lbaas_spec') - @mock.patch('kuryr_kubernetes.utils.set_lbaas_state') - 
@mock.patch('kuryr_kubernetes.utils.get_lbaas_state') - def test_on_present_rollback(self, m_get_lbaas_state, - m_set_lbaas_state, m_get_lbaas_spec): - m_drv_service_pub_ip = mock.Mock() - m_drv_service_pub_ip.acquire_service_pub_ip_info.return_value = None - m_drv_service_pub_ip.associate_pub_ip.return_value = True - - m_handler = mock.Mock(spec=h_lb.KuryrLoadBalancerHandler) - m_handler._should_ignore.return_value = False - m_handler._sync_lbaas_members.return_value = True - m_handler._drv_service_pub_ip = m_drv_service_pub_ip - h_lb.KuryrLoadBalancerHandler.on_present(m_handler, get_lb_crd()) - - m_handler._should_ignore.assert_called_once_with(get_lb_crd()) - m_handler._sync_lbaas_members.assert_called_once_with( - get_lb_crd()) - - def test_on_cascade_deleted_lb_service(self): - m_handler = mock.Mock(spec=h_lb.KuryrLoadBalancerHandler) - m_handler._drv_lbaas = mock.Mock() - m_handler._drv_service_pub_ip = mock.Mock() - crd = get_lb_crd() - m_handler._drv_lbaas.release_loadbalancer( - loadbalancer=crd['status']['loadbalancer']) - m_handler._drv_service_pub_ip.release_pub_ip( - crd['status']['service_pub_ip_info']) - - def test_should_ignore(self): - m_handler = mock.Mock(spec=h_lb.KuryrLoadBalancerHandler) - loadbalancer_crd = get_lb_crd() - loadbalancer_crd['status'] = {} - m_handler._has_endpoints.return_value = True - - ret = h_lb.KuryrLoadBalancerHandler._should_ignore( - m_handler, loadbalancer_crd) - self.assertEqual(False, ret) - - m_handler._has_endpoints.assert_called_once_with(loadbalancer_crd) - - def test_should_ignore_member_scale_to_0(self): - m_handler = mock.Mock(spec=h_lb.KuryrLoadBalancerHandler) - m_handler._has_endpoints.return_value = False - loadbalancer_crd = get_lb_crd() - - ret = h_lb.KuryrLoadBalancerHandler._should_ignore( - m_handler, loadbalancer_crd) - self.assertEqual(False, ret) - - m_handler._has_endpoints.assert_called_once_with(loadbalancer_crd) - - def test_has_endpoints(self): - crd = get_lb_crd() - m_handler = mock.Mock(spec=h_lb.KuryrLoadBalancerHandler) - - ret = h_lb.KuryrLoadBalancerHandler._has_endpoints(m_handler, crd) - - self.assertEqual(True, ret) - - def test_get_pod_subnet(self): - subnet_id = mock.sentinel.subnet_id - project_id = mock.sentinel.project_id - target_ref = {'kind': k_const.K8S_OBJ_POD, - 'name': 'pod-name', - 'namespace': 'default', - 'spec': {}} - ip = '1.2.3.4' - m_handler = mock.Mock(spec=h_lb.KuryrLoadBalancerHandler) - m_drv_pod_project = mock.Mock() - m_drv_pod_project.get_project.return_value = project_id - m_handler._drv_pod_project = m_drv_pod_project - m_drv_pod_subnets = mock.Mock() - m_drv_pod_subnets.get_subnets.return_value = { - subnet_id: osv_network.Network(subnets=osv_subnet.SubnetList( - objects=[osv_subnet.Subnet(cidr='1.2.3.0/24')]))} - m_handler._drv_pod_subnets = m_drv_pod_subnets - - observed_subnet_id = h_lb.KuryrLoadBalancerHandler._get_pod_subnet( - m_handler, target_ref, ip) - - self.assertEqual(subnet_id, observed_subnet_id) - - def _sync_lbaas_members_impl(self, m_get_drv_lbaas, m_get_drv_project, - m_get_drv_subnets, subnet_id, project_id, - crd): - m_drv_lbaas = mock.Mock(wraps=FakeLBaaSDriver()) - m_drv_project = mock.Mock() - m_drv_project.get_project.return_value = project_id - m_drv_subnets = mock.Mock() - m_drv_subnets.get_subnets.return_value = { - subnet_id: mock.sentinel.subnet} - m_get_drv_lbaas.return_value = m_drv_lbaas - m_get_drv_project.return_value = m_drv_project - m_get_drv_subnets.return_value = m_drv_subnets - - handler = h_lb.KuryrLoadBalancerHandler() - - with 
mock.patch.object(handler, '_get_pod_subnet') as m_get_pod_subnet: - m_get_pod_subnet.return_value = subnet_id - handler._sync_lbaas_members(crd) - - lsnrs = {lsnr['id']: lsnr for lsnr in crd['status']['listeners']} - pools = {pool['id']: pool for pool in crd['status']['pools']} - observed_targets = sorted( - (str(member['ip']), ( - lsnrs[pools[member['pool_id']]['listener_id']]['port'], - member['port'])) - for member in crd['status']['members']) - return observed_targets - - @mock.patch('kuryr_kubernetes.utils.get_subnet_cidr') - @mock.patch('kuryr_kubernetes.controller.drivers.base.' - 'ServiceSecurityGroupsDriver.get_instance') - @mock.patch('kuryr_kubernetes.controller.drivers.base.' - 'ServiceProjectDriver.get_instance') - @mock.patch('kuryr_kubernetes.clients.get_kubernetes_client') - @mock.patch('kuryr_kubernetes.controller.drivers.base' - '.PodSubnetsDriver.get_instance') - @mock.patch('kuryr_kubernetes.controller.drivers.base' - '.PodProjectDriver.get_instance') - @mock.patch('kuryr_kubernetes.controller.drivers.base' - '.LBaaSDriver.get_instance') - @mock.patch('kuryr_kubernetes.controller.drivers.base' - '.NodesSubnetsDriver.get_instance', mock.Mock()) - def test_sync_lbaas_members(self, m_get_drv_lbaas, m_get_drv_project, - m_get_drv_subnets, m_k8s, m_svc_project_drv, - m_svc_sg_drv, m_get_cidr): - # REVISIT(ivc): test methods separately and verify ensure/release - m_get_cidr.return_value = '10.0.0.128/26' - project_id = str(uuid.uuid4()) - subnet_id = str(uuid.uuid4()) - expected_ip = '1.2.3.4' - expected_targets = {'1.1.1.1': (1, 2)} - crd = get_lb_crd() - - observed_targets = self._sync_lbaas_members_impl( - m_get_drv_lbaas, m_get_drv_project, m_get_drv_subnets, - subnet_id, project_id, crd) - - self.assertEqual(sorted(expected_targets.items()), observed_targets) - self.assertEqual(expected_ip, str(crd['status']['loadbalancer']['ip'])) - - @mock.patch('kuryr_kubernetes.utils.get_subnet_cidr') - @mock.patch('kuryr_kubernetes.controller.drivers.base.' - 'ServiceSecurityGroupsDriver.get_instance') - @mock.patch('kuryr_kubernetes.controller.drivers.base.' - 'ServiceProjectDriver.get_instance') - @mock.patch('kuryr_kubernetes.clients.get_kubernetes_client') - @mock.patch('kuryr_kubernetes.controller.drivers.base' - '.PodSubnetsDriver.get_instance') - @mock.patch('kuryr_kubernetes.controller.drivers.base' - '.PodProjectDriver.get_instance') - @mock.patch('kuryr_kubernetes.controller.drivers.base' - '.LBaaSDriver.get_instance') - @mock.patch('kuryr_kubernetes.controller.drivers.base' - '.NodesSubnetsDriver.get_instance', mock.Mock()) - def test_sync_lbaas_members_udp(self, m_get_drv_lbaas, - m_get_drv_project, m_get_drv_subnets, - m_k8s, m_svc_project_drv, m_svc_sg_drv, - m_get_cidr): - # REVISIT(ivc): test methods separately and verify ensure/release - m_get_cidr.return_value = '10.0.0.128/26' - project_id = str(uuid.uuid4()) - subnet_id = str(uuid.uuid4()) - expected_ip = "1.2.3.4" - expected_targets = {'1.1.1.1': (1, 2)} - - crd = get_lb_crd() - - observed_targets = self._sync_lbaas_members_impl( - m_get_drv_lbaas, m_get_drv_project, m_get_drv_subnets, - subnet_id, project_id, crd) - - self.assertEqual(sorted(expected_targets.items()), observed_targets) - self.assertEqual(expected_ip, str(crd['status']['loadbalancer']['ip'])) - - @mock.patch('kuryr_kubernetes.utils.get_subnet_cidr') - @mock.patch('kuryr_kubernetes.controller.drivers.base.' 
- 'ServiceSecurityGroupsDriver.get_instance') - @mock.patch('kuryr_kubernetes.controller.drivers.base.' - 'ServiceProjectDriver.get_instance') - @mock.patch('kuryr_kubernetes.clients.get_kubernetes_client') - @mock.patch('kuryr_kubernetes.controller.drivers.base' - '.PodSubnetsDriver.get_instance') - @mock.patch('kuryr_kubernetes.controller.drivers.base' - '.PodProjectDriver.get_instance') - @mock.patch('kuryr_kubernetes.controller.drivers.base' - '.LBaaSDriver.get_instance') - @mock.patch('kuryr_kubernetes.controller.drivers.base' - '.NodesSubnetsDriver.get_instance', mock.Mock()) - def test_sync_lbaas_members_svc_listener_port_edit( - self, m_get_drv_lbaas, m_get_drv_project, m_get_drv_subnets, - m_k8s, m_svc_project_drv, m_svc_sg_drv, m_get_cidr): - # REVISIT(ivc): test methods separately and verify ensure/release - m_get_cidr.return_value = '10.0.0.128/26' - project_id = str(uuid.uuid4()) - subnet_id = str(uuid.uuid4()) - expected_ip = '1.2.3.4' - crd = get_lb_crd() - - m_drv_lbaas = mock.Mock(wraps=FakeLBaaSDriver()) - m_drv_project = mock.Mock() - m_drv_project.get_project.return_value = project_id - m_drv_subnets = mock.Mock() - m_drv_subnets.get_subnets.return_value = { - subnet_id: mock.sentinel.subnet} - m_get_drv_lbaas.return_value = m_drv_lbaas - m_get_drv_project.return_value = m_drv_project - m_get_drv_subnets.return_value = m_drv_subnets - - handler = h_lb.KuryrLoadBalancerHandler() - - with mock.patch.object(handler, '_get_pod_subnet') as m_get_pod_subnet: - m_get_pod_subnet.return_value = subnet_id - handler._sync_lbaas_members(crd) - - self.assertEqual(expected_ip, str(crd['status']['loadbalancer']['ip'])) - - @mock.patch('kuryr_kubernetes.utils.get_subnet_cidr') - @mock.patch('kuryr_kubernetes.controller.drivers.base.' - 'ServiceSecurityGroupsDriver.get_instance') - @mock.patch('kuryr_kubernetes.controller.drivers.base.' 
- 'ServiceProjectDriver.get_instance') - @mock.patch('kuryr_kubernetes.clients.get_kubernetes_client') - @mock.patch('kuryr_kubernetes.controller.drivers.base' - '.PodSubnetsDriver.get_instance') - @mock.patch('kuryr_kubernetes.controller.drivers.base' - '.PodProjectDriver.get_instance') - @mock.patch('kuryr_kubernetes.controller.drivers.base' - '.LBaaSDriver.get_instance') - @mock.patch('kuryr_kubernetes.controller.drivers.base' - '.NodesSubnetsDriver.get_instance', mock.Mock()) - def test_add_new_members_udp(self, m_get_drv_lbaas, - m_get_drv_project, m_get_drv_subnets, - m_k8s, m_svc_project_drv, - m_svc_sg_drv, m_get_cidr): - m_get_cidr.return_value = '10.0.0.128/26' - project_id = str(uuid.uuid4()) - subnet_id = str(uuid.uuid4()) - crd = get_lb_crd() - - m_drv_lbaas = mock.Mock(wraps=FakeLBaaSDriver()) - m_drv_project = mock.Mock() - m_drv_project.get_project.return_value = project_id - m_drv_subnets = mock.Mock() - m_drv_subnets.get_subnets.return_value = { - subnet_id: mock.sentinel.subnet} - m_get_drv_lbaas.return_value = m_drv_lbaas - m_get_drv_project.return_value = m_drv_project - m_get_drv_subnets.return_value = m_drv_subnets - - handler = h_lb.KuryrLoadBalancerHandler() - member_added = handler._add_new_members(crd) - - self.assertEqual(member_added, False) - m_drv_lbaas.ensure_member.assert_not_called() - - @mock.patch('kuryr_kubernetes.utils.get_res_link') - @mock.patch('kuryr_kubernetes.clients.get_kubernetes_client') - @mock.patch('kuryr_kubernetes.controller.drivers.base' - '.LBaaSDriver.get_instance') - def test_reconcile_loadbalancers(self, m_get_drv_lbaas, m_k8s, - m_get_res_link): - loadbalancer_crds = get_lb_crds() - m_handler = mock.MagicMock(spec=h_lb.KuryrLoadBalancerHandler) - m_handler._drv_lbaas = m_get_drv_lbaas - lbaas = self.useFixture(k_fix.MockLBaaSClient()).client - lbaas.load_balancers.return_value = [] - lbaas.listeners.return_value = [] - lbaas.pools.return_value = [] - lbaas.members.return_value = [] - selflink = ('/apis/openstack.org/v1/namespaces/default/' - 'kuryrloadbalancers/test') - m_get_res_link.return_value = selflink - h_lb.KuryrLoadBalancerHandler._trigger_reconciliation( - m_handler, loadbalancer_crds) - filters = {} - lbaas.load_balancers.assert_called_once_with(**filters) - m_handler._reconcile_lb.assert_called_with({'id': mock.ANY, - 'selflink': selflink, - 'klb': mock.ANY}) - - @mock.patch('kuryr_kubernetes.clients.get_kubernetes_client') - @mock.patch('kuryr_kubernetes.controller.drivers.base' - '.LBaaSDriver.get_instance') - def test_reconcile_loadbalancers_in_sync(self, m_get_drv_lbaas, m_k8s): - loadbalancer_crds = get_lb_crds() - - m_handler = mock.MagicMock(spec=h_lb.KuryrLoadBalancerHandler) - m_handler._drv_lbaas = m_get_drv_lbaas - lbaas = self.useFixture(k_fix.MockLBaaSClient()).client - - loadbalancers_id = [{'id': '01234567890'}, {'id': '01234567891'}] - listeners_id = [{'id': '012345678912'}, {'id': '012345678913'}] - pools_id = [{'id': '1234567890'}, {'id': '1234567891'}] - members_id = [{"id": "0123456789a"}, {"id": "0123456789b"}] - lbaas.load_balancers.return_value = loadbalancers_id - lbaas.listeners.return_value = listeners_id - lbaas.pools.return_value = pools_id - lbaas.members.return_value = members_id - - h_lb.KuryrLoadBalancerHandler._trigger_reconciliation( - m_handler, loadbalancer_crds) - m_handler._reconcile_lb.assert_not_called() diff --git a/kuryr_kubernetes/tests/unit/controller/handlers/test_machine.py b/kuryr_kubernetes/tests/unit/controller/handlers/test_machine.py deleted file mode 100644 index 
bb3a1aea5..000000000 --- a/kuryr_kubernetes/tests/unit/controller/handlers/test_machine.py +++ /dev/null @@ -1,84 +0,0 @@ -# Copyright 2020 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from unittest import mock - -from kuryr_kubernetes import constants -from kuryr_kubernetes.controller.handlers import machine -from kuryr_kubernetes import exceptions -from kuryr_kubernetes.tests import base as test_base - - -class TestKuryrMachineHandler(test_base.TestCase): - @mock.patch( - 'kuryr_kubernetes.controller.drivers.base.NodesSubnetsDriver.' - 'get_instance') - def setUp(self, m_get_instance): - super(TestKuryrMachineHandler, self).setUp() - self.driver = mock.Mock() - m_get_instance.return_value = self.driver - self.handler = machine.MachineHandler() - - def test_on_present(self): - self.handler._bump_nps = mock.Mock() - self.driver.add_node.return_value = False - self.handler.on_present(mock.sentinel.machine) - self.driver.add_node.assert_called_once_with(mock.sentinel.machine) - self.handler._bump_nps.assert_not_called() - - def test_on_present_new(self): - self.handler._bump_nps = mock.Mock() - self.driver.add_node.return_value = True - self.handler.on_present(mock.sentinel.machine) - self.driver.add_node.assert_called_once_with(mock.sentinel.machine) - self.handler._bump_nps.assert_called_once() - - def test_on_deleted(self): - self.handler._bump_nps = mock.Mock() - self.driver.delete_node.return_value = False - self.handler.on_deleted(mock.sentinel.machine) - self.driver.delete_node.assert_called_once_with(mock.sentinel.machine) - self.handler._bump_nps.assert_not_called() - - def test_on_deleted_gone(self): - self.handler._bump_nps = mock.Mock() - self.driver.delete_node.return_value = True - self.handler.on_deleted(mock.sentinel.machine) - self.driver.delete_node.assert_called_once_with(mock.sentinel.machine) - self.handler._bump_nps.assert_called_once() - - @mock.patch('kuryr_kubernetes.clients.get_kubernetes_client') - def test_bump_nps(self, get_client): - m_k8s = mock.Mock() - get_client.return_value = m_k8s - m_k8s.get.return_value = { - 'items': [ - {'metadata': {'annotations': { - 'networkPolicyLink': mock.sentinel.link1}}}, - {'metadata': {'annotations': { - 'networkPolicyLink': mock.sentinel.link2}}}, - {'metadata': {'annotations': { - 'networkPolicyLink': mock.sentinel.link3}}}, - ] - } - m_k8s.annotate.side_effect = ( - None, exceptions.K8sResourceNotFound('NP'), None) - self.handler._bump_nps() - m_k8s.get.assert_called_once_with( - constants.K8S_API_CRD_KURYRNETWORKPOLICIES) - m_k8s.annotate.assert_has_calls([ - mock.call(mock.sentinel.link1, mock.ANY), - mock.call(mock.sentinel.link2, mock.ANY), - mock.call(mock.sentinel.link3, mock.ANY), - ]) diff --git a/kuryr_kubernetes/tests/unit/controller/handlers/test_namespace.py b/kuryr_kubernetes/tests/unit/controller/handlers/test_namespace.py deleted file mode 100644 index c06b3fdfb..000000000 --- a/kuryr_kubernetes/tests/unit/controller/handlers/test_namespace.py +++ /dev/null @@ -1,139 +0,0 @@ -# Copyright (c) 
2018 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from unittest import mock - -from kuryr_kubernetes.controller.drivers import base as drivers -from kuryr_kubernetes.controller.handlers import namespace -from kuryr_kubernetes import exceptions as k_exc -from kuryr_kubernetes.tests import base as test_base - - -class TestNamespaceHandler(test_base.TestCase): - - def setUp(self): - super(TestNamespaceHandler, self).setUp() - - self._project_id = mock.sentinel.project_id - self._subnets = mock.sentinel.subnets - - self._namespace_version = mock.sentinel.namespace_version - self._namespace_link = mock.sentinel.namespace_link - - self._namespace_name = 'ns-test' - self._namespace = { - 'metadata': {'name': self._namespace_name, - 'resourceVersion': self._namespace_version}, - 'status': {'phase': 'Active'} - } - self._crd_id = 'ns-' + self._namespace_name - - self._handler = mock.MagicMock(spec=namespace.NamespaceHandler) - - self._handler._drv_project = mock.Mock( - spec=drivers.NamespaceProjectDriver) - - self._get_project = self._handler._drv_project.get_project - self._update_labels = self._handler._update_labels - self._get_kns_crd = self._handler._get_kns_crd - self._add_kuryrnetwork_crd = self._handler._add_kuryrnetwork_crd - self._handle_namespace = self._handler._handle_namespace - - self._get_project.return_value = self._project_id - - def _get_crd(self): - crd = { - 'kind': 'KuryrNetwork', - 'metadata': { - 'name': self._namespace_name, - 'namespace': self._namespace_name, - }, - 'spec': {} - } - return crd - - @mock.patch.object(drivers.NamespaceProjectDriver, 'get_instance') - def test_init(self, m_get_project_driver): - project_driver = mock.sentinel.project_driver - m_get_project_driver.return_value = project_driver - - handler = namespace.NamespaceHandler() - self.assertEqual(project_driver, handler._drv_project) - - def test_on_present(self): - self._get_kns_crd.return_value = None - self._handle_namespace.return_value = True - - namespace.NamespaceHandler.on_present(self._handler, self._namespace) - - self._handle_namespace.assert_called_once() - self._get_kns_crd.assert_called_once_with( - self._namespace['metadata']['name']) - self._add_kuryrnetwork_crd.assert_called_once_with( - self._namespace, {}) - - def test_on_present_existing(self): - net_crd = self._get_crd() - self._get_kns_crd.return_value = net_crd - - namespace.NamespaceHandler.on_present(self._handler, self._namespace) - - self._handle_namespace.assert_not_called() - self._get_kns_crd.assert_called_once_with( - self._namespace['metadata']['name']) - self._update_labels.assert_called_once_with(net_crd, {}) - self._add_kuryrnetwork_crd.assert_not_called() - - def test_on_present_add_kuryrnetwork_crd_exception(self): - self._get_kns_crd.return_value = None - self._add_kuryrnetwork_crd.side_effect = k_exc.K8sClientException - self._handle_namespace.return_value = True - - self.assertRaises(k_exc.ResourceNotReady, - namespace.NamespaceHandler.on_present, - self._handler, 
self._namespace) - - self._handle_namespace.assert_called_once() - self._get_kns_crd.assert_called_once_with( - self._namespace['metadata']['name']) - self._add_kuryrnetwork_crd.assert_called_once_with( - self._namespace, {}) - - @mock.patch('kuryr_kubernetes.clients.get_kubernetes_client') - def test_handle_namespace_no_pods(self, m_get_k8s_client): - k8s = mock.MagicMock() - m_get_k8s_client.return_value = k8s - k8s.get.return_value = {"items": []} - self.assertFalse(namespace.NamespaceHandler._handle_namespace( - self._handler, "test")) - k8s.get.assert_called_once() - - @mock.patch('kuryr_kubernetes.clients.get_kubernetes_client') - def test_handle_namespace_host_network_pods(self, m_get_k8s_client): - k8s = mock.MagicMock() - m_get_k8s_client.return_value = k8s - k8s.get.return_value = {"items": [{"spec": {"hostNetwork": True}}]} - self.assertFalse(namespace.NamespaceHandler._handle_namespace( - self._handler, "test")) - k8s.get.assert_called_once() - - @mock.patch('kuryr_kubernetes.clients.get_kubernetes_client') - def test_handle_namespace(self, m_get_k8s_client): - k8s = mock.MagicMock() - m_get_k8s_client.return_value = k8s - k8s.get.return_value = {"items": [{"spec": {}}]} - self.assertTrue(namespace.NamespaceHandler._handle_namespace( - self._handler, "test")) - k8s.get.assert_called_once() diff --git a/kuryr_kubernetes/tests/unit/controller/handlers/test_pipeline.py b/kuryr_kubernetes/tests/unit/controller/handlers/test_pipeline.py deleted file mode 100644 index b41d78691..000000000 --- a/kuryr_kubernetes/tests/unit/controller/handlers/test_pipeline.py +++ /dev/null @@ -1,61 +0,0 @@ -# Copyright (c) 2016 Mirantis, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from unittest import mock - -from kuryr_kubernetes.controller.handlers import pipeline as h_pipeline -from kuryr_kubernetes.handlers import dispatch as h_dis -from kuryr_kubernetes.handlers import k8s_base as h_k8s -from kuryr_kubernetes.tests import base as test_base - - -class TestControllerPipeline(test_base.TestCase): - @mock.patch('kuryr_kubernetes.handlers.logging.LogExceptions') - @mock.patch('kuryr_kubernetes.handlers.retry.Retry') - def test_wrap_consumer(self, m_retry_type, m_logging_type): - consumer = mock.sentinel.consumer - retry_handler = mock.sentinel.retry_handler - logging_handler = mock.sentinel.logging_handler - m_retry_type.return_value = retry_handler - m_logging_type.return_value = logging_handler - thread_group = mock.sentinel.thread_group - - with mock.patch.object(h_dis.EventPipeline, '__init__'): - pipeline = h_pipeline.ControllerPipeline(thread_group) - ret = pipeline._wrap_consumer(consumer) - - self.assertEqual(logging_handler, ret) - m_logging_type.assert_called_with(retry_handler, - ignore_exceptions=mock.ANY) - m_retry_type.assert_called_with(consumer, exceptions=mock.ANY) - - @mock.patch('kuryr_kubernetes.handlers.logging.LogExceptions') - @mock.patch('kuryr_kubernetes.handlers.asynchronous.Async') - def test_wrap_dispatcher(self, m_async_type, m_logging_type): - dispatcher = mock.sentinel.dispatcher - async_handler = mock.sentinel.async_handler - logging_handler = mock.sentinel.logging_handler - m_async_type.return_value = async_handler - m_logging_type.return_value = logging_handler - thread_group = mock.sentinel.thread_group - - with mock.patch.object(h_dis.EventPipeline, '__init__'): - pipeline = h_pipeline.ControllerPipeline(thread_group) - ret = pipeline._wrap_dispatcher(dispatcher) - - self.assertEqual(logging_handler, ret) - m_logging_type.assert_called_with(async_handler) - m_async_type.assert_called_with(dispatcher, thread_group, - h_k8s.object_uid, h_k8s.object_info) diff --git a/kuryr_kubernetes/tests/unit/controller/handlers/test_pod_label.py b/kuryr_kubernetes/tests/unit/controller/handlers/test_pod_label.py deleted file mode 100644 index cfd716e41..000000000 --- a/kuryr_kubernetes/tests/unit/controller/handlers/test_pod_label.py +++ /dev/null @@ -1,126 +0,0 @@ -# Copyright 2018 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from unittest import mock - -from kuryr_kubernetes import constants as k_const -from kuryr_kubernetes.controller.drivers import base as drivers -from kuryr_kubernetes.controller.handlers import pod_label as p_label -from kuryr_kubernetes.tests import base as test_base - - -class TestPodLabelHandler(test_base.TestCase): - - def setUp(self): - super(TestPodLabelHandler, self).setUp() - - self._project_id = mock.sentinel.project_id - self._sg_id = mock.sentinel.sg_id - - self._pod_version = mock.sentinel.pod_version - self._pod_link = mock.sentinel.pod_link - self._pod = { - 'metadata': {'resourceVersion': self._pod_version, - 'namespace': 'default'}, - 'status': {'phase': k_const.K8S_POD_STATUS_PENDING}, - 'spec': {'hostNetwork': False, - 'nodeName': 'hostname'} - } - self._handler = mock.MagicMock(spec=p_label.PodLabelHandler) - self._handler._drv_project = mock.Mock(spec=drivers.PodProjectDriver) - self._handler._drv_sg = mock.Mock(spec=drivers.PodSecurityGroupsDriver) - self._handler._drv_vif_pool = mock.MagicMock( - spec=drivers.VIFPoolDriver) - - self._get_project = self._handler._drv_project.get_project - self._get_security_groups = self._handler._drv_sg.get_security_groups - self._set_vif_driver = self._handler._drv_vif_pool.set_vif_driver - self._get_pod_info = self._handler._get_pod_info - self._set_pod_info = self._handler._set_pod_info - self._has_vifs = self._handler._has_vifs - self._update_vif_sgs = self._handler._drv_vif_pool.update_vif_sgs - - self._get_project.return_value = self._project_id - self._get_security_groups.return_value = [self._sg_id] - - @mock.patch.object(drivers.VIFPoolDriver, 'get_instance') - @mock.patch.object(drivers.PodSecurityGroupsDriver, 'get_instance') - @mock.patch.object(drivers.PodProjectDriver, 'get_instance') - @mock.patch.object(drivers.LBaaSDriver, 'get_instance') - def test_init(self, m_get_lbaas_driver, m_get_project_driver, - m_get_sg_driver, m_get_vif_pool_driver): - project_driver = mock.sentinel.project_driver - sg_driver = mock.sentinel.sg_driver - lbaas_driver = mock.sentinel.lbaas_driver - vif_pool_driver = mock.Mock(spec=drivers.VIFPoolDriver) - m_get_lbaas_driver.return_value = lbaas_driver - m_get_project_driver.return_value = project_driver - m_get_sg_driver.return_value = sg_driver - m_get_vif_pool_driver.return_value = vif_pool_driver - - handler = p_label.PodLabelHandler() - - self.assertEqual(lbaas_driver, handler._drv_lbaas) - self.assertEqual(project_driver, handler._drv_project) - self.assertEqual(sg_driver, handler._drv_sg) - self.assertEqual(vif_pool_driver, handler._drv_vif_pool) - - @mock.patch('kuryr_kubernetes.controller.drivers.utils.get_services') - def test_on_present(self, m_get_services): - m_get_services.return_value = {"items": []} - self._has_vifs.return_value = True - self._get_pod_info.return_value = ({'test1': 'test'}, '192.168.0.1') - - p_label.PodLabelHandler.on_present(self._handler, self._pod) - - self._has_vifs.assert_called_once_with(self._pod) - self._get_pod_info.assert_called_once_with(self._pod) - self._get_project.assert_called_once() - self._get_security_groups.assert_called_once() - self._update_vif_sgs.assert_called_once_with(self._pod, [self._sg_id]) - self._set_pod_info.assert_called_once_with(self._pod, (None, None)) - - def test_on_present_no_state(self): - self._has_vifs.return_value = False - - resp = p_label.PodLabelHandler.on_present(self._handler, self._pod) - - self.assertIsNone(resp) - self._has_vifs.assert_called_once_with(self._pod) - self._get_pod_info.assert_not_called() - 
self._set_pod_info.assert_not_called() - - @mock.patch('kuryr_kubernetes.controller.drivers.utils.get_services') - def test_on_present_no_labels(self, m_get_services): - self._has_vifs.return_value = True - self._get_pod_info.return_value = None, None - - p_label.PodLabelHandler.on_present(self._handler, self._pod) - - self._has_vifs.assert_called_once_with(self._pod) - self._get_pod_info.assert_called_once_with(self._pod) - self._set_pod_info.assert_not_called() - - def test_on_present_no_changes(self): - self._has_vifs.return_value = True - pod_with_label = self._pod.copy() - pod_with_label['metadata']['labels'] = {'test1': 'test'} - pod_with_label['status']['podIP'] = '192.168.0.1' - self._get_pod_info.return_value = ({'test1': 'test'}, '192.168.0.1') - - p_label.PodLabelHandler.on_present(self._handler, pod_with_label) - - self._has_vifs.assert_called_once_with(pod_with_label) - self._get_pod_info.assert_called_once_with(pod_with_label) - self._set_pod_info.assert_not_called() diff --git a/kuryr_kubernetes/tests/unit/controller/handlers/test_policy.py b/kuryr_kubernetes/tests/unit/controller/handlers/test_policy.py deleted file mode 100644 index 129d1665b..000000000 --- a/kuryr_kubernetes/tests/unit/controller/handlers/test_policy.py +++ /dev/null @@ -1,80 +0,0 @@ -# Copyright 2018 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from unittest import mock - -from kuryr_kubernetes.controller.drivers import base as drivers -from kuryr_kubernetes.controller.handlers import policy -from kuryr_kubernetes.tests import base as test_base - - -class TestPolicyHandler(test_base.TestCase): - - @mock.patch.object(drivers.NetworkPolicyDriver, 'get_instance') - @mock.patch('kuryr_kubernetes.clients.get_kubernetes_client') - def setUp(self, m_get_k8s, m_get_np): - super(TestPolicyHandler, self).setUp() - - self._project_id = mock.sentinel.project_id - self._policy_name = 'np-test' - self._policy_uid = mock.sentinel.policy_uid - self._policy_link = mock.sentinel.policy_link - - self._policy = { - 'apiVersion': 'networking.k8s.io/v1', - 'kind': 'NetworkPolicy', - 'metadata': { - 'name': self._policy_name, - 'resourceVersion': '2259309', - 'generation': 1, - 'creationTimestamp': '2018-09-18T14:09:51Z', - 'namespace': 'default', - 'annotations': {}, - 'uid': self._policy_uid - }, - 'spec': { - 'egress': [{'ports': [{'port': 5978, 'protocol': 'TCP'}]}], - 'ingress': [{'ports': [{'port': 6379, 'protocol': 'TCP'}]}], - 'policyTypes': ['Ingress', 'Egress'] - } - } - - self.k8s = mock.Mock() - m_get_k8s.return_value = self.k8s - self.m_get_k8s = m_get_k8s - - self.np_driver = mock.Mock() - m_get_np.return_value = self.np_driver - self._m_get_np = m_get_np - - self.handler = policy.NetworkPolicyHandler() - - def test_init(self): - self.m_get_k8s.assert_called_once() - self._m_get_np.assert_called_once() - - self.assertEqual(self.np_driver, self.handler._drv_policy) - self.assertEqual(self.k8s, self.handler.k8s) - - def test_on_finalize(self): - self.handler.on_finalize(self._policy) - self.np_driver.release_network_policy.assert_called_once_with( - self._policy) - - def test_on_present(self): - self.handler.on_present(self._policy) - self.k8s.add_finalizer.assert_called_once_with( - self._policy, 'kuryr.openstack.org/networkpolicy-finalizer') - self.np_driver.ensure_network_policy.assert_called_once_with( - self._policy) diff --git a/kuryr_kubernetes/tests/unit/controller/handlers/test_vif.py b/kuryr_kubernetes/tests/unit/controller/handlers/test_vif.py deleted file mode 100644 index 58ddfd57b..000000000 --- a/kuryr_kubernetes/tests/unit/controller/handlers/test_vif.py +++ /dev/null @@ -1,282 +0,0 @@ -# Copyright (c) 2016 Mirantis, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from unittest import mock - -from os_vif import objects as os_obj - -from kuryr_kubernetes import constants as k_const -from kuryr_kubernetes.controller.drivers import base as drivers -from kuryr_kubernetes.controller.handlers import vif as h_vif -from kuryr_kubernetes import exceptions as k_exc -from kuryr_kubernetes.objects import vif -from kuryr_kubernetes.tests import base as test_base -from kuryr_kubernetes.tests import fake - - -class TestVIFHandler(test_base.TestCase): - - def setUp(self): - super(TestVIFHandler, self).setUp() - - self._project_id = mock.sentinel.project_id - self._subnets = mock.sentinel.subnets - self._security_groups = mock.sentinel.security_groups - self._vif = os_obj.vif.VIFBase() - self._vif.active = True - self._vif_serialized = mock.sentinel.vif_serialized - self._multi_vif_drv = mock.MagicMock(spec=drivers.MultiVIFDriver) - self._additional_vifs = [] - self._state = vif.PodState(default_vif=self._vif) - - self._pod_version = mock.sentinel.pod_version - self._pod_link = mock.sentinel.pod_link - self._pod_namespace = 'namespace1' - self._pod_uid = mock.sentinel.pod_uid - self._pod_name = 'pod1' - self._pod = fake.get_k8s_pod() - self._pod['status'] = {'phase': k_const.K8S_POD_STATUS_PENDING} - self._pod['spec'] = {'hostNetwork': False, 'nodeName': 'hostname'} - - self._kp_version = mock.sentinel.kp_version - self._kp_link = mock.sentinel.kp_link - self._kp = {'apiVersion': 'openstack.org/v1', - 'kind': 'KuryrPort', - 'metadata': {'resourceVersion': self._kp_version, - 'selfLink': mock.sentinel.kp_link, - 'namespace': self._pod_namespace, - 'labels': mock.ANY}, - 'spec': {'podUid': self._pod_uid, - 'podNodeName': 'hostname'}, - 'status': {'vifs': {}}} - - self._handler = mock.MagicMock(spec=h_vif.VIFHandler) - self._handler._drv_project = mock.Mock(spec=drivers.PodProjectDriver) - self._handler._drv_subnets = mock.Mock(spec=drivers.PodSubnetsDriver) - self._handler._drv_sg = mock.Mock(spec=drivers.PodSecurityGroupsDriver) - self._handler._drv_vif = mock.Mock(spec=drivers.PodVIFDriver) - self._handler._drv_vif_pool = mock.MagicMock( - spec=drivers.VIFPoolDriver) - self._handler._drv_multi_vif = [self._multi_vif_drv] - self._handler.k8s = mock.Mock() - - self._get_project = self._handler._drv_project.get_project - self._get_subnets = self._handler._drv_subnets.get_subnets - self._get_security_groups = self._handler._drv_sg.get_security_groups - self._set_vifs_driver = self._handler._drv_vif_pool.set_vif_driver - self._request_vif = self._handler._drv_vif_pool.request_vif - self._release_vif = self._handler._drv_vif_pool.release_vif - self._activate_vif = self._handler._drv_vif_pool.activate_vif - self._is_pod_scheduled = self._handler._is_pod_scheduled - self._request_additional_vifs = \ - self._multi_vif_drv.request_additional_vifs - - self._request_vif.return_value = self._vif - self._request_additional_vifs.return_value = self._additional_vifs - self._is_pod_scheduled.return_value = True - self._get_project.return_value = self._project_id - self._get_subnets.return_value = self._subnets - self._get_security_groups.return_value = self._security_groups - self._set_vifs_driver.return_value = mock.Mock( - spec=drivers.PodVIFDriver) - - def test_is_pod_scheduled(self): - self.assertTrue(h_vif.VIFHandler._is_pod_scheduled(self._pod)) - - def test_is_not_pending(self): - self._pod['status']['phase'] = 'Unknown' - self.assertFalse(h_vif.VIFHandler._is_pod_scheduled(self._pod)) - - def test_is_pending_no_node(self): - self._pod['spec']['nodeName'] = None - 
self.assertFalse(h_vif.VIFHandler._is_pod_scheduled(self._pod)) - - def test_unset_pending(self): - self.assertFalse(h_vif.VIFHandler._is_pod_scheduled({'spec': {}, - 'status': {}})) - - @mock.patch('kuryr_kubernetes.clients.get_kubernetes_client') - @mock.patch('kuryr_kubernetes.utils.is_host_network') - @mock.patch('kuryr_kubernetes.controller.drivers.utils.get_kuryrport') - def test_on_present_host_network(self, m_get_kuryrport, m_host_network, - m_get_k8s_client): - m_get_kuryrport.return_value = self._kp - m_host_network.return_value = True - k8s = mock.MagicMock() - m_get_k8s_client.return_value = k8s - - h_vif.VIFHandler.on_present(self._handler, self._pod) - - k8s.add_finalizer.assert_not_called() - m_get_kuryrport.assert_not_called() - self._request_vif.assert_not_called() - self._request_additional_vifs.assert_not_called() - self._activate_vif.assert_not_called() - - @mock.patch('kuryr_kubernetes.controller.drivers.utils.get_k8s_resource') - @mock.patch('kuryr_kubernetes.utils.is_pod_completed') - @mock.patch('kuryr_kubernetes.utils.is_host_network') - @mock.patch('kuryr_kubernetes.controller.drivers.utils.get_kuryrport') - def test_on_present_not_scheduled(self, m_get_kuryrport, m_host_network, - m_is_pod_completed, m_get_k8s_res): - m_get_kuryrport.return_value = self._kp - m_host_network.return_value = False - m_is_pod_completed.return_value = False - m_get_k8s_res.return_value = {} - - h_vif.VIFHandler.on_present(self._handler, self._pod) - - self._handler.k8s.add_finalizer.assert_called() - m_get_kuryrport.assert_called() - self._request_vif.assert_not_called() - self._request_additional_vifs.assert_not_called() - self._activate_vif.assert_not_called() - - @mock.patch('kuryr_kubernetes.utils.is_pod_completed') - @mock.patch('kuryr_kubernetes.clients.get_kubernetes_client') - @mock.patch('kuryr_kubernetes.controller.drivers.utils.get_kuryrport') - def test_on_present_on_completed_without_kuryrport(self, m_get_kuryrport, - m_get_k8s_client, - m_is_pod_completed): - m_is_pod_completed.return_value = True - m_get_kuryrport.return_value = None - k8s = mock.MagicMock() - m_get_k8s_client.return_value = k8s - - h_vif.VIFHandler.on_present(self._handler, self._pod) - - self._handler.on_finalize.assert_called() - self._request_vif.assert_not_called() - self._request_additional_vifs.assert_not_called() - self._activate_vif.assert_not_called() - - @mock.patch('kuryr_kubernetes.utils.is_pod_completed') - @mock.patch('kuryr_kubernetes.clients.get_kubernetes_client') - @mock.patch('kuryr_kubernetes.controller.drivers.utils.get_kuryrport') - def test_on_present_on_completed_with_kuryrport(self, m_get_kuryrport, - m_get_k8s_client, - m_is_pod_completed): - m_is_pod_completed.return_value = True - m_get_kuryrport.return_value = mock.MagicMock() - k8s = mock.MagicMock() - m_get_k8s_client.return_value = k8s - - h_vif.VIFHandler.on_present(self._handler, self._pod) - - self._handler.on_finalize.assert_called() - self._request_vif.assert_not_called() - self._request_additional_vifs.assert_not_called() - self._activate_vif.assert_not_called() - - @mock.patch('kuryr_kubernetes.controller.drivers.utils.get_k8s_resource') - @mock.patch('kuryr_kubernetes.utils.is_host_network') - @mock.patch('kuryr_kubernetes.controller.drivers.utils.get_kuryrport') - def test_on_present_create(self, m_get_kuryrport, m_host_network, - m_get_k8s_res): - m_get_kuryrport.return_value = None - m_host_network.return_value = False - m_get_k8s_res.return_value = {} - - h_vif.VIFHandler.on_present(self._handler, self._pod) 
- - add_finalizer = self._handler.k8s.add_finalizer - add_finalizer.assert_called_once_with(self._pod, k_const.POD_FINALIZER) - m_get_kuryrport.assert_called_once_with(self._pod) - self._handler._add_kuryrport_crd.assert_called_once_with(self._pod) - - @mock.patch('kuryr_kubernetes.controller.drivers.utils.get_k8s_resource') - @mock.patch('kuryr_kubernetes.utils.is_host_network') - @mock.patch('kuryr_kubernetes.controller.drivers.utils.get_kuryrport') - def test_on_present_update(self, m_get_kuryrport, m_host_network, - m_get_k8s_res): - m_get_kuryrport.return_value = self._kp - m_host_network.return_value = False - m_get_k8s_res.return_value = {} - - h_vif.VIFHandler.on_present(self._handler, self._pod) - - add_finalizer = self._handler.k8s.add_finalizer - add_finalizer.assert_called_once_with(self._pod, k_const.POD_FINALIZER) - m_get_kuryrport.assert_called_once_with(self._pod) - self._handler._add_kuryrport_crd.assert_not_called() - - @mock.patch('kuryr_kubernetes.controller.drivers.utils.get_k8s_resource') - @mock.patch('kuryr_kubernetes.utils.is_host_network') - @mock.patch('kuryr_kubernetes.controller.drivers.utils.get_kuryrport') - def test_on_present_upgrade(self, m_get_kuryrport, m_host_network, - m_get_k8s_res): - m_get_kuryrport.return_value = self._kp - m_host_network.return_value = False - m_get_k8s_res.return_value = {} - - h_vif.VIFHandler.on_present(self._handler, self._pod) - - add_finalizer = self._handler.k8s.add_finalizer - add_finalizer.assert_called_once_with(self._pod, k_const.POD_FINALIZER) - m_get_kuryrport.assert_called() - self._request_vif.assert_not_called() - self._request_additional_vifs.assert_not_called() - self._activate_vif.assert_not_called() - - @mock.patch('kuryr_kubernetes.controller.drivers.utils.get_k8s_resource') - @mock.patch('kuryr_kubernetes.utils.is_host_network') - def test_on_present_pod_finalizer_exception(self, m_host_network, - m_get_k8s_res): - m_host_network.return_value = False - m_get_k8s_res.return_value = {} - self._handler.k8s.add_finalizer.side_effect = k_exc.K8sClientException - - self.assertRaises(k_exc.K8sClientException, - h_vif.VIFHandler.on_present, self._handler, - self._pod) - - add_finalizer = self._handler.k8s.add_finalizer - add_finalizer.assert_called_once_with(self._pod, k_const.POD_FINALIZER) - - def test_on_finalize_crd(self): - self._handler.k8s.get.return_value = self._kp - - h_vif.VIFHandler.on_finalize(self._handler, self._pod) - - self._handler.k8s.delete.assert_called_once_with( - h_vif.KURYRPORT_URI.format( - ns=self._pod["metadata"]["namespace"], - crd=self._pod["metadata"]["name"])) - - def test_on_finalize_crd_exception(self): - self._handler.k8s.get.return_value = self._kp - self._handler.k8s.delete.side_effect = k_exc.K8sClientException - - self.assertRaises(k_exc.ResourceNotReady, h_vif.VIFHandler - .on_finalize, self._handler, self._pod) - - self._handler.k8s.delete.assert_called_once_with( - h_vif.KURYRPORT_URI.format( - ns=self._pod["metadata"]["namespace"], - crd=self._pod["metadata"]["name"])) - - def test_on_finalize_crd_not_found(self): - self._handler.k8s.get.return_value = self._kp - (self._handler.k8s.delete - .side_effect) = k_exc.K8sResourceNotFound(self._pod) - - h_vif.VIFHandler.on_finalize(self._handler, self._pod) - - self._handler.k8s.delete.assert_called_once_with( - h_vif.KURYRPORT_URI.format( - ns=self._pod["metadata"]["namespace"], - crd=self._pod["metadata"]["name"])) - (self._handler.k8s.remove_finalizer - .assert_called_once_with(self._pod, k_const.POD_FINALIZER)) diff --git 
a/kuryr_kubernetes/tests/unit/controller/managers/__init__.py b/kuryr_kubernetes/tests/unit/controller/managers/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/kuryr_kubernetes/tests/unit/controller/managers/test_health.py b/kuryr_kubernetes/tests/unit/controller/managers/test_health.py deleted file mode 100644 index ff4a0c457..000000000 --- a/kuryr_kubernetes/tests/unit/controller/managers/test_health.py +++ /dev/null @@ -1,225 +0,0 @@ -# Copyright 2018 Maysa de Macedo Souza. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from kuryr_kubernetes.controller.managers import health -from kuryr_kubernetes.handlers import health as h_health -from kuryr_kubernetes.tests import base -from kuryr_kubernetes.tests.unit import kuryr_fixtures as k_fix -from unittest import mock - -from oslo_config import cfg as oslo_cfg - - -def get_quota_obj(): - return { - 'quota': { - 'subnet': { - 'used': 50, - 'limit': 100, - 'reserved': 0 - }, - 'network': { - 'used': 50, - 'limit': 100, - 'reserved': 0 - }, - 'floatingip': { - 'used': 25, - 'limit': 50, - 'reserved': 0 - }, - 'subnetpool': { - 'used': 0, - 'limit': -1, - 'reserved': 0 - }, - 'security_group_rule': { - 'used': 50, - 'limit': 100, - 'reserved': 0 - }, - 'security_group': { - 'used': 5, - 'limit': 10, - 'reserved': 0 - }, - 'router': { - 'used': 5, - 'limit': 10, - 'reserved': 0 - }, - 'rbac_policy': { - 'used': 5, - 'limit': 10, - 'reserved': 0 - }, - 'port': { - 'used': 250, - 'limit': 500, - 'reserved': 0 - } - } - } - - -class _TestHandler(h_health.HealthHandler): - def is_alive(self): - pass - - def is_ready(self): - pass - - -class TestHealthServer(base.TestCase): - - def setUp(self): - super(TestHealthServer, self).setUp() - self.srv = health.HealthServer() - self.srv.application.testing = True - self.test_client = self.srv.application.test_client() - - @mock.patch('kuryr_kubernetes.controller.managers.health.HealthServer.' - '_components_ready') - @mock.patch('os.path.exists') - @mock.patch('kuryr_kubernetes.controller.managers.health.HealthServer.' - 'verify_keystone_connection') - @mock.patch('kuryr_kubernetes.controller.managers.health.HealthServer.' - 'verify_k8s_connection') - def test_readiness(self, m_verify_k8s_conn, m_verify_keystone_conn, - m_exist, m_components_ready): - m_verify_k8s_conn.return_value = True, 200 - m_exist.return_value = True - m_components_ready.return_value = True - - resp = self.test_client.get('/ready') - - m_verify_k8s_conn.assert_called_once() - m_verify_keystone_conn.assert_called_once() - m_components_ready.assert_called_once() - - self.assertEqual(200, resp.status_code) - self.assertEqual('ok', resp.data.decode()) - - @mock.patch('os.path.exists') - def test_readiness_not_found(self, m_exist): - m_exist.return_value = False - oslo_cfg.CONF.set_override('vif_pool_driver', 'neutron', - group='kubernetes') - resp = self.test_client.get('/ready') - self.assertEqual(404, resp.status_code) - - @mock.patch('kuryr_kubernetes.controller.managers.health.HealthServer.' 
- 'verify_k8s_connection') - @mock.patch('os.path.exists') - def test_readiness_k8s_error(self, m_exist, m_verify_k8s_conn): - m_exist.return_value = True - m_verify_k8s_conn.return_value = False - resp = self.test_client.get('/ready') - - m_verify_k8s_conn.assert_called_once() - self.assertEqual(500, resp.status_code) - - @mock.patch('kuryr_kubernetes.controller.managers.health.HealthServer.' - 'verify_keystone_connection') - @mock.patch('kuryr_kubernetes.controller.managers.health.HealthServer.' - 'verify_k8s_connection') - @mock.patch('os.path.exists') - def test_readiness_unauthorized(self, m_exist, m_verify_k8s_conn, - m_verify_keystone_conn): - m_exist.return_value = True - m_verify_k8s_conn.return_value = True, 200 - m_verify_keystone_conn.side_effect = Exception - resp = self.test_client.get('/ready') - - m_verify_keystone_conn.assert_called_once() - self.assertEqual(500, resp.status_code) - - @mock.patch('kuryr_kubernetes.controller.managers.health.HealthServer.' - '_components_ready') - @mock.patch('os.path.exists') - @mock.patch('kuryr_kubernetes.controller.managers.health.HealthServer.' - 'verify_keystone_connection') - @mock.patch('kuryr_kubernetes.controller.managers.health.HealthServer.' - 'verify_k8s_connection') - def test_readiness_neutron_error(self, m_verify_k8s_conn, - m_verify_keystone_conn, - m_exist, m_components_ready): - m_components_ready.side_effect = Exception - - resp = self.test_client.get('/ready') - - m_components_ready.assert_called_once() - self.assertEqual(500, resp.status_code) - - @mock.patch('kuryr_kubernetes.controller.managers.health.HealthServer.' - '_components_ready') - @mock.patch('os.path.exists') - @mock.patch('kuryr_kubernetes.controller.managers.health.HealthServer.' - 'verify_keystone_connection') - @mock.patch('kuryr_kubernetes.controller.managers.health.HealthServer.' 
- 'verify_k8s_connection') - def test_readiness_components_ready_error(self, m_verify_k8s_conn, - m_verify_keystone_conn, - m_exist, m_components_ready): - m_components_ready.return_value = False - - resp = self.test_client.get('/ready') - - m_components_ready.assert_called_once() - self.assertEqual(500, resp.status_code) - - @mock.patch.object(_TestHandler, 'is_ready') - def test__components_ready(self, m_status): - os_net = self.useFixture(k_fix.MockNetworkClient()).client - os_net.get_quota.return_value = get_quota_obj() - self.srv._registry = [_TestHandler()] - m_status.return_value = True - - resp = self.srv._components_ready() - - m_status.assert_called_once() - self.assertIs(resp, True) - os_net.get_quota.assert_called_once() - - @mock.patch.object(_TestHandler, 'is_ready') - def test__components_ready_error(self, m_status): - os_net = self.useFixture(k_fix.MockNetworkClient()).client - os_net.get_quota.return_value = get_quota_obj() - self.srv._registry = [_TestHandler()] - m_status.return_value = False - - resp = self.srv._components_ready() - - m_status.assert_called_once() - self.assertIs(resp, False) - os_net.get_quota.assert_called_once() - - @mock.patch.object(_TestHandler, 'is_alive') - def test_liveness(self, m_status): - m_status.return_value = True - self.srv._registry = [_TestHandler()] - - resp = self.test_client.get('/alive') - - m_status.assert_called_once() - self.assertEqual(200, resp.status_code) - - @mock.patch.object(_TestHandler, 'is_alive') - def test_liveness_error(self, m_status): - m_status.return_value = False - self.srv._registry = [_TestHandler()] - resp = self.test_client.get('/alive') - - m_status.assert_called_once() - self.assertEqual(500, resp.status_code) diff --git a/kuryr_kubernetes/tests/unit/controller/managers/test_pool.py b/kuryr_kubernetes/tests/unit/controller/managers/test_pool.py deleted file mode 100644 index d00395a8d..000000000 --- a/kuryr_kubernetes/tests/unit/controller/managers/test_pool.py +++ /dev/null @@ -1,355 +0,0 @@ -# Copyright 2017 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
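# The health-manager tests above exercise a /ready endpoint that combines
# Kubernetes/Keystone connectivity with a _components_ready() check that
# inspects Neutron quotas. A minimal sketch of the quota-headroom part,
# assuming the dict shape produced by get_quota_obj() above;
# has_quota_headroom() is an illustrative name, not the actual
# kuryr-kubernetes helper.

def has_quota_headroom(quota):
    """Return True if every resource still has unused quota.

    A limit of -1 means 'unlimited' in Neutron and is always healthy.
    """
    for resource, values in quota['quota'].items():
        limit = values['limit']
        if limit == -1:
            continue
        if values['used'] + values['reserved'] >= limit:
            return False
    return True

# Against the fixture above, ports sit at 250/500, so readiness passes;
# bumping 'used' to 500 would flip the check to False.
assert has_quota_headroom(
    {'quota': {'port': {'used': 250, 'limit': 500, 'reserved': 0}}})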
-from unittest import mock - -from http.server import BaseHTTPRequestHandler - -from oslo_serialization import jsonutils - -from kuryr_kubernetes.controller.managers import pool as m_pool -from kuryr_kubernetes.tests import base as test_base - - -class TestRequestHandler(test_base.TestCase): - - def setUp(self): - super(TestRequestHandler, self).setUp() - client_address = 'localhost' - server = '/tmp/server.lock' - req = mock.MagicMock() - with mock.patch.object(BaseHTTPRequestHandler, '__init__') as m_http: - m_http.return_value = None - self._req_handler = m_pool.RequestHandler(req, client_address, - server) - self._req_handler.rfile = mock.Mock() - self._req_handler.wfile = mock.Mock() - - def _do_POST_helper(self, method, path, headers, body, expected_resp, - trigger_exception, trunk_ips, num_ports=None): - self._req_handler.headers = headers - self._req_handler.path = path - - with mock.patch.object(self._req_handler.rfile, 'read') as m_read,\ - mock.patch.object(self._req_handler, - '_create_subports') as m_create,\ - mock.patch.object(self._req_handler, - '_delete_subports') as m_delete: - m_read.return_value = body - if trigger_exception: - m_create.side_effect = Exception - m_delete.side_effect = Exception - - with mock.patch.object(self._req_handler, - 'send_header') as m_send_header,\ - mock.patch.object(self._req_handler, - 'end_headers') as m_end_headers,\ - mock.patch.object(self._req_handler.wfile, - 'write') as m_write: - self._req_handler.do_POST() - - if method == 'create': - if trunk_ips: - m_create.assert_called_once_with(num_ports, trunk_ips) - else: - m_create.assert_not_called() - if method == 'delete': - m_delete.assert_called_once_with(trunk_ips) - - m_send_header.assert_called_once_with('Content-Length', - len(expected_resp)) - m_end_headers.assert_called_once() - m_write.assert_called_once_with(expected_resp) - - def _do_GET_helper(self, method, method_resp, path, headers, body, - expected_resp, trigger_exception, pool_key=None): - self._req_handler.headers = headers - self._req_handler.path = path - - with mock.patch.object(self._req_handler.rfile, 'read') as m_read,\ - mock.patch.object(self._req_handler, - '_list_pools') as m_list,\ - mock.patch.object(self._req_handler, - '_show_pool') as m_show: - m_read.return_value = body - if trigger_exception: - m_list.side_effect = Exception - m_show.side_effect = Exception - else: - m_list.return_value = method_resp - m_show.return_value = method_resp - - with mock.patch.object(self._req_handler, - 'send_header') as m_send_header,\ - mock.patch.object(self._req_handler, - 'end_headers') as m_end_headers,\ - mock.patch.object(self._req_handler.wfile, - 'write') as m_write: - self._req_handler.do_GET() - - if method == 'list': - m_list.assert_called_once() - if method == 'show': - if pool_key and len(pool_key) == 3: - m_show.assert_called_once_with( - (pool_key[0], pool_key[1], - tuple(sorted(pool_key[2])))) - else: - m_show.assert_not_called() - - m_send_header.assert_called_once_with('Content-Length', - len(expected_resp)) - m_end_headers.assert_called_once() - m_write.assert_called_once_with(expected_resp) - - def test_do_POST_populate(self): - method = 'create' - path = "http://localhost/populatePool" - trunk_ips = ["10.0.0.6"] - num_ports = 3 - body = jsonutils.dumps({"trunks": trunk_ips, - "num_ports": num_ports}) - headers = {'Content-Type': 'application/json', 'Connection': 'close'} - headers['Content-Length'] = len(body) - trigger_exception = False - - expected_resp = ('Ports pool at {} was populated with 3 
ports.' - .format(trunk_ips)).encode() - - self._do_POST_helper(method, path, headers, body, expected_resp, - trigger_exception, trunk_ips, num_ports) - - def test_do_POST_populate_exception(self): - method = 'create' - path = "http://localhost/populatePool" - trunk_ips = ["10.0.0.6"] - num_ports = 3 - body = jsonutils.dumps({"trunks": trunk_ips, - "num_ports": num_ports}) - headers = {'Content-Type': 'application/json', 'Connection': 'close'} - headers['Content-Length'] = len(body) - trigger_exception = True - - expected_resp = ('Error while populating pool {0} with {1} ports.' - .format(trunk_ips, num_ports)).encode() - - self._do_POST_helper(method, path, headers, body, expected_resp, - trigger_exception, trunk_ips, num_ports) - - def test_do_POST_populate_no_trunks(self): - method = 'create' - path = "http://localhost/populatePool" - trunk_ips = [] - num_ports = 3 - body = jsonutils.dumps({"trunks": trunk_ips, - "num_ports": num_ports}) - headers = {'Content-Type': 'application/json', 'Connection': 'close'} - headers['Content-Length'] = len(body) - trigger_exception = False - - expected_resp = ('Trunk port IP(s) missing.').encode() - - self._do_POST_helper(method, path, headers, body, expected_resp, - trigger_exception, trunk_ips, num_ports) - - def test_do_POST_free(self): - method = 'delete' - path = "http://localhost/freePool" - trunk_ips = ["10.0.0.6"] - body = jsonutils.dumps({"trunks": trunk_ips}) - headers = {'Content-Type': 'application/json', 'Connection': 'close'} - headers['Content-Length'] = len(body) - trigger_exception = False - - expected_resp = ('Ports pool belonging to {0} was freed.' - .format(trunk_ips)).encode() - - self._do_POST_helper(method, path, headers, body, expected_resp, - trigger_exception, trunk_ips) - - def test_do_POST_free_exception(self): - method = 'delete' - path = "http://localhost/freePool" - trunk_ips = ["10.0.0.6"] - body = jsonutils.dumps({"trunks": trunk_ips}) - headers = {'Content-Type': 'application/json', 'Connection': 'close'} - headers['Content-Length'] = len(body) - trigger_exception = True - - expected_resp = ('Error freeing ports pool: {0}.' 
- .format(trunk_ips)).encode() - - self._do_POST_helper(method, path, headers, body, expected_resp, - trigger_exception, trunk_ips) - - def test_do_POST_free_no_trunks(self): - method = 'delete' - path = "http://localhost/freePool" - trunk_ips = [] - body = jsonutils.dumps({"trunks": trunk_ips}) - headers = {'Content-Type': 'application/json', 'Connection': 'close'} - headers['Content-Length'] = len(body) - trigger_exception = False - - expected_resp = ('Ports pool belonging to all was freed.').encode() - - self._do_POST_helper(method, path, headers, body, expected_resp, - trigger_exception, trunk_ips) - - def test_do_POST_wrong_action(self): - method = 'fake' - path = "http://localhost/fakeMethod" - trunk_ips = ["10.0.0.6"] - body = jsonutils.dumps({"trunks": trunk_ips}) - headers = {'Content-Type': 'application/json', 'Connection': 'close'} - headers['Content-Length'] = len(body) - trigger_exception = False - - expected_resp = ('Method not allowed.').encode() - - self._do_POST_helper(method, path, headers, body, expected_resp, - trigger_exception, trunk_ips) - - def test_do_GET_list(self): - method = 'list' - method_resp = ('["10.0.0.6", "9d2b45c4efaa478481c30340b49fd4d2", ' - '["00efc78c-f11c-414a-bfcd-a82e16dc07d1", ' - '"fd6b13dc-7230-4cbe-9237-36b4614bc6b5"]] ' - 'has 5 ports') - - path = "http://localhost/listPools" - body = jsonutils.dumps({}) - headers = {'Content-Type': 'application/json', 'Connection': 'close'} - headers['Content-Length'] = len(body) - trigger_exception = False - - expected_resp = ('Pools:\n{0}'.format(method_resp)).encode() - - self._do_GET_helper(method, method_resp, path, headers, body, - expected_resp, trigger_exception) - - def test_do_GET_list_exception(self): - method = 'list' - method_resp = ('["10.0.0.6", "9d2b45c4efaa478481c30340b49fd4d2", ' - '["00efc78c-f11c-414a-bfcd-a82e16dc07d1", ' - '"fd6b13dc-7230-4cbe-9237-36b4614bc6b5"]] ' - 'has 5 ports') - - path = "http://localhost/listPools" - body = jsonutils.dumps({}) - headers = {'Content-Type': 'application/json', 'Connection': 'close'} - headers['Content-Length'] = len(body) - trigger_exception = True - - expected_resp = ('Error listing the pools.').encode() - - self._do_GET_helper(method, method_resp, path, headers, body, - expected_resp, trigger_exception) - - def test_do_GET_list_empty(self): - method = 'list' - method_resp = 'There are no pools' - - path = "http://localhost/listPools" - body = jsonutils.dumps({}) - headers = {'Content-Type': 'application/json', 'Connection': 'close'} - headers['Content-Length'] = len(body) - trigger_exception = False - - expected_resp = ('Pools:\n{0}'.format(method_resp)).encode() - - self._do_GET_helper(method, method_resp, path, headers, body, - expected_resp, trigger_exception) - - def test_do_GET_show(self): - method = 'show' - method_resp = "251f748d-2a0d-4143-bce8-2e616f7a6a4a" - path = "http://localhost/showPool" - pool_key = ["10.0.0.6", "9d2b45c4efaa478481c30340b49fd4d2", - ["00efc78c-f11c-414a-bfcd-a82e16dc07d1", - "fd6b13dc-7230-4cbe-9237-36b4614bc6b5"]] - body = jsonutils.dumps({"pool_key": pool_key}) - headers = {'Content-Type': 'application/json', 'Connection': 'close'} - headers['Content-Length'] = len(body) - trigger_exception = False - formated_key = (pool_key[0], pool_key[1], tuple(sorted(pool_key[2]))) - expected_resp = ('Pool {0} ports are:\n{1}' - .format(formated_key, method_resp)).encode() - - self._do_GET_helper(method, method_resp, path, headers, body, - expected_resp, trigger_exception, pool_key) - - def test_do_GET_show_exception(self): 
- method = 'show' - method_resp = "251f748d-2a0d-4143-bce8-2e616f7a6a4a" - path = "http://localhost/showPool" - pool_key = ["10.0.0.6", "9d2b45c4efaa478481c30340b49fd4d2", - ["00efc78c-f11c-414a-bfcd-a82e16dc07d1", - "fd6b13dc-7230-4cbe-9237-36b4614bc6b5"]] - body = jsonutils.dumps({"pool_key": pool_key}) - headers = {'Content-Type': 'application/json', 'Connection': 'close'} - headers['Content-Length'] = len(body) - trigger_exception = True - formated_key = (pool_key[0], pool_key[1], tuple(sorted(pool_key[2]))) - expected_resp = ('Error showing pool: {0}.' - .format(formated_key)).encode() - - self._do_GET_helper(method, method_resp, path, headers, body, - expected_resp, trigger_exception, pool_key) - - def test_do_GET_show_empty(self): - method = 'show' - method_resp = "Empty pool" - path = "http://localhost/showPool" - pool_key = ["10.0.0.6", "9d2b45c4efaa478481c30340b49fd4d2", - ["00efc78c-f11c-414a-bfcd-a82e16dc07d1", - "fd6b13dc-7230-4cbe-9237-36b4614bc6b5"]] - body = jsonutils.dumps({"pool_key": pool_key}) - headers = {'Content-Type': 'application/json', 'Connection': 'close'} - headers['Content-Length'] = len(body) - trigger_exception = False - formated_key = (pool_key[0], pool_key[1], tuple(sorted(pool_key[2]))) - expected_resp = ('Pool {0} ports are:\n{1}' - .format(formated_key, method_resp)).encode() - - self._do_GET_helper(method, method_resp, path, headers, body, - expected_resp, trigger_exception, pool_key) - - def test_do_GET_show_wrong_key(self): - method = 'show' - method_resp = "" - path = "http://localhost/showPool" - pool_key = ["10.0.0.6", "9d2b45c4efaa478481c30340b49fd4d2"] - body = jsonutils.dumps({"pool_key": pool_key}) - headers = {'Content-Type': 'application/json', 'Connection': 'close'} - headers['Content-Length'] = len(body) - trigger_exception = False - expected_resp = ('Invalid pool key. Proper format is:\n[trunk_ip, ' - 'project_id, [security_groups]]\n').encode() - - self._do_GET_helper(method, method_resp, path, headers, body, - expected_resp, trigger_exception, pool_key) - - def test_do_GET_wrong_action(self): - method = 'fake' - method_resp = "" - path = "http://localhost/fakeMethod" - body = jsonutils.dumps({}) - headers = {'Content-Type': 'application/json', 'Connection': 'close'} - headers['Content-Length'] = len(body) - trigger_exception = False - - expected_resp = ('Method not allowed.').encode() - - self._do_GET_helper(method, method_resp, path, headers, body, - expected_resp, trigger_exception) diff --git a/kuryr_kubernetes/tests/unit/controller/managers/test_prometheus_exporter.py b/kuryr_kubernetes/tests/unit/controller/managers/test_prometheus_exporter.py deleted file mode 100644 index b845078ff..000000000 --- a/kuryr_kubernetes/tests/unit/controller/managers/test_prometheus_exporter.py +++ /dev/null @@ -1,186 +0,0 @@ -# Copyright 2021 Red Hat -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
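# The pool-manager tests above pin down a small HTTP contract: POST
# /populatePool and /freePool take a JSON body with "trunks" (and
# "num_ports"), GET /listPools and /showPool return plain-text summaries,
# and every response sends an explicit Content-Length before writing the
# encoded bytes. A condensed sketch of that contract using the message
# strings asserted in the tests; this is illustrative, not the deleted
# m_pool.RequestHandler (which, e.g., may not call send_response()).

import json
from http.server import BaseHTTPRequestHandler


class PoolHandler(BaseHTTPRequestHandler):
    def _reply(self, text):
        resp = text.encode()
        self.send_response(200)
        self.send_header('Content-Length', len(resp))
        self.end_headers()
        self.wfile.write(resp)

    def do_POST(self):
        body = json.loads(
            self.rfile.read(int(self.headers['Content-Length'])))
        trunks = body.get('trunks', [])
        if self.path.endswith('/populatePool'):
            if not trunks:
                return self._reply('Trunk port IP(s) missing.')
            return self._reply(
                'Ports pool at {} was populated with {} ports.'
                .format(trunks, body.get('num_ports', 1)))
        if self.path.endswith('/freePool'):
            return self._reply('Ports pool belonging to {} was freed.'
                               .format(trunks or 'all'))
        self._reply('Method not allowed.')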
- -import prometheus_client -from unittest import mock - -from openstack.load_balancer.v2 import load_balancer as os_lb -from openstack.load_balancer.v2 import pool as os_pool -from openstack.network.v2 import subnet as os_subnet - -from kuryr_kubernetes.controller.managers import prometheus_exporter -from kuryr_kubernetes.tests import base -from kuryr_kubernetes.tests.unit import kuryr_fixtures as k_fix - - -def get_quota_obj(): - return { - 'subnets': { - 'used': 50, - 'limit': 100, - 'reserved': 0 - }, - 'networks': { - 'used': 50, - 'limit': 100, - 'reserved': 0 - }, - 'security_group_rules': { - 'used': 50, - 'limit': 100, - 'reserved': 0 - }, - 'security_groups': { - 'used': 5, - 'limit': 10, - 'reserved': 0 - }, - 'ports': { - 'used': 250, - 'limit': 500, - 'reserved': 0 - } - } - - -class TestControllerPrometheusExporter(base.TestCase): - - def setUp(self): - super(TestControllerPrometheusExporter, self).setUp() - self.cls = prometheus_exporter.ControllerPrometheusExporter - self.srv = mock.MagicMock(spec=self.cls) - self.srv.quota_free_count = mock.MagicMock( - spec=prometheus_client.Gauge) - self.srv.port_quota_per_subnet = mock.MagicMock( - spec=prometheus_client.Gauge) - self.srv.lbs_members_count = mock.MagicMock( - spec=prometheus_client.Gauge) - self.srv.lbs_state = mock.MagicMock( - spec=prometheus_client.Enum) - self.srv._project_id = mock.sentinel.project_id - self.srv._os_net = self.useFixture(k_fix.MockNetworkClient()).client - self.srv._os_lb = self.useFixture(k_fix.MockLBaaSClient()).client - - def test__record_quota_free_count_metric(self): - quota = get_quota_obj() - self.srv._os_net.get_quota.return_value = quota - self.cls._record_quota_free_count_metric(self.srv) - calls = [] - for resource in prometheus_exporter.RESOURCES: - calls.extend( - [mock.call(**{'resource': resource}), - mock.call().set( - quota[resource]['limit']-quota[resource]['used'])]) - self.srv.quota_free_count.labels.assert_has_calls(calls) - - def test__record_no_quota_free_count_metric(self): - quota = get_quota_obj() - for resource in quota: - quota[resource]['used'] = quota[resource]['limit'] - self.srv._os_net.get_quota.return_value = quota - self.cls._record_quota_free_count_metric(self.srv) - calls = [] - for resource in prometheus_exporter.RESOURCES: - calls.extend( - [mock.call(**{'resource': resource}), - mock.call().set(0)]) - self.srv.quota_free_count.labels.assert_has_calls(calls) - - def test__record_ports_quota_per_subnet_metric(self): - subnet_id = mock.sentinel.id - subnet_name = 'ns/cluster-version-net' - network_id = mock.sentinel.network_id - subnets = [ - os_subnet.Subnet( - id=subnet_id, - name=subnet_name, - network_id=network_id, - allocation_pools=[ - {'start': '10.128.70.2', 'end': '10.128.71.254'}, - ], - ), - ] - ports = [mock.MagicMock()] - self.srv._os_net.subnets.return_value = subnets - self.srv._os_net.ports.return_value = ports - self.cls._record_ports_quota_per_subnet_metric(self.srv) - self.srv.port_quota_per_subnet.labels.assert_called_with( - **{'subnet_id': subnet_id, 'subnet_name': subnet_name}) - self.srv.port_quota_per_subnet.labels().set.assert_called_with(509) - - @mock.patch('kuryr_kubernetes.utils.get_kuryrloadbalancer') - def test__record_lbs_metrics(self, m_get_klb): - lb_name = 'default/kubernetes' - lb_id = mock.sentinel.id - pool_name = mock.sentinel.name - pool_id = mock.sentinel.id - lb_state = 'ACTIVE' - m_get_klb.return_value = { - "status": { - "loadbalancer": { - "id": lb_id, - } - } - } - self.srv._os_lb.find_load_balancer.return_value = 
os_lb.LoadBalancer( - id=lb_id, - name=lb_name, - provisioning_status=lb_state, - pools=[{'id': pool_id}], - ) - self.srv._os_lb.pools.return_value = [ - os_pool.Pool( - id=pool_id, - name=pool_name, - loadbalancers=[{'id': lb_id}], - members=[{'id': mock.sentinel.id}], - ), - ] - - self.cls._record_lbs_metrics(self.srv) - - self.srv.lbs_state.labels.assert_called_with( - **{'lb_name': lb_name}) - self.srv.lbs_state.labels().state.assert_called_with(lb_state) - self.srv.lbs_members_count.labels.assert_called_with( - **{'lb_name': lb_name, 'lb_pool_name': pool_name}) - self.srv.lbs_members_count.labels().set.assert_called_with(1) - - @mock.patch('kuryr_kubernetes.utils.get_kuryrloadbalancer') - def test__record_no_lb_present_metric(self, m_get_klb): - lb_name = 'default/kubernetes' - lb_id = mock.sentinel.id - m_get_klb.return_value = { - "status": { - "loadbalancer": { - "id": lb_id, - } - } - } - self.srv._os_lb.find_load_balancer.return_value = None - self.cls._record_lbs_metrics(self.srv) - self.srv.lbs_state.labels.assert_called_with( - **{'lb_name': lb_name}) - self.srv.lbs_state.labels().state.assert_called_with('DELETED') - - @mock.patch('kuryr_kubernetes.utils.get_kuryrloadbalancer') - def test__no_record_lbs_metrics(self, m_get_klb): - m_get_klb.return_value = {} - - self.cls._record_lbs_metrics(self.srv) - - self.srv.lbs_state.labels.assert_not_called() - self.srv.lbs_state.labels().state.assert_not_called() - self.srv.lbs_members_count.labels.assert_not_called() - self.srv.lbs_members_count.labels().set.assert_not_called() diff --git a/kuryr_kubernetes/tests/unit/controller/test_service.py b/kuryr_kubernetes/tests/unit/controller/test_service.py deleted file mode 100644 index 1f83856c2..000000000 --- a/kuryr_kubernetes/tests/unit/controller/test_service.py +++ /dev/null @@ -1,59 +0,0 @@ -# Copyright (c) 2016 Mirantis, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
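# The exporter tests above verify two Gauge patterns: a per-resource
# "free quota" value computed as limit - used, and per-load-balancer
# state/member-count metrics. A small sketch of the first pattern with
# prometheus_client; the metric name is an assumption, while the
# 'resource' label is implied by the labels(resource=...) calls above.

from prometheus_client import Gauge

quota_free_count = Gauge(
    'kuryr_quota_free_count',
    'Amount of quota left per Neutron resource',
    ['resource'])


def record_quota_free(quota):
    for resource, values in quota.items():
        free = max(values['limit'] - values['used'], 0)
        quota_free_count.labels(resource=resource).set(free)

# With the get_quota_obj() fixture above, 'ports' would be exported as
# 500 - 250 = 250 free; a fully used resource reports 0, matching
# test__record_no_quota_free_count_metric.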
- -from unittest import mock - -from kuryr_kubernetes.controller import service -from kuryr_kubernetes.tests import base as test_base -from kuryr_kubernetes.tests.unit.controller.handlers import test_fake_handler -from oslo_config import cfg - - -class TestControllerService(test_base.TestCase): - - @mock.patch('oslo_service.service.launch') - @mock.patch('kuryr_kubernetes.config.init') - @mock.patch('kuryr_kubernetes.config.setup_logging') - @mock.patch('kuryr_kubernetes.clients.setup_clients') - @mock.patch('kuryr_kubernetes.controller.service.KuryrK8sService') - def test_start(self, m_svc, m_setup_clients, m_setup_logging, - m_config_init, m_oslo_launch): - m_launcher = mock.Mock() - m_oslo_launch.return_value = m_launcher - - service.start() - - m_config_init.assert_called() - m_setup_logging.assert_called() - m_setup_clients.assert_called() - m_svc.assert_called() - m_oslo_launch.assert_called() - m_launcher.wait.assert_called() - - def test_check_test_handler(self): - cfg.CONF.set_override('enabled_handlers', ['test_handler'], - group='kubernetes') - handlers = service._load_kuryr_ctrlr_handlers() - for handler in handlers: - self.assertEqual(handler.get_watch_path(), - test_fake_handler.TestHandler.OBJECT_WATCH_PATH) - - @mock.patch('kuryr_kubernetes.controller.service._handler_not_found') - def test_handler_not_found(self, m_handler_not_found): - - cfg.CONF.set_override('enabled_handlers', ['fake_handler'], - group='kubernetes') - service._load_kuryr_ctrlr_handlers() - m_handler_not_found.assert_called() diff --git a/kuryr_kubernetes/tests/unit/handlers/__init__.py b/kuryr_kubernetes/tests/unit/handlers/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/kuryr_kubernetes/tests/unit/handlers/test_asynchronous.py b/kuryr_kubernetes/tests/unit/handlers/test_asynchronous.py deleted file mode 100644 index cd99cbaa9..000000000 --- a/kuryr_kubernetes/tests/unit/handlers/test_asynchronous.py +++ /dev/null @@ -1,161 +0,0 @@ -# Copyright (c) 2016 Mirantis, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
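# test_start above fixes the boot order of the controller: configuration
# first, then logging, then the OpenStack/Kubernetes clients, and only
# then the oslo.service launcher. A hedged reconstruction of that flow;
# argument details are assumptions and the real service.start() may
# differ beyond the call order the test asserts.

import sys

from oslo_service import service as oslo_service

from kuryr_kubernetes import clients
from kuryr_kubernetes import config
from kuryr_kubernetes.controller.service import KuryrK8sService


def start():
    config.init(sys.argv[1:])        # parse CLI options and config files
    config.setup_logging()           # oslo.log setup
    clients.setup_clients()          # K8s client + OpenStack SDK clients
    kuryr_service = KuryrK8sService()
    launcher = oslo_service.launch(config.CONF, kuryr_service)
    launcher.wait()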
- -import queue -from unittest import mock - -from kuryr_kubernetes.handlers import asynchronous as h_async -from kuryr_kubernetes.tests import base as test_base - - -class TestAsyncHandler(test_base.TestCase): - - def test_call(self): - event = mock.sentinel.event - group = mock.sentinel.group - m_queue = mock.Mock() - m_handler = mock.Mock() - m_group_by = mock.Mock(return_value=group) - m_info = mock.Mock(return_value=group) - async_handler = h_async.Async(m_handler, mock.Mock(), m_group_by, - m_info) - async_handler._queues[group] = m_queue - - async_handler(event) - - m_handler.assert_not_called() - self.assertEqual({group: m_queue}, async_handler._queues) - m_queue.put.assert_called_once_with((event, (), {})) - - @mock.patch('queue.Queue') - def test_call_new(self, m_queue_type): - event = mock.sentinel.event - group = mock.sentinel.group - queue_depth = mock.sentinel.queue_depth - m_queue = mock.Mock() - m_queue_type.return_value = m_queue - m_handler = mock.Mock() - m_th = mock.Mock() - m_tg = mock.Mock() - m_tg.add_thread.return_value = m_th - m_group_by = mock.Mock(return_value=group) - m_info = mock.Mock(return_value=group) - async_handler = h_async.Async(m_handler, m_tg, m_group_by, m_info, - queue_depth=queue_depth) - - async_handler(event) - - m_handler.assert_not_called() - m_queue_type.assert_called_once_with(queue_depth) - self.assertEqual({group: m_queue}, async_handler._queues) - m_tg.add_thread.assert_called_once_with(async_handler._run, group, - m_queue, group) - m_th.link.assert_called_once_with(async_handler._done, group, group) - m_queue.put.assert_called_once_with((event, (), {})) - - def test_call_injected(self): - event = mock.sentinel.event - group = mock.sentinel.group - m_queue = mock.Mock() - m_handler = mock.Mock() - m_group_by = mock.Mock(return_value=group) - m_info = mock.Mock(return_value=group) - async_handler = h_async.Async(m_handler, mock.Mock(), m_group_by, - m_info) - async_handler._queues[group] = m_queue - - async_handler(event, injected=True) - - m_handler.assert_not_called() - self.assertEqual({group: m_queue}, async_handler._queues) - m_queue.put.assert_not_called() - - @mock.patch('itertools.count') - def test_run(self, m_count): - event = mock.sentinel.event - group = mock.sentinel.group - m_queue = mock.Mock() - m_queue.empty.return_value = True - m_queue.get.return_value = (event, (), {}) - m_handler = mock.Mock() - m_count.return_value = [1] - async_handler = h_async.Async(m_handler, mock.Mock(), mock.Mock(), - mock.Mock(), queue_depth=1) - - with mock.patch('time.sleep'): - async_handler._run(group, m_queue, None) - - m_handler.assert_called_once_with(event) - - @mock.patch('itertools.count') - def test_run_empty(self, m_count): - events = [(x, (), {}) for x in (mock.sentinel.event1, - mock.sentinel.event2)] - group = mock.sentinel.group - m_queue = mock.Mock() - m_queue.empty.return_value = True - m_queue.get.side_effect = events + [queue.Empty()] - m_handler = mock.Mock() - m_count.return_value = list(range(5)) - async_handler = h_async.Async(m_handler, mock.Mock(), mock.Mock(), - mock.Mock()) - - with mock.patch('time.sleep'): - async_handler._run(group, m_queue, None) - - m_handler.assert_has_calls([mock.call(event[0]) for event in events]) - self.assertEqual(len(events), m_handler.call_count) - - @mock.patch('itertools.count') - def test_run_stale(self, m_count): - events = [(x, (), {}) for x in (mock.sentinel.event1, - mock.sentinel.event2)] - group = mock.sentinel.group - m_queue = mock.Mock() - m_queue.empty.side_effect = [False, 
True, True] - m_queue.get.side_effect = events + [queue.Empty()] - m_handler = mock.Mock() - m_count.return_value = list(range(5)) - async_handler = h_async.Async(m_handler, mock.Mock(), mock.Mock(), - mock.Mock()) - - with mock.patch('time.sleep'): - async_handler._run(group, m_queue, None) - - m_handler.assert_called_once_with(mock.sentinel.event2) - - def test_done(self): - group = mock.sentinel.group - m_queue = mock.Mock() - async_handler = h_async.Async(mock.Mock(), mock.Mock(), mock.Mock(), - mock.Mock()) - async_handler._queues[group] = m_queue - - async_handler._done(mock.Mock(), group, None) - - self.assertFalse(async_handler._queues) - - @mock.patch('kuryr_kubernetes.handlers.asynchronous.LOG.critical') - def test_done_terminated(self, m_critical): - group = mock.sentinel.group - m_queue = mock.Mock() - m_queue.empty.return_value = False - async_handler = h_async.Async(mock.Mock(), mock.Mock(), mock.Mock(), - mock.Mock()) - async_handler._queues[group] = m_queue - - async_handler._done(mock.Mock(), group, None) - - m_critical.assert_called_once() diff --git a/kuryr_kubernetes/tests/unit/handlers/test_dispatch.py b/kuryr_kubernetes/tests/unit/handlers/test_dispatch.py deleted file mode 100644 index 30168bb01..000000000 --- a/kuryr_kubernetes/tests/unit/handlers/test_dispatch.py +++ /dev/null @@ -1,123 +0,0 @@ -# Copyright (c) 2016 Mirantis, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
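# The Async tests above encode a queue-per-group design: the first event
# for a group lazily creates a bounded queue plus a worker thread, later
# events are just enqueued, a worker drops "stale" events that were
# superseded while queued, and _done() reaps the queue when the worker
# exits. A compact sketch of that shape, assuming an eventlet-style
# thread group with add_thread()/link() as mocked above; the real class
# also threads an `info` callable through for logging and skips events
# injected by retries, both omitted here.

import queue


class Async(object):
    def __init__(self, handler, thread_group, group_by, queue_depth=100):
        self._handler = handler
        self._tg = thread_group
        self._group_by = group_by
        self._queue_depth = queue_depth
        self._queues = {}

    def __call__(self, event, *args, **kwargs):
        group = self._group_by(event)
        try:
            event_queue = self._queues[group]
        except KeyError:
            event_queue = queue.Queue(self._queue_depth)
            self._queues[group] = event_queue
            thread = self._tg.add_thread(self._run, group, event_queue)
            thread.link(self._done, group)
        event_queue.put((event, args, kwargs))

    def _run(self, group, event_queue):
        while True:
            try:
                event, args, kwargs = event_queue.get(timeout=30)
            except queue.Empty:
                break  # group went idle; _done() will reap the queue
            if event_queue.empty():  # skip stale, superseded events
                self._handler(event, *args, **kwargs)

    def _done(self, thread, group):
        self._queues.pop(group, None)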
- -from unittest import mock - -from kuryr_kubernetes.handlers import dispatch as h_dis -from kuryr_kubernetes.tests import base as test_base - - -def make_event(name): - return {'object': {'metadata': {'name': str(name)}}} - - -class TestDispatch(test_base.TestCase): - def test_dispatch(self): - events = [make_event(i) for i in range(3)] - handler = mock.Mock() - dispatcher = h_dis.Dispatcher() - dispatcher.register(lambda e: True, True, handler) - - for event in events: - dispatcher(event) - - handler.assert_has_calls([mock.call(e) for e in events]) - - def test_dispatch_broadcast(self): - handlers = [mock.Mock() for _ in range(3)] - dispatcher = h_dis.Dispatcher() - event = make_event(mock.sentinel.event_name) - - for handler in handlers: - dispatcher.register(lambda e: True, True, handler) - - dispatcher(event) - - for handler in handlers: - handler.assert_called_once_with(event) - - def test_dispatch_by_key(self): - def key_fn(event): - return event['object']['metadata']['name'] - - events = {} - for i in range(3): - e = make_event(i) - events[key_fn(e)] = e - handlers = {key: mock.Mock() for key in events} - dispatcher = h_dis.Dispatcher() - for key, handler in handlers.items(): - dispatcher.register(key_fn, key, handler) - - for event in events.values(): - dispatcher(event) - - for key, handler in handlers.items(): - handler.assert_called_once_with(events[key]) - - -class _TestEventPipeline(h_dis.EventPipeline): - def _wrap_dispatcher(self, dispatcher): - pass - - def _wrap_consumer(self, consumer): - pass - - -class TestEventPipeline(test_base.TestCase): - @mock.patch.object(_TestEventPipeline, '_wrap_dispatcher') - @mock.patch('kuryr_kubernetes.handlers.dispatch.Dispatcher') - def test_init(self, m_dispatcher_type, m_wrapper): - m_dispatcher_type.return_value = mock.sentinel.dispatcher - m_wrapper.return_value = mock.sentinel.handler - - pipeline = _TestEventPipeline() - - m_dispatcher_type.assert_called_once() - m_wrapper.assert_called_once_with(mock.sentinel.dispatcher) - self.assertEqual(mock.sentinel.dispatcher, pipeline._dispatcher) - self.assertEqual(mock.sentinel.handler, pipeline._handler) - - @mock.patch.object(_TestEventPipeline, '_wrap_consumer') - @mock.patch.object(_TestEventPipeline, '__init__') - def test_register(self, m_init, m_wrap_consumer): - consumes = {mock.sentinel.key_fn1: mock.sentinel.key1, - mock.sentinel.key_fn2: mock.sentinel.key2, - mock.sentinel.key_fn3: mock.sentinel.key3} - m_dispatcher = mock.Mock() - m_consumer = mock.Mock() - m_consumer.consumes = consumes - m_wrap_consumer.return_value = mock.sentinel.handler - m_init.return_value = None - pipeline = _TestEventPipeline() - pipeline._dispatcher = m_dispatcher - - pipeline.register(m_consumer) - - m_wrap_consumer.assert_called_once_with(m_consumer) - m_dispatcher.register.assert_has_calls([ - mock.call(key_fn, key, mock.sentinel.handler) - for key_fn, key in consumes.items()], any_order=True) - - @mock.patch.object(_TestEventPipeline, '__init__') - def test_call(self, m_init): - m_init.return_value = None - m_handler = mock.Mock() - pipeline = _TestEventPipeline() - pipeline._handler = m_handler - - pipeline(mock.sentinel.event) - - m_handler.assert_called_once_with(mock.sentinel.event) diff --git a/kuryr_kubernetes/tests/unit/handlers/test_health.py b/kuryr_kubernetes/tests/unit/handlers/test_health.py deleted file mode 100644 index 335007de7..000000000 --- a/kuryr_kubernetes/tests/unit/handlers/test_health.py +++ /dev/null @@ -1,48 +0,0 @@ -# Copyright 2018 Maysa de Macedo Souza. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from kuryr_kubernetes.handlers import health as h_health -from kuryr_kubernetes.tests import base as test_base -from unittest import mock - - -class _TestHandler(h_health.HealthHandler): - def is_alive(self): - pass - - -class TestHealthRegister(test_base.TestCase): - - def test_register(self): - m_component = mock.Mock() - health_register = h_health.HealthRegister() - health_register.register(m_component) - - self.assertEqual(health_register.registry, [m_component]) - - -class TestHealthHandler(test_base.TestCase): - - @mock.patch.object(h_health.HealthRegister, 'get_instance') - def test_init(self, m_health_register): - cls = h_health.HealthRegister - m_health_register_obj = mock.Mock(spec=cls) - m_health_register.return_value = m_health_register_obj - - health_handler = _TestHandler() - - self.assertTrue(health_handler._alive) - self.assertTrue(health_handler._ready) - m_health_register_obj.register.assert_called_once_with(health_handler) - self.assertEqual(m_health_register_obj, health_handler._manager) diff --git a/kuryr_kubernetes/tests/unit/handlers/test_k8s_base.py b/kuryr_kubernetes/tests/unit/handlers/test_k8s_base.py deleted file mode 100644 index 5e1e398c9..000000000 --- a/kuryr_kubernetes/tests/unit/handlers/test_k8s_base.py +++ /dev/null @@ -1,64 +0,0 @@ -# Copyright (c) 2016 Mirantis, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
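# The Dispatcher tests further above pin down the routing rule: each
# handler registers a key function plus an expected key, and an incoming
# event fans out to every handler whose key_fn(event) equals its
# registered key. A minimal sketch of that routing table, matching the
# register()/__call__ behaviour the tests assert:

from collections import defaultdict


class Dispatcher(object):
    def __init__(self):
        # {key_fn: {key: [handlers]}}
        self._registry = defaultdict(lambda: defaultdict(list))

    def register(self, key_fn, key, handler):
        self._registry[key_fn][key].append(handler)

    def __call__(self, event):
        for key_fn, handlers_by_key in self._registry.items():
            for handler in handlers_by_key.get(key_fn(event), []):
                handler(event)

# Registering several handlers under distinct key functions that all
# return True reproduces the broadcast case; reusing one key_fn with
# different keys reproduces the dispatch-by-name case.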
- -from unittest import mock - -from kuryr_kubernetes.handlers import k8s_base as h_k8s -from kuryr_kubernetes.tests import base as test_base - - -class TestResourceEventHandler(test_base.TestCase): - - @mock.patch.object(h_k8s.ResourceEventHandler, 'on_added') - @mock.patch.object(h_k8s.ResourceEventHandler, 'on_present') - def test_added(self, m_added, m_present): - obj = mock.sentinel.obj - event = {'type': 'ADDED', 'object': obj} - handler = h_k8s.ResourceEventHandler() - - handler(event) - - m_added.assert_called_once_with(obj) - m_present.assert_called_once_with(obj) - - @mock.patch.object(h_k8s.ResourceEventHandler, 'on_modified') - @mock.patch.object(h_k8s.ResourceEventHandler, 'on_present') - def test_modified(self, m_modified, m_present): - obj = mock.sentinel.obj - event = {'type': 'MODIFIED', 'object': obj} - handler = h_k8s.ResourceEventHandler() - - handler(event) - - m_modified.assert_called_once_with(obj) - m_present.assert_called_once_with(obj) - - @mock.patch.object(h_k8s.ResourceEventHandler, 'on_deleted') - def test_deleted(self, m_deleted): - obj = mock.sentinel.obj - event = {'type': 'DELETED', 'object': obj} - handler = h_k8s.ResourceEventHandler() - - handler(event) - - m_deleted.assert_called_once_with(obj) - - def test_unknown(self): - event = {'type': 'UNKNOWN'} - handler = h_k8s.ResourceEventHandler() - - handler(event) - - self.assertTrue(True) diff --git a/kuryr_kubernetes/tests/unit/handlers/test_logging.py b/kuryr_kubernetes/tests/unit/handlers/test_logging.py deleted file mode 100644 index 1860935b8..000000000 --- a/kuryr_kubernetes/tests/unit/handlers/test_logging.py +++ /dev/null @@ -1,65 +0,0 @@ -# Copyright (c) 2016 Mirantis, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
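# The ResourceEventHandler tests above define the watch-event fan-out:
# ADDED and MODIFIED both flow into on_present (after their specific
# hook), DELETED goes to on_deleted, and unknown types are ignored. A
# skeleton of that dispatch, hedged as a reading of the tests rather
# than the deleted class:

class ResourceEventHandler(object):
    def __call__(self, event):
        obj = event.get('object')
        event_type = event.get('type')
        if event_type == 'ADDED':
            self.on_added(obj)
            self.on_present(obj)
        elif event_type == 'MODIFIED':
            self.on_modified(obj)
            self.on_present(obj)
        elif event_type == 'DELETED':
            self.on_deleted(obj)
        # anything else (e.g. bookmarks, errors) is deliberately a no-op

    def on_added(self, obj): pass
    def on_modified(self, obj): pass
    def on_deleted(self, obj): pass
    def on_present(self, obj): pass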
- -from unittest import mock - -from kuryr_kubernetes.handlers import logging as h_log -from kuryr_kubernetes.tests import base as test_base - - -class TestLoggingHandler(test_base.TestCase): - - @mock.patch('kuryr_kubernetes.handlers.logging.LOG') - def test_no_exception(self, m_log): - m_handler = mock.Mock() - handler = h_log.LogExceptions(m_handler) - - handler(mock.sentinel.event) - - m_handler.assert_called_once_with(mock.sentinel.event) - m_log.exception.assert_not_called() - - @mock.patch('kuryr_kubernetes.handlers.logging.LOG') - def test_exception(self, m_log): - m_handler = mock.Mock() - m_handler.side_effect = ValueError() - handler = h_log.LogExceptions(m_handler, exceptions=ValueError) - - handler(mock.sentinel.event) - - m_handler.assert_called_once_with(mock.sentinel.event) - m_log.exception.assert_called_once() - - @mock.patch('kuryr_kubernetes.handlers.logging.LOG') - def test_exception_default(self, m_log): - m_handler = mock.Mock() - m_handler.side_effect = ValueError() - handler = h_log.LogExceptions(m_handler) - - handler(mock.sentinel.event) - - m_handler.assert_called_once_with(mock.sentinel.event) - m_log.exception.assert_called_once() - - @mock.patch('kuryr_kubernetes.handlers.logging.LOG') - def test_raises(self, m_log): - m_handler = mock.Mock() - m_handler.side_effect = KeyError() - handler = h_log.LogExceptions(m_handler, exceptions=ValueError) - - self.assertRaises(KeyError, handler, mock.sentinel.event) - - m_handler.assert_called_once_with(mock.sentinel.event) - m_log.exception.assert_not_called() diff --git a/kuryr_kubernetes/tests/unit/handlers/test_retry.py b/kuryr_kubernetes/tests/unit/handlers/test_retry.py deleted file mode 100644 index 2f3837e6d..000000000 --- a/kuryr_kubernetes/tests/unit/handlers/test_retry.py +++ /dev/null @@ -1,163 +0,0 @@ -# Copyright (c) 2016 Mirantis, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
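# test_logging.py above describes a trap-and-log wrapper: configured
# exception types raised by the wrapped handler are swallowed and
# logged, everything else propagates. A faithful sketch of that wrapper
# (log message wording is illustrative):

import logging

LOG = logging.getLogger(__name__)


class LogExceptions(object):
    def __init__(self, handler, exceptions=Exception):
        self._handler = handler
        self._exceptions = exceptions

    def __call__(self, event, *args, **kwargs):
        try:
            self._handler(event, *args, **kwargs)
        except self._exceptions:
            LOG.exception('Failed to handle event: %s', event)

# With exceptions=ValueError, a KeyError escapes (the test_raises case)
# while a ValueError is logged and suppressed.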
- -from unittest import mock - -import fixtures -import time - -from kuryr_kubernetes import exceptions -from kuryr_kubernetes.handlers import retry as h_retry -from kuryr_kubernetes.tests import base as test_base -from kuryr_kubernetes.tests.unit import kuryr_fixtures as k_fix - - -class _EX1(Exception): - pass - - -class _EX11(_EX1): - pass - - -class _EX2(Exception): - pass - - -class TestRetryHandler(test_base.TestCase): - - def setUp(self): - super(TestRetryHandler, self).setUp() - - self.now = time.time() - f_time = self.useFixture(fixtures.MockPatch('time.time')) - f_time.mock.return_value = self.now - - self.k8s = self.useFixture(k_fix.MockK8sClient()).client - f_k8s = self.useFixture(fixtures.MockPatch( - 'kuryr_kubernetes.clients.get_kubernetes_client')) - f_k8s.mock.return_value = self.k8s - - @mock.patch('time.sleep') - def test_should_not_sleep(self, m_sleep): - deadline = self.now - 1 - retry = h_retry.Retry(mock.Mock()) - - ret = retry._sleep(deadline, 1, _EX1()) - - self.assertFalse(ret) - m_sleep.assert_not_called() - - def _test_should_sleep(self, seconds_left, slept): - attempt = 2 - timeout = 20 - interval = 3 - deadline = self.now + seconds_left - retry = h_retry.Retry(mock.Mock(), timeout=timeout, interval=interval) - - with mock.patch('random.randint') as m_randint, \ - mock.patch('time.sleep') as m_sleep: - m_randint.return_value = 0 # Assume 0 as jitter - - ret = retry._sleep(deadline, attempt, _EX2()) - - self.assertEqual(slept, ret) - m_sleep.assert_called_once_with(slept) - - def test_should_sleep(self): - self._test_should_sleep(20, 12) - - def test_should_sleep_last(self): - self._test_should_sleep(5, 5) - - def test_should_sleep_last_capped(self): - self._test_should_sleep(2, 3) - - @mock.patch('itertools.count') - @mock.patch.object(h_retry.Retry, '_sleep') - def test_call(self, m_sleep, m_count): - m_handler = mock.Mock() - m_count.return_value = list(range(1, 5)) - retry = h_retry.Retry(m_handler) - event = {'type': 'DELETED'} - - retry(event) - - m_handler.assert_called_once_with(event, retry_info=mock.ANY) - m_sleep.assert_not_called() - - @mock.patch('itertools.count') - @mock.patch.object(h_retry.Retry, '_sleep') - def test_call_outdated_event(self, m_sleep, m_count): - m_handler = mock.Mock() - m_count.return_value = list(range(1, 5)) - self_link = '/api/v1/namespaces/ns1/services/srv1' - obj = {'apiVersion': 'v1', - 'kind': 'Service', - 'metadata': {'name': 'srv1', - 'namespace': 'ns1'}} - event = {'type': 'MODIFIED', 'object': obj} - self.k8s.get.side_effect = exceptions.K8sResourceNotFound(obj) - - retry = h_retry.Retry(m_handler) - retry(event) - - self.k8s.get.assert_called_once_with(self_link) - m_handler.assert_not_called() - m_sleep.assert_not_called() - - @mock.patch('itertools.count') - @mock.patch.object(h_retry.Retry, '_sleep') - def test_call_retry(self, m_sleep, m_count): - attempts = 3 - timeout = 10 - deadline = self.now + timeout - failures = [_EX1()] * (attempts - 1) - event = {'type': 'DELETED'} - m_handler = mock.Mock() - m_handler.side_effect = failures + [None] - m_sleep.return_value = 1 - m_count.return_value = list(range(1, 5)) - retry = h_retry.Retry(m_handler, timeout=timeout, exceptions=_EX1) - - retry(event) - - m_handler.assert_has_calls([mock.call( - event, retry_info=mock.ANY)] * attempts) - m_sleep.assert_has_calls([ - mock.call(deadline, i + 1, failures[i]) - for i in range(len(failures))]) - - @mock.patch('itertools.count') - @mock.patch.object(h_retry.Retry, '_sleep') - def test_call_retry_raises(self, m_sleep, 
m_count): - attempts = 3 - timeout = 10 - deadline = self.now + timeout - failures = [_EX1(), _EX1(), _EX11()] - event = {'type': 'DELETED'} - m_handler = mock.Mock() - m_handler.side_effect = failures - m_sleep.side_effect = [1] * (attempts - 1) + [0] - m_count.return_value = list(range(1, 5)) - retry = h_retry.Retry(m_handler, timeout=timeout, exceptions=_EX1) - - self.assertRaises(_EX11, retry, event) - - m_handler.assert_has_calls([mock.call( - event, retry_info=mock.ANY)] * attempts) - m_sleep.assert_has_calls([ - mock.call(deadline, i + 1, failures[i]) - for i in range(len(failures))]) diff --git a/kuryr_kubernetes/tests/unit/kuryr_fixtures.py b/kuryr_kubernetes/tests/unit/kuryr_fixtures.py deleted file mode 100644 index afea61ca4..000000000 --- a/kuryr_kubernetes/tests/unit/kuryr_fixtures.py +++ /dev/null @@ -1,52 +0,0 @@ -# Copyright (c) 2016 Mirantis, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from unittest import mock - -import fixtures - -from kuryr_kubernetes import k8s_client - - -class MockK8sClient(fixtures.Fixture): - def _setUp(self): - self.client = mock.Mock(k8s_client.K8sClient) - self.useFixture(fixtures.MockPatch( - 'kuryr_kubernetes.clients.get_kubernetes_client', - lambda: self.client)) - - -class MockLBaaSClient(fixtures.Fixture): - def _setUp(self): - self.client = mock.Mock() - self.useFixture(fixtures.MockPatch( - 'kuryr_kubernetes.clients.get_loadbalancer_client', - lambda: self.client)) - - -class MockNetworkClient(fixtures.Fixture): - def _setUp(self): - self.client = mock.Mock() - self.useFixture(fixtures.MockPatch( - 'kuryr_kubernetes.clients.get_network_client', - lambda: self.client)) - - -class MockComputeClient(fixtures.Fixture): - def _setUp(self): - self.client = mock.Mock() - self.useFixture(fixtures.MockPatch( - 'kuryr_kubernetes.clients.get_compute_client', - lambda: self.client)) diff --git a/kuryr_kubernetes/tests/unit/test_clients.py b/kuryr_kubernetes/tests/unit/test_clients.py deleted file mode 100644 index eb88f497d..000000000 --- a/kuryr_kubernetes/tests/unit/test_clients.py +++ /dev/null @@ -1,49 +0,0 @@ -# Copyright (c) 2016 Mirantis, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
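# The three _sleep cases above (20s left -> 12, 5s left -> 5, 2s left
# -> 3, with attempt=2 and interval=3) are consistent with exponential
# backoff of interval * 2**attempt plus jitter, capped at the time
# remaining but never below the base interval, and returning a falsy 0
# once the deadline has passed. That capping rule, the jitter range, and
# the simplified signature are inferences from the tests, not a quote of
# the deleted implementation:

import random
import time


def retry_sleep(deadline, attempt, interval=3):
    remaining = deadline - time.time()
    if remaining <= 0:
        return 0  # falsy: caller stops retrying
    backoff = interval * 2 ** attempt + random.randint(0, interval)
    slept = max(min(backoff, remaining), interval)
    time.sleep(slept)
    return slept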
- -from unittest import mock - -from kuryr_kubernetes import clients -from kuryr_kubernetes.tests import base as test_base - - -class TestK8sClient(test_base.TestCase): - - @mock.patch('openstack.connection.Connection') - @mock.patch('kuryr_kubernetes.config.CONF') - @mock.patch('kuryr_kubernetes.k8s_client.K8sClient') - def test_setup_clients(self, m_k8s, m_cfg, m_openstack): - k8s_api_root = 'http://127.0.0.1:1234' - - openstacksdk_mock = mock.Mock() - openstacksdk_mock.load_balancer = mock.Mock() - openstacksdk_mock.network = mock.Mock() - openstacksdk_mock.compute = mock.Mock() - k8s_dummy = object() - - m_cfg.kubernetes.api_root = k8s_api_root - m_k8s.return_value = k8s_dummy - m_openstack.return_value = openstacksdk_mock - - clients.setup_clients() - - m_k8s.assert_called_with(k8s_api_root) - self.assertIs(k8s_dummy, clients.get_kubernetes_client()) - self.assertIs(openstacksdk_mock.load_balancer, - clients.get_loadbalancer_client()) - self.assertIs(openstacksdk_mock.network, - clients.get_network_client()) - self.assertIs(openstacksdk_mock.compute, - clients.get_compute_client()) diff --git a/kuryr_kubernetes/tests/unit/test_k8s_client.py b/kuryr_kubernetes/tests/unit/test_k8s_client.py deleted file mode 100644 index 7e2039a02..000000000 --- a/kuryr_kubernetes/tests/unit/test_k8s_client.py +++ /dev/null @@ -1,525 +0,0 @@ -# Copyright (c) 2016 Mirantis, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
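# kuryr_fixtures.py above standardises how the unit tests fake the
# client getters: a Fixture swaps a clients.get_*_client function for a
# lambda returning a shared Mock, and tests grab that mock via
# useFixture(...).client. A generic sketch of the same pattern; the
# class name is illustrative, while the patch targets match the deleted
# fixtures.

from unittest import mock

import fixtures


class MockClientFixture(fixtures.Fixture):
    """Patch a module-level getter to return a shared Mock."""

    def __init__(self, target):
        # e.g. 'kuryr_kubernetes.clients.get_network_client'
        self._target = target

    def _setUp(self):
        self.client = mock.Mock()
        self.useFixture(fixtures.MockPatch(self._target,
                                           lambda: self.client))

# Usage inside a TestCase:
#     os_net = self.useFixture(MockClientFixture(
#         'kuryr_kubernetes.clients.get_network_client')).client
#     os_net.get_quota.return_value = {...}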
-import itertools -import os -import tempfile -from unittest import mock - -from oslo_serialization import jsonutils -import requests - -from kuryr_kubernetes import exceptions as exc -from kuryr_kubernetes import k8s_client -from kuryr_kubernetes.tests import base as test_base -from kuryr_kubernetes.tests import fake - - -class TestK8sClient(test_base.TestCase): - @mock.patch('kuryr_kubernetes.config.CONF') - def setUp(self, m_cfg): - super(TestK8sClient, self).setUp() - self.base_url = 'http://127.0.0.1:12345' - m_cfg.kubernetes.ssl_client_crt_file = None - m_cfg.kubernetes.ssl_client_key_file = None - m_cfg.kubernetes.ssl_ca_crt_file = None - m_cfg.kubernetes.token_file = None - m_cfg.kubernetes.ssl_verify_server_crt = False - self.client = k8s_client.K8sClient(self.base_url) - default_cert = (None, None) - default_token = None - self.assertEqual(default_cert, self.client.cert) - self.assertEqual(False, self.client.verify_server) - self.assertEqual(default_token, self.client.token) - - @mock.patch('os.path.exists') - @mock.patch('kuryr_kubernetes.config.CONF') - def test_https_client_init(self, m_cfg, m_exist): - m_cfg.kubernetes.ssl_client_crt_file = 'dummy_crt_file_path' - m_cfg.kubernetes.ssl_client_key_file = 'dummy_key_file_path' - m_cfg.kubernetes.ssl_ca_crt_file = 'dummy_ca_file_path' - m_cfg.kubernetes.token_file = None - m_cfg.kubernetes.ssl_verify_server_crt = True - m_exist.return_value = True - test_client = k8s_client.K8sClient(self.base_url) - cert = ('dummy_crt_file_path', 'dummy_key_file_path') - self.assertEqual(cert, test_client.cert) - self.assertEqual('dummy_ca_file_path', test_client.verify_server) - - @mock.patch('kuryr_kubernetes.config.CONF') - def test_https_client_init_invalid_client_crt_path(self, m_cfg): - m_cfg.kubernetes.ssl_client_crt_file = 'dummy_crt_file_path' - m_cfg.kubernetes.ssl_client_key_file = 'dummy_key_file_path' - m_cfg.kubernetes.token_file = None - self.assertRaises(RuntimeError, k8s_client.K8sClient, self.base_url) - - @mock.patch('os.path.exists') - @mock.patch('kuryr_kubernetes.config.CONF') - def test_https_client_init_invalid_ca_path(self, m_cfg, m_exist): - m_cfg.kubernetes.ssl_client_crt_file = 'dummy_crt_file_path' - m_cfg.kubernetes.ssl_client_key_file = 'dummy_key_file_path' - m_cfg.kubernetes.ssl_ca_crt_file = None - m_cfg.kubernetes.ssl_verify_server_crt = True - m_cfg.kubernetes.token_file = None - m_exist.return_value = True - self.assertRaises(RuntimeError, k8s_client.K8sClient, self.base_url) - - @mock.patch('requests.sessions.Session.send') - @mock.patch('kuryr_kubernetes.config.CONF') - def test_bearer_token(self, m_cfg, m_send): - token_content = ( - "eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJrdWJlcm5ldGVzL3Nl" - "cnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc" - "3BhY2UiOiJrdWJlLXN5c3RlbSIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bn" - "Qvc2VjcmV0Lm5hbWUiOiJkZWZhdWx0LXRva2VuLWh4M3QxIiwia3ViZXJuZXRlcy5" - "pby9zZXJ2aWNlYWNjb3VudC9zZXJ2aWNlLWFjY291bnQubmFtZSI6ImRlZmF1bHQi" - "LCJrdWJlcm5ldGVzLmlvL3NlcnZpY2VhY2NvdW50L3NlcnZpY2UtYWNjb3VudC51a" - "WQiOiIxYTkyM2ZmNi00MDkyLTExZTctOTMwYi1mYTE2M2VkY2ViMDUiLCJzdWIiOi" - "JzeXN0ZW06c2VydmljZWFjY291bnQ6a3ViZS1zeXN0ZW06ZGVmYXVsdCJ9.lzcPef" - "DQ-uzF5cD-5pLwTKpRvtvvxKB4LX8TLymrPLMTth8WGr1vT6jteJPmLiDZM2C5dZI" - "iFJpOw4LL1XLullik-ls-CmnTWq97NvlW1cZolC0mNyRz6JcL7gkH8WfUSjLA7x80" - "ORalanUxtl9-ghMGKCtKIACAgvr5gGT4iznGYQQRx_hKURs4O6Js5vhwNM6UuOKeW" - "GDDAlhgHMG0u59z3bhiBLl6jbQktZsu8c3diXniQb3sYqYQcGKUm1IQFujyA_ByDb" - 
"5GUtCv1BOPL_-IjYtvdJD8ZzQ_UnPFoYQklpDyJLB7_7qCGcfVEQbnSCh907NdKo4" - "w_8Wkn2y-Tg") - token_file = tempfile.NamedTemporaryFile(mode="w+t", delete=False) - try: - m_cfg.kubernetes.token_file = token_file.name - token_file.write(token_content) - token_file.close() - m_cfg.kubernetes.ssl_verify_server_crt = False - - path = '/test' - client = k8s_client.K8sClient(self.base_url) - client.get(path) - - self.assertEqual(f'Bearer {token_content}', - m_send.call_args[0][0].headers['Authorization']) - finally: - os.unlink(m_cfg.kubernetes.token_file) - - @mock.patch('requests.sessions.Session.get') - def test_get(self, m_get): - path = '/test' - ret = {'kind': 'Pod', 'apiVersion': 'v1'} - - m_resp = mock.MagicMock() - m_resp.ok = True - m_resp.json.return_value = ret - m_get.return_value = m_resp - - self.assertEqual(ret, self.client.get(path)) - m_get.assert_called_once_with(self.base_url + path, headers=None) - - @mock.patch('requests.sessions.Session.get') - def test_get_list(self, m_get): - path = '/test' - ret = {'kind': 'PodList', - 'apiVersion': 'v1', - 'items': [{'metadata': {'name': 'pod1'}, - 'spec': {}, - 'status': {}}]} - res = {'kind': 'PodList', - 'apiVersion': 'v1', - 'items': [{'metadata': {'name': 'pod1'}, - 'spec': {}, - 'status': {}, - 'kind': 'Pod', - 'apiVersion': 'v1'}]} - - m_resp = mock.MagicMock() - m_resp.ok = True - m_resp.json.return_value = ret - m_get.return_value = m_resp - - self.assertDictEqual(res, self.client.get(path)) - m_get.assert_called_once_with(self.base_url + path, headers=None) - - @mock.patch('requests.sessions.Session.get') - def test_get_exception(self, m_get): - path = '/test' - - m_resp = mock.MagicMock() - m_resp.ok = False - m_get.return_value = m_resp - - self.assertRaises(exc.K8sClientException, self.client.get, path) - - @mock.patch('requests.sessions.Session.get') - def test_get_null_on_items_list(self, m_get): - path = '/test' - - req = {'kind': 'PodList', - 'apiVersion': 'v1', - 'metadata': {}, - 'items': None} - - ret = {'kind': 'PodList', - 'apiVersion': 'v1', - 'metadata': {}, - 'items': []} - - m_resp = mock.MagicMock() - m_resp.ok = True - m_resp.json.return_value = req - m_get.return_value = m_resp - - self.assertEqual(self.client.get(path), ret) - - @mock.patch('itertools.count') - @mock.patch('requests.sessions.Session.patch') - def test_annotate(self, m_patch, m_count): - m_count.return_value = list(range(1, 5)) - path = '/test' - annotations = {'a1': 'v1', 'a2': 'v2'} - resource_version = "123" - ret = {'metadata': {'annotations': annotations, - "resourceVersion": resource_version}} - data = jsonutils.dumps(ret, sort_keys=True) - - m_resp = mock.MagicMock() - m_resp.ok = True - m_resp.json.return_value = ret - m_patch.return_value = m_resp - - self.assertEqual(annotations, self.client.annotate( - path, annotations, resource_version=resource_version)) - m_patch.assert_called_once_with(self.base_url + path, - data=data, headers=mock.ANY) - - @mock.patch('itertools.count') - @mock.patch('requests.sessions.Session.patch') - def test_annotate_exception(self, m_patch, m_count): - m_count.return_value = list(range(1, 5)) - path = '/test' - - m_resp = mock.MagicMock() - m_resp.ok = False - m_patch.return_value = m_resp - - self.assertRaises(exc.K8sClientException, self.client.annotate, - path, {}) - - @mock.patch('itertools.count') - @mock.patch('requests.sessions.Session.patch') - def test_annotate_diff_resource_vers_no_conflict(self, m_patch, m_count): - m_count.return_value = list(range(1, 5)) - path = '/test' - annotations = {'a1': 
'v1', 'a2': 'v2'} - resource_version = "123" - new_resource_version = "456" - conflicting_obj = {'metadata': { - 'annotations': annotations, - 'resourceVersion': resource_version}} - good_obj = {'metadata': { - 'annotations': annotations, - 'resourceVersion': new_resource_version}} - conflicting_data = jsonutils.dumps(conflicting_obj, sort_keys=True) - - m_resp_conflict = mock.MagicMock() - m_resp_conflict.ok = False - m_resp_conflict.status_code = requests.codes.conflict - m_resp_good = mock.MagicMock() - m_resp_good.ok = True - m_resp_good.json.return_value = conflicting_obj - m_patch.side_effect = [m_resp_conflict, m_resp_good] - - with mock.patch.object(self.client, 'get') as m_get: - m_get.return_value = good_obj - self.assertEqual(annotations, self.client.annotate( - path, annotations, resource_version=resource_version)) - - m_patch.assert_has_calls([ - mock.call(self.base_url + path, - data=conflicting_data, - headers=mock.ANY)]) - - @mock.patch('itertools.count') - @mock.patch('requests.sessions.Session.patch') - def test_annotate_diff_resource_vers_no_annotation(self, m_patch, m_count): - m_count.return_value = list(range(1, 5)) - path = '/test' - annotations = {'a1': 'v1', 'a2': 'v2'} - annotating_resource_version = '123' - annotating_obj = {'metadata': { - 'annotations': annotations, - 'resourceVersion': annotating_resource_version}} - annotating_data = jsonutils.dumps(annotating_obj, sort_keys=True) - - new_resource_version = '456' - new_obj = {'metadata': { - 'resourceVersion': new_resource_version}} - - resolution_obj = annotating_obj.copy() - resolution_obj['metadata']['resourceVersion'] = new_resource_version - resolution_data = jsonutils.dumps(resolution_obj, sort_keys=True) - - m_resp_conflict = mock.MagicMock() - m_resp_conflict.ok = False - m_resp_conflict.status_code = requests.codes.conflict - m_resp_good = mock.MagicMock() - m_resp_good.ok = True - m_resp_good.json.return_value = resolution_obj - m_patch.side_effect = (m_resp_conflict, m_resp_good) - - with mock.patch.object(self.client, 'get') as m_get: - m_get.return_value = new_obj - self.assertEqual(annotations, self.client.annotate( - path, annotations, - resource_version=annotating_resource_version)) - - m_patch.assert_has_calls([ - mock.call(self.base_url + path, - data=annotating_data, - headers=mock.ANY), - mock.call(self.base_url + path, - data=resolution_data, - headers=mock.ANY)]) - - @mock.patch('itertools.count') - @mock.patch('requests.sessions.Session.patch') - def test_annotate_diff_resource_vers_conflict(self, m_patch, m_count): - m_count.return_value = list(range(1, 5)) - path = '/test' - annotations = {'a1': 'v1', 'a2': 'v2'} - resource_version = "123" - new_resource_version = "456" - conflicting_obj = {'metadata': { - 'annotations': annotations, - 'resourceVersion': resource_version}} - actual_obj = {'metadata': { - 'annotations': {'a1': 'v2'}, - 'resourceVersion': new_resource_version}} - good_obj = {'metadata': { - 'annotations': annotations, - 'resourceVersion': new_resource_version}} - conflicting_data = jsonutils.dumps(conflicting_obj, sort_keys=True) - good_data = jsonutils.dumps(good_obj, sort_keys=True) - - m_resp_conflict = mock.MagicMock() - m_resp_conflict.ok = False - m_resp_conflict.status_code = requests.codes.conflict - m_patch.return_value = m_resp_conflict - m_resp_good = mock.MagicMock() - m_resp_good.ok = True - m_resp_good.json.return_value = conflicting_obj - m_patch.side_effect = [m_resp_conflict, m_resp_good] - - with mock.patch.object(self.client, 'get') as m_get: - 
m_get.return_value = actual_obj - self.assertEqual(annotations, self.client.annotate( - path, annotations, - resource_version=resource_version)) - m_patch.assert_has_calls([ - mock.call(self.base_url + path, - data=conflicting_data, - headers=mock.ANY), - mock.call(self.base_url + path, - data=good_data, - headers=mock.ANY)]) - - @mock.patch('itertools.count') - @mock.patch('requests.sessions.Session.patch') - def test_annotate_resource_not_found(self, m_patch, m_count): - m_count.return_value = list(range(1, 5)) - path = '/test' - annotations = {'a1': 'v1', 'a2': 'v2'} - resource_version = "123" - annotate_obj = {'metadata': { - 'annotations': annotations, - 'resourceVersion': resource_version}} - annotate_data = jsonutils.dumps(annotate_obj, sort_keys=True) - - m_resp_not_found = mock.MagicMock() - m_resp_not_found.ok = False - m_resp_not_found.status_code = requests.codes.not_found - m_patch.return_value = m_resp_not_found - - self.assertRaises(exc.K8sResourceNotFound, - self.client.annotate, - path, - annotations, - resource_version=resource_version) - m_patch.assert_called_once_with(self.base_url + path, - data=annotate_data, - headers=mock.ANY) - - @mock.patch('requests.sessions.Session.get') - def test_watch(self, m_get): - path = '/test' - data = [{'obj': 'obj%s' % i} for i in range(3)] - lines = [jsonutils.dump_as_bytes(i) for i in data] - - m_resp = mock.MagicMock() - m_resp.ok = True - m_resp.iter_lines.return_value = lines - m_get.return_value = m_resp - - cycles = 3 - self.assertEqual( - data * cycles, - list(itertools.islice(self.client.watch(path), - len(data) * cycles))) - - self.assertEqual(cycles, m_get.call_count) - self.assertEqual(cycles, m_resp.close.call_count) - m_get.assert_called_with(self.base_url + path, stream=True, - params={'watch': 'true'}) - - @mock.patch('requests.sessions.Session.get') - def test_watch_restart(self, m_get): - path = '/test' - data = [{'object': {'metadata': {'name': 'obj%s' % i, - 'resourceVersion': i}}} - for i in range(3)] - lines = [jsonutils.dump_as_bytes(i) for i in data] - - m_resp = mock.MagicMock() - m_resp.ok = True - m_resp.iter_lines.side_effect = [lines, requests.ReadTimeout, lines] - m_get.return_value = m_resp - - self.assertEqual(data * 2, - list(itertools.islice(self.client.watch(path), - len(data) * 2))) - self.assertEqual(3, m_get.call_count) - self.assertEqual(3, m_resp.close.call_count) - m_get.assert_any_call( - self.base_url + path, stream=True, params={"watch": "true"}) - m_get.assert_any_call( - self.base_url + path, stream=True, params={"watch": "true", - "resourceVersion": 2}) - - @mock.patch('requests.sessions.Session.get') - def test_watch_exception(self, m_get): - path = '/test' - - m_resp = mock.MagicMock() - m_resp.ok = False - m_get.return_value = m_resp - - self.assertRaises(exc.K8sClientException, next, - self.client.watch(path)) - - @mock.patch('requests.sessions.Session.post') - def test_post(self, m_post): - path = '/test' - body = {'test': 'body'} - ret = {'test': 'value'} - - m_resp = mock.MagicMock() - m_resp.ok = True - m_resp.json.return_value = ret - m_post.return_value = m_resp - - self.assertEqual(ret, self.client.post(path, body)) - m_post.assert_called_once_with(self.base_url + path, json=body, - headers=mock.ANY) - - @mock.patch('requests.sessions.Session.post') - def test_post_exception(self, m_post): - path = '/test' - body = {'test': 'body'} - - m_resp = mock.MagicMock() - m_resp.ok = False - m_post.return_value = m_resp - - self.assertRaises(exc.K8sClientException, - self.client.post, 
path, body) - - @mock.patch('requests.sessions.Session.delete') - def test_delete(self, m_delete): - path = '/test' - ret = {'test': 'value'} - - m_resp = mock.MagicMock() - m_resp.ok = True - m_resp.json.return_value = ret - m_delete.return_value = m_resp - - self.assertEqual(ret, self.client.delete(path)) - m_delete.assert_called_once_with(self.base_url + path, - headers=mock.ANY) - - @mock.patch('requests.sessions.Session.delete') - def test_delete_exception(self, m_delete): - path = '/test' - - m_resp = mock.MagicMock() - m_resp.ok = False - m_delete.return_value = m_resp - - self.assertRaises(exc.K8sClientException, - self.client.delete, path) - - def test__raise_from_response(self): - m_resp = mock.MagicMock() - m_resp.ok = True - m_resp.status_code = 202 - self.client._raise_from_response(m_resp) - - def test__raise_from_response_404(self): - m_resp = mock.MagicMock() - m_resp.ok = False - m_resp.status_code = 404 - self.assertRaises(exc.K8sResourceNotFound, - self.client._raise_from_response, m_resp) - - def test__raise_from_response_500(self): - m_resp = mock.MagicMock() - m_resp.ok = False - m_resp.status_code = 500 - self.assertRaises(exc.K8sClientException, - self.client._raise_from_response, m_resp) - - def test_add_event(self): - self.client.post = mock.MagicMock() - get_hex_ts = self.client._get_hex_timestamp = mock.MagicMock() - get_hex_ts.return_value = 'deadc0de' - - namespace = 'n1' - uid = 'deadbeef' - name = 'pod-123' - pod = fake.get_k8s_pod(name=name, namespace=namespace, uid=uid) - event_name = f'{name}.deadc0de' - - self.client.add_event(pod, 'reason', 'message') - - # Event path - url = self.client.post.call_args[0][0] - data = self.client.post.call_args[0][1] - self.assertEqual(url, f'/api/v1/namespaces/{namespace}/events') - - # Event fields - self.assertEqual(data['metadata']['name'], event_name) - self.assertEqual(data['reason'], 'reason') - self.assertEqual(data['message'], 'message') - self.assertEqual(data['type'], 'Normal') - - # involvedObject - self.assertDictEqual(data['involvedObject'], - {'apiVersion': pod['apiVersion'], - 'kind': pod['kind'], - 'name': name, - 'namespace': namespace, - 'uid': uid}) - - def test_add_event_k8s_exception(self): - self.client.post = mock.MagicMock() - self.client.post.side_effect = exc.K8sClientException - pod = fake.get_k8s_pod() - - self.assertDictEqual(self.client.add_event(pod, 'reason1', 'message2'), - {}) diff --git a/kuryr_kubernetes/tests/unit/test_linux_net_utils.py b/kuryr_kubernetes/tests/unit/test_linux_net_utils.py deleted file mode 100644 index 6fcca7e3d..000000000 --- a/kuryr_kubernetes/tests/unit/test_linux_net_utils.py +++ /dev/null @@ -1,62 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
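The deleted test_linux_net_utils.py that follows pins the exact ovs-vsctl argv that linux_net_utils is expected to build. A minimal sketch of that command construction, reconstructed from the expected values in the tests below (the real module may differ in details such as error handling):

from oslo_concurrency import processutils


def _create_ovs_vif_cmd(bridge, dev, iface_id, mac, instance_id):
    # Leading '--if-exists del-port' makes the call idempotent:
    # plugging a device that is already attached does not fail.
    return ['--', '--if-exists', 'del-port', dev,
            '--', 'add-port', bridge, dev,
            '--', 'set', 'Interface', dev,
            'external-ids:iface-id=%s' % iface_id,
            'external-ids:iface-status=active',
            'external-ids:attached-mac=%s' % mac,
            'external-ids:vm-uuid=%s' % instance_id]


def create_ovs_vif_port(bridge, dev, iface_id, mac, instance_id):
    # ovs-vsctl manipulates the switch database, so it runs as root;
    # with oslo.concurrency that requires a configured root_helper
    # (e.g. oslo.rootwrap). The tests below mock execute() out.
    processutils.execute('ovs-vsctl', *_create_ovs_vif_cmd(
        bridge, dev, iface_id, mac, instance_id), run_as_root=True)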
- -from unittest import mock - -from oslo_concurrency import processutils as utils - -from kuryr_kubernetes import linux_net_utils as linux_net -from kuryr_kubernetes.tests import base as test_base - - -class LinuxNetworkUtilsTestCase(test_base.TestCase): - - def test_ovs_vif_port_cmd(self): - expected = ['--', '--if-exists', - 'del-port', 'fake-dev', '--', 'add-port', - 'fake-bridge', 'fake-dev', - '--', 'set', 'Interface', 'fake-dev', - 'external-ids:iface-id=fake-iface-id', - 'external-ids:iface-status=active', - 'external-ids:attached-mac=fake-mac', - 'external-ids:vm-uuid=fake-instance-uuid'] - cmd = linux_net._create_ovs_vif_cmd('fake-bridge', 'fake-dev', - 'fake-iface-id', 'fake-mac', - 'fake-instance-uuid') - - self.assertEqual(expected, cmd) - - def test_create_ovs_vif_port(self): - calls = [ - mock.call('ovs-vsctl', '--', '--if-exists', - 'del-port', 'fake-dev', '--', 'add-port', - 'fake-bridge', 'fake-dev', - '--', 'set', 'Interface', 'fake-dev', - 'external-ids:iface-id=fake-iface-id', - 'external-ids:iface-status=active', - 'external-ids:attached-mac=fake-mac', - 'external-ids:vm-uuid=fake-instance-uuid', - run_as_root=True)] - with mock.patch.object(utils, 'execute', return_value=('', '')) as ex: - linux_net.create_ovs_vif_port('fake-bridge', 'fake-dev', - 'fake-iface-id', 'fake-mac', - 'fake-instance-uuid') - ex.assert_has_calls(calls) - - def test_delete_ovs_vif_port(self): - calls = [ - mock.call('ovs-vsctl', '--', '--if-exists', - 'del-port', 'fake-bridge', 'fake-dev', - run_as_root=True)] - with mock.patch.object(utils, 'execute', return_value=('', '')) as ex: - linux_net.delete_ovs_vif_port('fake-bridge', 'fake-dev') - ex.assert_has_calls(calls) diff --git a/kuryr_kubernetes/tests/unit/test_object.py b/kuryr_kubernetes/tests/unit/test_object.py deleted file mode 100644 index 7d779ab0a..000000000 --- a/kuryr_kubernetes/tests/unit/test_object.py +++ /dev/null @@ -1,73 +0,0 @@ -# Copyright 2018 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
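The deleted test_object.py below implements the standard oslo.versionedobjects guard: every registered object gets a fingerprint hash, and changing an object's fields without bumping its VERSION (and updating the pinned hash) makes the test fail. A minimal self-contained sketch of the same mechanism; the Example class is illustrative, not a Kuryr object:

from oslo_versionedobjects import base
from oslo_versionedobjects import fields
from oslo_versionedobjects import fixture


@base.VersionedObjectRegistry.register
class Example(base.VersionedObject):
    VERSION = '1.0'
    fields = {'name': fields.StringField()}


# get_hashes() fingerprints the registered classes; test_hashes()
# diffs those fingerprints against a pinned map like object_data in
# the test below and returns only the mismatching entries.
checker = fixture.ObjectVersionChecker(
    {'Example': base.VersionedObjectRegistry.obj_classes()['Example']})
current = checker.get_hashes()
expected, actual = checker.test_hashes({'Example': current['Example']})
assert expected == actual  # empty diffs while the fields stay unchanged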
- -from kuryr_kubernetes.objects import base as kuryr_base -from kuryr_kubernetes.tests import base as test_base -from oslo_versionedobjects import base -from oslo_versionedobjects import fixture - -# NOTE(danms): The hashes in this list should only be changed if -# they come with a corresponding version bump in the affected -# objects -object_data = { - 'LBaaSListener': '1.0-a9e2d5c73687f5edc66fdb2f48650e15', - 'LBaaSLoadBalancer': '1.4-835c38599fa4692ad26726342c36ccb4', - 'LBaaSMember': '1.0-a770c6884c27d6d8c21186b27d0e2ccb', - 'LBaaSPool': '1.1-6e77370d7632a902445444249eb77b01', - 'LBaaSPortSpec': '1.1-1b307f34630617086c7af70f2cb8b215', - 'LBaaSPubIp': '1.0-83992edec2c60fb4ab8998ea42a4ff74', - 'LBaaSServiceSpec': '1.0-d430ecd443f2b1999196bfe531e56f7e', - 'LBaaSState': '1.0-a0ff7dce2d3f6ce1ffab4ff95a344361', -} - - -def get_kuryr_objects(): - """Get Kuryr versioned objects - - This returns a dict of versioned objects which are - in the Kuryr project namespace only (excludes objects - from os-vif and other 3rd party modules) - - :return: a dict mapping class names to lists of versioned objects - """ - - all_classes = base.VersionedObjectRegistry.obj_classes() - kuryr_classes = {} - for name in all_classes: - objclasses = all_classes[name] - if (objclasses[0].OBJ_PROJECT_NAMESPACE == - kuryr_base.KuryrK8sObjectBase.OBJ_PROJECT_NAMESPACE): - kuryr_classes[name] = objclasses - return kuryr_classes - - -class TestObjectVersions(test_base.TestCase): - def test_versions(self): - """Test Versions - - Ensures that modified objects had their versions bumped - """ - - checker = fixture.ObjectVersionChecker( - get_kuryr_objects()) - expected, actual = checker.test_hashes(object_data) - self.assertEqual(expected, actual, - """Some objects have changed; please make sure the - versions have been bumped and backporting - compatibility code has been added to - obj_make_compatible if necessary, and then update - their hashes in the object_data map in this test - module. If we don't need to add backporting code then - it means we also don't need the version bump and we - just have to change the hash in this module.""") diff --git a/kuryr_kubernetes/tests/unit/test_os_vif_plug_noop.py b/kuryr_kubernetes/tests/unit/test_os_vif_plug_noop.py deleted file mode 100644 index 13bb42e86..000000000 --- a/kuryr_kubernetes/tests/unit/test_os_vif_plug_noop.py +++ /dev/null @@ -1,93 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
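test_os_vif_plug_noop.py below covered Kuryr's no-op os-vif plugin: nested VIF types need no host-side plugging, so plug()/unplug() do nothing and describe() merely advertises which VIF object types the plugin accepts. A rough sketch of that plugin shape (simplified to one VIF type; the tests show the real class advertising all of Kuryr's nested VIF objects):

from os_vif import objects
from os_vif import plugin


class NoOpPlugin(plugin.PluginBase):
    """An os-vif plugin that accepts VIFs without touching the host."""

    def describe(self):
        # Declares only supported VIF object names and version ranges;
        # os-vif uses this metadata for capability negotiation.
        return objects.host_info.HostPluginInfo(
            plugin_name='noop',
            vif_info=[objects.host_info.HostVIFInfo(
                vif_object_name='VIFVlanNested',
                min_version='1.0',
                max_version='1.0')])

    def plug(self, vif, instance_info):
        pass  # nothing to wire up on the host

    def unplug(self, vif, instance_info):
        pass  # nothing to tear down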
- -from stevedore import extension -from unittest import mock - -import os_vif -from os_vif import objects - -from kuryr_kubernetes.objects import vif as k_vif -from kuryr_kubernetes.os_vif_plug_noop import NoOpPlugin -from kuryr_kubernetes.tests import base - - -class TestNoOpPlugin(base.TestCase): - - def setUp(self): - super(TestNoOpPlugin, self).setUp() - os_vif._EXT_MANAGER = None - - @mock.patch('stevedore.extension.ExtensionManager') - def test_initialize(self, mock_EM): - self.assertIsNone(os_vif._EXT_MANAGER) - os_vif.initialize() - mock_EM.assert_called_once_with( - invoke_on_load=False, namespace='os_vif') - self.assertIsNotNone(os_vif._EXT_MANAGER) - - @mock.patch.object(NoOpPlugin, "plug") - def test_plug(self, mock_plug): - plg = extension.Extension(name="noop", - entry_point="os-vif", - plugin=NoOpPlugin, - obj=None) - with mock.patch('stevedore.extension.ExtensionManager.names', - return_value=['foobar']),\ - mock.patch('stevedore.extension.ExtensionManager.__getitem__', - return_value=plg): - os_vif.initialize() - info = mock.sentinel.info - vif = mock.MagicMock() - vif.plugin_name = 'noop' - os_vif.plug(vif, info) - mock_plug.assert_called_once_with(vif, info) - - @mock.patch.object(NoOpPlugin, "unplug") - def test_unplug(self, mock_unplug): - plg = extension.Extension(name="demo", - entry_point="os-vif", - plugin=NoOpPlugin, - obj=None) - with mock.patch('stevedore.extension.ExtensionManager.names', - return_value=['foobar']),\ - mock.patch('stevedore.extension.ExtensionManager.__getitem__', - return_value=plg): - os_vif.initialize() - info = mock.sentinel.info - vif = mock.MagicMock() - vif.plugin_name = 'noop' - os_vif.unplug(vif, info) - mock_unplug.assert_called_once_with(vif, info) - - def test_describe_noop_plugin(self): - os_vif.initialize() - noop_plugin = NoOpPlugin.load('noop') - result = noop_plugin.describe() - - expected = objects.host_info.HostPluginInfo( - plugin_name='noop', - vif_info=[ - objects.host_info.HostVIFInfo( - vif_object_name=k_vif.VIFVlanNested.__name__, - min_version="1.0", - max_version="1.0"), - objects.host_info.HostVIFInfo( - vif_object_name=k_vif.VIFMacvlanNested.__name__, - min_version="1.0", - max_version="1.0"), - objects.host_info.HostVIFInfo( - vif_object_name=k_vif.VIFDPDKNested.__name__, - min_version="1.0", - max_version="1.0"), - ]) - self.assertEqual(expected, result) diff --git a/kuryr_kubernetes/tests/unit/test_os_vif_util.py b/kuryr_kubernetes/tests/unit/test_os_vif_util.py deleted file mode 100644 index 150bed2cf..000000000 --- a/kuryr_kubernetes/tests/unit/test_os_vif_util.py +++ /dev/null @@ -1,658 +0,0 @@ -# Copyright (c) 2016 Mirantis, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
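test_os_vif_util.py below exercised the translation layer between Neutron objects (openstack SDK) and os-vif objects. The core pattern, sketched from what the first few tests assert; this is a simplification of the real neutron_to_osvif_network, which also translates subnets, routes and VIFs:

import uuid

from openstack.network.v2 import network as os_network
from os_vif.objects import network as osv_network


def neutron_to_osvif_network(neutron_network):
    # Copy over only what os-vif consumes; 'label' is left unset when
    # Neutron reports no name, which the tests probe through
    # obj_attr_is_set('label').
    network = osv_network.Network(id=neutron_network.id)
    if neutron_network.name is not None:
        network.label = neutron_network.name
    network.mtu = neutron_network.mtu
    return network


net = neutron_to_osvif_network(os_network.Network(
    id=str(uuid.uuid4()), name='test-net', mtu=1500,
    provider_network_type=None))
assert net.label == 'test-net' and net.mtu == 1500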
- -from unittest import mock -import uuid - -from openstack.network.v2 import network as os_network -from openstack.network.v2 import subnet as os_subnet -from os_vif.objects import fixed_ip as osv_fixed_ip -from os_vif.objects import network as osv_network -from os_vif.objects import route as osv_route -from os_vif.objects import subnet as osv_subnet -from oslo_config import cfg as o_cfg - -from kuryr_kubernetes import constants as const -from kuryr_kubernetes import exceptions as k_exc -from kuryr_kubernetes import os_vif_util as ovu -from kuryr_kubernetes.tests import base as test_base -from kuryr_kubernetes.tests import fake -from kuryr_kubernetes import utils - - -# REVISIT(ivc): move to kuryr-lib along with 'os_vif_util' - - -class TestOSVIFUtils(test_base.TestCase): - def test_neutron_to_osvif_network(self): - network_id = str(uuid.uuid4()) - network_name = 'test-net' - network_mtu = 1500 - neutron_network = os_network.Network( - id=network_id, - name=network_name, - mtu=network_mtu, - provider_network_type=None, - ) - - network = ovu.neutron_to_osvif_network(neutron_network) - - self.assertEqual(network_id, network.id) - self.assertEqual(network_name, network.label) - self.assertEqual(network_mtu, network.mtu) - - def test_neutron_to_osvif_network_no_name(self): - network_id = str(uuid.uuid4()) - network_mtu = 1500 - neutron_network = os_network.Network( - id=network_id, - name=None, - mtu=network_mtu, - provider_network_type=None, - ) - - network = ovu.neutron_to_osvif_network(neutron_network) - - self.assertFalse(network.obj_attr_is_set('label')) - - def test_neutron_to_osvif_network_no_mtu(self): - network_id = str(uuid.uuid4()) - network_name = 'test-net' - neutron_network = os_network.Network( - id=network_id, - name=network_name, - mtu=None, - provider_network_type=None, - ) - - network = ovu.neutron_to_osvif_network(neutron_network) - - self.assertIsNone(network.mtu) - - @mock.patch('kuryr_kubernetes.os_vif_util._neutron_to_osvif_routes') - def test_neutron_to_osvif_subnet(self, m_conv_routes): - gateway = '1.1.1.1' - cidr = '1.1.1.1/8' - dns = ['2.2.2.2', '3.3.3.3'] - host_routes = [mock.sentinel.host_route] - route_list = osv_route.RouteList(objects=[ - osv_route.Route(cidr='4.4.4.4/8', gateway='5.5.5.5')]) - m_conv_routes.return_value = route_list - neutron_subnet = os_subnet.Subnet( - cidr=cidr, - dns_nameservers=dns, - host_routes=host_routes, - gateway_ip=gateway, - ) - - subnet = ovu.neutron_to_osvif_subnet(neutron_subnet) - - self.assertEqual(cidr, str(subnet.cidr)) - self.assertEqual(route_list, subnet.routes) - self.assertEqual(set(dns), set([str(addr) for addr in subnet.dns])) - self.assertEqual(gateway, str(subnet.gateway)) - m_conv_routes.assert_called_once_with(host_routes) - - @mock.patch('kuryr_kubernetes.os_vif_util._neutron_to_osvif_routes') - def test_neutron_to_osvif_subnet_no_gateway(self, m_conv_routes): - cidr = '1.1.1.1/8' - route_list = osv_route.RouteList() - m_conv_routes.return_value = route_list - neutron_subnet = os_subnet.Subnet( - cidr=cidr, - dns_nameservers=[], - host_routes=[], - gateway_ip=None, - ) - - subnet = ovu.neutron_to_osvif_subnet(neutron_subnet) - - self.assertFalse(subnet.obj_attr_is_set('gateway')) - - def test_neutron_to_osvif_routes(self): - routes_map = {'%s.0.0.0/8' % i: '10.0.0.%s' % i for i in range(3)} - routes = [{'destination': k, 'nexthop': v} - for k, v in routes_map.items()] - - route_list = ovu._neutron_to_osvif_routes(routes) - - self.assertEqual(len(routes), len(route_list.objects)) - for route in 
route_list.objects: - self.assertEqual(routes_map[str(route.cidr)], str(route.gateway)) - - @mock.patch('kuryr_kubernetes.os_vif_util._VIF_MANAGERS') - def test_neutron_to_osvif_vif(self, m_mgrs): - vif_plugin = mock.sentinel.vif_plugin - port = mock.sentinel.port - subnets = mock.sentinel.subnets - m_mgr = mock.Mock() - m_mgrs.__getitem__.return_value = m_mgr - - ovu.neutron_to_osvif_vif(vif_plugin, port, subnets) - - m_mgrs.__getitem__.assert_called_with(vif_plugin) - m_mgr.driver.assert_called_with(vif_plugin, port, subnets) - - @mock.patch('stevedore.driver.DriverManager') - @mock.patch('kuryr_kubernetes.os_vif_util._VIF_MANAGERS') - def test_neutron_to_osvif_vif_load(self, m_mgrs, m_stv_drm): - vif_plugin = mock.sentinel.vif_plugin - port = mock.sentinel.port - subnets = mock.sentinel.subnets - m_mgr = mock.Mock() - m_mgrs.__getitem__.side_effect = KeyError - m_stv_drm.return_value = m_mgr - - ovu.neutron_to_osvif_vif(vif_plugin, port, subnets) - - m_stv_drm.assert_called_once_with( - namespace=ovu._VIF_TRANSLATOR_NAMESPACE, - name=vif_plugin, - invoke_on_load=False) - m_mgrs.__setitem__.assert_called_once_with(vif_plugin, m_mgr) - m_mgr.driver.assert_called_once_with(vif_plugin, port, subnets) - - @mock.patch('kuryr_kubernetes.os_vif_util._get_ovs_hybrid_bridge_name') - @mock.patch('kuryr_kubernetes.os_vif_util._get_vif_name') - @mock.patch('kuryr_kubernetes.os_vif_util._is_port_active') - @mock.patch('kuryr_kubernetes.os_vif_util._make_vif_network') - @mock.patch('os_vif.objects.vif.VIFBridge') - @mock.patch('os_vif.objects.vif.VIFPortProfileOpenVSwitch') - def test_neutron_to_osvif_vif_ovs_hybrid(self, - m_mk_profile, - m_mk_vif, - m_make_vif_network, - m_is_port_active, - m_get_vif_name, - m_get_ovs_hybrid_bridge_name): - vif_plugin = 'ovs' - port_id = mock.sentinel.port_id - ovs_bridge = mock.sentinel.ovs_bridge - port_filter = mock.sentinel.port_filter - subnets = mock.sentinel.subnets - port_profile = mock.sentinel.port_profile - network = mock.sentinel.network - port_active = mock.sentinel.port_active - vif_name = "vhu01234567-89" - hybrid_bridge = mock.sentinel.hybrid_bridge - vif = mock.sentinel.vif - port = fake.get_port_obj(port_id=port_id, - vif_details={'ovs_hybrid_plug': True, - 'bridge_name': ovs_bridge, - 'port_filter': port_filter}) - - m_mk_profile.return_value = port_profile - m_make_vif_network.return_value = network - m_is_port_active.return_value = port_active - m_get_vif_name.return_value = vif_name - m_get_ovs_hybrid_bridge_name.return_value = hybrid_bridge - m_mk_vif.return_value = vif - - self.assertEqual(vif, ovu.neutron_to_osvif_vif_ovs(vif_plugin, port, - subnets)) - - m_mk_profile.assert_called_once_with(interface_id=port_id) - m_make_vif_network.assert_called_once_with(port, subnets) - m_is_port_active.assert_called_once_with(port) - m_get_ovs_hybrid_bridge_name.assert_called_once_with(port) - m_get_vif_name.assert_called_once_with(port) - self.assertEqual(ovs_bridge, network.bridge) - m_mk_vif.assert_called_once_with( - id=port_id, - address=port.mac_address, - network=network, - has_traffic_filtering=port.binding_vif_details['port_filter'], - preserve_on_delete=False, - active=port_active, - port_profile=port_profile, - plugin=vif_plugin, - vif_name=vif_name, - bridge_name=hybrid_bridge) - - @mock.patch('kuryr_kubernetes.os_vif_util._get_vif_name') - @mock.patch('kuryr_kubernetes.os_vif_util._is_port_active') - @mock.patch('kuryr_kubernetes.os_vif_util._make_vif_network') - @mock.patch('os_vif.objects.vif.VIFOpenVSwitch') - 
@mock.patch('os_vif.objects.vif.VIFPortProfileOpenVSwitch') - def test_neutron_to_osvif_vif_ovs_native(self, - m_mk_profile, - m_mk_vif, - m_make_vif_network, - m_is_port_active, - m_get_vif_name): - vif_plugin = 'ovs' - vif_details = {'ovs_hybrid_plug': False, - 'bridge_name': mock.sentinel.ovs_bridge} - port = fake.get_port_obj(vif_details=vif_details) - port.active = mock.sentinel.port_active - port.profile = mock.sentinel.port_profile - - subnets = mock.sentinel.subnets - network = mock.sentinel.network - vif_name = "vhu01234567-89" - vif = mock.sentinel.vif - - m_mk_profile.return_value = port.profile - m_make_vif_network.return_value = network - m_is_port_active.return_value = port.active - m_get_vif_name.return_value = vif_name - m_mk_vif.return_value = vif - - self.assertEqual(vif, ovu.neutron_to_osvif_vif_ovs(vif_plugin, port, - subnets)) - m_mk_profile.assert_called_once_with(interface_id=port.id) - m_make_vif_network.assert_called_once_with(port, subnets) - m_is_port_active.assert_called_once_with(port) - m_get_vif_name.assert_called_once_with(port) - self.assertEqual(network.bridge, - port.binding_vif_details['bridge_name']) - - @mock.patch('kuryr_kubernetes.os_vif_util._get_vhu_vif_name') - @mock.patch('kuryr_kubernetes.os_vif_util._is_port_active') - @mock.patch('kuryr_kubernetes.os_vif_util._make_vif_network') - @mock.patch('os_vif.objects.vif.VIFVHostUser') - @mock.patch('os_vif.objects.vif.VIFPortProfileOpenVSwitch') - def test_neutron_to_osvif_vif_ovs_vu_client(self, m_mk_profile, m_mk_vif, - m_make_vif_network, - m_is_port_active, - m_get_vif_name): - vif_plugin = 'vhostuser' - o_cfg.CONF.set_override('mount_point', - '/var/lib/cni/vhostuser', - group='vhostuser') - port_id = mock.sentinel.port_id - mac_address = mock.sentinel.mac_address - ovs_bridge = mock.sentinel.ovs_bridge - subnets = mock.sentinel.subnets - port_profile = mock.sentinel.port_profile - network = mock.sentinel.network - port_active = mock.sentinel.port_active - vif_name = "vhu01234567-89" - vif = mock.sentinel.vif - - m_mk_profile.return_value = port_profile - m_make_vif_network.return_value = network - m_is_port_active.return_value = port_active - m_get_vif_name.return_value = vif_name - m_mk_vif.return_value = vif - - port = fake.get_port_obj(port_id=port_id, - vif_details={'ovs_hybrid_plug': False, - 'bridge_name': ovs_bridge, - 'vhostuser_mode': 'client'}) - port.mac_address = mac_address - - self.assertEqual(vif, ovu.neutron_to_osvif_vif_ovs(vif_plugin, port, - subnets)) - m_mk_profile.assert_called_once_with(interface_id=port_id) - m_make_vif_network.assert_called_once_with(port, subnets) - m_is_port_active.assert_called_once_with(port) - m_get_vif_name.assert_called_once_with(port_id) - self.assertEqual(ovs_bridge, network.bridge) - - @mock.patch('kuryr_kubernetes.os_vif_util._get_vhu_vif_name') - @mock.patch('kuryr_kubernetes.os_vif_util._is_port_active') - @mock.patch('kuryr_kubernetes.os_vif_util._make_vif_network') - @mock.patch('os_vif.objects.vif.VIFVHostUser') - @mock.patch('os_vif.objects.vif.VIFPortProfileOpenVSwitch') - def test_neutron_to_osvif_vif_ovs_vu_server(self, m_mk_profile, m_mk_vif, - m_make_vif_network, - m_is_port_active, - m_get_vif_name): - vif_plugin = 'vhostuser' - o_cfg.CONF.set_override('mount_point', - '/var/lib/cni/vhostuser', - group='vhostuser') - port_id = mock.sentinel.port_id - mac_address = mock.sentinel.mac_address - ovs_bridge = mock.sentinel.ovs_bridge - subnets = mock.sentinel.subnets - port_profile = mock.sentinel.port_profile - network = 
mock.sentinel.network - port_active = mock.sentinel.port_active - vif_name = "vhu01234567-89" - vif = mock.sentinel.vif - - m_mk_profile.return_value = port_profile - m_make_vif_network.return_value = network - m_is_port_active.return_value = port_active - m_get_vif_name.return_value = vif_name - m_mk_vif.return_value = vif - - port = fake.get_port_obj(port_id=port_id, - vif_details={'ovs_hybrid_plug': False, - 'bridge_name': ovs_bridge, - 'vhostuser_mode': 'server'}) - port.mac_address = mac_address - - self.assertEqual(vif, ovu.neutron_to_osvif_vif_ovs(vif_plugin, port, - subnets)) - m_mk_profile.assert_called_once_with(interface_id=port_id) - m_make_vif_network.assert_called_once_with(port, subnets) - m_is_port_active.assert_called_once_with(port) - m_get_vif_name.assert_called_once_with(port_id) - self.assertEqual(ovs_bridge, network.bridge) - - @mock.patch('kuryr_kubernetes.os_vif_util._get_vif_name') - @mock.patch('kuryr_kubernetes.os_vif_util._is_port_active') - @mock.patch('kuryr_kubernetes.os_vif_util._make_vif_network') - @mock.patch('kuryr_kubernetes.objects.vif.VIFVlanNested') - def test_neutron_to_osvif_nested_vlan(self, m_mk_vif, m_make_vif_network, - m_is_port_active, m_get_vif_name): - vif_plugin = const.K8S_OS_VIF_NOOP_PLUGIN - port_id = mock.sentinel.port_id - mac_address = mock.sentinel.mac_address - port_filter = mock.sentinel.port_filter - subnets = mock.sentinel.subnets - network = mock.sentinel.network - port_active = mock.sentinel.port_active - vif_name = mock.sentinel.vif_name - vif = mock.sentinel.vif - vlan_id = mock.sentinel.vlan_id - port = fake.get_port_obj(port_id=port_id, - vif_details={'port_filter': port_filter}) - port.mac_address = mac_address - - m_make_vif_network.return_value = network - m_is_port_active.return_value = port_active - m_get_vif_name.return_value = vif_name - m_mk_vif.return_value = vif - - self.assertEqual(vif, ovu.neutron_to_osvif_vif_nested_vlan(port, - subnets, vlan_id)) - - m_make_vif_network.assert_called_once_with(port, subnets) - m_is_port_active.assert_called_once_with(port) - m_get_vif_name.assert_called_once_with(port) - m_mk_vif.assert_called_once_with( - id=port_id, - address=mac_address, - network=network, - has_traffic_filtering=port_filter, - preserve_on_delete=False, - active=port_active, - plugin=vif_plugin, - vif_name=vif_name, - vlan_id=vlan_id) - - @mock.patch('kuryr_kubernetes.os_vif_util._get_vif_name') - @mock.patch('kuryr_kubernetes.os_vif_util._is_port_active') - @mock.patch('kuryr_kubernetes.os_vif_util._make_vif_network') - @mock.patch('kuryr_kubernetes.objects.vif.VIFMacvlanNested') - def test_neutron_to_osvif_nested_macvlan(self, m_mk_vif, - m_make_vif_network, - m_is_port_active, m_get_vif_name): - vif_plugin = const.K8S_OS_VIF_NOOP_PLUGIN - port_id = mock.sentinel.port_id - mac_address = mock.sentinel.mac_address - port_filter = mock.sentinel.port_filter - subnets = mock.sentinel.subnets - network = mock.sentinel.network - port_active = mock.sentinel.port_active - vif_name = mock.sentinel.vif_name - vif = mock.sentinel.vif - - m_make_vif_network.return_value = network - m_is_port_active.return_value = port_active - m_get_vif_name.return_value = vif_name - m_mk_vif.return_value = vif - - port = {'id': port_id, - 'mac_address': mac_address, - 'binding:vif_details': { - 'port_filter': port_filter}, - } - - self.assertEqual(vif, ovu.neutron_to_osvif_vif_nested_macvlan(port, - subnets)) - - m_make_vif_network.assert_called_once_with(port, subnets) - m_is_port_active.assert_called_once_with(port) - 
m_get_vif_name.assert_called_once_with(port) - m_mk_vif.assert_called_once_with( - id=port_id, - address=mac_address, - network=network, - has_traffic_filtering=port_filter, - preserve_on_delete=False, - active=port_active, - plugin=vif_plugin, - vif_name=vif_name) - - @mock.patch('kuryr_kubernetes.os_vif_util._get_vif_name') - @mock.patch('kuryr_kubernetes.os_vif_util._is_port_active') - @mock.patch('kuryr_kubernetes.os_vif_util._make_vif_network') - @mock.patch('kuryr_kubernetes.objects.vif.VIFDPDKNested') - @mock.patch('os_vif.objects.vif.VIFPortProfileK8sDPDK') - def test_neutron_to_osvif_nested_dpdk(self, m_mk_port_profile, m_mk_vif, - m_make_vif_network, - m_is_port_active, m_get_vif_name): - vif_plugin = const.K8S_OS_VIF_NOOP_PLUGIN - port_id = mock.sentinel.port_id - mac_address = mock.sentinel.mac_address - port_filter = mock.sentinel.port_filter - subnets = mock.sentinel.subnets - network = mock.sentinel.network - port_active = mock.sentinel.port_active - vif_name = mock.sentinel.vif_name - vif = mock.sentinel.vif - port_profile = mock.sentinel.port_profile - - m_make_vif_network.return_value = network - m_is_port_active.return_value = port_active - m_get_vif_name.return_value = vif_name - m_mk_vif.return_value = vif - m_mk_port_profile.return_value = port_profile - - pod = fake.get_k8s_pod() - - port = {'id': port_id, - 'mac_address': mac_address, - 'binding:vif_details': { - 'port_filter': port_filter}, - } - - self.assertEqual(vif, ovu.neutron_to_osvif_vif_dpdk(port, - subnets, pod)) - - m_make_vif_network.assert_called_once_with(port, subnets) - m_is_port_active.assert_called_once_with(port) - m_get_vif_name.assert_called_once_with(port) - m_mk_port_profile.assert_called_once_with( - l3_setup=False, - selflink=utils.get_res_link(pod)) - - m_mk_vif.assert_called_once_with( - id=port_id, - port_profile=port_profile, - address=mac_address, - network=network, - has_traffic_filtering=port_filter, - preserve_on_delete=False, - active=port_active, - plugin=vif_plugin, - pci_address="", - dev_driver="", - vif_name=vif_name) - - def test_neutron_to_osvif_vif_ovs_no_bridge(self): - vif_plugin = 'ovs' - port = fake.get_port_obj(port_id=str(uuid.uuid4())) - subnets = {} - - self.assertRaises(o_cfg.RequiredOptError, - ovu.neutron_to_osvif_vif_ovs, - vif_plugin, port, subnets) - - def test_get_ovs_hybrid_bridge_name(self): - port = fake.get_port_obj(port_id=str(uuid.uuid4())) - - self.assertEqual("qbr" + port.id[:11], - ovu._get_ovs_hybrid_bridge_name(port)) - - def test_is_port_active(self): - port = fake.get_port_obj(port_id=str(uuid.uuid4())) - port.status = 'ACTIVE' - - self.assertTrue(ovu._is_port_active(port)) - - def test_is_port_inactive(self): - port = fake.get_port_obj(port_id=str(uuid.uuid4())) - - self.assertFalse(ovu._is_port_active(port)) - - @mock.patch('kuryr.lib.binding.drivers.utils.get_veth_pair_names') - def test_get_vif_name(self, m_get_veth_pair_names): - vif_name = mock.sentinel.vif_name - port = fake.get_port_obj(port_id=str(uuid.uuid4())) - m_get_veth_pair_names.return_value = (vif_name, mock.sentinel.any) - - self.assertEqual(vif_name, ovu._get_vif_name(port)) - m_get_veth_pair_names.assert_called_once_with(port.id) - - @mock.patch('kuryr_kubernetes.os_vif_util._make_vif_subnets') - @mock.patch('os_vif.objects.subnet.SubnetList') - def test_make_vif_network(self, m_mk_subnet_list, m_make_vif_subnets): - network_id = mock.sentinel.network_id - network = mock.Mock() - orig_network = mock.Mock() - orig_network.id = network_id - orig_network.obj_clone.return_value = 
network - subnet_id = mock.sentinel.subnet_id - subnets = {subnet_id: orig_network} - vif_subnets = mock.sentinel.vif_subnets - subnet_list = mock.sentinel.subnet_list - m_make_vif_subnets.return_value = vif_subnets - m_mk_subnet_list.return_value = subnet_list - port = {'network_id': network_id} - - self.assertEqual(network, ovu._make_vif_network(port, subnets)) - self.assertEqual(subnet_list, network.subnets) - m_make_vif_subnets.assert_called_once_with(port, subnets) - m_mk_subnet_list.assert_called_once_with(objects=vif_subnets) - - def test_make_vif_network_not_found(self): - network_id = mock.sentinel.network_id - port = {'network_id': network_id} - subnets = {} - - self.assertRaises(k_exc.IntegrityError, ovu._make_vif_network, - port, subnets) - - @mock.patch('kuryr_kubernetes.os_vif_util._make_vif_subnet') - @mock.patch('os_vif.objects.fixed_ip.FixedIP') - def test_make_vif_subnets(self, m_mk_fixed_ip, m_make_vif_subnet): - subnet_id = mock.sentinel.subnet_id - ip_address = mock.sentinel.ip_address - fixed_ip = mock.sentinel.fixed_ip - subnet = mock.Mock() - subnets = mock.MagicMock() - subnets.__contains__.return_value = True - m_mk_fixed_ip.return_value = fixed_ip - m_make_vif_subnet.return_value = subnet - port = {'fixed_ips': [ - {'subnet_id': subnet_id, 'ip_address': ip_address}]} - - self.assertEqual([subnet], ovu._make_vif_subnets(port, subnets)) - m_make_vif_subnet.assert_called_once_with(subnets, subnet_id) - m_mk_fixed_ip.assert_called_once_with(address=ip_address) - subnet.ips.objects.append.assert_called_once_with(fixed_ip) - - def test_make_vif_subnets_not_found(self): - subnet_id = mock.sentinel.subnet_id - ip_address = mock.sentinel.ip_address - subnets = mock.MagicMock() - subnets.__contains__.return_value = False - port = {'fixed_ips': [ - {'subnet_id': subnet_id, 'ip_address': ip_address}]} - - self.assertRaises(k_exc.IntegrityError, ovu._make_vif_subnets, - port, subnets) - - @mock.patch('os_vif.objects.fixed_ip.FixedIPList') - def test_make_vif_subnet(self, m_mk_fixed_ip_list): - subnet_id = mock.sentinel.subnet_id - fixed_ip_list = mock.sentinel.fixed_ip_list - subnet = mock.Mock() - orig_subnet = mock.Mock() - orig_subnet.obj_clone.return_value = subnet - orig_network = mock.Mock() - orig_network.subnets.objects = [orig_subnet] - m_mk_fixed_ip_list.return_value = fixed_ip_list - subnets = {subnet_id: orig_network} - - self.assertEqual(subnet, ovu._make_vif_subnet(subnets, subnet_id)) - self.assertEqual(fixed_ip_list, subnet.ips) - m_mk_fixed_ip_list.assert_called_once_with(objects=[]) - - def test_make_vif_subnet_invalid(self): - subnet_id = mock.sentinel.subnet_id - orig_network = mock.Mock() - orig_network.subnets.objects = [] - subnets = {subnet_id: orig_network} - - self.assertRaises(k_exc.IntegrityError, ovu._make_vif_subnet, - subnets, subnet_id) - - def test_osvif_to_neutron_fixed_ips(self): - ip11 = '1.1.1.1' - ip12 = '2.2.2.2' - ip3 = '3.3.3.3' - subnet_id_1 = str(uuid.uuid4()) - subnet_id_2 = str(uuid.uuid4()) - subnet_id_3 = str(uuid.uuid4()) - - subnet_1 = osv_subnet.Subnet(ips=osv_fixed_ip.FixedIPList( - objects=[osv_fixed_ip.FixedIP(address=ip11), - osv_fixed_ip.FixedIP(address=ip12)])) - subnet_2 = osv_subnet.Subnet() - subnet_3 = osv_subnet.Subnet(ips=osv_fixed_ip.FixedIPList( - objects=[osv_fixed_ip.FixedIP(address=ip3)])) - - net1 = osv_network.Network(subnets=osv_subnet.SubnetList( - objects=[subnet_1])) - net2 = osv_network.Network(subnets=osv_subnet.SubnetList( - objects=[subnet_2])) - net3 = 
osv_network.Network(subnets=osv_subnet.SubnetList( - objects=[subnet_3])) - - subnets = {subnet_id_1: net1, subnet_id_2: net2, subnet_id_3: net3} - - expected = [{'subnet_id': subnet_id_1, 'ip_address': ip11}, - {'subnet_id': subnet_id_1, 'ip_address': ip12}, - {'subnet_id': subnet_id_2}, - {'subnet_id': subnet_id_3, 'ip_address': ip3}] - - ret = ovu.osvif_to_neutron_fixed_ips(subnets) - - def _sort_key(e): - return (e.get('subnet_id'), e.get('ip_address')) - - self.assertEqual(sorted(expected, key=_sort_key), - sorted(ret, key=_sort_key)) - - def test_osvif_to_neutron_fixed_ips_invalid(self): - subnet_id = str(uuid.uuid4()) - - subnet_1 = osv_subnet.Subnet() - subnet_2 = osv_subnet.Subnet() - - net = osv_network.Network(subnets=osv_subnet.SubnetList( - objects=[subnet_1, subnet_2])) - - subnets = {subnet_id: net} - - self.assertRaises(k_exc.IntegrityError, - ovu.osvif_to_neutron_fixed_ips, subnets) diff --git a/kuryr_kubernetes/tests/unit/test_utils.py b/kuryr_kubernetes/tests/unit/test_utils.py deleted file mode 100644 index 5ac48f215..000000000 --- a/kuryr_kubernetes/tests/unit/test_utils.py +++ /dev/null @@ -1,577 +0,0 @@ -# Copyright 2018 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from unittest import mock -import uuid - -from openstack import exceptions as os_exc -from openstack.network.v2 import port as os_port -from openstack.network.v2 import subnet as os_subnet -from os_vif import objects -from oslo_config import cfg -from oslo_utils import timeutils - -from kuryr_kubernetes import constants as k_const -from kuryr_kubernetes import exceptions as k_exc -from kuryr_kubernetes.objects import vif -from kuryr_kubernetes.tests import base as test_base -from kuryr_kubernetes.tests import fake -from kuryr_kubernetes.tests.unit import kuryr_fixtures as k_fix -from kuryr_kubernetes import utils - -CONF = cfg.CONF - - -class TestUtils(test_base.TestCase): - - def setUp(self): - super().setUp() - cfg.CONF.set_override('resource_tags', [], group='neutron_defaults') - - @mock.patch('socket.gethostname') - def test_get_node_name(self, m_gethostname): - m_gethostname.return_value = 'foo' - res = utils.get_node_name() - self.assertEqual('foo', res) - m_gethostname.assert_called_once_with() - - @mock.patch('requests.get') - def test_get_leader_name(self, m_get): - m_get.return_value = mock.Mock(json=mock.Mock( - return_value={'name': 'foo'})) - res = utils.get_leader_name() - m_get.assert_called_once_with( - 'http://localhost:%d' % CONF.kubernetes.controller_ha_elector_port) - self.assertEqual('foo', res) - - @mock.patch('requests.get') - def test_get_leader_name_malformed(self, m_get): - m_get.return_value = mock.Mock(json=mock.Mock( - return_value={'name2': 'foo'})) - res = utils.get_leader_name() - m_get.assert_called_once_with( - 'http://localhost:%d' % CONF.kubernetes.controller_ha_elector_port) - self.assertIsNone(res) - - @mock.patch('requests.get') - def test_get_leader_name_exc(self, m_get): - m_get.side_effect = Exception - res = utils.get_leader_name() - 
m_get.assert_called_once_with( - 'http://localhost:%d' % CONF.kubernetes.controller_ha_elector_port) - self.assertIsNone(res) - - @mock.patch('kuryr_kubernetes.os_vif_util.neutron_to_osvif_network') - @mock.patch('kuryr_kubernetes.os_vif_util.neutron_to_osvif_subnet') - def test_get_subnet(self, m_osv_subnet, m_osv_network): - os_net = self.useFixture(k_fix.MockNetworkClient()).client - - subnet = mock.MagicMock() - network = mock.MagicMock() - subnet_id = mock.sentinel.subnet_id - network_id = mock.sentinel.network_id - - neutron_subnet = os_subnet.Subnet(**{'network_id': network_id}) - neutron_network = mock.sentinel.neutron_network - - os_net.get_subnet.return_value = neutron_subnet - os_net.get_network.return_value = neutron_network - - m_osv_subnet.return_value = subnet - m_osv_network.return_value = network - - ret = utils.get_subnet(subnet_id) - - self.assertEqual(network, ret) - os_net.get_subnet.assert_called_once_with(subnet_id) - os_net.get_network.assert_called_once_with(network_id) - m_osv_subnet.assert_called_once_with(neutron_subnet) - m_osv_network.assert_called_once_with(neutron_network) - network.subnets.objects.append.assert_called_once_with(subnet) - - def test_extract_pod_annotation(self): - vif_obj = objects.vif.VIFBase() - ps = vif.PodState(default_vif=vif_obj) - d = ps.obj_to_primitive() - result = utils.extract_pod_annotation(d) - self.assertEqual(vif.PodState.obj_name(), result.obj_name()) - self.assertEqual(vif_obj, result.default_vif) - - def test_extract_pod_annotation_convert(self): - vif_obj = objects.vif.VIFBase() - d = vif_obj.obj_to_primitive() - result = utils.extract_pod_annotation(d) - self.assertEqual(vif.PodState.obj_name(), result.obj_name()) - self.assertEqual(vif_obj, result.default_vif) - - def test__has_kuryrnetwork_crd(self): - kuryrnet_crd = { - "apiVersion": "openstack.org/v1", - "items": [ - - ], - "kind": "KuryrNetworkList", - "metadata": { - "continue": "", - "resourceVersion": "33018", - } - } - - kubernetes = self.useFixture(k_fix.MockK8sClient()).client - kubernetes.get.return_value = kuryrnet_crd - - kuryrnets_url = k_const.K8S_API_CRD_KURYRNETWORKS - resp = utils.has_kuryr_crd(kuryrnets_url) - - self.assertEqual(resp, True) - - def test__has_kuryr_crd_error(self): - crds = [k_const.K8S_API_CRD_KURYRNETWORKS, - k_const.K8S_API_CRD_KURYRNETWORKPOLICIES, - k_const.K8S_API_CRD_KURYRLOADBALANCERS] - - for crd_url in crds: - kubernetes = self.useFixture(k_fix.MockK8sClient()).client - kubernetes.get.side_effect = k_exc.K8sClientException - - resp = utils.has_kuryr_crd(crd_url) - self.assertEqual(resp, False) - - kubernetes.get.assert_called_once() - - def test_get_endpoints_link(self): - service = {'apiVersion': 'v1', - 'kind': 'Service', - 'metadata': {'namespace': 'default', - 'name': 'test'}} - ret = utils.get_endpoints_link(service) - expected_link = "/api/v1/namespaces/default/endpoints/test" - self.assertEqual(expected_link, ret) - - def test_get_service_ports(self): - service = {'spec': {'ports': [ - {'port': 1, 'targetPort': 1}, - {'port': 2, 'name': 'X', 'protocol': 'UDP', 'targetPort': 2}, - {'port': 3, 'name': 'Y', 'protocol': 'SCTP', 'targetPort': 3} - ]}} - expected_ret = [ - {'port': 1, 'name': None, 'protocol': 'TCP', 'targetPort': '1'}, - {'port': 2, 'name': 'X', 'protocol': 'UDP', 'targetPort': '2'}, - {'port': 3, 'name': 'Y', 'protocol': 'SCTP', 'targetPort': '3'}] - - ret = utils.get_service_ports(service) - self.assertEqual(expected_ret, ret) - - @mock.patch('kuryr_kubernetes.utils.get_service_ports') - def 
test_has_port_changes(self, m_get_service_ports): - service = { - 'apiVersion': 'v1', - 'kind': 'Service', - 'metadata': { - 'name': 'serv-1', - 'namespace': 'ns1' - }, - 'spec': { - 'ports': [ - { - 'port': 1, - 'name': 'X', - 'protocol': 'TCP', - 'targetPort': '1' - } - ] - } - } - lb_crd_spec = { - 'spec': { - 'ports': [ - { - 'name': 'Y', - 'protocol': 'TCP', - 'port': 2, - 'targetPort': 2 - } - ] - } - } - ret = utils.has_port_changes(service, lb_crd_spec) - self.assertTrue(ret) - - @mock.patch('kuryr_kubernetes.utils.get_service_ports') - def test_has_port_changes_more_ports(self, m_get_service_ports): - service = { - 'apiVersion': 'v1', - 'kind': 'Service', - 'metadata': { - 'name': 'serv-1', - 'namespace': 'ns1' - }, - 'spec': { - 'ports': [ - { - 'port': 1, - 'name': 'X', - 'protocol': 'TCP', - 'targetPort': '1' - } - ] - } - } - lb_crd_spec = { - 'spec': { - 'ports': [ - { - 'name': 'X', - 'protocol': 'TCP', - 'port': 1, - 'targetPort': 1 - }, - { - 'name': 'Y', - 'protocol': 'TCP', - 'port': 2, - 'targetPort': 2 - } - ] - } - } - - ret = utils.has_port_changes(service, lb_crd_spec) - self.assertTrue(ret) - - @mock.patch('kuryr_kubernetes.utils.get_service_ports') - def test_has_port_changes_no_changes(self, m_get_service_ports): - - service = { - 'apiVersion': 'v1', - 'kind': 'Service', - 'metadata': { - 'name': 'serv-1', - 'namespace': 'ns1' - }, - 'spec': { - 'ports': [ - { - 'port': 1, - 'name': 'X', - 'protocol': 'TCP', - 'targetPort': '1' - }, - { - 'name': 'Y', - 'protocol': 'TCP', - 'port': 2, - 'targetPort': '2' - } - ] - } - } - - lb_crd_spec = { - 'spec': { - 'ports': [ - { - 'name': 'X', - 'protocol': 'TCP', - 'port': 1, - 'targetPort': '1' - }, - { - 'name': 'Y', - 'protocol': 'TCP', - 'port': 2, - 'targetPort': '2' - } - ] - } - } - - ret = utils.has_port_changes(service, lb_crd_spec) - self.assertFalse(ret) - - def test_get_nodes_ips(self): - os_net = self.useFixture(k_fix.MockNetworkClient()).client - ip1 = os_port.Port( - fixed_ips=[{'ip_address': '10.0.0.1', 'subnet_id': 'foo'}], - trunk_details={'trunk_id': 'wow', 'sub_ports': []}, - ) - ip2 = os_port.Port( - fixed_ips=[{'ip_address': '10.0.0.2', 'subnet_id': 'bar'}], - trunk_details={'trunk_id': 'odd', 'sub_ports': []}, - ) - ip3 = os_port.Port( - fixed_ips=[{'ip_address': '10.0.0.3', 'subnet_id': 'baz'}], - trunk_details=None, - ) - ip4 = os_port.Port( - fixed_ips=[{'ip_address': '10.0.0.4', 'subnet_id': 'zab'}], - trunk_details={'trunk_id': 'eek', 'sub_ports': []}, - ) - ports = (p for p in [ip1, ip2, ip3, ip4]) - - os_net.ports.return_value = ports - trunk_ips = utils.get_nodes_ips(['foo', 'bar']) - os_net.ports.assert_called_once_with(status='ACTIVE') - self.assertEqual(trunk_ips, [ip1.fixed_ips[0]['ip_address'], - ip2.fixed_ips[0]['ip_address']]) - - def test_get_nodes_ips_tagged(self): - CONF.set_override('resource_tags', ['foo'], group='neutron_defaults') - self.addCleanup(CONF.clear_override, 'resource_tags', - group='neutron_defaults') - - os_net = self.useFixture(k_fix.MockNetworkClient()).client - ip1 = os_port.Port( - fixed_ips=[{'ip_address': '10.0.0.1', 'subnet_id': 'foo'}], - trunk_details={'trunk_id': 'wow', 'sub_ports': []}, - ) - ip2 = os_port.Port( - fixed_ips=[{'ip_address': '10.0.0.2', 'subnet_id': 'bar'}], - trunk_details=None, - ) - ports = (p for p in [ip1, ip2]) - - os_net.ports.return_value = ports - trunk_ips = utils.get_nodes_ips(['foo']) - os_net.ports.assert_called_once_with(status='ACTIVE', tags=['foo']) - self.assertEqual(trunk_ips, [ip1.fixed_ips[0]['ip_address']]) - - def 
test_get_subnet_cidr(self): - os_net = self.useFixture(k_fix.MockNetworkClient()).client - subnet_id = mock.sentinel.subnet_id - subnet = os_subnet.Subnet(cidr='10.0.0.0/24') - os_net.get_subnet.return_value = subnet - - result = utils.get_subnet_cidr(subnet_id) - os_net.get_subnet.assert_called_once_with(subnet_id) - self.assertEqual(result, '10.0.0.0/24') - - def test_get_subnet_cidr_no_such_subnet(self): - os_net = self.useFixture(k_fix.MockNetworkClient()).client - subnet_id = mock.sentinel.subnet_id - os_net.get_subnet.side_effect = os_exc.ResourceNotFound - - self.assertRaises(os_exc.ResourceNotFound, utils.get_subnet_cidr, - subnet_id) - os_net.get_subnet.assert_called_once_with(subnet_id) - - def test_get_current_endpoints_target_with_target_ref(self): - ep = {'addresses': ['10.0.2.107'], 'conditions': {'ready': True}, - 'targetRef': {'kind': 'Pod', 'name': 'test-868d9cbd68-xq2fl', - 'namespace': 'test2'}} - port = {'port': 8080, 'protocol': 'TCP'} - spec_ports = {None: '31d59e41-05db-4a39-8aca-6a9a572c83cd'} - ep_name = 'test' - target = utils.get_current_endpoints_target( - ep, port, spec_ports, ep_name) - self.assertEqual( - target, ('10.0.2.107', 'test-868d9cbd68-xq2fl', 8080, - '31d59e41-05db-4a39-8aca-6a9a572c83cd')) - - def test_get_current_endpoints_target_without_target_ref(self): - ep = {'addresses': ['10.0.1.208'], 'conditions': {'ready': True}} - port = {'port': 8080, 'protocol': 'TCP'} - spec_ports = {None: '4472fab1-f01c-46a7-b197-5cba4f2d7135'} - ep_name = 'test' - target = utils.get_current_endpoints_target( - ep, port, spec_ports, ep_name) - self.assertEqual( - target, ('10.0.1.208', 'test', 8080, - '4472fab1-f01c-46a7-b197-5cba4f2d7135')) - - def test_get_klb_crd_path(self): - res = {'apiVersion': 'v1', - 'kind': 'Endpoints', - 'metadata': {'name': 'my-service', - 'namespace': 'default'}} - self.assertEqual(utils.get_klb_crd_path(res), - '/apis/openstack.org/v1/namespaces/default/' - 'kuryrloadbalancers/my-service') - - def test_get_res_link_core_res(self): - res = {'apiVersion': 'v1', - 'kind': 'Pod', - 'metadata': {'name': 'pod-1', - 'namespace': 'default'}} - self.assertEqual(utils.get_res_link(res), - '/api/v1/namespaces/default/pods/pod-1') - - def test_get_res_link_no_existent(self): - res = {'apiVersion': 'customapi/v1', - 'kind': 'ItsATrap!', - 'metadata': {'name': 'pod-1', - 'namespace': 'default'}} - self.assertRaises(KeyError, utils.get_res_link, res) - - def test_get_res_link_beta_res(self): - res = {'apiVersion': 'networking.k8s.io/v2beta2', - 'kind': 'NetworkPolicy', - 'metadata': {'name': 'np-1', - 'namespace': 'default'}} - self.assertEqual(utils.get_res_link(res), '/apis/networking.k8s.io/' - 'v2beta2/namespaces/default/networkpolicies/np-1') - - def test_get_res_link_no_namespace(self): - res = {'apiVersion': 'v1', - 'kind': 'Namespace', - 'metadata': {'name': 'ns-1'}} - - self.assertEqual(utils.get_res_link(res), '/api/v1/namespaces/ns-1') - - def test_get_res_link_custom_api(self): - res = {'apiVersion': 'openstack.org/v1', - 'kind': 'KuryrPort', - 'metadata': {'name': 'kp-1', - 'namespace': 'default'}} - - self.assertEqual(utils.get_res_link(res), - '/apis/openstack.org/v1/namespaces/default/' - 'kuryrports/kp-1') - - def test_get_res_link_no_apiversion(self): - res = {'kind': 'KuryrPort', - 'metadata': {'name': 'kp-1', - 'namespace': 'default'}} - self.assertRaises(KeyError, utils.get_res_link, res) - - def test_get_api_ver_core_api(self): - path = '/api/v1/namespaces/default/pods/pod-123' - self.assertEqual(utils.get_api_ver(path), 'v1') - - 
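The path-building tests just above pin the Kubernetes API URL layout that utils.get_res_link() produces: core-group resources live under /api/<version>/..., while grouped APIs go under /apis/<group>/<version>/.... A rough sketch of that rule (illustrative; the real helper also maps each kind to its plural resource name via a lookup table):

def res_link(api_version, plural, name, namespace=None):
    # A core API version like 'v1' has no group segment; grouped
    # versions like 'openstack.org/v1' are served under /apis.
    prefix = '/api' if '/' not in api_version else '/apis'
    ns = '/namespaces/%s' % namespace if namespace else ''
    return '%s/%s%s/%s/%s' % (prefix, api_version, ns, plural, name)


assert (res_link('v1', 'pods', 'pod-1', 'default')
        == '/api/v1/namespaces/default/pods/pod-1')
assert (res_link('openstack.org/v1', 'kuryrports', 'kp-1', 'default')
        == '/apis/openstack.org/v1/namespaces/default/kuryrports/kp-1')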
def test_get_api_ver_custom_resource(self): - path = '/apis/openstack.org/v1/namespaces/default/kuryrport/pod-123' - self.assertEqual(utils.get_api_ver(path), 'openstack.org/v1') - - def test_get_api_ver_random_path(self): - path = '/?search=foo' - self.assertRaises(ValueError, utils.get_api_ver, path) - - def test_get_res_selflink_still_available(self): - res = {'metadata': {'selfLink': '/foo'}} - - self.assertEqual(utils.get_res_link(res), '/foo') - - @mock.patch('kuryr_kubernetes.clients.get_network_client') - def test_get_subnet_id(self, m_get_net): - m_net = mock.Mock() - m_get_net.return_value = m_net - subnets = (mock.Mock(id=mock.sentinel.subnet1), - mock.Mock(id=mock.sentinel.subnet2)) - m_net.subnets.return_value = iter(subnets) - filters = {'name': 'foo', 'tags': 'bar'} - sub = utils.get_subnet_id(**filters) - m_net.subnets.assert_called_with(**filters) - self.assertEqual(mock.sentinel.subnet1, sub) - - @mock.patch('kuryr_kubernetes.clients.get_network_client') - def test_get_subnet_not_found(self, m_get_net): - m_net = mock.Mock() - m_get_net.return_value = m_net - m_net.subnets.return_value = iter(()) - filters = {'name': 'foo', 'tags': 'bar'} - sub = utils.get_subnet_id(**filters) - m_net.subnets.assert_called_with(**filters) - self.assertIsNone(sub) - - def test_is_pod_completed_pending(self): - self.assertFalse(utils.is_pod_completed({'status': {'phase': - k_const.K8S_POD_STATUS_PENDING}})) - - def test_is_pod_completed_succeeded(self): - self.assertTrue(utils.is_pod_completed({'status': {'phase': - k_const.K8S_POD_STATUS_SUCCEEDED}})) - - def test_is_pod_completed_failed(self): - self.assertTrue(utils.is_pod_completed({'status': {'phase': - k_const.K8S_POD_STATUS_FAILED}})) - - @mock.patch('kuryr_kubernetes.clients.get_network_client') - def test_cleanup_dead_ports_no_tags(self, m_get_net): - utils.cleanup_dead_ports() - m_get_net.assert_not_called() - - @mock.patch('oslo_utils.timeutils.utcnow') - @mock.patch('kuryr_kubernetes.clients.get_network_client') - @mock.patch('kuryr_kubernetes.clients.get_kubernetes_client') - def test_cleanup_dead_ports(self, m_get_k8s, m_get_net, m_utcnow): - cfg.CONF.set_override('resource_tags', ['foo'], - group='neutron_defaults') - m_net = mock.Mock() - time1 = '2022-04-14T09:00:00Z' - now = '2022-04-14T09:00:00Z' - m_utcnow.return_value = timeutils.parse_isotime(now) - port = os_port.Port(updated_at=time1, tags=['foo']) - m_net.ports.return_value = iter((port,)) - m_get_net.return_value = m_net - - m_k8s = mock.Mock() - m_k8s.get.return_value = {'items': [{'status': {'netId': 'netid'}}]} - m_get_k8s.return_value = m_k8s - - utils.cleanup_dead_ports() - - m_get_net.assert_called_once() - - @mock.patch('oslo_utils.timeutils.utcnow') - @mock.patch('kuryr_kubernetes.clients.get_network_client') - @mock.patch('kuryr_kubernetes.clients.get_kubernetes_client') - def test_cleanup_dead_no_tagged_ports(self, m_get_k8s, m_get_net, - m_utcnow): - cfg.CONF.set_override('resource_tags', ['foo'], - group='neutron_defaults') - m_net = mock.Mock() - time1 = '2022-04-14T09:00:00Z' - now = '2022-04-14T09:16:00Z' - m_utcnow.return_value = timeutils.parse_isotime(now) - port = os_port.Port(updated_at=time1, tags=[]) - m_net.ports.return_value = iter((port,)) - m_get_net.return_value = m_net - - m_k8s = mock.Mock() - m_k8s.get.return_value = {'items': [{'status': {'netId': 'netid'}}]} - m_get_k8s.return_value = m_k8s - - utils.cleanup_dead_ports() - - m_get_net.assert_called_once() - m_net.delete_port.assert_called_once_with(port) - - 
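The two cleanup tests above encode the garbage-collection rule their fixtures probe: with resource_tags configured, a port that never received the tags and has sat idle past a grace window is treated as a creation leftover and deleted, while a freshly updated, properly tagged port is kept. A rough sketch of that predicate; the helper name and the exact 15-minute threshold are assumptions read off the test timestamps:

from oslo_utils import timeutils

GRACE_SECONDS = 15 * 60  # inferred from the 16-minute-old port above


def looks_dead(port, now):
    # 'now' must be a tz-aware datetime, e.g. timeutils.utcnow() as the
    # tests patch it. A tagged port is managed and kept; an untagged one
    # is only reaped once it is stale enough to rule out an in-flight
    # creation that simply has not been tagged yet.
    age = (now - timeutils.parse_isotime(port.updated_at)).total_seconds()
    return not port.tags and age >= GRACE_SECONDS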
@mock.patch('kuryr_kubernetes.clients.get_network_client') - @mock.patch('kuryr_kubernetes.clients.get_kubernetes_client') - def test_cleanup_dead_no_networks(self, m_get_k8s, m_get_net): - cfg.CONF.set_override('resource_tags', ['foo'], - group='neutron_defaults') - m_net = mock.Mock() - m_net.ports.return_value = iter([]) - m_get_net.return_value = m_net - - m_k8s = mock.Mock() - m_k8s.get.return_value = {'items': []} - m_get_k8s.return_value = m_k8s - - utils.cleanup_dead_ports() - - m_get_net.assert_called_once() - m_net.delete_port.assert_not_called() - - def test__get_parent_port_ip(self): - os_net = self.useFixture(k_fix.MockNetworkClient()).client - - port_id = str(uuid.uuid4()) - ip_address = mock.sentinel.ip_address - - port_obj = fake.get_port_obj(ip_address=ip_address) - os_net.get_port.return_value = port_obj - - self.assertEqual(ip_address, utils.get_parent_port_ip(port_id)) diff --git a/kuryr_kubernetes/tests/unit/test_watcher.py b/kuryr_kubernetes/tests/unit/test_watcher.py deleted file mode 100644 index 7ec110f86..000000000 --- a/kuryr_kubernetes/tests/unit/test_watcher.py +++ /dev/null @@ -1,347 +0,0 @@ -# Copyright (c) 2016 Mirantis, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from eventlet import greenlet -from unittest import mock - -from kuryr_kubernetes.tests import base as test_base -from kuryr_kubernetes.tests.unit import kuryr_fixtures -from kuryr_kubernetes import watcher -from requests import exceptions - - -class TestWatcher(test_base.TestCase): - def setUp(self): - super(TestWatcher, self).setUp() - mock_client = self.useFixture(kuryr_fixtures.MockK8sClient()) - self.client = mock_client.client - - @mock.patch.object(watcher.Watcher, '_start_watch') - def test_add(self, m_start_watch): - paths = ['/test%s' % i for i in range(3)] - m_handler = mock.Mock() - watcher_obj = watcher.Watcher(m_handler) - - for path in paths: - watcher_obj.add(path) - - self.assertEqual(set(paths), watcher_obj._resources) - m_start_watch.assert_not_called() - - @mock.patch.object(watcher.Watcher, '_start_watch') - def test_add_running(self, m_start_watch): - paths = ['/test%s' % i for i in range(3)] - m_handler = mock.Mock() - watcher_obj = watcher.Watcher(m_handler) - watcher_obj._running = True - - for path in paths: - watcher_obj.add(path) - - self.assertEqual(set(paths), watcher_obj._resources) - m_start_watch.assert_has_calls([mock.call(path) for path in paths], - any_order=True) - - @mock.patch.object(watcher.Watcher, '_start_watch') - def test_add_watching(self, m_start_watch): - paths = ['/test%s' % i for i in range(3)] - m_handler = mock.Mock() - watcher_obj = watcher.Watcher(m_handler) - watcher_obj._running = True - m_watching = watcher_obj._watching = mock.MagicMock() - m_watching.__contains__.return_value = True - - for path in paths: - watcher_obj.add(path) - - self.assertEqual(set(paths), watcher_obj._resources) - m_start_watch.assert_not_called() - - @mock.patch.object(watcher.Watcher, '_stop_watch') - def test_remove(self, 
m_stop_watch): - path = '/test' - m_handler = mock.Mock() - watcher_obj = watcher.Watcher(m_handler) - watcher_obj._resources.add(path) - - watcher_obj.remove(path) - - self.assertEqual(set(), watcher_obj._resources) - m_stop_watch.assert_not_called() - - @mock.patch.object(watcher.Watcher, '_stop_watch') - def test_remove_watching(self, m_stop_watch): - path = '/test' - m_handler = mock.Mock() - watcher_obj = watcher.Watcher(m_handler) - watcher_obj._resources.add(path) - m_watching = watcher_obj._watching = mock.MagicMock() - m_watching.__contains__.return_value = True - - watcher_obj.remove(path) - - self.assertEqual(set(), watcher_obj._resources) - m_stop_watch.assert_called_once_with(path) - - @mock.patch.object(watcher.Watcher, '_start_watch') - def test_start(self, m_start_watch): - paths = ['/test%s' % i for i in range(3)] - m_handler = mock.Mock() - watcher_obj = watcher.Watcher(m_handler) - watcher_obj._resources.update(paths) - - watcher_obj.start() - - self.assertTrue(watcher_obj._running) - m_start_watch.assert_has_calls([mock.call(path) for path in paths], - any_order=True) - - @mock.patch.object(watcher.Watcher, '_start_watch') - def test_start_already_watching(self, m_start_watch): - paths = ['/test%s' % i for i in range(3)] - m_handler = mock.Mock() - watcher_obj = watcher.Watcher(m_handler) - watcher_obj._resources.update(paths) - m_watching = watcher_obj._watching = mock.MagicMock() - m_watching.__iter__.return_value = paths - - watcher_obj.start() - - self.assertTrue(watcher_obj._running) - m_start_watch.assert_not_called() - - @mock.patch.object(watcher.Watcher, '_stop_watch') - def test_stop(self, m_stop_watch): - paths = ['/test%s' % i for i in range(3)] - m_handler = mock.Mock() - watcher_obj = watcher.Watcher(m_handler) - watcher_obj._resources.update(paths) - - watcher_obj.stop() - - self.assertFalse(watcher_obj._running) - m_stop_watch.assert_not_called() - - @mock.patch.object(watcher.Watcher, '_stop_watch') - def test_stop_watching(self, m_stop_watch): - paths = ['/test%s' % i for i in range(3)] - m_handler = mock.Mock() - watcher_obj = watcher.Watcher(m_handler) - watcher_obj._resources.update(paths) - m_watching = watcher_obj._watching = mock.MagicMock() - m_watching.__iter__.return_value = paths - - watcher_obj.stop() - - self.assertFalse(watcher_obj._running) - m_stop_watch.assert_has_calls([mock.call(path) for path in paths], - any_order=True) - - @mock.patch.object(watcher.Watcher, '_watch') - def test_start_watch(self, m_watch): - path = '/test' - m_handler = mock.Mock() - watcher_obj = watcher.Watcher(m_handler) - - watcher_obj._start_watch(path) - - m_watch.assert_called_once_with(path) - self.assertTrue(watcher_obj._idle.get(path)) - self.assertIn(path, watcher_obj._watching) - - def test_start_watch_threaded(self): - path = '/test' - m_tg = mock.Mock() - m_tg.add_thread.return_value = mock.sentinel.watch_thread - m_handler = mock.Mock() - watcher_obj = watcher.Watcher(m_handler, m_tg) - - watcher_obj._start_watch(path) - - m_tg.add_thread.assert_called_once_with(watcher_obj._watch, path) - self.assertTrue(watcher_obj._idle.get(path)) - self.assertEqual(mock.sentinel.watch_thread, - watcher_obj._watching.get(path)) - - def test_stop_watch_threaded(self): - path = '/test' - m_tg = mock.Mock() - m_th = mock.Mock() - m_tt = mock.Mock() - m_handler = mock.Mock() - watcher_obj = watcher.Watcher(m_handler, m_tg) - watcher_obj._idle[path] = True - watcher_obj._watching[path] = m_th - watcher_obj._timers[path] = m_tt - - watcher_obj._stop_watch(path) - - 
m_tt.stop.assert_called() - m_th.stop.assert_called() - - def test_stop_watch_idle(self): - path = '/test' - m_tg = mock.Mock() - m_th = mock.Mock() - m_handler = mock.Mock() - watcher_obj = watcher.Watcher(m_handler, m_tg) - watcher_obj._idle[path] = False - watcher_obj._watching[path] = m_th - - watcher_obj._stop_watch(path) - - m_th.kill.assert_not_called() - - def _test_watch_mock_events(self, watcher_obj, events): - def client_watch(client_path): - for e in events: - self.assertTrue(watcher_obj._idle[client_path]) - yield e - self.assertTrue(watcher_obj._idle[client_path]) - self.client.watch.side_effect = client_watch - - @staticmethod - def _test_watch_create_watcher(path, handler, timeout=0): - watcher_obj = watcher.Watcher(handler, timeout=timeout) - watcher_obj._running = True - watcher_obj._resources.add(path) - watcher_obj._idle[path] = True - watcher_obj._watching[path] = None - return watcher_obj - - @mock.patch('sys.exit') - def test_watch(self, m_sys_exit): - path = '/test' - events = [{'e': i} for i in range(3)] - - def handler(event): - self.assertFalse(watcher_obj._idle[path]) - - m_handler = mock.Mock() - m_handler.side_effect = handler - watcher_obj = self._test_watch_create_watcher(path, m_handler) - self._test_watch_mock_events(watcher_obj, events) - - watcher_obj._watch(path) - - self.assertEqual(0, watcher_obj._timeout) - m_handler.assert_has_calls([mock.call(e) for e in events]) - # After all events have been "handled", since there is only - # one handler, we'll gracefully exit - m_sys_exit.assert_called_once_with(1) - - @mock.patch('sys.exit') - def test_watch_stopped(self, m_sys_exit): - path = '/test' - events = [{'e': i} for i in range(3)] - - def handler(event): - self.assertFalse(watcher_obj._idle[path]) - watcher_obj._running = False - - m_handler = mock.Mock() - m_handler.side_effect = handler - watcher_obj = self._test_watch_create_watcher(path, m_handler) - self._test_watch_mock_events(watcher_obj, events) - - watcher_obj._watch(path) - - m_handler.assert_called_once_with(events[0]) - self.assertNotIn(path, watcher_obj._idle) - self.assertNotIn(path, watcher_obj._watching) - m_sys_exit.assert_called_once_with(1) - - @mock.patch('sys.exit') - def test_watch_removed(self, m_sys_exit): - path = '/test' - events = [{'e': i} for i in range(3)] - - def handler(event): - self.assertFalse(watcher_obj._idle[path]) - watcher_obj._resources.remove(path) - - m_handler = mock.Mock() - m_handler.side_effect = handler - watcher_obj = self._test_watch_create_watcher(path, m_handler) - self._test_watch_mock_events(watcher_obj, events) - - watcher_obj._watch(path) - - m_handler.assert_called_once_with(events[0]) - self.assertNotIn(path, watcher_obj._idle) - self.assertNotIn(path, watcher_obj._watching) - m_sys_exit.assert_called_once_with(1) - - @mock.patch('sys.exit') - def test_watch_interrupted(self, m_sys_exit): - path = '/test' - events = [{'e': i} for i in range(3)] - - def handler(event): - self.assertFalse(watcher_obj._idle[path]) - raise greenlet.GreenletExit() - - m_handler = mock.Mock() - m_handler.side_effect = handler - watcher_obj = self._test_watch_create_watcher(path, m_handler) - self._test_watch_mock_events(watcher_obj, events) - - self.assertRaises(greenlet.GreenletExit, watcher_obj._watch, path) - - m_handler.assert_called_once_with(events[0]) - self.assertNotIn(path, watcher_obj._idle) - self.assertNotIn(path, watcher_obj._watching) - m_sys_exit.assert_called_once_with(1) - - @mock.patch('sys.exit') - def test_watch_client_request_failed(self, 
m_sys_exit): - path = '/test' - m_handler = mock.Mock() - watcher_obj = self._test_watch_create_watcher(path, m_handler) - self.client.watch.side_effect = exceptions.ChunkedEncodingError( - "Connection Broken") - - watcher_obj._watch(path) - - self.client.watch.assert_called_once() - self.assertFalse(watcher_obj._alive) - m_sys_exit.assert_called_once_with(1) - - @mock.patch('sys.exit') - def test_watch_retry(self, m_sys_exit): - path = '/test' - events = [{'e': i} for i in range(3)] - side_effects = [exceptions.ChunkedEncodingError("Connection Broken")] - side_effects.extend(None for _ in events) - - m_handler = mock.Mock() - m_handler.side_effect = side_effects - watcher_obj = self._test_watch_create_watcher(path, m_handler, 10) - self._test_watch_mock_events(watcher_obj, events) - - watcher_obj._watch(path) - - m_handler.assert_has_calls([mock.call(e) for e in events]) - m_sys_exit.assert_called_once_with(1) - - def test_watch_restart(self): - tg = mock.Mock() - w = watcher.Watcher(lambda e: None, tg) - w.add('/test') - w.start() - tg.add_thread.assert_called_once_with(mock.ANY, '/test') - w.stop() - tg.add_thread = mock.Mock() # Reset mock. - w.start() - tg.add_thread.assert_called_once_with(mock.ANY, '/test') diff --git a/kuryr_kubernetes/utils.py b/kuryr_kubernetes/utils.py deleted file mode 100644 index cfac007ac..000000000 --- a/kuryr_kubernetes/utils.py +++ /dev/null @@ -1,846 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
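test_watch_client_request_failed and test_watch_retry above pin down the watcher's retry contract: a first failure arms a deadline, later failures back off exponentially, and once the deadline passes the watcher gives up and marks itself not alive. A condensed, hypothetical equivalent of that contract, not the watcher's actual code:

import random
import time

def retry_with_deadline(fn, timeout=60, interval=1, max_backoff=60, jitter=3):
    # The first failure arms a deadline; every retry then sleeps
    # 2 ** attempt * interval seconds (capped and jittered) until either
    # fn() succeeds or the deadline passes and the last error propagates.
    attempt = 0
    deadline = None
    while True:
        try:
            return fn()
        except Exception:
            attempt += 1
            if attempt == 1:
                deadline = time.time() + timeout
            to_sleep = min(2 ** attempt * interval, max_backoff)
            to_sleep += random.randint(0, jitter)
            if time.time() + to_sleep > deadline:
                raise
            time.sleep(to_sleep)
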
- -import ipaddress -import os -import random -import re -import socket -import time - -import requests - -from kuryr.lib._i18n import _ -from kuryr.lib import constants as kl_const -from openstack import exceptions as os_exc -from os_vif import objects -from oslo_cache import core as cache -from oslo_config import cfg -from oslo_log import log -from oslo_serialization import jsonutils -from oslo_utils import timeutils - -from kuryr_kubernetes import clients -from kuryr_kubernetes import constants -from kuryr_kubernetes import exceptions -from kuryr_kubernetes.objects import lbaas as obj_lbaas -from kuryr_kubernetes.objects import vif -from kuryr_kubernetes import os_vif_util - -CONF = cfg.CONF -LOG = log.getLogger(__name__) - -VALID_MULTI_POD_POOLS_OPTS = {'noop': ['neutron-vif', - 'nested-vlan', - 'nested-macvlan', - 'nested-dpdk'], - 'neutron': ['neutron-vif'], - 'nested': ['nested-vlan'], - } -DEFAULT_TIMEOUT = 500 -DEFAULT_INTERVAL = 1 -DEFAULT_JITTER = 3 -MAX_BACKOFF = 60 -MAX_ATTEMPTS = 10 -ZOMBIE_AGE = 600 - - -subnet_caching_opts = [ - cfg.BoolOpt('caching', default=True, - help=_('Enable caching of subnets.')), - cfg.IntOpt('cache_time', default=3600, - help=_('TTL, in seconds, for cached subnets')), -] - -nodes_caching_opts = [ - cfg.BoolOpt('caching', default=True, - help=_('Enable caching of nodes.')), - cfg.IntOpt('cache_time', default=3600, - help=_('TTL, in seconds, for cached nodes')), -] - -CONF.register_opts(subnet_caching_opts, "subnet_caching") -CONF.register_opts(nodes_caching_opts, "nodes_caching") - -cache.configure(CONF) -subnet_cache_region = cache.create_region() -MEMOIZE = cache.get_memoization_decorator( - CONF, subnet_cache_region, "subnet_caching") -cache.configure_cache_region(CONF, subnet_cache_region) - -nodes_cache_region = cache.create_region() -MEMOIZE_NODE = cache.get_memoization_decorator( - CONF, nodes_cache_region, "nodes_caching") -cache.configure_cache_region(CONF, nodes_cache_region) - -RESOURCE_MAP = {'Endpoints': 'endpoints', - 'KuryrLoadBalancer': 'kuryrloadbalancers', - 'KuryrNetwork': 'kuryrnetworks', - 'KuryrNetworkPolicy': 'kuryrnetworkpolicies', - 'KuryrPort': 'kuryrports', - 'Namespace': 'namespaces', - 'NetworkPolicy': 'networkpolicies', - 'Node': 'nodes', - 'Pod': 'pods', - 'Service': 'services', - 'Machine': 'machines'} -API_VER_MAP = {'NetworkPolicy': 'networking.k8s.io/v1', - 'Pod': 'v1', - 'Service': 'v1'} -API_RE = re.compile(r'v\d+') - - -def get_klb_crd_path(obj): - """Return klb crd path from provided resource""" - namespace = obj['metadata']['namespace'] - lb_name = obj['metadata']['name'] - - return (f"{constants.K8S_API_CRD_NAMESPACES}/" - f"{namespace}/" - f"kuryrloadbalancers/" - f"{lb_name}") - - -def get_res_link(obj): - """Return selfLink equivalent for provided resource""" - # First try, if we still have it - try: - return obj['metadata']['selfLink'] - except KeyError: - pass - - # If not, let's proceed with the path assembling. - try: - res_type = RESOURCE_MAP[obj['kind']] - except KeyError: - LOG.error('Unknown resource kind: %s', obj.get('kind')) - raise - - namespace = '' - if obj['metadata'].get('namespace'): - namespace = f"/namespaces/{obj['metadata']['namespace']}" - - try: - api = f"/apis/{obj['apiVersion']}" - if API_RE.match(obj['apiVersion']): - api = f"/api/{obj['apiVersion']}" - except KeyError: - LOG.error("Object doesn't have an apiVersion available: %s", obj) - raise - - return f"{api}{namespace}/{res_type}/{obj['metadata']['name']}" - - -def get_api_ver(path): - """Get apiVersion out of resource path. 
- - Path usually is something similar to: - - /api/v1/namespaces/default/pods/pod-5bb648d658-55n76 - - in case of core resources, and: - - /apis/openstack.org/v1/namespaces/default/kuryrloadbalancers/lb-324 - - in case of custom resources. - """ - if path.startswith('/api/'): - return path.split('/')[2] - - if path.startswith('/apis/'): - return '/'.join(path.split('/')[2:4]) - - raise ValueError('Provided path is not a Kubernetes API path: %s' % path) - - -def utf8_json_decoder(byte_data): - """Deserializes the bytes into UTF-8 encoded JSON. - - :param byte_data: The bytes to be converted into the UTF-8 encoded JSON. - :returns: The UTF-8 encoded JSON represented by Python dictionary format. - """ - return jsonutils.loads(byte_data.decode('utf8')) - - -def convert_netns(netns): - """Convert /proc based netns path to Docker-friendly path. - - When CONF.docker_mode is set this method will change /proc to - /CONF.netns_proc_dir. This allows netns manipulations to work when running - in Docker container on Kubernetes host. - - :param netns: netns path to convert. - :return: Converted netns path. - """ - if CONF.cni_daemon.docker_mode: - return netns.replace('/proc', CONF.cni_daemon.netns_proc_dir) - else: - return netns - - -def get_res_unique_name(resource): - """Returns a unique name for the resource like pod or CRD. - - It returns a unique name for the resource composed of its name and the - namespace it is created in or just name for cluster-scoped resources. - - :returns: String with name of the resource - """ - try: - return "%(namespace)s/%(name)s" % resource['metadata'] - except KeyError: - return "%(name)s" % resource['metadata'] - - -def check_suitable_multi_pool_driver_opt(pool_driver, pod_driver): - return pod_driver in VALID_MULTI_POD_POOLS_OPTS.get(pool_driver, []) - - -def exponential_sleep(deadline, attempt, interval=DEFAULT_INTERVAL, - max_backoff=MAX_BACKOFF, jitter=DEFAULT_JITTER): - """Sleep for exponential duration. - - :param deadline: sleep timeout duration in seconds. - :param attempt: attempt count of sleep function. - :param interval: minimal time interval to sleep - :param max_backoff: maximum time to sleep - :param jitter: max value of jitter added to the sleep time - :return: the actual time that we've slept - """ - now = time.time() - seconds_left = deadline - now - - if seconds_left <= 0: - return 0 - - to_sleep = exponential_backoff(attempt, interval, max_backoff=max_backoff, - jitter=jitter) - - if to_sleep > seconds_left: - to_sleep = seconds_left - - if to_sleep < interval: - to_sleep = interval - - time.sleep(to_sleep) - return to_sleep - - -def exponential_backoff(attempt, interval=DEFAULT_INTERVAL, - max_backoff=MAX_BACKOFF, jitter=DEFAULT_JITTER): - """Return exponential backoff duration with jitter. - - This implements a variation of exponential backoff algorithm [1] (expected - backoff E(c) = interval * 2 ** attempt / 2). - - [1] https://en.wikipedia.org/wiki/Exponential_backoff - """ - - if attempt >= MAX_ATTEMPTS: - # No need to calculate very long intervals - attempt = MAX_ATTEMPTS - - backoff = 2 ** attempt * interval - - if max_backoff is not None and backoff > max_backoff: - backoff = max_backoff - - if jitter: - backoff += random.randint(0, jitter) - - return backoff - - -def get_node_name(): - # The leader-elector container, based on the K8s way of doing leader - # election, assumes the hostname it sees is the node id. Containers within - # a pod share the hostname, so this will match what leader-elector returns. 
- return socket.gethostname() - - -def get_leader_name(): - url = 'http://localhost:%d' % CONF.kubernetes.controller_ha_elector_port - try: - return requests.get(url).json()['name'] - except Exception: - LOG.exception('Error when fetching current leader pod name.') - # NOTE(dulek): Assuming there's no leader when we can't contact leader - # elector container. - return None - - -@MEMOIZE_NODE -def get_nodes_ips(node_subnets): - """Get the IPs of the trunk ports associated to the deployment.""" - trunk_ips = [] - os_net = clients.get_network_client() - tags = CONF.neutron_defaults.resource_tags - if tags: - ports = os_net.ports(status='ACTIVE', tags=tags) - else: - # NOTE(ltomasbo: if tags are not used, assume all the trunk ports are - # part of the kuryr deployment - ports = os_net.ports(status='ACTIVE') - for port in ports: - if (port.trunk_details and port.fixed_ips and - port.fixed_ips[0]['subnet_id'] in node_subnets): - trunk_ips.append(port.fixed_ips[0]['ip_address']) - return trunk_ips - - -@MEMOIZE -def get_subnet(subnet_id): - os_net = clients.get_network_client() - - n_subnet = os_net.get_subnet(subnet_id) - n_network = os_net.get_network(n_subnet.network_id) - - subnet = os_vif_util.neutron_to_osvif_subnet(n_subnet) - network = os_vif_util.neutron_to_osvif_network(n_network) - network.subnets.objects.append(subnet) - return network - - -@MEMOIZE -def get_subnet_cidr(subnet_id): - os_net = clients.get_network_client() - try: - subnet_obj = os_net.get_subnet(subnet_id) - except os_exc.ResourceNotFound: - LOG.exception("Subnet %s CIDR not found!", subnet_id) - raise - return subnet_obj.cidr - - -def get_subnet_id(**filters): - os_net = clients.get_network_client() - subnets = os_net.subnets(**filters) - - try: - return next(subnets).id - except StopIteration: - return None - - -@MEMOIZE -def get_subnets_id_cidrs(subnet_ids): - os_net = clients.get_network_client() - subnets = os_net.subnets() - cidrs = [(subnet.id, subnet.cidr) for subnet in subnets - if subnet.id in subnet_ids] - if len(cidrs) != len(subnet_ids): - existing = {subnet.id for subnet in subnets} - missing = set(subnet_ids) - existing - LOG.exception("CIDRs of subnets %s not found!", missing) - raise os_exc.ResourceNotFound() - return cidrs - - -def get_subnets_cidrs(subnet_ids): - return [x[1] for x in get_subnets_id_cidrs(subnet_ids)] - - -@MEMOIZE -def _get_subnetpool(subnetpool_id): - os_net = clients.get_network_client() - try: - subnetpool_obj = os_net.get_subnet_pool(subnetpool_id) - except os_exc.ResourceNotFound: - LOG.exception("Subnetpool %s not found!", subnetpool_id) - raise - return subnetpool_obj - - -def get_subnetpool_version(subnetpool_id): - subnetpool_obj = _get_subnetpool(subnetpool_id) - return subnetpool_obj.ip_version - - -def get_subnetpool_cidrs(subnetpool_id): - subnetpool_obj = _get_subnetpool(subnetpool_id) - return subnetpool_obj.prefixes - - -def extract_pod_annotation(annotation): - obj = objects.base.VersionedObject.obj_from_primitive(annotation) - # FIXME(dulek): This is code to maintain compatibility with Queens. We can - # remove it once we stop supporting upgrading from Queens, - # most likely in Stein. Note that this requires being sure - # that *all* the pod annotations are in new format. - if obj.obj_name() != vif.PodState.obj_name(): - # This is old format of annotations - single VIF object. We need to - # pack it in PodState object. 
- obj = vif.PodState(default_vif=obj) - - return obj - - -def has_limit(quota): - NO_LIMIT = -1 - return quota['limit'] != NO_LIMIT - - -def is_available(resource, resource_quota): - availability = resource_quota['limit'] - resource_quota['used'] - if availability <= 0: - LOG.error("Neutron quota exceeded for %s. Used %d out of %d limit.", - resource, resource_quota['used'], resource_quota['limit']) - return False - elif availability <= 3: - LOG.warning("Neutron quota low for %s. Used %d out of %d limit.", - resource, resource_quota['used'], resource_quota['limit']) - return True - - -def has_kuryr_crd(crd_url): - k8s = clients.get_kubernetes_client() - try: - k8s.get(crd_url, json=False, headers={'Connection': 'close'}) - except exceptions.K8sResourceNotFound: - LOG.error('CRD %s does not exists.', crd_url) - except exceptions.K8sClientException: - LOG.exception('Error fetching CRD %s, assuming it does not exist.', - crd_url) - return False - return True - - -def get_lbaas_spec(k8s_object): - # k8s_object can be service or endpoint - try: - annotations = k8s_object['metadata']['annotations'] - annotation = annotations[constants.K8S_ANNOTATION_LBAAS_SPEC] - except KeyError: - return None - obj_dict = jsonutils.loads(annotation) - obj = obj_lbaas.LBaaSServiceSpec.obj_from_primitive(obj_dict) - LOG.debug("Got LBaaSServiceSpec from annotation: %r", obj) - return obj - - -def set_lbaas_spec(service, lbaas_spec): - # TODO(ivc): extract annotation interactions - if lbaas_spec is None: - LOG.debug("Removing LBaaSServiceSpec annotation: %r", lbaas_spec) - annotation = None - else: - lbaas_spec.obj_reset_changes(recursive=True) - LOG.debug("Setting LBaaSServiceSpec annotation: %r", lbaas_spec) - annotation = jsonutils.dumps(lbaas_spec.obj_to_primitive(), - sort_keys=True) - svc_link = get_res_link(service) - ep_link = get_endpoints_link(service) - k8s = clients.get_kubernetes_client() - - try: - k8s.annotate(ep_link, - {constants.K8S_ANNOTATION_LBAAS_SPEC: annotation}) - except exceptions.K8sResourceNotFound as ex: - LOG.debug("Failed to annotate svc: %s", ex) - raise exceptions.ResourceNotReady(ep_link) - except exceptions.K8sClientException: - LOG.debug("Failed to annotate endpoint %r", ep_link) - raise - try: - k8s.annotate(svc_link, - {constants.K8S_ANNOTATION_LBAAS_SPEC: annotation}, - resource_version=service['metadata']['resourceVersion']) - except exceptions.K8sResourceNotFound as ex: - LOG.debug("Failed to annotate svc: %s", ex) - raise exceptions.ResourceNotReady(svc_link) - except exceptions.K8sClientException: - LOG.exception("Failed to annotate svc: %r", svc_link) - raise - - -def get_lbaas_state(endpoint): - try: - annotations = endpoint['metadata']['annotations'] - annotation = annotations[constants.K8S_ANNOTATION_LBAAS_STATE] - except KeyError: - return None - obj_dict = jsonutils.loads(annotation) - obj = obj_lbaas.LBaaSState.obj_from_primitive(obj_dict) - LOG.debug("Got LBaaSState from annotation: %r", obj) - return obj - - -def set_lbaas_state(endpoints, lbaas_state): - # TODO(ivc): extract annotation interactions - if lbaas_state is None: - LOG.debug("Removing LBaaSState annotation: %r", lbaas_state) - annotation = None - else: - lbaas_state.obj_reset_changes(recursive=True) - LOG.debug("Setting LBaaSState annotation: %r", lbaas_state) - annotation = jsonutils.dumps(lbaas_state.obj_to_primitive(), - sort_keys=True) - k8s = clients.get_kubernetes_client() - k8s.annotate(get_res_link(endpoints), - {constants.K8S_ANNOTATION_LBAAS_STATE: annotation}, - 
resource_version=endpoints['metadata']['resourceVersion']) - - -def get_endpoints_link(service): - svc_link = get_res_link(service) - link_parts = svc_link.split('/') - - if link_parts[-2] != 'services': - raise exceptions.IntegrityError( - f"Unsupported service link: {svc_link}") - link_parts[-2] = 'endpoints' - - return "/".join(link_parts) - - -def get_service_link(endpoints): - endpoints_link = get_res_link(endpoints) - link_parts = endpoints_link.split('/') - - if link_parts[-2] != 'endpoints': - raise exceptions.IntegrityError( - f"Unsupported endpoints link: {endpoints_link}") - link_parts[-2] = 'services' - - return "/".join(link_parts) - - -def has_port_changes(service, loadbalancer_crd): - if not loadbalancer_crd: - return False - link = get_res_link(service) - svc_port_set = service['spec'].get('ports') - - for port in svc_port_set: - port['targetPort'] = str(port['targetPort']) - spec_port_set = loadbalancer_crd['spec'].get('ports', []) - if spec_port_set: - if len(svc_port_set) != len(spec_port_set): - return True - pairs = zip(svc_port_set, spec_port_set) - diff = any(x != y for x, y in pairs) - if diff: - LOG.debug("LBaaS spec ports %(spec_ports)s != %(svc_ports)s " - "for %(link)s" % {'spec_ports': spec_port_set, - 'svc_ports': svc_port_set, - 'link': link}) - return diff - return False - - -def get_service_ports(service): - return [{'name': port.get('name'), - 'protocol': port.get('protocol', 'TCP'), - 'port': port['port'], - 'targetPort': str(port['targetPort'])} - for port in service['spec']['ports']] - - -@MEMOIZE -def get_service_subnet_version(): - os_net = clients.get_network_client() - svc_subnet_id = CONF.neutron_defaults.service_subnet - try: - svc_subnet = os_net.get_subnet(svc_subnet_id) - except os_exc.ResourceNotFound: - LOG.exception("Service subnet %s not found", svc_subnet_id) - raise - return svc_subnet.ip_version - - -def clean_lb_crd_status(loadbalancer_name): - namespace, name = loadbalancer_name.split('/') - k8s = clients.get_kubernetes_client() - try: - k8s.patch_crd('status', f'{constants.K8S_API_CRD_NAMESPACES}' - f'/{namespace}/kuryrloadbalancers/{name}', {}) - except exceptions.K8sResourceNotFound: - LOG.debug('KuryrLoadbalancer CRD not found %s', - name) - except exceptions.K8sClientException: - LOG.exception('Error updating KuryrLoadbalancer CRD %s', - name) - raise - - -def is_kubernetes_default_resource(obj): - """Check if Object is a resource associated to the API - - Verifies if the Object is on the default namespace - and has the name kubernetes. Those name and namespace - are given to Kubernetes Service and Endpoints for the API. - - :param obj: Kubernetes object dict - :returns: True if is default resource for the API, false - otherwise. 
- """ - return (obj['metadata']['name'] == 'kubernetes' and - obj['metadata']['namespace'] == 'default') - - -def get_pod_by_ip(pod_ip, namespace=None): - k8s = clients.get_kubernetes_client() - pod = {} - try: - if namespace: - pods = k8s.get(f'{constants.K8S_API_BASE}/namespaces/{namespace}/' - f'pods?fieldSelector=status.phase=Running,' - f'status.podIP={pod_ip}') - else: - pods = k8s.get(f'{constants.K8S_API_BASE}/' - f'pods?fieldSelector=status.phase=Running,' - f'status.podIP={pod_ip}') - except exceptions.K8sClientException: - LOG.exception('Error retrieving Pod with IP %s', pod_ip) - raise - if pods.get('items'): - # Only one Pod should have the IP - return pods['items'][0] - return pod - - -def get_current_endpoints_target(ep, port, spec_ports, ep_name): - """Retrieve details about one specific Endpoint target - - Defines the details about the Endpoint target, such as the - target address, name, port value and the Pool ID. In case, - the Endpoints has no targetRef defined, the name of the - target will be the same as the Endpoint. - - :param ep: Endpoint on the Endpoints object - :param port: Endpoint port - :param spec_ports: dict of port name associated to pool ID - :param ep_name: Name of the Endpoints object - :returns: Tuple with target address, target name, port number - and pool ID. - """ - target_ref = ep.get('targetRef', {}) - pod_name = ep_name - # NOTE(maysams): As we don't support dual-stack, we assume - # only one address is possible on the addresses field. - address = ep['addresses'][0] - if target_ref: - pod_name = target_ref.get('name', '') - return (address, pod_name, port['port'], - spec_ports.get(port.get('name'))) - - -def get_subnet_by_ip(nodes_subnets, target_ip): - ip = ipaddress.ip_address(target_ip) - for nodes_subnet in nodes_subnets: - if ip in ipaddress.ip_network(nodes_subnet[1]): - return nodes_subnet - - return None - - -def get_kuryrloadbalancer(name, namespace): - k8s = clients.get_kubernetes_client() - try: - return k8s.get(f'{constants.K8S_API_CRD_NAMESPACES}/' - f'{namespace}/kuryrloadbalancers/' - f'{name}') - except exceptions.K8sResourceNotFound: - return {} - - -def is_pod_completed(pod): - try: - return (pod['status']['phase'] in - (constants.K8S_POD_STATUS_SUCCEEDED, - constants.K8S_POD_STATUS_FAILED)) - except KeyError: - return False - - -def is_host_network(pod): - return pod['spec'].get('hostNetwork', False) - - -def is_pod_static(pod): - """Checks if Pod is static by comparing annotations.""" - try: - annotations = pod['metadata']['annotations'] - config_source = annotations[constants.K8S_ANNOTATION_CONFIG_SOURCE] - return config_source != 'api' - except KeyError: - return False - - -def get_nodename(): - # NOTE(dulek): At first try to get it using environment variable, - # otherwise assume hostname is the nodename. - try: - nodename = os.environ['KUBERNETES_NODE_NAME'] - except KeyError: - # NOTE(dulek): By default K8s nodeName is lowercased hostname. - nodename = socket.gethostname().lower() - return nodename - - -def get_referenced_object(obj, kind): - """Get referenced object. - - Helper function for getting objects out of the CRDs like - KuryrLoadBalancer, KuryrNetworkPolicy or KuryrPort needed solely for - creating Event object, so there will be no exceptions raises from this - function. 
- """ - for ref in obj['metadata'].get('ownerReferences', []): - if ref['kind'] != kind: - continue - - try: - return {'kind': kind, - 'apiVersion': ref['apiVersion'], - 'metadata': {'namespace': obj['metadata']['namespace'], - 'name': ref['name'], - 'uid': ref['uid']}} - except KeyError: - LOG.debug("Not all needed keys was found in ownerReferences " - "list: %s", ref) - - # There was no ownerReferences field, let's query API - k8s = clients.get_kubernetes_client() - data = {'metadata': {'name': obj['metadata']['name']}, - 'kind': kind, - 'apiVersion': API_VER_MAP[kind]} - if obj['metadata'].get('namespace'): - data['metadata']['namespace'] = obj['metadata']['namespace'] - try: - url = get_res_link(data) - except KeyError: - LOG.debug("Not all needed data was found in provided object: %s", - data) - return - - try: - return k8s.get(url) - except exceptions.K8sClientException: - LOG.debug('Error when fetching %s to add an event %s, ignoring', - kind, get_res_unique_name(obj)) - - -def cleanup_dead_ports(): - tags = set(CONF.neutron_defaults.resource_tags) - if not tags: - # NOTE(gryf): there is no reliable way for removing kuryr-related - # ports if there are no tags enabled - without tags there is a chance, - # that ports are down, created by someone/something else and would - # be deleted. - # Perhaps a be better idea to would be to have some mark in other - # field during port creation to identify "our" ports. - return - - os_net = clients.get_network_client() - k8s = clients.get_kubernetes_client() - - try: - crds = k8s.get(constants.K8S_API_CRD_KURYRNETWORKS) - except exceptions.K8sClientException as ex: - LOG.exception('Error fetching KuryrNetworks: %s', ex) - return - - for item in crds['items']: - network_id = item.get('status', {}).get('netId') - if not network_id: - continue - - for port in os_net.ports(status='DOWN', network_id=network_id, - device_owner=kl_const.DEVICE_OWNER, - not_tags=list(tags)): - now = timeutils.utcnow(True) - port_time = timeutils.parse_isotime(port.updated_at) - # NOTE(gryf): if port hanging more than 10 minutes already in DOWN - # state, consider it as a dead one. - if (now - port_time).seconds > ZOMBIE_AGE: - try: - os_net.delete_port(port) - except os_exc.SDKException as ex: - LOG.warning('There was an issue with port "%s" ' - 'removal: %s', port, ex) - - -def cleanup_dead_networks(): - """Cleanup all the dead networks and subnets without ports""" - - tags = set(CONF.neutron_defaults.resource_tags) - if not tags: - return - - os_net = clients.get_network_client() - k8s = clients.get_kubernetes_client() - - desc = ",".join(CONF.neutron_defaults.resource_tags) - - try: - crds = k8s.get(constants.K8S_API_CRD_KURYRNETWORKS) - except exceptions.K8sClientException as ex: - LOG.exception('Error fetching KuryrNetworks: %s', ex) - return - - kuryr_net_ids = [i['status']['netId'] for i in crds['items'] - if i.get('status', {}).get('netId')] - - for net in os_net.networks(description=desc): - - if net.id in kuryr_net_ids: - # Find out, if there are more subnets than expected, which suppose - # to not have tags. 
- for subnet in os_net.subnets(network_id=net.id, - not_tags=list(tags)): - now = timeutils.utcnow(True) - subnet_time = timeutils.parse_isotime(subnet.updated_at) - if (now - subnet_time).seconds > ZOMBIE_AGE: - try: - os_net.delete_subnet(subnet) - except os_exc.SDKException as ex: - LOG.warning('There was an issue with removing subnet ' - '"%s": %s', subnet, ex) - continue - - if len(list(os_net.ports(network_id=net.id))) > 0: - continue - - now = timeutils.utcnow(True) - net_time = timeutils.parse_isotime(net.updated_at) - # NOTE(gryf): if network hanging more than 10 minutes consider it as a - # orphaned. - if (now - net_time).seconds > ZOMBIE_AGE: - try: - os_net.delete_network(net) - except os_exc.SDKException as ex: - LOG.warning('There was an issue with network "%s" ' - 'removal: %s', net, ex) - - -def get_parent_port_id(vif_obj): - os_net = clients.get_network_client() - tags = [] - - if CONF.neutron_defaults.resource_tags: - tags = CONF.neutron_defaults.resource_tags - - trunks = os_net.trunks(tags=tags) - - for trunk in trunks: - for sp in trunk.sub_ports: - if sp['port_id'] == vif_obj.id: - return trunk.port_id - - return None - - -def get_parent_port_ip(port_id): - os_net = clients.get_network_client() - parent_port = os_net.get_port(port_id) - return parent_port.fixed_ips[0]['ip_address'] diff --git a/kuryr_kubernetes/version.py b/kuryr_kubernetes/version.py deleted file mode 100644 index 89900c997..000000000 --- a/kuryr_kubernetes/version.py +++ /dev/null @@ -1,15 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import pbr.version - -version_info = pbr.version.VersionInfo('kuryr_kubernetes') diff --git a/kuryr_kubernetes/watcher.py b/kuryr_kubernetes/watcher.py deleted file mode 100644 index c17fa3f0e..000000000 --- a/kuryr_kubernetes/watcher.py +++ /dev/null @@ -1,239 +0,0 @@ -# Copyright (c) 2016 Mirantis, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import sys -import time - -from oslo_config import cfg -from oslo_log import log as logging - -from kuryr_kubernetes import clients -from kuryr_kubernetes import exceptions -from kuryr_kubernetes.handlers import health -from kuryr_kubernetes import utils - -LOG = logging.getLogger(__name__) -CONF = cfg.CONF - - -class Watcher(health.HealthHandler): - """Observes K8s resources' events using K8s '?watch=true' API. - - The `Watcher` maintains a list of K8s resources and manages the event - processing loops for those resources. 
Event handling is delegated to the - `callable` object passed as the `handler` initialization parameter that - will be run for each K8s event observed by the `Watcher`. - - The `Watcher` can operate in two different modes based on the - `thread_group` initialization parameter: - - - synchronous, when the event processing loops run on the same thread - that called 'add' or 'start' methods - - - asynchronous, when each event processing loop runs on its own thread - (`oslo_service.threadgroup.Thread`) from the `thread_group` - - When started, the `Watcher` will run the event processing loops for each - of the K8s resources on the list. Adding a K8s resource to the running - `Watcher` also ensures that the event processing loop for that resource is - running. - - Stopping the `Watcher` or removing the specific K8s resource from the - list will request the corresponding running event processing loops to - stop gracefully, but will not interrupt any running `handler`. Forcibly - stopping any 'stuck' `handler` is not supported by the `Watcher` and - should be handled externally (e.g. by using `thread_group.stop( - graceful=False)` for asynchronous `Watcher`). - """ - - def __init__(self, handler, thread_group=None, timeout=None): - """Initializes a new Watcher instance. - - :param handler: a `callable` object to be invoked for each observed - K8s event with the event body as a single argument. - Calling `handler` should never raise any exceptions - other than `eventlet.greenlet.GreenletExit` caused by - `eventlet.greenthread.GreenThread.kill` when the - `Watcher` is operating in asynchronous mode. - :param thread_group: an `oslo_service.threadgroup.ThreadGroup` - object used to run the event processing loops - asynchronously. If `thread_group` is not - specified, the `Watcher` will operate in a - synchronous mode. - """ - super(Watcher, self).__init__() - self._client = clients.get_kubernetes_client() - self._handler = handler - self._thread_group = thread_group - self._running = False - self._resources = set() - self._watching = {} - self._timers = {} - self._idle = {} - - if timeout is None: - timeout = CONF.kubernetes.watch_retry_timeout - self._timeout = timeout - - def add(self, path): - """Adds ths K8s resource to the Watcher. - - Adding a resource to a running `Watcher` also ensures that the event - processing loop for that resource is running. This method could block - for `Watcher`s operating in synchronous mode. - - :param path: K8s resource URL path - """ - self._resources.add(path) - if self._running and path not in self._watching: - self._start_watch(path) - - def remove(self, path): - """Removes the K8s resource from the Watcher. - - Also requests the corresponding event processing loop to stop if it - is running. - - :param path: K8s resource URL path - """ - self._resources.discard(path) - if path in self._watching: - self._stop_watch(path) - - def is_running(self): - return self._running - - def start(self): - """Starts the Watcher. - - Also ensures that the event processing loops are running. This method - could block for `Watcher`s operating in synchronous mode. - """ - self._running = True - for path in self._resources - set(self._watching): - self._start_watch(path) - - def stop(self): - """Stops the Watcher. - - Also requests all running event processing loops to stop. 
- """ - self._running = False - for path in list(self._watching): - self._stop_watch(path) - - def _reconcile(self, path): - LOG.debug(f'Getting {path} for reconciliation.') - try: - response = self._client.get(path) - resources = response['items'] - except exceptions.K8sClientException: - LOG.exception(f'Error getting path when reconciling.') - return - - # NOTE(gryf): For some resources (like pods) we could observe that - # 'items' is set to None. I'm not sure if that's a K8s issue, since - # accroding to the documentation is should be list. - if not resources: - return - - for resource in resources: - event = { - 'type': 'MODIFIED', - 'object': resource, - } - self._handler(event, injected=True) - - def _start_watch(self, path): - tg = self._thread_group - self._idle[path] = True - if tg: - self._watching[path] = tg.add_thread(self._watch, path) - period = CONF.kubernetes.watch_reconcile_period - if period > 0: - # Let's make sure handlers won't reconcile at the same time. - initial_delay = period + 5 * len(self._timers) - self._timers[path] = tg.add_timer_args( - period, self._reconcile, args=(path,), - initial_delay=initial_delay, stop_on_exception=False) - else: - self._watching[path] = None - self._watch(path) - - def _stop_watch(self, path): - if self._idle.get(path): - if self._thread_group and path in self._watching: - if CONF.kubernetes.watch_reconcile_period: - self._timers[path].stop() - self._watching[path].stop() - # NOTE(dulek): Thread gets killed immediately, so we need to - # take care of this ourselves. - if CONF.kubernetes.watch_reconcile_period: - self._timers.pop(path, None) - self._watching.pop(path, None) - self._idle.pop(path, None) - - def _graceful_watch_exit(self, path): - try: - self._watching.pop(path, None) - if CONF.kubernetes.watch_reconcile_period: - self._timers.pop(path, None) - self._idle.pop(path, None) - LOG.info("Stopped watching '%s'", path) - except KeyError: - LOG.error("Failed to exit watch gracefully") - finally: - if not self._watching and not self._idle: - self.stop() - LOG.info("No remaining active watchers, Exiting...") - sys.exit(1) - - def _watch(self, path): - attempts = 0 - deadline = 0 - while self._running and path in self._resources: - try: - retry = False - if attempts == 1: - deadline = time.time() + self._timeout - - if (attempts > 0 and - utils.exponential_sleep(deadline, attempts) == 0): - LOG.error("Failed watching '%s': deadline exceeded", path) - self._alive = False - return - - LOG.info("Started watching '%s'", path) - for event in self._client.watch(path): - # NOTE(esevan): Watcher retries watching for - # `self._timeout` duration with exponential backoff - # algorithm to tolerate against temporal exception such as - # temporal disconnection to the k8s api server. 
- attempts = 0 - self._idle[path] = False - self._handler(event) - self._idle[path] = True - if not (self._running and path in self._resources): - return - except Exception: - LOG.exception("Caught exception while watching.") - LOG.warning("Restarting(%s) watching '%s'.", - attempts, path) - attempts += 1 - retry = True - self._idle[path] = True - finally: - if not retry: - self._graceful_watch_exit(path) diff --git a/playbooks/copy-crio-logs.yaml b/playbooks/copy-crio-logs.yaml deleted file mode 100644 index 048c54613..000000000 --- a/playbooks/copy-crio-logs.yaml +++ /dev/null @@ -1,14 +0,0 @@ -- hosts: all - tasks: - - set_fact: - devstack_base_dir: /opt/stack - when: devstack_base_dir is not defined - - - name: Copy CRI-O logs - shell: - cmd: "{{ devstack_base_dir }}/kuryr-kubernetes/tools/gate/copy_crio_logs.sh" - executable: /bin/bash - chdir: "{{ zuul.project.src_dir }}" - environment: - DEVSTACK_BASE_DIR: "{{ devstack_base_dir }}" - become: true diff --git a/playbooks/copy-k8s-logs.yaml b/playbooks/copy-k8s-logs.yaml deleted file mode 100644 index 866566c34..000000000 --- a/playbooks/copy-k8s-logs.yaml +++ /dev/null @@ -1,14 +0,0 @@ -- hosts: all - tasks: - - set_fact: - devstack_base_dir: /opt/stack - when: devstack_base_dir is not defined - - - name: Copy Kubernetes resources and pods logs - shell: - cmd: "{{ devstack_base_dir }}/kuryr-kubernetes/tools/gate/copy_k8s_logs.sh" - executable: /bin/bash - chdir: "{{ zuul.project.src_dir }}" - environment: - DEVSTACK_BASE_DIR: "{{ devstack_base_dir }}" - become: true diff --git a/playbooks/e2e-tests.patch b/playbooks/e2e-tests.patch deleted file mode 100644 index 483a239f4..000000000 --- a/playbooks/e2e-tests.patch +++ /dev/null @@ -1,58 +0,0 @@ -diff --git a/test/e2e/framework/pod/wait.go b/test/e2e/framework/pod/wait.go -index 61ab7997ce6..eabf38006ad 100644 ---- a/test/e2e/framework/pod/wait.go -+++ b/test/e2e/framework/pod/wait.go -@@ -51,7 +51,7 @@ const ( - podScheduledBeforeTimeout = podListTimeout + (20 * time.Second) - - // podStartTimeout is how long to wait for the pod to be started. -- podStartTimeout = 5 * time.Minute -+ podStartTimeout = 2 * time.Minute - - // poll is how often to poll pods, nodes and claims. - poll = 2 * time.Second -diff --git a/test/e2e/network/netpol/network_legacy.go b/test/e2e/network/netpol/network_legacy.go -index fb52460560c..895f4c3df85 100644 ---- a/test/e2e/network/netpol/network_legacy.go -+++ b/test/e2e/network/netpol/network_legacy.go -@@ -435,6 +435,7 @@ var _ = common.SIGDescribe("NetworkPolicyLegacy [LinuxOnly]", func() { - - policy, err = f.ClientSet.NetworkingV1().NetworkPolicies(nsA.Name).Create(context.TODO(), policy, metav1.CreateOptions{}) - framework.ExpectNoError(err, "Error creating Network Policy %v: %v", policy.ObjectMeta.Name, err) -+ time.Sleep(60 * time.Second) - defer cleanupNetworkPolicy(f, policy) - - testCannotConnect(f, nsB, "client-a", service, 80) -@@ -957,6 +958,7 @@ var _ = common.SIGDescribe("NetworkPolicyLegacy [LinuxOnly]", func() { - // Client cannot connect to service after updating the server pod's labels to match the network policy's selector. 
- ginkgo.By(fmt.Sprintf("Updating server pod %s to be selected by network policy %s.", podServer.Name, policy.Name)) - updatePodLabel(f, f.Namespace, podServer.Name, "add", "/metadata/labels/isolated", nil) -+ time.Sleep(60 * time.Second) - testCannotConnect(f, f.Namespace, "client-a", service, allowedPort) - }) - -@@ -1103,6 +1105,7 @@ var _ = common.SIGDescribe("NetworkPolicyLegacy [LinuxOnly]", func() { - } - - policyAllowToServerInNSB, err = f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(context.TODO(), policyAllowToServerInNSB, metav1.CreateOptions{}) -+ time.Sleep(60 * time.Second) - framework.ExpectNoError(err, "Error occurred while creating policy: policyAllowToServerInNSB.") - defer cleanupNetworkPolicy(f, policyAllowToServerInNSB) - -@@ -1807,6 +1810,7 @@ var _ = common.SIGDescribe("NetworkPolicy [Feature:SCTPConnectivity][LinuxOnly][ - - ginkgo.By("Testing pods can connect only to the port allowed by the policy.") - testCannotConnectProtocol(f, f.Namespace, "client-a", service, 80, v1.ProtocolSCTP) -+ time.Sleep(60 * time.Second) - testCanConnectProtocol(f, f.Namespace, "client-b", service, 81, v1.ProtocolSCTP) - }) - -@@ -2143,7 +2147,7 @@ func createNetworkClientPodWithRestartPolicy(f *framework.Framework, namespace * - Command: []string{"/bin/sh"}, - Args: []string{ - "-c", -- fmt.Sprintf("for i in $(seq 1 5); do /agnhost connect %s --protocol %s --timeout 8s && exit 0 || sleep 1; done; exit 1", net.JoinHostPort(targetService.Spec.ClusterIP, strconv.Itoa(targetPort)), connectProtocol), -+ fmt.Sprintf("sleep 30; for i in $(seq 1 300); do /agnhost connect %s --protocol %s --timeout 8s && exit 0 || sleep 1; done; exit 1", net.JoinHostPort(targetService.Spec.ClusterIP, strconv.Itoa(targetPort)), connectProtocol), - }, - }, - }, diff --git a/playbooks/get_amphora_tarball.yaml b/playbooks/get_amphora_tarball.yaml deleted file mode 100644 index e400adf9d..000000000 --- a/playbooks/get_amphora_tarball.yaml +++ /dev/null @@ -1,6 +0,0 @@ -- hosts: controller - tasks: - - name: Download amphora tarball - get_url: - url: "https://tarballs.openstack.org/octavia/test-images/test-only-amphora-x64-haproxy-ubuntu-focal.qcow2" - dest: /tmp/test-only-amphora-x64-haproxy-ubuntu-focal.qcow2 diff --git a/playbooks/run_k8s_e2e_tests.yaml b/playbooks/run_k8s_e2e_tests.yaml deleted file mode 100644 index 3cd450894..000000000 --- a/playbooks/run_k8s_e2e_tests.yaml +++ /dev/null @@ -1,106 +0,0 @@ -- hosts: all - tasks: - # NOTE(maysams): Revisit this package removal step - # once other operating systems are supported on the gates - - name: Remove old installation of Go - shell: | - apt remove -y --purge golang - apt autoremove -y - become: yes - ignore_errors: yes - - - name: Download GO {{ gopkg }} - get_url: - url: https://dl.google.com/go/{{ gopkg }} - dest: /tmp/{{ gopkg }} - force: yes - - - name: Unarchive GO - unarchive: - src: /tmp/{{ gopkg }} - dest: /usr/local - remote_src: yes - become: true - - - name: Clone K8s test-infra repository - git: - repo: https://github.com/kubernetes/test-infra - dest: ~/test-infra - force: yes - - - name: Install kubetest - shell: go install ./kubetest - args: - chdir: ~/test-infra - environment: - GO111MODULE: "on" - PATH: "{{ ansible_env.PATH }}:/usr/local/go/bin:{{ ansible_env.HOME }}/go/bin" - - - name: Clone kubernetes repository - git: - repo: https://github.com/kubernetes/kubernetes.git - version: "{{ kubetest_version }}" - dest: ~/kubernetes - force: yes - - - name: Patch e2e tests - # TODO(gryf): for some reason 'patch' plugin 
doesn't work - block: - - name: patch the kubernetes tests - shell: patch -Np1 < /opt/stack/kuryr-kubernetes/playbooks/e2e-tests.patch - args: - chdir: ~/kubernetes - - - name: Build e2e tests - block: - - name: Install make package - become: true - package: - name: "make" - state: present - - name: Build e2e tests - shell: | - make WHAT=cmd/kubectl - make WHAT=vendor/github.com/onsi/ginkgo/ginkgo - make WHAT=test/e2e/e2e.test - args: - chdir: ~/kubernetes - environment: - PATH: "{{ ansible_env.PATH }}:/usr/local/go/bin:{{ ansible_env.HOME }}/go/bin" - - - name: Create .kube folder within BASE - file: - path: "{{ ansible_env.HOME }}/.kube" - state: directory - become: yes - - - name: Copy kubeconfig file - shell: "cp /etc/kubernetes/admin.conf {{ ansible_env.HOME }}/.kube/config" - become: yes - - - name: Change kubeconfig file permission - file: - path: "{{ ansible_env.HOME }}/.kube/config" - owner: zuul - group: zuul - become: yes - - - name: Run Network Policy legacy tests - block: - - name: Run Network Policy tests without SCTPConnectivity - shell: kubetest --provider=local --check-version-skew=false --test --ginkgo-parallel={{ np_parallel_number }} --test_args="--ginkgo.focus=NetworkPolicyLegacy --ginkgo.skip=\[Feature:SCTPConnectivity|should.enforce.policies.to.check.ingress.and.egress.policies.can.be.controlled.independently.based.on.PodSelector" --dump=/tmp > ~/np_kubetest.log - args: - chdir: ~/kubernetes - environment: - GINKGO_NO_COLOR: "y" - KUBECONFIG: "{{ ansible_env.HOME }}/.kube/config" - PATH: "{{ ansible_env.PATH }}:/usr/local/go/bin:{{ ansible_env.HOME }}/go/bin" - always: - - name: Run Network Policy SCTPConnectivity tests - shell: kubetest --provider=local --check-version-skew=false --test --ginkgo-parallel={{ np_parallel_number }} --test_args="--ginkgo.focus=NetworkPolicy.\[Feature:SCTPConnectivity" --dump=/tmp > ~/np_sctp_kubetest.log - args: - chdir: ~/kubernetes - environment: - GINKGO_NO_COLOR: "y" - KUBECONFIG: "{{ ansible_env.HOME }}/.kube/config" - PATH: "{{ ansible_env.PATH }}:/usr/local/go/bin:{{ ansible_env.HOME }}/go/bin" diff --git a/releasenotes/notes/active-passive-ha-cfbda8e6b527b48e.yaml b/releasenotes/notes/active-passive-ha-cfbda8e6b527b48e.yaml deleted file mode 100644 index 266769496..000000000 --- a/releasenotes/notes/active-passive-ha-cfbda8e6b527b48e.yaml +++ /dev/null @@ -1,9 +0,0 @@ ---- -features: - - | - Kuryr-Kubernetes now supports running kuryr-controller service in - **Active/Passive HA mode**. This is only possible when running those services - as Pods on Kubernetes cluster, as Kubernetes is used for leader election. - Also it is required to add leader-elector container to the kuryr-controller - Pods. HA is controlled by ``[kubernetes]controller_ha`` option, which - defaults to ``False``. diff --git a/releasenotes/notes/add-tagging-ce56231f58bf7ad0.yaml b/releasenotes/notes/add-tagging-ce56231f58bf7ad0.yaml deleted file mode 100644 index e02e538c5..000000000 --- a/releasenotes/notes/add-tagging-ce56231f58bf7ad0.yaml +++ /dev/null @@ -1,11 +0,0 @@ ---- -features: - - | - Added possibility to ensure all OpenStack resources created by Kuryr are - tagged. In case of Neutron regular ``tags`` field is used. If Octavia - supports tagging (from Octavia API 2.5, i.e. Stein), ``tags`` field is used - as well, otherwise tags are put on ``description`` field. All this is - controlled by ``[neutron_defaults]resource_tags`` config option that can - hold a list of tags to be put on resources. 
This feature is useful to - correctly identify any leftovers in OpenStack after the K8s cluster Kuryr - was serving gets deleted. diff --git a/releasenotes/notes/bp-openshift-router-support-5f28108b39a2826f.yaml b/releasenotes/notes/bp-openshift-router-support-5f28108b39a2826f.yaml deleted file mode 100644 index 651273b98..000000000 --- a/releasenotes/notes/bp-openshift-router-support-5f28108b39a2826f.yaml +++ /dev/null @@ -1,15 +0,0 @@ ---- -features: - - | - An OpenShift route is a way to expose a service by giving it an - externally-reachable hostname like www.example.com. - A defined route and the endpoints identified by its service can be - consumed by a router to provide named connectivity that allows external - clients to reach your applications. - Each route consists of a route name and target service details. - To enable it, the following handlers should be added: - - .. code-block:: ini - - [kubernetes] - enabled_handlers=vif,lb,lbaasspec,ingresslb,ocproute diff --git a/releasenotes/notes/change-cni-daemon-default-port-e968a83fa1bf30b5.yaml b/releasenotes/notes/change-cni-daemon-default-port-e968a83fa1bf30b5.yaml deleted file mode 100644 index 7dff0e397..000000000 --- a/releasenotes/notes/change-cni-daemon-default-port-e968a83fa1bf30b5.yaml +++ /dev/null @@ -1,8 +0,0 @@ ---- -upgrade: - - | - kuryr-daemon used to listen on port 50036, but that's a port from the - local port range (on Ubuntu and RHEL the default range is 32768-60999). - This means that there might have been a port conflict ("address already - in use"). To avoid that, the default value of the - ``[cni_daemon]bind_address`` option was changed to ``127.0.0.1:5036``. diff --git a/releasenotes/notes/changing-default-url-for-k8s-api-42c3b90183783291.yaml b/releasenotes/notes/changing-default-url-for-k8s-api-42c3b90183783291.yaml deleted file mode 100644 index a546f503f..000000000 --- a/releasenotes/notes/changing-default-url-for-k8s-api-42c3b90183783291.yaml +++ /dev/null @@ -1,16 +0,0 @@ ---- -upgrade: - - | - The 'api_root' option in the kubernetes section changed its default value - from: - - .. code-block:: ini - - [kubernetes] - api_root=http://localhost:8080 - - to: - - .. code-block:: ini - - [kubernetes] - api_root=https://localhost:6443 diff --git a/releasenotes/notes/cni-health-checks-d2b70f2f2551a9fc.yaml b/releasenotes/notes/cni-health-checks-d2b70f2f2551a9fc.yaml deleted file mode 100644 index 75ee286d0..000000000 --- a/releasenotes/notes/cni-health-checks-d2b70f2f2551a9fc.yaml +++ /dev/null @@ -1,16 +0,0 @@ ---- -features: - - | - The CNI daemon now provides health checks allowing the deployer or the - orchestration layer to probe it for readiness and liveness. - - These health checks are served and executed by a Manager that runs - as part of the CNI daemon, and offers two endpoints indicating whether - it is ready and alive. - - The Manager validates presence of NET_ADMIN capabilities, health status - of a transactional database, connectivity with the Kubernetes API, - quantity of CNI add failures, health of CNI components and amount of - memory being consumed. The health checks fail if any of the presented - checks is not validated, causing the orchestration layer to restart the - container. - More information can be found in the kuryr-kubernetes documentation. 
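As a rough illustration of how the readiness and liveness endpoints described in the note above can be probed, here is a sketch in the style of a kubelet probe; the 8090 port is an assumption about the health server's default, and the endpoint names follow the note's ready/alive wording:

import requests

def cni_daemon_healthy(base='http://127.0.0.1:8090'):  # port is an assumption
    # Probe both endpoints the way kubelet readiness/liveness probes would;
    # anything other than HTTP 200 counts as unhealthy.
    ready = requests.get(base + '/ready', timeout=5)
    alive = requests.get(base + '/alive', timeout=5)
    return ready.status_code == 200 and alive.status_code == 200
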
diff --git a/releasenotes/notes/containerization-2fba4dac5c097b19.yaml b/releasenotes/notes/containerization-2fba4dac5c097b19.yaml
deleted file mode 100644
index 4d491edcf..000000000
--- a/releasenotes/notes/containerization-2fba4dac5c097b19.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-features:
-  - |
-    Kuryr can now be run in containers on top of the K8s cluster it is
-    providing networking for. A tool to generate the K8s resource
-    definitions is provided. More information can be found in the
-    kuryr-kubernetes documentation.
diff --git a/releasenotes/notes/cri-o-support-ab7e810775754ea7.yaml b/releasenotes/notes/cri-o-support-ab7e810775754ea7.yaml
deleted file mode 100644
index d2c8e4830..000000000
--- a/releasenotes/notes/cri-o-support-ab7e810775754ea7.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-features:
-  - |
-    Added support for using cri-o (and podman & buildah) as the container
-    engine in both container images and DevStack.
diff --git a/releasenotes/notes/deprecate-handlers-caching-9cdfd772aba9a7ce.yaml b/releasenotes/notes/deprecate-handlers-caching-9cdfd772aba9a7ce.yaml
deleted file mode 100644
index 19380647b..000000000
--- a/releasenotes/notes/deprecate-handlers-caching-9cdfd772aba9a7ce.yaml
+++ /dev/null
@@ -1,9 +0,0 @@
----
-deprecations:
-  - |
-    The configuration sections ``[namespace_handler_caching]``,
-    ``[np_handler_caching]`` and ``[vif_handler_caching]`` have been
-    deprecated as part of simplifying the quota usage calculation for
-    readiness checks. Instead of counting Neutron objects (ports, security
-    groups, subnets and networks), the ``quota_details`` extension is used,
-    which includes used, limit and reserved counts per resource. This makes
-    caching unnecessary.
diff --git a/releasenotes/notes/deprecate-non-daemonized-6dd2154238b1628c.yaml b/releasenotes/notes/deprecate-non-daemonized-6dd2154238b1628c.yaml
deleted file mode 100644
index 1888863bd..000000000
--- a/releasenotes/notes/deprecate-non-daemonized-6dd2154238b1628c.yaml
+++ /dev/null
@@ -1,18 +0,0 @@
----
-upgrade:
-  - |
-    A legacy Kuryr deployment without the kuryr-daemon service running is
-    now considered deprecated. That possibility will be completely removed
-    in one of the next releases. Please note that this means the
-    ``[cni_daemon]daemon_enabled`` option will default to ``True``.
-deprecations:
-  - |
-    Running Kuryr-Kubernetes without the kuryr-daemon service is now
-    deprecated. Motivations for that move include:
-
-    * Discoveries of bugs that are much easier to fix in kuryr-daemon.
-    * Further improvements in Kuryr scalability (e.g. moving the choice of
-      a VIF from the pool into kuryr-daemon) are only possible when
-      kuryr-daemon is present.
-
-    The possibility of running Kuryr-Kubernetes without kuryr-daemon will
-    be removed in one of the future releases.
diff --git a/releasenotes/notes/deprecate-sg-mode-option-96824c33335cd74b.yaml b/releasenotes/notes/deprecate-sg-mode-option-96824c33335cd74b.yaml
deleted file mode 100644
index 7648a14f7..000000000
--- a/releasenotes/notes/deprecate-sg-mode-option-96824c33335cd74b.yaml
+++ /dev/null
@@ -1,10 +0,0 @@
----
-deprecations:
-  - |
-    Setting the ``sg_mode`` option for Octavia is deprecated. The main
-    reason is that when ``sg_mode`` is set to ``create``, a new load
-    balancer security group is created. However, when the ovn-octavia
-    provider is used, that security group is not enforced, so there is no
-    need to create it in the first place. For the other behaviour handled
-    by this option, the ``enforce_sg_rules`` option can be used instead.
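Taken together, the two deprecation notes above point at a target configuration along the lines of the following minimal sketch. This is not an authoritative sample; in particular, the placement of ``enforce_sg_rules`` under ``[octavia_defaults]`` is our assumption, so check the generated kuryr.conf.sample before relying on it.

.. code-block:: ini

   [cni_daemon]
   # The daemonized deployment is now the supported path; per the note
   # above, this option defaults to True.
   daemon_enabled = True

   [octavia_defaults]
   # Replaces the behaviour previously toggled via the deprecated sg_mode
   # option; the section placement here is an assumption.
   enforce_sg_rules = True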
diff --git a/releasenotes/notes/deprecate-worker-nodes-subnet-e452c84df5b5ed5c.yaml b/releasenotes/notes/deprecate-worker-nodes-subnet-e452c84df5b5ed5c.yaml
deleted file mode 100644
index a4cfcca0e..000000000
--- a/releasenotes/notes/deprecate-worker-nodes-subnet-e452c84df5b5ed5c.yaml
+++ /dev/null
@@ -1,12 +0,0 @@
----
-features:
-  - |
-    Kuryr now supports nested mode with node VMs running in multiple
-    subnets. In order to use that functionality, a new option
-    `[pod_vif_nested]worker_nodes_subnets` is introduced; it accepts a
-    list of subnet IDs.
-deprecations:
-  - |
-    The option `[pod_vif_nested]worker_nodes_subnet` is deprecated in favor
-    of `[pod_vif_nested]worker_nodes_subnets`, which accepts a list instead
-    of a single ID.
diff --git a/releasenotes/notes/deprecate_lbaasv2-a524aedf5d3a36bc.yaml b/releasenotes/notes/deprecate_lbaasv2-a524aedf5d3a36bc.yaml
deleted file mode 100644
index a4d5d0ddd..000000000
--- a/releasenotes/notes/deprecate_lbaasv2-a524aedf5d3a36bc.yaml
+++ /dev/null
@@ -1,15 +0,0 @@
----
-upgrade:
-  - |
-    A legacy Kuryr deployment relying on neutron-lbaas as the LBaaSv2
-    endpoint is now deprecated. The possibility of using it as Kuryr's
-    LBaaSv2 endpoint will be completely removed in one of the next
-    releases.
-deprecations:
-  - |
-    Running Kuryr-Kubernetes with neutron-lbaasv2 is now deprecated. The
-    main motivation for this is the deprecation of the neutron-lbaas
-    implementation in favour of Octavia.
-
-    The possibility of running Kuryr-Kubernetes with the lbaas handler
-    pointing to anything but Octavia or SDN LBaaS implementations will be
-    removed in future releases.
diff --git a/releasenotes/notes/drop-ingress-d78a7a9be8f20da1.yaml b/releasenotes/notes/drop-ingress-d78a7a9be8f20da1.yaml
deleted file mode 100644
index 7c4bc5f36..000000000
--- a/releasenotes/notes/drop-ingress-d78a7a9be8f20da1.yaml
+++ /dev/null
@@ -1,9 +0,0 @@
----
-deprecations:
-  - |
-    Support for OpenShift's Routes (Ingress) is removed, as it is neither
-    maintained nor tested; OpenShift router pods can be used instead.
-  - |
-    Support for namespace isolation is now deprecated and will be removed
-    at the first occasion, as the same effect can now be achieved using the
-    Network Policies support.
diff --git a/releasenotes/notes/drop-py27-60f55b6bc1d082bc.yaml b/releasenotes/notes/drop-py27-60f55b6bc1d082bc.yaml
deleted file mode 100644
index dc805e2b5..000000000
--- a/releasenotes/notes/drop-py27-60f55b6bc1d082bc.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-upgrade:
-  - |
-    Python 2.7 support has been dropped. The last release of
-    Kuryr-Kubernetes to support Python 2.7 is OpenStack Train. The minimum
-    version of Python now supported by Kuryr-Kubernetes is Python 3.6.
diff --git a/releasenotes/notes/fault-tolerable-watcher-24c51dbccabf5f17.yaml b/releasenotes/notes/fault-tolerable-watcher-24c51dbccabf5f17.yaml
deleted file mode 100644
index b349793d7..000000000
--- a/releasenotes/notes/fault-tolerable-watcher-24c51dbccabf5f17.yaml
+++ /dev/null
@@ -1,21 +0,0 @@
----
-upgrade:
-  - |
-    For the Kuryr Kubernetes watcher, a new option ``watch_retry_timeout``
-    has been added. The following should be modified in kuryr.conf::
-
-      [kubernetes]
-      # 'watch_retry_timeout' field is optional,
-      # default = 60 if not set.
-      watch_retry_timeout =