From d5d4ef1f9d24a22b5c8ec939cdd9409653379ff5 Mon Sep 17 00:00:00 2001 From: Luis Tomas Bolivar Date: Wed, 18 Apr 2018 11:03:27 +0000 Subject: [PATCH] Add namespace subnet driver for namespace creation This patch adds a new subnet driver that creates a new network for each created k8s namespace. It makes use of K8s CRDs to store the information about the network resources created for each namespace Partially Implements: blueprint network-namespace Change-Id: I7988e1da7a9ed57f29c85ddcd99bb2c87808010e --- devstack/plugin.sh | 12 + devstack/settings | 2 + doc/source/installation/index.rst | 1 + doc/source/installation/network_namespace.rst | 110 ++++++ kubernetes_crds/kuryrnet.yaml | 12 + kuryr_kubernetes/constants.py | 4 + kuryr_kubernetes/controller/drivers/base.py | 19 ++ .../controller/drivers/namespace_subnet.py | 159 +++++++++ .../controller/handlers/namespace.py | 38 ++- kuryr_kubernetes/exceptions.py | 4 + kuryr_kubernetes/k8s_client.py | 13 + kuryr_kubernetes/opts.py | 2 + .../drivers/test_namespace_subnet.py | 312 ++++++++++++++++++ .../controller/handlers/test_namespace.py | 171 ++++++++++ .../tests/unit/test_k8s_client.py | 28 ++ .../network-namespace-2353f8013be398cd.yaml | 16 + setup.cfg | 1 + tools/generate_k8s_resource_definitions.sh | 3 + 18 files changed, 905 insertions(+), 2 deletions(-) create mode 100644 doc/source/installation/network_namespace.rst create mode 100644 kubernetes_crds/kuryrnet.yaml create mode 100644 kuryr_kubernetes/controller/drivers/namespace_subnet.py create mode 100644 kuryr_kubernetes/tests/unit/controller/drivers/test_namespace_subnet.py create mode 100644 kuryr_kubernetes/tests/unit/controller/handlers/test_namespace.py create mode 100644 releasenotes/notes/network-namespace-2353f8013be398cd.yaml diff --git a/devstack/plugin.sh b/devstack/plugin.sh index bf43f11fb..b8d77ec5c 100644 --- a/devstack/plugin.sh +++ b/devstack/plugin.sh @@ -70,6 +70,9 @@ function configure_kuryr { iniset "$KURYR_CONFIG" kubernetes port_debug "$KURYR_PORT_DEBUG" + iniset "$KURYR_CONFIG" kubernetes pod_subnets_driver "$KURYR_SUBNET_DRIVER" + iniset "$KURYR_CONFIG" kubernetes enabled_handlers "$KURYR_ENABLED_HANDLERS" + KURYR_K8S_CONTAINERIZED_DEPLOYMENT=$(trueorfalse False KURYR_K8S_CONTAINERIZED_DEPLOYMENT) if [ "$KURYR_K8S_CONTAINERIZED_DEPLOYMENT" == "True" ]; then # This works around the issue of being unable to set oslo.privsep mode @@ -260,11 +263,14 @@ function configure_neutron_defaults { local service_subnet_id local subnetpool_id local router + local router_id # If a subnetpool is not passed, we get the one created in devstack's # Neutron module subnetpool_id=${KURYR_NEUTRON_DEFAULT_SUBNETPOOL_ID:-${SUBNETPOOL_V4_ID}} router=${KURYR_NEUTRON_DEFAULT_ROUTER:-$Q_ROUTER_NAME} + router_id="$(neutron router-show -c id -f value \ + "$router")" project_id=$(get_or_create_project \ "$KURYR_NEUTRON_DEFAULT_PROJECT" default) @@ -343,6 +349,10 @@ function configure_neutron_defaults { iniset "$KURYR_CONFIG" neutron_defaults pod_subnet "$pod_subnet_id" iniset "$KURYR_CONFIG" neutron_defaults pod_security_groups "$sg_ids" iniset "$KURYR_CONFIG" neutron_defaults service_subnet "$service_subnet_id" + if [ "$KURYR_SUBNET_DRIVER" == "namespace" ]; then + iniset "$KURYR_CONFIG" namespace_subnet pod_subnet_pool "$subnetpool_id" + iniset "$KURYR_CONFIG" namespace_subnet pod_router "$router_id" + fi if [ -n "$OVS_BRIDGE" ]; then iniset "$KURYR_CONFIG" neutron_defaults ovs_bridge "$OVS_BRIDGE" fi @@ -726,6 +736,7 @@ if [[ "$1" == "stack" && "$2" == "extra" ]]; then fi if 
is_service_enabled kuryr-kubernetes; then + /usr/local/bin/kubectl apply -f ${KURYR_HOME}/kubernetes_crds/kuryrnet.yaml if [ "$KURYR_K8S_CONTAINERIZED_DEPLOYMENT" == "False" ]; then run_kuryr_kubernetes else @@ -737,6 +748,7 @@ if [[ "$1" == "stack" && "$2" == "extra" ]]; then generate_containerized_kuryr_resources False fi fi + fi elif [[ "$1" == "stack" && "$2" == "test-config" ]]; then diff --git a/devstack/settings b/devstack/settings index 9187a1cf5..e1879b8b0 100644 --- a/devstack/settings +++ b/devstack/settings @@ -45,6 +45,8 @@ KURYR_K8S_API_CERT=${KURYR_K8S_API_CERT:-} KURYR_K8S_API_KEY=${KURYR_K8S_API_KEY:-} KURYR_K8S_API_CACERT=${KURYR_K8S_API_CACERT:-} KURYR_PORT_DEBUG=${KURYR_PORT_DEBUG:-True} +KURYR_SUBNET_DRIVER=${KURYR_SUBNET_DRIVER:-default} +KURYR_ENABLED_HANDLERS=${KURYR_ENABLED_HANDLERS:-vif,lb,lbaasspec} # OpenShift OPENSHIFT_BINARY_URL=${OPENSHIFT_BINARY_URL:-https://github.com/openshift/origin/releases/download/v3.9.0/openshift-origin-server-v3.9.0-191fece-linux-64bit.tar.gz} diff --git a/doc/source/installation/index.rst b/doc/source/installation/index.rst index 32ab1b558..235dac000 100644 --- a/doc/source/installation/index.rst +++ b/doc/source/installation/index.rst @@ -36,6 +36,7 @@ This section describes how you can install and configure kuryr-kubernetes devstack/index default_configuration trunk_ports + network_namespace testing_connectivity testing_nested_connectivity containerized diff --git a/doc/source/installation/network_namespace.rst b/doc/source/installation/network_namespace.rst new file mode 100644 index 000000000..dc1dcf741 --- /dev/null +++ b/doc/source/installation/network_namespace.rst @@ -0,0 +1,110 @@ +Enable network per namespace functionality (handler + driver) +============================================================= + +To enable the subnet driver that creates a new network for each new namespace, +the following steps are needed: + +1. Enable the namespace handler to react to namespace events, in this case, + creation and deletion. To do that, you need to add it to the list of + enabled handlers in kuryr.conf (details on how to edit this for + containerized deployments can be found at :doc:`./devstack/containerized`):: + + [kubernetes] + enabled_handlers=vif,lb,lbaasspec,namespace + + +2. Enable the namespace subnet driver by modifying the default + pod_subnets_driver option in kuryr.conf:: + + [kubernetes] + pod_subnets_driver = namespace + + +3. Select (and create if needed) the subnet pool from which the new subnets + will get their CIDR (e.g., the default on devstack deployments is + shared-default-subnetpool-v4):: + + [namespace_subnet] + pod_subnet_pool = SUBNET_POOL_ID + + +4. Select (and create if needed) the router to which the new subnets will be + connected (e.g., the default on devstack deployments is router1):: + + [namespace_subnet] + pod_router = ROUTER_ID + + + Note that if a new router is created, it must ensure connectivity + between the pod, service and public subnets, as is the case with + the default subnet driver. + + +Note that you need to restart the kuryr controller after applying the +steps detailed above.
For devstack non-containerized deployments:: + + sudo systemctl restart devstack@kuryr-kubernetes.service + + +And for containerized deployments:: + + kubectl -n kube-system get pod | grep kuryr-controller + kubectl -n kube-system delete pod KURYR_CONTROLLER_POD_NAME + + +To enable the driver directly when deploying with devstack, you just need +to add the namespace handler and set the namespace subnet driver with:: + + KURYR_SUBNET_DRIVER=namespace + KURYR_ENABLED_HANDLERS=vif,lb,lbaasspec,namespace + + +Testing the network per namespace functionality +----------------------------------------------- + +1. Create a namespace:: + + $ kubectl create namespace test + +2. Check that the resources have been created:: + + $ kubectl get namespaces + NAME STATUS AGE + test Active 4s + ... ... ... + + $ kubectl get kuryrnets + NAME AGE + ns-test 1m + + $ openstack network list | grep test + | 7c7b68c5-d3c4-431c-9f69-fbc777b43ee5 | ns/test-net | 8640d134-5ea2-437d-9e2a-89236f6c0198 | + + $ openstack subnet list | grep test + | 8640d134-5ea2-437d-9e2a-89236f6c0198 | ns/test-subnet | 7c7b68c5-d3c4-431c-9f69-fbc777b43ee5 | 10.0.1.128/26 | + +3. Create a pod in the created namespace:: + + $ kubectl run -n test --image kuryr/demo demo + deployment "demo" created + + $ kubectl -n test get pod -o wide + NAME READY STATUS RESTARTS AGE IP NODE + demo-5995548848-lmmjc 1/1 Running 0 7s 10.0.1.136 node1 + + +4. Create a service:: + + $ kubectl expose -n test deploy/demo --port 80 --target-port 8080 + service "demo" exposed + + $ kubectl -n test get svc + NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE + demo ClusterIP 10.0.0.141 80/TCP 18s + + +5. Test service connectivity:: + + $ curl 10.0.0.141 + demo-5995548848-lmmjc: HELLO! I AM ALIVE!!! + diff --git a/kubernetes_crds/kuryrnet.yaml b/kubernetes_crds/kuryrnet.yaml new file mode 100644 index 000000000..b7c6b51ea --- /dev/null +++ b/kubernetes_crds/kuryrnet.yaml @@ -0,0 +1,12 @@ +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: kuryrnets.openstack.org +spec: + group: openstack.org + version: v1 + scope: Cluster + names: + plural: kuryrnets + singular: kuryrnet + kind: KuryrNet diff --git a/kuryr_kubernetes/constants.py b/kuryr_kubernetes/constants.py index a5e414063..819415601 100644 --- a/kuryr_kubernetes/constants.py +++ b/kuryr_kubernetes/constants.py @@ -15,18 +15,22 @@ K8S_API_BASE = '/api/v1' K8S_API_NAMESPACES = K8S_API_BASE + '/namespaces' +K8S_API_CRD = '/apis/openstack.org/v1' K8S_OBJ_NAMESPACE = 'Namespace' K8S_OBJ_POD = 'Pod' K8S_OBJ_SERVICE = 'Service' K8S_OBJ_ENDPOINTS = 'Endpoints' +K8S_OBJ_KURYRNET = 'KuryrNet' + K8S_POD_STATUS_PENDING = 'Pending' K8S_ANNOTATION_PREFIX = 'openstack.org/kuryr' K8S_ANNOTATION_VIF = K8S_ANNOTATION_PREFIX + '-vif' K8S_ANNOTATION_LBAAS_SPEC = K8S_ANNOTATION_PREFIX + '-lbaas-spec' K8S_ANNOTATION_LBAAS_STATE = K8S_ANNOTATION_PREFIX + '-lbaas-state' +K8S_ANNOTATION_NET_CRD = K8S_ANNOTATION_PREFIX + '-net-crd' K8S_OS_VIF_NOOP_PLUGIN = "noop" diff --git a/kuryr_kubernetes/controller/drivers/base.py b/kuryr_kubernetes/controller/drivers/base.py index 48f290892..3146e22d6 100644 --- a/kuryr_kubernetes/controller/drivers/base.py +++ b/kuryr_kubernetes/controller/drivers/base.py @@ -129,6 +129,25 @@ class PodSubnetsDriver(DriverBase): """ raise NotImplementedError() + def create_namespace_network(self, namespace): + """Create network resources for a namespace.
+ + :param namespace: string with the namespace name + :return: CRD KuryrNet dict + """ + raise NotImplementedError() + + def rollback_network_resources(self, router_id, net_id, subnet_id, + namespace): + """Rollback created network resources for a namespace. + + :param router_id: OpenStack router ID where the network is connected + :param net_id: OpenStack network ID + :param subnet_id: OpenStack subnet ID + :param namespace: name of the Kubernetes namespace object + """ + raise NotImplementedError() + @six.add_metaclass(abc.ABCMeta) class ServiceSubnetsDriver(DriverBase): diff --git a/kuryr_kubernetes/controller/drivers/namespace_subnet.py b/kuryr_kubernetes/controller/drivers/namespace_subnet.py new file mode 100644 index 000000000..d6c92cde8 --- /dev/null +++ b/kuryr_kubernetes/controller/drivers/namespace_subnet.py @@ -0,0 +1,159 @@ +# Copyright 2018 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from kuryr.lib._i18n import _ +from oslo_config import cfg as oslo_cfg +from oslo_log import log as logging + +from kuryr_kubernetes import clients +from kuryr_kubernetes import constants +from kuryr_kubernetes.controller.drivers import default_subnet +from kuryr_kubernetes import exceptions + +from neutronclient.common import exceptions as n_exc + +LOG = logging.getLogger(__name__) + +namespace_subnet_driver_opts = [ + oslo_cfg.StrOpt('pod_router', + help=_("Default Neutron router ID where pod subnet(s) is " + "connected")), + oslo_cfg.StrOpt('pod_subnet_pool', + help=_("Default Neutron subnet pool ID where pod subnets " + "get their cidr from")), +] + +oslo_cfg.CONF.register_opts(namespace_subnet_driver_opts, "namespace_subnet") + + +class NamespacePodSubnetDriver(default_subnet.DefaultPodSubnetDriver): + """Provides subnet for Pod port based on a Pod's namespace.""" + + def get_subnets(self, pod, project_id): + pod_namespace = pod['metadata']['namespace'] + subnet_id = self._get_namespace_subnet(pod_namespace) + + return {subnet_id: default_subnet._get_subnet(subnet_id)} + + def _get_namespace_subnet(self, namespace): + kubernetes = clients.get_kubernetes_client() + try: + ns = kubernetes.get('%s/namespaces/%s' % (constants.K8S_API_BASE, + namespace)) + except exceptions.K8sClientException: + LOG.exception("Kubernetes Client Exception.") + raise exceptions.ResourceNotReady(namespace) + + try: + annotations = ns['metadata']['annotations'] + net_crd_url = annotations[constants.K8S_ANNOTATION_NET_CRD] + except KeyError: + LOG.exception("Namespace missing CRD annotations for selecting " + "the corresponding subnet.") + raise exceptions.ResourceNotReady(namespace) + + try: + net_crd = kubernetes.get('%s/kuryrnets/%s' % ( + constants.K8S_API_CRD, net_crd_url)) + except exceptions.K8sClientException as ex: + LOG.exception("Kubernetes Client Exception.") + raise ex + + return net_crd['spec']['subnetId'] + + def create_namespace_network(self, namespace): + neutron = clients.get_neutron_client() + + router_id = oslo_cfg.CONF.namespace_subnet.pod_router + subnet_pool_id = 
oslo_cfg.CONF.namespace_subnet.pod_subnet_pool + + # create network with namespace as name + network_name = "ns/" + namespace + "-net" + subnet_name = "ns/" + namespace + "-subnet" + try: + neutron_net = neutron.create_network( + { + "network": {"name": network_name} + }).get('network') + + # create a subnet within that network + neutron_subnet = neutron.create_subnet( + { + "subnet": { + "network_id": neutron_net['id'], + "ip_version": 4, + "name": subnet_name, + "enable_dhcp": False, + "subnetpool_id": subnet_pool_id, + } + }).get('subnet') + + # connect the subnet to the router + neutron.add_interface_router(router_id, + {"subnet_id": neutron_subnet['id']}) + except n_exc.NeutronClientException as ex: + LOG.error("Error creating neutron resources for the namespace " + "%s: %s", namespace, ex) + raise ex + + # create CRD resource for the network + try: + net_crd = self._add_kuryrnet_crd(namespace, neutron_net['id'], + router_id, neutron_subnet['id']) + except exceptions.K8sClientException as ex: + LOG.exception("Kuryrnet CRD could not be added. Rolling back " + "network resources created for the namespace.") + self.rollback_network_resources(router_id, neutron_net['id'], + neutron_subnet['id'], namespace) + raise ex + return net_crd + + def rollback_network_resources(self, router_id, net_id, subnet_id, + namespace): + neutron = clients.get_neutron_client() + try: + neutron.remove_interface_router(router_id, + {'subnet_id': subnet_id}) + neutron.delete_network(net_id) + except n_exc.NeutronClientException: + LOG.exception("Failed to clean up network resources associated to " + "%(net_id)s, created for the namespace: " + "%(namespace)s." % {'net_id': net_id, + 'namespace': namespace}) + + def _add_kuryrnet_crd(self, namespace, net_id, router_id, subnet_id): + kubernetes = clients.get_kubernetes_client() + net_crd_name = "ns-" + namespace + net_crd = { + 'apiVersion': 'openstack.org/v1', + 'kind': 'KuryrNet', + 'metadata': { + 'name': net_crd_name, + 'annotations': { + 'namespaceName': namespace, + } + }, + 'spec': { + 'netId': net_id, + 'routerId': router_id, + 'subnetId': subnet_id, + }, + } + try: + kubernetes.post('%s/kuryrnets' % constants.K8S_API_CRD, net_crd) + except exceptions.K8sClientException as ex: + LOG.exception("Kubernetes Client Exception creating kuryrnet " + "CRD.") + raise ex + return net_crd diff --git a/kuryr_kubernetes/controller/handlers/namespace.py b/kuryr_kubernetes/controller/handlers/namespace.py index 6fbf1450e..5af8da1e6 100644 --- a/kuryr_kubernetes/controller/handlers/namespace.py +++ b/kuryr_kubernetes/controller/handlers/namespace.py @@ -14,8 +14,10 @@ from oslo_log import log as logging +from kuryr_kubernetes import clients from kuryr_kubernetes import constants from kuryr_kubernetes.controller.drivers import base as drivers +from kuryr_kubernetes import exceptions from kuryr_kubernetes.handlers import k8s_base LOG = logging.getLogger(__name__) @@ -32,7 +34,39 @@ class NamespaceHandler(k8s_base.ResourceEventHandler): self._drv_sg = drivers.PodSecurityGroupsDriver.get_instance() def on_added(self, namespace): - LOG.debug("Creating namespace: %s", namespace) + ns_name = namespace['metadata']['name'] + net_crd = self._get_net_crd(namespace) + if net_crd: + LOG.debug("CRD existing at the new namespace") + return + + LOG.debug("Creating network resources for namespace: %s", ns_name) + net_crd = self._drv_subnets.create_namespace_network(ns_name) + try: + self._set_net_crd(namespace, net_crd) + except exceptions.K8sClientException: + LOG.exception("Failed to 
set annotation") + crd_spec = net_crd['spec'] + self._drv_subnets.rollback_network_resources( + crd_spec['routerId'], crd_spec['netId'], crd_spec['subnetId'], + ns_name) def on_deleted(self, namespace): - LOG.debug("Deleting namespace: %s", namespace) + pass + + def _get_net_crd(self, namespace): + try: + annotations = namespace['metadata']['annotations'] + net_crd = annotations[constants.K8S_ANNOTATION_NET_CRD] + except KeyError: + return None + return net_crd + + def _set_net_crd(self, namespace, net_crd): + LOG.debug("Setting CRD annotations: %s", net_crd) + + k8s = clients.get_kubernetes_client() + k8s.annotate(namespace['metadata']['selfLink'], + {constants.K8S_ANNOTATION_NET_CRD: + net_crd['metadata']['name']}, + resource_version=namespace['metadata']['resourceVersion']) diff --git a/kuryr_kubernetes/exceptions.py b/kuryr_kubernetes/exceptions.py index 93013f3ac..50bab5f19 100644 --- a/kuryr_kubernetes/exceptions.py +++ b/kuryr_kubernetes/exceptions.py @@ -34,6 +34,10 @@ class K8sResourceNotFound(K8sClientException): "found: %r" % resource) +class InvalidKuryrNetCRD(Exception): + pass + + class CNIError(Exception): pass diff --git a/kuryr_kubernetes/k8s_client.py b/kuryr_kubernetes/k8s_client.py index 1983b2c18..99d90a11b 100644 --- a/kuryr_kubernetes/k8s_client.py +++ b/kuryr_kubernetes/k8s_client.py @@ -104,6 +104,19 @@ class K8sClient(object): return response.json().get('status') raise exc.K8sClientException(response.text) + def post(self, path, body): + LOG.debug("Post %(path)s: %(body)s", {'path': path, 'body': body}) + url = self._base_url + path + header = {'Content-Type': 'application/json'} + if self.token: + header.update({'Authorization': 'Bearer %s' % self.token}) + + response = requests.post(url, json=body, cert=self.cert, + verify=self.verify_server, headers=header) + if response.ok: + return response.json() + raise exc.K8sClientException(response) + def annotate(self, path, annotations, resource_version=None): """Pushes a resource annotation to the K8s API resource diff --git a/kuryr_kubernetes/opts.py b/kuryr_kubernetes/opts.py index 71602007a..856d61850 100644 --- a/kuryr_kubernetes/opts.py +++ b/kuryr_kubernetes/opts.py @@ -17,6 +17,7 @@ from kuryr.lib import opts as lib_opts from kuryr_kubernetes.cni import health as cni_health from kuryr_kubernetes import config from kuryr_kubernetes.controller.drivers import default_subnet +from kuryr_kubernetes.controller.drivers import namespace_subnet from kuryr_kubernetes.controller.drivers import nested_vif from kuryr_kubernetes.controller.drivers import vif_pool from kuryr_kubernetes.controller.managers import health @@ -36,6 +37,7 @@ _kuryr_k8s_opts = [ ('cni_daemon', config.daemon_opts), ('health_server', health.health_server_opts), ('cni_health_server', cni_health.cni_health_server_opts), + ('namespace_subnet', namespace_subnet.namespace_subnet_driver_opts), ] diff --git a/kuryr_kubernetes/tests/unit/controller/drivers/test_namespace_subnet.py b/kuryr_kubernetes/tests/unit/controller/drivers/test_namespace_subnet.py new file mode 100644 index 000000000..785c67819 --- /dev/null +++ b/kuryr_kubernetes/tests/unit/controller/drivers/test_namespace_subnet.py @@ -0,0 +1,312 @@ +# Copyright (c) 2018 Red Hat, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import mock + +from kuryr_kubernetes import constants +from kuryr_kubernetes.controller.drivers import namespace_subnet as subnet_drv +from kuryr_kubernetes import exceptions as k_exc +from kuryr_kubernetes.tests import base as test_base +from kuryr_kubernetes.tests.unit import kuryr_fixtures as k_fix + +from neutronclient.common import exceptions as n_exc + + +def get_pod_obj(): + return { + 'status': { + 'qosClass': 'BestEffort', + 'hostIP': '192.168.1.2', + }, + 'kind': 'Pod', + 'spec': { + 'schedulerName': 'default-scheduler', + 'containers': [{ + 'name': 'busybox', + 'image': 'busybox', + 'resources': {} + }], + 'nodeName': 'kuryr-devstack' + }, + 'metadata': { + 'name': 'busybox-sleep1', + 'namespace': 'default', + 'resourceVersion': '53808', + 'selfLink': '/api/v1/namespaces/default/pods/busybox-sleep1', + 'uid': '452176db-4a85-11e7-80bd-fa163e29dbbb', + 'annotations': { + 'openstack.org/kuryr-vif': {} + } + }} + + +def get_namespace_obj(): + return { + 'metadata': { + 'annotations': { + constants.K8S_ANNOTATION_NET_CRD: 'net_crd_url_sample' + } + } + } + + +class TestNamespacePodSubnetDriver(test_base.TestCase): + + @mock.patch('kuryr_kubernetes.controller.drivers' + '.default_subnet._get_subnet') + def test_get_subnets(self, m_get_subnet): + project_id = mock.sentinel.project_id + pod = get_pod_obj() + pod_namespace = pod['metadata']['namespace'] + subnet_id = mock.sentinel.subnet_id + subnet = mock.sentinel.subnet + + cls = subnet_drv.NamespacePodSubnetDriver + m_driver = mock.MagicMock(spec=cls) + + m_driver._get_namespace_subnet.return_value = subnet_id + m_get_subnet.return_value = subnet + + subnets = cls.get_subnets(m_driver, pod, project_id) + + self.assertEqual({subnet_id: subnet}, subnets) + m_driver._get_namespace_subnet.assert_called_once_with(pod_namespace) + m_get_subnet.assert_called_once_with(subnet_id) + + @mock.patch('kuryr_kubernetes.controller.drivers' + '.default_subnet._get_subnet') + def test_get_subnets_namespace_not_ready(self, m_get_subnet): + project_id = mock.sentinel.project_id + pod = get_pod_obj() + pod_namespace = pod['metadata']['namespace'] + + cls = subnet_drv.NamespacePodSubnetDriver + m_driver = mock.MagicMock(spec=cls) + + m_driver._get_namespace_subnet.side_effect = k_exc.ResourceNotReady( + pod_namespace) + + self.assertRaises(k_exc.ResourceNotReady, cls.get_subnets, m_driver, + pod, project_id) + + m_driver._get_namespace_subnet.assert_called_once_with(pod_namespace) + m_get_subnet.assert_not_called() + + def test__get_namespace_subnets(self): + cls = subnet_drv.NamespacePodSubnetDriver + m_driver = mock.MagicMock(spec=cls) + + namespace = mock.sentinel.namespace + subnet_id = mock.sentinel.subnet_id + ns = get_namespace_obj() + crd = { + 'spec': { + 'subnetId': subnet_id + } + } + + kubernetes = self.useFixture(k_fix.MockK8sClient()).client + kubernetes.get.side_effect = [ns, crd] + + subnet_id_resp = cls._get_namespace_subnet(m_driver, namespace) + kubernetes.get.assert_called() + self.assertEqual(subnet_id, subnet_id_resp) + + def test__get_namespace_subnets_get_namespace_exception(self): + cls = 
subnet_drv.NamespacePodSubnetDriver + m_driver = mock.MagicMock(spec=cls) + + namespace = mock.sentinel.namespace + + kubernetes = self.useFixture(k_fix.MockK8sClient()).client + kubernetes.get.side_effect = k_exc.K8sClientException + + self.assertRaises(k_exc.ResourceNotReady, cls._get_namespace_subnet, + m_driver, namespace) + kubernetes.get.assert_called_once() + + def test__get_namespace_subnets_missing_annotation(self): + cls = subnet_drv.NamespacePodSubnetDriver + m_driver = mock.MagicMock(spec=cls) + + namespace = mock.sentinel.namespace + subnet_id = mock.sentinel.subnet_id + ns = get_namespace_obj() + del ns['metadata']['annotations'][constants.K8S_ANNOTATION_NET_CRD] + crd = { + 'spec': { + 'subnetId': subnet_id + } + } + + kubernetes = self.useFixture(k_fix.MockK8sClient()).client + kubernetes.get.side_effect = [ns, crd] + + self.assertRaises(k_exc.ResourceNotReady, cls._get_namespace_subnet, + m_driver, namespace) + kubernetes.get.assert_called_once() + + def test__get_namespace_subnets_get_crd_exception(self): + cls = subnet_drv.NamespacePodSubnetDriver + m_driver = mock.MagicMock(spec=cls) + + namespace = mock.sentinel.namespace + ns = get_namespace_obj() + + kubernetes = self.useFixture(k_fix.MockK8sClient()).client + kubernetes.get.side_effect = [ns, k_exc.K8sClientException] + + self.assertRaises(k_exc.K8sClientException, cls._get_namespace_subnet, + m_driver, namespace) + kubernetes.get.assert_called() + + def test_create_namespace_network(self): + cls = subnet_drv.NamespacePodSubnetDriver + m_driver = mock.MagicMock(spec=cls) + + namespace = 'test' + + neutron = self.useFixture(k_fix.MockNeutronClient()).client + net = {'id': mock.sentinel.net} + neutron.create_network.return_value = {'network': net} + subnet = {'id': mock.sentinel.subnet} + neutron.create_subnet.return_value = {'subnet': subnet} + net_crd = mock.sentinel.net_crd + m_driver._add_kuryrnet_crd.return_value = net_crd + + net_crd_resp = cls.create_namespace_network(m_driver, namespace) + + self.assertEqual(net_crd_resp, net_crd) + neutron.create_network.assert_called_once() + neutron.create_subnet.assert_called_once() + neutron.add_interface_router.assert_called_once() + m_driver._add_kuryrnet_crd.assert_called_once() + + def test_create_namespace_network_net_exception(self): + cls = subnet_drv.NamespacePodSubnetDriver + m_driver = mock.MagicMock(spec=cls) + + namespace = 'test' + + neutron = self.useFixture(k_fix.MockNeutronClient()).client + neutron.create_network.side_effect = n_exc.NeutronClientException + + self.assertRaises(n_exc.NeutronClientException, + cls.create_namespace_network, m_driver, namespace) + + neutron.create_network.assert_called_once() + neutron.create_subnet.assert_not_called() + neutron.add_interface_router.assert_not_called() + m_driver._add_kuryrnet_crd.assert_not_called() + + def test_create_namespace_network_subnet_exception(self): + cls = subnet_drv.NamespacePodSubnetDriver + m_driver = mock.MagicMock(spec=cls) + + namespace = 'test' + + neutron = self.useFixture(k_fix.MockNeutronClient()).client + net = {'id': mock.sentinel.net} + neutron.create_network.return_value = {'network': net} + neutron.create_subnet.side_effect = n_exc.NeutronClientException + + self.assertRaises(n_exc.NeutronClientException, + cls.create_namespace_network, m_driver, namespace) + + neutron.create_network.assert_called_once() + neutron.create_subnet.assert_called_once() + neutron.add_interface_router.assert_not_called() + m_driver._add_kuryrnet_crd.assert_not_called() + + def 
test_create_namespace_network_router_exception(self): + pass + cls = subnet_drv.NamespacePodSubnetDriver + m_driver = mock.MagicMock(spec=cls) + + namespace = 'test' + + neutron = self.useFixture(k_fix.MockNeutronClient()).client + net = {'id': mock.sentinel.net} + neutron.create_network.return_value = {'network': net} + subnet = {'id': mock.sentinel.subnet} + neutron.create_subnet.return_value = {'subnet': subnet} + neutron.add_interface_router.side_effect = ( + n_exc.NeutronClientException) + + self.assertRaises(n_exc.NeutronClientException, + cls.create_namespace_network, m_driver, namespace) + + neutron.create_network.assert_called_once() + neutron.create_subnet.assert_called_once() + neutron.add_interface_router.assert_called_once() + m_driver._add_kuryrnet_crd.assert_not_called() + + def test_create_namespace_network_crd_exception(self): + cls = subnet_drv.NamespacePodSubnetDriver + m_driver = mock.MagicMock(spec=cls) + + namespace = 'test' + + neutron = self.useFixture(k_fix.MockNeutronClient()).client + net = {'id': mock.sentinel.net} + neutron.create_network.return_value = {'network': net} + subnet = {'id': mock.sentinel.subnet} + neutron.create_subnet.return_value = {'subnet': subnet} + m_driver._add_kuryrnet_crd.side_effect = k_exc.K8sClientException + + self.assertRaises(k_exc.K8sClientException, + cls.create_namespace_network, m_driver, namespace) + + neutron.create_network.assert_called_once() + neutron.create_subnet.assert_called_once() + neutron.add_interface_router.assert_called_once() + m_driver._add_kuryrnet_crd.assert_called_once() + m_driver.rollback_network_resources.assert_called_once() + + def test_rollback_network_resources(self): + cls = subnet_drv.NamespacePodSubnetDriver + m_driver = mock.MagicMock(spec=cls) + + router_id = mock.sentinel.router_id + net_id = mock.sentinel.net_id + subnet_id = mock.sentinel.subnet_id + namespace = mock.sentinel.namespace + + neutron = self.useFixture(k_fix.MockNeutronClient()).client + + cls.rollback_network_resources(m_driver, router_id, net_id, + subnet_id, namespace) + neutron.remove_interface_router.assert_called_with( + router_id, {'subnet_id': subnet_id}) + neutron.delete_network.assert_called_with(net_id) + + def test_rollback_network_resources_router_exception(self): + cls = subnet_drv.NamespacePodSubnetDriver + m_driver = mock.MagicMock(spec=cls) + + router_id = mock.sentinel.router_id + net_id = mock.sentinel.net_id + subnet_id = mock.sentinel.subnet_id + namespace = mock.sentinel.namespace + + neutron = self.useFixture(k_fix.MockNeutronClient()).client + neutron.remove_interface_router.side_effect = ( + n_exc.NeutronClientException) + + cls.rollback_network_resources(m_driver, router_id, net_id, + subnet_id, namespace) + neutron.remove_interface_router.assert_called_with( + router_id, {'subnet_id': subnet_id}) + neutron.delete_network.assert_not_called() diff --git a/kuryr_kubernetes/tests/unit/controller/handlers/test_namespace.py b/kuryr_kubernetes/tests/unit/controller/handlers/test_namespace.py new file mode 100644 index 000000000..668d64ed7 --- /dev/null +++ b/kuryr_kubernetes/tests/unit/controller/handlers/test_namespace.py @@ -0,0 +1,171 @@ +# Copyright (c) 2018 Red Hat, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import ddt +import mock + +from neutronclient.common import exceptions as n_exc + +from kuryr_kubernetes.controller.drivers import base as drivers +from kuryr_kubernetes.controller.handlers import namespace +from kuryr_kubernetes import exceptions as k_exc +from kuryr_kubernetes.tests import base as test_base + + +@ddt.ddt +class TestNamespaceHandler(test_base.TestCase): + + def setUp(self): + super(TestNamespaceHandler, self).setUp() + + self._project_id = mock.sentinel.project_id + self._subnets = mock.sentinel.subnets + self._security_groups = mock.sentinel.security_groups + + self._namespace_version = mock.sentinel.namespace_version + self._namespace_link = mock.sentinel.namespace_link + + self._namespace_name = 'ns-test' + self._namespace = { + 'metadata': {'name': self._namespace_name, + 'resourceVersion': self._namespace_version, + 'selfLink': self._namespace_link}, + 'status': {'phase': 'Active'} + } + + self._handler = mock.MagicMock(spec=namespace.NamespaceHandler) + + self._handler._drv_project = mock.Mock(spec=drivers.PodProjectDriver) + self._handler._drv_subnets = mock.Mock(spec=drivers.PodSubnetsDriver) + self._handler._drv_sg = mock.Mock(spec=drivers.PodSecurityGroupsDriver) + + self._get_project = self._handler._drv_project.get_project + self._get_subnets = self._handler._drv_subnets.get_subnets + self._get_security_groups = self._handler._drv_sg.get_security_groups + + self._create_namespace_network = ( + self._handler._drv_subnets.create_namespace_network) + self._get_net_crd = self._handler._get_net_crd + self._set_net_crd = self._handler._set_net_crd + self._rollback_network_resources = ( + self._handler._drv_subnets.rollback_network_resources) + + self._get_project.return_value = self._project_id + self._get_subnets.return_value = self._subnets + self._get_security_groups.return_value = self._security_groups + + def _get_crd(self): + crd = { + 'kind': 'KuryrNet', + 'spec': { + 'routerId': mock.sentinel.router_id, + 'netId': mock.sentinel.net_id, + 'subnetId': mock.sentinel.subnet_id, + } + } + return crd + + @mock.patch.object(drivers.PodSecurityGroupsDriver, 'get_instance') + @mock.patch.object(drivers.PodSubnetsDriver, 'get_instance') + @mock.patch.object(drivers.PodProjectDriver, 'get_instance') + def test_init(self, m_get_project_driver, m_get_subnets_driver, + m_get_sg_driver): + project_driver = mock.sentinel.project_driver + subnets_driver = mock.sentinel.subnets_driver + sg_driver = mock.sentinel.sg_driver + + m_get_project_driver.return_value = project_driver + m_get_subnets_driver.return_value = subnets_driver + m_get_sg_driver.return_value = sg_driver + + handler = namespace.NamespaceHandler() + + self.assertEqual(project_driver, handler._drv_project) + self.assertEqual(subnets_driver, handler._drv_subnets) + self.assertEqual(sg_driver, handler._drv_sg) + + def test_on_added(self): + net_crd = self._get_crd() + + self._get_net_crd.return_value = None + self._create_namespace_network.return_value = net_crd + + namespace.NamespaceHandler.on_added(self._handler, self._namespace) + + self._get_net_crd.assert_called_once_with(self._namespace) + 
self._create_namespace_network.assert_called_once_with( + self._namespace_name) + self._set_net_crd.assert_called_once_with(self._namespace, net_crd) + self._rollback_network_resources.assert_not_called() + + def test_on_added_existing(self): + net_crd = self._get_crd() + + self._get_net_crd.return_value = net_crd + + namespace.NamespaceHandler.on_added(self._handler, self._namespace) + + self._get_net_crd.assert_called_once_with(self._namespace) + self._create_namespace_network.assert_not_called() + self._set_net_crd.assert_not_called() + self._rollback_network_resources.assert_not_called() + + @ddt.data((n_exc.NeutronClientException), (k_exc.K8sClientException)) + def test_on_added_create_exception(self, m_create_net): + self._get_net_crd.return_value = None + self._create_namespace_network.side_effect = m_create_net + + self.assertRaises(m_create_net, namespace.NamespaceHandler.on_added, + self._handler, self._namespace) + + self._get_net_crd.assert_called_once_with(self._namespace) + self._create_namespace_network.assert_called_once_with( + self._namespace_name) + self._set_net_crd.assert_not_called() + self._rollback_network_resources.assert_not_called() + + def test_on_added_set_crd_exception(self): + net_crd = self._get_crd() + + self._get_net_crd.return_value = None + self._create_namespace_network.return_value = net_crd + self._set_net_crd.side_effect = k_exc.K8sClientException + + namespace.NamespaceHandler.on_added(self._handler, self._namespace) + + self._get_net_crd.assert_called_once_with(self._namespace) + self._create_namespace_network.assert_called_once_with( + self._namespace_name) + self._set_net_crd.assert_called_once_with(self._namespace, net_crd) + self._rollback_network_resources.assert_called_once() + + def test_on_added_rollback_exception(self): + net_crd = self._get_crd() + + self._get_net_crd.return_value = None + self._create_namespace_network.return_value = net_crd + self._set_net_crd.side_effect = k_exc.K8sClientException + self._rollback_network_resources.side_effect = ( + n_exc.NeutronClientException) + + self.assertRaises(n_exc.NeutronClientException, + namespace.NamespaceHandler.on_added, + self._handler, self._namespace) + + self._get_net_crd.assert_called_once_with(self._namespace) + self._create_namespace_network.assert_called_once_with( + self._namespace_name) + self._set_net_crd.assert_called_once_with(self._namespace, net_crd) + self._rollback_network_resources.assert_called_once() diff --git a/kuryr_kubernetes/tests/unit/test_k8s_client.py b/kuryr_kubernetes/tests/unit/test_k8s_client.py index da5faea2b..1981bb565 100644 --- a/kuryr_kubernetes/tests/unit/test_k8s_client.py +++ b/kuryr_kubernetes/tests/unit/test_k8s_client.py @@ -340,3 +340,31 @@ class TestK8sClient(test_base.TestCase): self.assertRaises(exc.K8sClientException, next, self.client.watch(path)) + + @mock.patch('requests.post') + def test_post(self, m_post): + path = '/test' + body = {'test': 'body'} + ret = {'test': 'value'} + + m_resp = mock.MagicMock() + m_resp.ok = True + m_resp.json.return_value = ret + m_post.return_value = m_resp + + self.assertEqual(ret, self.client.post(path, body)) + m_post.assert_called_once_with(self.base_url + path, json=body, + headers=mock.ANY, cert=(None, None), + verify=False) + + @mock.patch('requests.post') + def test_post_exception(self, m_post): + path = '/test' + body = {'test': 'body'} + + m_resp = mock.MagicMock() + m_resp.ok = False + m_post.return_value = m_resp + + self.assertRaises(exc.K8sClientException, + self.client.post, path, body) diff 
--git a/releasenotes/notes/network-namespace-2353f8013be398cd.yaml b/releasenotes/notes/network-namespace-2353f8013be398cd.yaml new file mode 100644 index 000000000..f125561ac --- /dev/null +++ b/releasenotes/notes/network-namespace-2353f8013be398cd.yaml @@ -0,0 +1,16 @@ +--- +features: + - | + Introduced a new subnet driver that is able to create a new subnet + (including the network and its connection to the router) for each + namespace creation event. + + To enable it the namespace subnet driver must be selected and the + namespace handler needs to be enabled: + + .. code-block:: ini + + [kubernetes] + enabled_handlers=vif,lb,lbaasspec,namespace + pod_subnets_driver = namespace + diff --git a/setup.cfg b/setup.cfg index f248268f0..57bbb365e 100644 --- a/setup.cfg +++ b/setup.cfg @@ -48,6 +48,7 @@ kuryr_kubernetes.controller.drivers.service_project = kuryr_kubernetes.controller.drivers.pod_subnets = default = kuryr_kubernetes.controller.drivers.default_subnet:DefaultPodSubnetDriver + namespace = kuryr_kubernetes.controller.drivers.namespace_subnet:NamespacePodSubnetDriver kuryr_kubernetes.controller.drivers.service_subnets = default = kuryr_kubernetes.controller.drivers.default_subnet:DefaultServiceSubnetDriver diff --git a/tools/generate_k8s_resource_definitions.sh b/tools/generate_k8s_resource_definitions.sh index 3e4b4ca6e..978ef826a 100755 --- a/tools/generate_k8s_resource_definitions.sh +++ b/tools/generate_k8s_resource_definitions.sh @@ -38,6 +38,7 @@ if [ -z $CONTROLLER_CONF_PATH ]; then worker_nodes_subnet=${KURYR_K8S_WORKER_NODES_SUBNET} binding_driver=${KURYR_K8S_BINDING_DRIVER:-kuryr.lib.binding.drivers.vlan} binding_iface=${KURYR_K8S_BINDING_IFACE:-eth0} + pod_subnet_pool=${KURYR_NEUTRON_DEFAULT_SUBNETPOOL_ID} CONTROLLER_CONF_PATH="${OUTPUT_DIR}/kuryr.conf" rm -f $CONTROLLER_CONF_PATH @@ -63,6 +64,8 @@ service_subnet = $service_subnet_id pod_security_groups = $pod_sg pod_subnet = $pod_subnet_id project = $k8s_project_id +[namespace_subnet] +pod_subnet_pool = $pod_subnet_pool EOF if [ ! -z $binding_driver ]; then
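
As a quick illustration of the flow this patch introduces (not part of the patch itself): once the namespace handler has annotated a namespace, the pod subnet can be resolved by reading the openstack.org/kuryr-net-crd annotation and then fetching the referenced KuryrNet custom resource, which mirrors what NamespacePodSubnetDriver._get_namespace_subnet does. The minimal Python sketch below is illustrative only and assumes an unauthenticated kube-apiserver reachable at http://localhost:8080 and the "test" namespace used in the documentation example above.

import requests

K8S_API = 'http://localhost:8080'              # assumption: local, unauthenticated API
K8S_API_BASE = '/api/v1'
K8S_API_CRD = '/apis/openstack.org/v1'
K8S_ANNOTATION_NET_CRD = 'openstack.org/kuryr-net-crd'

namespace = 'test'                             # assumption: namespace from the doc example

# Read the namespace object and the annotation set by the namespace handler,
# which holds the name of the KuryrNet custom resource.
ns = requests.get('%s%s/namespaces/%s'
                  % (K8S_API, K8S_API_BASE, namespace)).json()
net_crd_name = ns['metadata']['annotations'][K8S_ANNOTATION_NET_CRD]

# Fetch the KuryrNet custom resource and read the Neutron IDs stored in its spec.
net_crd = requests.get('%s%s/kuryrnets/%s'
                       % (K8S_API, K8S_API_CRD, net_crd_name)).json()
print(net_crd['spec']['netId'],
      net_crd['spec']['routerId'],
      net_crd['spec']['subnetId'])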