Namespace event handling through KuryrNetwork CRD

This patch moves the namespace handling to be more aligned with the
Kubernetes controller style: the namespace handler now only creates a
KuryrNetwork CRD object recording the desired state (spec), and a new
kuryrnetwork handler reconciles it, creating the Neutron resources and
recording them in the CRD status.

Depends-on: If0aaf748d13027b3d660aa0f74c4f6653e911250

Change-Id: Ia2811d743f6c4791321b05977118d0b4276787b5
Luis Tomas Bolivar 2020-02-10 08:46:03 +01:00
parent 44c0a307b5
commit 780c4dfa09
30 changed files with 951 additions and 884 deletions


@@ -99,7 +99,7 @@
     vars:
       devstack_localrc:
         DOCKER_CGROUP_DRIVER: "systemd"
-        KURYR_ENABLED_HANDLERS: vif,lb,lbaasspec,namespace,pod_label,policy,kuryrnetpolicy
+        KURYR_ENABLED_HANDLERS: vif,lb,lbaasspec,namespace,pod_label,policy,kuryrnetpolicy,kuryrnetwork
         KURYR_SG_DRIVER: policy
         KURYR_SUBNET_DRIVER: namespace
       devstack_services:
@@ -120,7 +120,7 @@
     vars:
       devstack_localrc:
         KURYR_SUBNET_DRIVER: namespace
-        KURYR_ENABLED_HANDLERS: vif,lb,lbaasspec,namespace,pod_label,policy,kuryrnetpolicy
+        KURYR_ENABLED_HANDLERS: vif,lb,lbaasspec,namespace,pod_label,policy,kuryrnetpolicy,kuryrnetwork
         KURYR_SG_DRIVER: policy
         KURYR_USE_PORT_POOLS: true
         KURYR_POD_VIF_DRIVER: neutron-vif
@@ -134,7 +134,7 @@
     parent: kuryr-kubernetes-tempest-containerized
     vars:
       devstack_localrc:
-        KURYR_ENABLED_HANDLERS: vif,lb,lbaasspec,namespace,pod_label,policy,kuryrnetpolicy
+        KURYR_ENABLED_HANDLERS: vif,lb,lbaasspec,namespace,pod_label,policy,kuryrnetpolicy,kuryrnetwork
         KURYR_SG_DRIVER: policy
         KURYR_SUBNET_DRIVER: namespace


@@ -450,6 +450,7 @@ rules:
   verbs: ["*"]
   resources:
   - kuryrnets
+  - kuryrnetworks
   - kuryrnetpolicies
   - kuryrloadbalancers
 - apiGroups: ["networking.k8s.io"]


@@ -940,6 +940,7 @@ function update_tempest_conf_file {
         iniset $TEMPEST_CONFIG kuryr_kubernetes ipv6 True
     fi
     iniset $TEMPEST_CONFIG kuryr_kubernetes validate_crd True
+    iniset $TEMPEST_CONFIG kuryr_kubernetes kuryrnetworks True
 }

 source $DEST/kuryr-kubernetes/devstack/lib/kuryr_kubernetes
@@ -1078,9 +1079,7 @@ if [[ "$1" == "stack" && "$2" == "extra" ]]; then
         fi

         if is_service_enabled kuryr-kubernetes; then
-            /usr/local/bin/kubectl apply -f ${KURYR_HOME}/kubernetes_crds/kuryrnet.yaml
-            /usr/local/bin/kubectl apply -f ${KURYR_HOME}/kubernetes_crds/kuryrnetpolicy.yaml
-            /usr/local/bin/kubectl apply -f ${KURYR_HOME}/kubernetes_crds/kuryrloadbalancer.yaml
+            /usr/local/bin/kubectl apply -f ${KURYR_HOME}/kubernetes_crds/kuryr_crds/
             if [ "$KURYR_K8S_CONTAINERIZED_DEPLOYMENT" == "True" ]; then
                 generate_containerized_kuryr_resources
             fi


@@ -93,6 +93,7 @@ Edit ``kuryr.conf``:
        verbs: ["*"]
        resources:
        - kuryrnets
+       - kuryrnetworks
        - kuryrnetpolicies
        - kuryrloadbalancers
      - apiGroups: ["networking.k8s.io"]


@@ -13,16 +13,16 @@ the next steps are needed:
    .. code-block:: ini

       [kubernetes]
-      enabled_handlers=vif,lb,lbaasspec,namespace
+      enabled_handlers=vif,lb,lbaasspec,namespace,kuryrnetwork

    Note that if you also want to enable prepopulation of ports pools upon new
-   namespace creation, you need to add the kuryrnet handler (more details on
-   :doc:`./ports-pool`):
+   namespace creation, you need to also add the kuryrnetwork_population
+   handler (more details on :doc:`./ports-pool`):

    .. code-block:: ini

       [kubernetes]
-      enabled_handlers=vif,lb,lbaasspec,namespace,kuryrnet
+      enabled_handlers=vif,lb,lbaasspec,namespace,kuryrnetwork,kuryrnetwork_population

 #. Enable the namespace subnet driver by modifying the default
    pod_subnet_driver option at kuryr.conf:
@@ -73,7 +73,7 @@ to add the namespace handler and state the namespace subnet driver with:
    .. code-block:: console

       KURYR_SUBNET_DRIVER=namespace
-      KURYR_ENABLED_HANDLERS=vif,lb,lbaasspec,namespace
+      KURYR_ENABLED_HANDLERS=vif,lb,lbaasspec,namespace,kuryrnetwork

 .. note::
@@ -103,7 +103,7 @@ Testing the network per namespace functionality
       test2    Active   5s
       ...      ...      ...

-      $ kubectl get kuryrnets
+      $ kubectl get kuryrnetworks -A
       NAME       AGE
       ns-test1   1m
       ns-test2   1m
@@ -152,7 +152,8 @@ Testing the network per namespace functionality
       demo-5995548848-lmmjc: HELLO! I AM ALIVE!!!

 #. And finally, to remove the namespace and all its resources, including
-   openstack networks, kuryrnet CRD, svc, pods, you just need to do:
+   openstack networks, kuryrnetwork CRD, svc, pods, you just need to
+   do:

    .. code-block:: console


@@ -10,16 +10,16 @@ be found at :doc:`./devstack/containerized`):
    .. code-block:: ini

       [kubernetes]
-      enabled_handlers=vif,lb,lbaasspec,policy,pod_label,namespace,kuryrnetpolicy
+      enabled_handlers=vif,lb,lbaasspec,policy,pod_label,namespace,kuryrnetwork,kuryrnetpolicy

    Note that if you also want to enable prepopulation of ports pools upon new
-   namespace creation, you need to add the kuryrnet handler (more details on
-   :doc:`./ports-pool`):
+   namespace creation, you need to also add the kuryrnetwork_population handler
+   (more details on :doc:`./ports-pool`):

    .. code-block:: ini

       [kubernetes]
-      enabled_handlers=vif,lb,lbaasspec,policy,pod_label,namespace,kuryrnetpolicy,kuryrnet
+      enabled_handlers=vif,lb,lbaasspec,policy,pod_label,namespace,kuryrnetpolicy,kuryrnetwork,kuryrnetwork_population

 After that, enable also the security group drivers for policies:


@@ -169,7 +169,7 @@ subnet), the next handler needs to be enabled:
    .. code-block:: ini

       [kubernetes]
-      enabled_handlers=vif,lb,lbaasspec,namespace,*kuryrnet*
+      enabled_handlers=vif,lb,lbaasspec,namespace,*kuryrnetwork*

 This can be enabled at devstack deployment time too by adding the next to the
@@ -177,4 +177,4 @@ local.conf:
    .. code-block:: bash

-      KURYR_ENABLED_HANDLERS=vif,lb,lbaasspec,namespace,*kuryrnet*
+      KURYR_ENABLED_HANDLERS=vif,lb,lbaasspec,namespace,*kuryrnetwork*


@@ -0,0 +1,59 @@
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+  name: kuryrnetworks.openstack.org
+spec:
+  group: openstack.org
+  scope: Namespaced
+  names:
+    plural: kuryrnetworks
+    singular: kuryrnetwork
+    kind: KuryrNetwork
+    shortNames:
+      - kns
+  versions:
+    - name: v1
+      served: true
+      storage: true
+      additionalPrinterColumns:
+        - name: SUBNET-CIDR
+          type: string
+          description: The subnet CIDR allocated to the namespace
+          jsonPath: .status.subnetCIDR
+        - name: Age
+          type: date
+          jsonPath: .metadata.creationTimestamp
+      schema:
+        openAPIV3Schema:
+          type: object
+          properties:
+            spec:
+              type: object
+              required:
+                - nsName
+                - projectId
+                - nsLabels
+              properties:
+                nsName:
+                  type: string
+                projectId:
+                  type: string
+                nsLabels:
+                  x-kubernetes-preserve-unknown-fields: true
+                  type: object
+            status:
+              type: object
+              properties:
+                netId:
+                  type: string
+                populated:
+                  type: boolean
+                routerId:
+                  type: string
+                subnetCIDR:
+                  type: string
+                subnetId:
+                  type: string
+                nsLabels:
+                  x-kubernetes-preserve-unknown-fields: true
+                  type: object
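For illustration, a fully reconciled object under this schema separates the
user-side intent (spec, filled in by the namespace handler) from the
controller-written state (status, filled in by the new kuryrnetwork handler).
A sketch of such an object as a Python dict, with all names and IDs invented:

    kuryrnetwork = {
        'apiVersion': 'openstack.org/v1',
        'kind': 'KuryrNetwork',
        'metadata': {
            'name': 'test1',        # one object per namespace, same name
            'namespace': 'test1',
            'finalizers': ['kuryrnetwork.finalizers.kuryr.openstack.org'],
        },
        'spec': {                   # desired state, set by namespace handler
            'nsName': 'test1',
            'projectId': 'project-uuid',
            'nsLabels': {'env': 'demo'},
        },
        'status': {                 # observed state, set by kuryrnetwork handler
            'netId': 'network-uuid',
            'routerId': 'router-uuid',
            'subnetId': 'subnet-uuid',
            'subnetCIDR': '10.0.1.128/26',
            'populated': True,
            'nsLabels': {'env': 'demo'},  # labels the SG rules currently reflect
        },
    }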


@@ -18,6 +18,7 @@ K8S_API_NAMESPACES = K8S_API_BASE + '/namespaces'
 K8S_API_CRD = '/apis/openstack.org/v1'
 K8S_API_CRD_NAMESPACES = K8S_API_CRD + '/namespaces'
 K8S_API_CRD_KURYRNETS = K8S_API_CRD + '/kuryrnets'
+K8S_API_CRD_KURYRNETWORKS = K8S_API_CRD + '/kuryrnetworks'
 K8S_API_CRD_KURYRNETPOLICIES = K8S_API_CRD + '/kuryrnetpolicies'
 K8S_API_CRD_KURYRLOADBALANCERS = K8S_API_CRD + '/kuryrloadbalancers'
 K8S_API_POLICIES = '/apis/networking.k8s.io/v1/networkpolicies'
@@ -30,6 +31,7 @@ K8S_OBJ_SERVICE = 'Service'
 K8S_OBJ_ENDPOINTS = 'Endpoints'
 K8S_OBJ_POLICY = 'NetworkPolicy'
 K8S_OBJ_KURYRNET = 'KuryrNet'
+K8S_OBJ_KURYRNETWORK = 'KuryrNetwork'
 K8S_OBJ_KURYRNETPOLICY = 'KuryrNetPolicy'
 K8S_OBJ_KURYRLOADBALANCER = 'KuryrLoadBalancer'
@@ -55,6 +57,8 @@ K8S_ANNOTATION_OLD_DRIVER = 'old_driver'
 K8S_ANNOTATION_CURRENT_DRIVER = 'current_driver'
 K8S_ANNOTATION_NEUTRON_PORT = 'neutron_id'

+KURYRNETWORK_FINALIZER = 'kuryrnetwork.finalizers.kuryr.openstack.org'
+
 K8S_OS_VIF_NOOP_PLUGIN = "noop"

 CNI_EXCEPTION_CODE = 100


@@ -170,7 +170,7 @@ class PodSubnetsDriver(DriverBase, metaclass=abc.ABCMeta):
     def delete_namespace_subnet(self, kuryr_net_crd):
         """Delete network resources associated to a namespace.

-        :param kuryr_net_crd: kuryrnet CRD obj dict that contains Neutron's
+        :param kuryr_net_crd: kuryrnetwork CRD obj dict that contains Neutron's
                               network resources associated to a namespace
         """
         raise NotImplementedError()


@@ -54,39 +54,29 @@ class NamespacePodSubnetDriver(default_subnet.DefaultPodSubnetDriver):
     def _get_namespace_subnet_id(self, namespace):
         kubernetes = clients.get_kubernetes_client()
         try:
-            ns = kubernetes.get('%s/namespaces/%s' % (constants.K8S_API_BASE,
-                                                      namespace))
+            net_crd_path = (f"{constants.K8S_API_CRD_NAMESPACES}/"
+                            f"{namespace}/kuryrnetworks/{namespace}")
+            net_crd = kubernetes.get(net_crd_path)
         except exceptions.K8sResourceNotFound:
-            LOG.warning("Namespace %s not found", namespace)
-            raise
+            LOG.debug("Kuryrnetwork resource not yet created, retrying...")
+            raise exceptions.ResourceNotReady(namespace)
         except exceptions.K8sClientException:
             LOG.exception("Kubernetes Client Exception.")
-            raise exceptions.ResourceNotReady(namespace)
+            raise

         try:
-            annotations = ns['metadata']['annotations']
-            net_crd_name = annotations[constants.K8S_ANNOTATION_NET_CRD]
+            subnet_id = net_crd['status']['subnetId']
         except KeyError:
-            LOG.debug("Namespace missing CRD annotations for selecting "
-                      "the corresponding subnet.")
+            LOG.debug("Subnet for namespace %s not yet created, retrying.",
+                      namespace)
             raise exceptions.ResourceNotReady(namespace)
-
-        try:
-            net_crd = kubernetes.get('%s/kuryrnets/%s' % (
-                constants.K8S_API_CRD, net_crd_name))
-        except exceptions.K8sResourceNotFound:
-            LOG.debug("Kuryrnet resource not yet created, retrying...")
-            raise exceptions.ResourceNotReady(net_crd_name)
-        except exceptions.K8sClientException:
-            LOG.exception("Kubernetes Client Exception.")
-            raise
-        return net_crd['spec']['subnetId']
+        return subnet_id

     def delete_namespace_subnet(self, net_crd):
-        subnet_id = net_crd['spec']['subnetId']
-        net_id = net_crd['spec']['netId']
-
-        self._delete_namespace_network_resources(subnet_id, net_id)
+        subnet_id = net_crd['status'].get('subnetId')
+        net_id = net_crd['status'].get('netId')
+        if net_id:
+            self._delete_namespace_network_resources(subnet_id, net_id)

     def _delete_namespace_network_resources(self, subnet_id, net_id):
@@ -147,24 +137,53 @@ class NamespacePodSubnetDriver(default_subnet.DefaultPodSubnetDriver):
             LOG.exception("Error deleting network %s.", net_id)
             raise

-    def create_namespace_network(self, namespace, project_id):
+    def create_network(self, ns_name, project_id):
         os_net = clients.get_network_client()
+        net_name = 'ns/' + ns_name + '-net'
+        tags = oslo_cfg.CONF.neutron_defaults.resource_tags
+        if tags:
+            networks = os_net.networks(name=net_name, tags=tags)
+        else:
+            networks = os_net.networks(name=net_name)

-        router_id = oslo_cfg.CONF.namespace_subnet.pod_router
-        subnet_pool_id = oslo_cfg.CONF.namespace_subnet.pod_subnet_pool
+        try:
+            # NOTE(ltomasbo): only one network must exists
+            return next(networks).id
+        except StopIteration:
+            LOG.debug('Network does not exist. Creating.')

         # create network with namespace as name
-        network_name = "ns/" + namespace + "-net"
-        subnet_name = "ns/" + namespace + "-subnet"
         try:
-            neutron_net = os_net.create_network(name=network_name,
+            neutron_net = os_net.create_network(name=net_name,
                                                 project_id=project_id)
             c_utils.tag_neutron_resources([neutron_net])
+        except os_exc.SDKException:
+            LOG.exception("Error creating neutron resources for the namespace "
+                          "%s", ns_name)
+            raise
+        return neutron_net.id

-            # create a subnet within that network
+    def create_subnet(self, ns_name, project_id, net_id):
+        os_net = clients.get_network_client()
+        subnet_name = "ns/" + ns_name + "-subnet"
+        tags = oslo_cfg.CONF.neutron_defaults.resource_tags
+        if tags:
+            subnets = os_net.subnets(name=subnet_name, tags=tags)
+        else:
+            subnets = os_net.subnets(name=subnet_name)
+
+        try:
+            # NOTE(ltomasbo): only one subnet must exists
+            subnet = next(subnets)
+            return subnet.id, subnet.cidr
+        except StopIteration:
+            LOG.debug('Subnet does not exist. Creating.')
+
+        # create subnet with namespace as name
+        subnet_pool_id = oslo_cfg.CONF.namespace_subnet.pod_subnet_pool
+        try:
             neutron_subnet = (os_net
-                              .create_subnet(network_id=neutron_net.id,
+                              .create_subnet(network_id=net_id,
                                              ip_version=4,
                                              name=subnet_name,
                                              enable_dhcp=False,
@@ -177,53 +196,18 @@ class NamespacePodSubnetDriver(default_subnet.DefaultPodSubnetDriver):
             raise exceptions.ResourceNotReady(subnet_name)
         c_utils.tag_neutron_resources([neutron_subnet])
+        return neutron_subnet.id, neutron_subnet.cidr

+    def add_subnet_to_router(self, subnet_id):
+        os_net = clients.get_network_client()
+        router_id = oslo_cfg.CONF.namespace_subnet.pod_router
+        try:
             # connect the subnet to the router
-            clients.handle_neutron_errors(os_net.add_interface_to_router,
-                                          router_id,
-                                          subnet_id=neutron_subnet.id)
+            os_net.add_interface_to_router(router_id, subnet_id=subnet_id)
+        except os_exc.BadRequestException:
+            LOG.debug("Subnet %s already connected to the router", subnet_id)
         except os_exc.SDKException:
-            LOG.exception("Error creating neutron resources for the namespace "
-                          "%s", namespace)
+            LOG.exception("Error attaching the subnet %s to the router",
+                          subnet_id)
             raise
-        return {'netId': neutron_net.id,
-                'routerId': router_id,
-                'subnetId': neutron_subnet.id,
-                'subnetCIDR': neutron_subnet.cidr}
-
-    def rollback_network_resources(self, net_crd_spec, namespace):
-        os_net = clients.get_network_client()
-        try:
-            try:
-                clients.handle_neutron_errors(
-                    os_net.remove_interface_from_router,
-                    net_crd_spec['routerId'],
-                    subnet_id=net_crd_spec['subnetId'])
-            except os_exc.NotFoundException:
-                # Nothing to worry about, either router or subnet is no more,
-                # or subnet is already detached.
-                pass
-            os_net.delete_network(net_crd_spec['netId'])
-        except os_exc.SDKException:
-            LOG.exception("Failed to clean up network resources associated to "
-                          "%(net_id)s, created for the namespace: "
-                          "%(namespace)s." % {'net_id': net_crd_spec['netId'],
-                                              'namespace': namespace})
-
-    def cleanup_namespace_networks(self, namespace):
-        os_net = clients.get_network_client()
-        net_name = 'ns/' + namespace + '-net'
-        tags = oslo_cfg.CONF.neutron_defaults.resource_tags
-        if tags:
-            networks = os_net.networks(name=net_name, tags=tags)
-        else:
-            networks = os_net.networks(name=net_name)
-        for net in networks:
-            net_id = net.id
-            subnets = net.subnet_ids
-            subnet_id = None
-            if subnets:
-                # NOTE(ltomasbo): Each network created by kuryr only has
-                # one subnet
-                subnet_id = subnets[0]
-            self._delete_namespace_network_resources(subnet_id, net_id)
+        return router_id
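The split of create_namespace_network into create_network, create_subnet and
add_subnet_to_router makes every step idempotent: each one first looks the
resource up by its deterministic name (and the configured tags) and only
creates it on a miss, so a retried event never duplicates Neutron resources.
A minimal sketch of that lookup-or-create idiom, reusing the same
openstacksdk calls as the driver (os_net is assumed to be an openstacksdk
network proxy):

    def ensure_network(os_net, ns_name, project_id, tags=None):
        # The deterministic name lets a restarted controller find prior work.
        net_name = 'ns/' + ns_name + '-net'
        networks = (os_net.networks(name=net_name, tags=tags) if tags
                    else os_net.networks(name=net_name))
        try:
            return next(networks).id  # at most one such network should exist
        except StopIteration:
            return os_net.create_network(name=net_name,
                                         project_id=project_id).id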


@@ -221,18 +221,6 @@ def delete_security_group_rule(security_group_rule_id):
         raise

-
-def patch_kuryrnet_crd(crd, populated=True):
-    kubernetes = clients.get_kubernetes_client()
-    crd_name = crd['metadata']['name']
-    LOG.debug('Patching KuryrNet CRD %s' % crd_name)
-    try:
-        kubernetes.patch_crd('spec', crd['metadata']['selfLink'],
-                             {'populated': populated})
-    except k_exc.K8sClientException:
-        LOG.exception('Error updating kuryrnet CRD %s', crd_name)
-        raise
-

 def patch_kuryrnetworkpolicy_crd(crd, i_rules, e_rules, pod_selector,
                                  np_spec=None):
     kubernetes = clients.get_kubernetes_client()
@@ -394,19 +382,22 @@ def match_selector(selector, labels):
 def get_namespace_subnet_cidr(namespace):
     kubernetes = clients.get_kubernetes_client()
     try:
-        ns_annotations = namespace['metadata']['annotations']
-        ns_name = ns_annotations[constants.K8S_ANNOTATION_NET_CRD]
-    except KeyError:
-        LOG.exception('Namespace handler must be enabled to support '
-                      'Network Policies with namespaceSelector')
+        net_crd_path = (f"{constants.K8S_API_CRD_NAMESPACES}/"
+                        f"{namespace['metadata']['name']}/kuryrnetworks/"
+                        f"{namespace['metadata']['name']}")
+        net_crd = kubernetes.get(net_crd_path)
+    except k_exc.K8sResourceNotFound:
+        LOG.exception('Namespace not yet ready')
         raise k_exc.ResourceNotReady(namespace)
-    try:
-        net_crd = kubernetes.get('{}/kuryrnets/{}'.format(
-            constants.K8S_API_CRD, ns_name))
     except k_exc.K8sClientException:
         LOG.exception("Kubernetes Client Exception.")
         raise
-    return net_crd['spec']['subnetCIDR']
+    try:
+        subnet_cidr = net_crd['status']['subnetCIDR']
+    except KeyError:
+        LOG.exception('Namespace not yet ready')
+        raise k_exc.ResourceNotReady(namespace)
+    return subnet_cidr


 def tag_neutron_resources(resources):
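Because the new CRD is namespaced and named after its namespace, the lookup
path is fully determined by the namespace name, e.g. for a hypothetical
namespace test1:

    K8S_API_CRD_NAMESPACES = '/apis/openstack.org/v1/namespaces'

    ns_name = 'test1'
    net_crd_path = (f"{K8S_API_CRD_NAMESPACES}/"
                    f"{ns_name}/kuryrnetworks/{ns_name}")
    # -> /apis/openstack.org/v1/namespaces/test1/kuryrnetworks/test1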


@@ -0,0 +1,161 @@
+# Copyright 2020 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from oslo_config import cfg as oslo_cfg
+from oslo_log import log as logging
+
+from kuryr_kubernetes import clients
+from kuryr_kubernetes import constants
+from kuryr_kubernetes.controller.drivers import base as drivers
+from kuryr_kubernetes.controller.drivers import utils as driver_utils
+from kuryr_kubernetes import exceptions as k_exc
+from kuryr_kubernetes.handlers import k8s_base
+
+LOG = logging.getLogger(__name__)
+
+
+class KuryrNetworkHandler(k8s_base.ResourceEventHandler):
+    """Controller side of KuryrNetwork process for Kubernetes pods.
+
+    `KuryrNetworkHandler` runs on the Kuryr-Kubernetes controller and is
+    responsible for creating the OpenStack resources associated to the
+    newly created namespaces, and update the KuryrNetwork CRD status with
+    them.
+    """
+    OBJECT_KIND = constants.K8S_OBJ_KURYRNETWORK
+    OBJECT_WATCH_PATH = constants.K8S_API_CRD_KURYRNETWORKS
+
+    def __init__(self):
+        super(KuryrNetworkHandler, self).__init__()
+        self._drv_project = drivers.NamespaceProjectDriver.get_instance()
+        self._drv_subnets = drivers.PodSubnetsDriver.get_instance()
+        self._drv_sg = drivers.PodSecurityGroupsDriver.get_instance()
+        self._drv_vif_pool = drivers.VIFPoolDriver.get_instance(
+            specific_driver='multi_pool')
+        self._drv_vif_pool.set_vif_driver()
+        if self._is_network_policy_enabled():
+            self._drv_lbaas = drivers.LBaaSDriver.get_instance()
+            self._drv_svc_sg = (
+                drivers.ServiceSecurityGroupsDriver.get_instance())
+
+    def on_present(self, kuryrnet_crd):
+        ns_name = kuryrnet_crd['spec']['nsName']
+        project_id = kuryrnet_crd['spec']['projectId']
+        kns_status = kuryrnet_crd.get('status', {})
+
+        crd_creation = False
+        net_id = kns_status.get('netId')
+        if not net_id:
+            net_id = self._drv_subnets.create_network(ns_name, project_id)
+            status = {'netId': net_id}
+            self._patch_kuryrnetwork_crd(kuryrnet_crd, status)
+            crd_creation = True
+        subnet_id = kns_status.get('subnetId')
+        if not subnet_id or crd_creation:
+            subnet_id, subnet_cidr = self._drv_subnets.create_subnet(
+                ns_name, project_id, net_id)
+            status = {'subnetId': subnet_id, 'subnetCIDR': subnet_cidr}
+            self._patch_kuryrnetwork_crd(kuryrnet_crd, status)
+            crd_creation = True
+        if not kns_status.get('routerId') or crd_creation:
+            router_id = self._drv_subnets.add_subnet_to_router(subnet_id)
+            status = {'routerId': router_id, 'populated': False}
+            self._patch_kuryrnetwork_crd(kuryrnet_crd, status)
+            crd_creation = True
+
+        # check labels to create sg rules
+        ns_labels = kns_status.get('nsLabels', {})
+        if (crd_creation or
+                ns_labels != kuryrnet_crd['spec']['nsLabels']):
+            # update SG and svc SGs
+            namespace = driver_utils.get_namespace(ns_name)
+            crd_selectors = self._drv_sg.update_namespace_sg_rules(namespace)
+            if (self._is_network_policy_enabled() and crd_selectors and
+                    oslo_cfg.CONF.octavia_defaults.enforce_sg_rules):
+                services = driver_utils.get_services()
+                self._update_services(services, crd_selectors, project_id)
+            # update status
+            status = {'nsLabels': kuryrnet_crd['spec']['nsLabels']}
+            self._patch_kuryrnetwork_crd(kuryrnet_crd, status, labels=True)
+
+    def on_finalize(self, kuryrnet_crd):
+        LOG.debug("Deleting kuryrnetwork CRD resources: %s", kuryrnet_crd)
+
+        net_id = kuryrnet_crd['status'].get('netId')
+        if net_id:
+            self._drv_vif_pool.delete_network_pools(
+                kuryrnet_crd['status']['netId'])
+            try:
+                self._drv_subnets.delete_namespace_subnet(kuryrnet_crd)
+            except k_exc.ResourceNotReady:
+                LOG.debug("Subnet is not ready to be removed.")
+                # TODO(ltomasbo): Once KuryrPort CRDs is supported, we should
+                # execute a delete network ports method here to remove the
+                # ports associated to the namespace/subnet, ensuring next
+                # retry will be successful
+                raise
+
+        namespace = {
+            'metadata': {'name': kuryrnet_crd['spec']['nsName']}}
+        crd_selectors = self._drv_sg.delete_namespace_sg_rules(namespace)
+
+        if (self._is_network_policy_enabled() and crd_selectors and
+                oslo_cfg.CONF.octavia_defaults.enforce_sg_rules):
+            project_id = kuryrnet_crd['spec']['projectId']
+            services = driver_utils.get_services()
+            self._update_services(services, crd_selectors, project_id)
+
+        kubernetes = clients.get_kubernetes_client()
+        LOG.debug('Removing finalizer for KuryrNet CRD %s', kuryrnet_crd)
+        try:
+            kubernetes.patch_crd('metadata',
+                                 kuryrnet_crd['metadata']['selfLink'],
+                                 'finalizers',
+                                 action='remove')
+        except k_exc.K8sClientException:
+            LOG.exception('Error removing kuryrnetwork CRD finalizer for %s',
+                          kuryrnet_crd)
+            raise
+
+    def _is_network_policy_enabled(self):
+        enabled_handlers = oslo_cfg.CONF.kubernetes.enabled_handlers
+        svc_sg_driver = oslo_cfg.CONF.kubernetes.service_security_groups_driver
+        return ('policy' in enabled_handlers and svc_sg_driver == 'policy')
+
+    def _update_services(self, services, crd_selectors, project_id):
+        for service in services.get('items'):
+            if not driver_utils.service_matches_affected_pods(
+                    service, crd_selectors):
+                continue
+            sgs = self._drv_svc_sg.get_security_groups(service,
+                                                       project_id)
+            self._drv_lbaas.update_lbaas_sg(service, sgs)
+
+    def _patch_kuryrnetwork_crd(self, kuryrnet_crd, status, labels=False):
+        kubernetes = clients.get_kubernetes_client()
+        LOG.debug('Patching KuryrNetwork CRD %s', kuryrnet_crd)
+        try:
+            if labels:
+                kubernetes.patch_crd('status',
+                                     kuryrnet_crd['metadata']['selfLink'],
+                                     status)
+            else:
+                kubernetes.patch('status',
+                                 kuryrnet_crd['metadata']['selfLink'],
+                                 status)
+        except k_exc.K8sResourceNotFound:
+            LOG.debug('KuryrNetwork CRD not found %s', kuryrnet_crd)
+        except k_exc.K8sClientException:
+            LOG.exception('Error updating kuryrNetwork CRD %s', kuryrnet_crd)
+            raise
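on_present above commits each Neutron step to the CRD status before starting
the next one, so a controller crash mid-reconciliation is safe: on retry,
steps already recorded in status are skipped (or, via the crd_creation flag,
re-driven as a group right after a fresh creation). A simplified sketch of
that pattern, where drv and patch_status stand in for the subnet driver and
the status patch helper (both are placeholders, not real APIs):

    def reconcile(kns, drv, patch_status):
        # Simplified: the real handler also re-runs later steps whenever an
        # earlier one just ran (the crd_creation flag above).
        spec, status = kns['spec'], kns.get('status', {})
        net_id = status.get('netId')
        if not net_id:
            net_id = drv.create_network(spec['nsName'], spec['projectId'])
            patch_status({'netId': net_id})     # commit before the next step
        subnet_id = status.get('subnetId')
        if not subnet_id:
            subnet_id, cidr = drv.create_subnet(spec['nsName'],
                                                spec['projectId'], net_id)
            patch_status({'subnetId': subnet_id, 'subnetCIDR': cidr})
        if not status.get('routerId'):
            router_id = drv.add_subnet_to_router(subnet_id)
            patch_status({'routerId': router_id, 'populated': False})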


@@ -1,4 +1,4 @@
-# Copyright 2019 Red Hat, Inc.
+# Copyright 2020 Red Hat, Inc.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -14,9 +14,9 @@
 from oslo_log import log as logging

+from kuryr_kubernetes import clients
 from kuryr_kubernetes import constants
 from kuryr_kubernetes.controller.drivers import base as drivers
-from kuryr_kubernetes.controller.drivers import utils as driver_utils
 from kuryr_kubernetes import exceptions
 from kuryr_kubernetes.handlers import k8s_base
 from kuryr_kubernetes import utils
@@ -24,45 +24,35 @@ from kuryr_kubernetes import utils
 LOG = logging.getLogger(__name__)


-class KuryrNetHandler(k8s_base.ResourceEventHandler):
-    """Controller side of KuryrNet process for Kubernetes pods.
+class KuryrNetworkPopulationHandler(k8s_base.ResourceEventHandler):
+    """Controller side of KuryrNetwork process for Kubernetes pods.

-    `KuryrNetHandler` runs on the Kuryr-Kubernetes controller and is
-    responsible for populating pools for newly created namespaces.
+    `KuryrNetworkPopulationHandler` runs on the Kuryr-Kubernetes controller
+    and is responsible for populating pools for newly created namespaces.
     """
-    OBJECT_KIND = constants.K8S_OBJ_KURYRNET
-    OBJECT_WATCH_PATH = constants.K8S_API_CRD_KURYRNETS
+    OBJECT_KIND = constants.K8S_OBJ_KURYRNETWORK
+    OBJECT_WATCH_PATH = constants.K8S_API_CRD_KURYRNETWORKS

     def __init__(self):
-        super(KuryrNetHandler, self).__init__()
-        self._drv_project = drivers.NamespaceProjectDriver.get_instance()
+        super(KuryrNetworkPopulationHandler, self).__init__()
         self._drv_subnets = drivers.PodSubnetsDriver.get_instance()
         self._drv_vif_pool = drivers.VIFPoolDriver.get_instance(
             specific_driver='multi_pool')
         self._drv_vif_pool.set_vif_driver()

     def on_added(self, kuryrnet_crd):
-        subnet_id = kuryrnet_crd['spec'].get('subnetId')
-        if kuryrnet_crd['spec'].get('populated'):
+        subnet_id = kuryrnet_crd['status'].get('subnetId')
+        if not subnet_id:
+            return
+        if kuryrnet_crd['status'].get('populated'):
             LOG.debug("Subnet %s already populated", subnet_id)
             return

-        namespace = kuryrnet_crd['metadata']['annotations'].get(
-            'namespaceName')
-        namespace_obj = driver_utils.get_namespace(namespace)
-        if not namespace_obj:
-            LOG.debug("Skipping Kuryrnet addition. Inexistent namespace.")
-            return
-        namespace_kuryrnet_annotations = driver_utils.get_annotations(
-            namespace_obj, constants.K8S_ANNOTATION_NET_CRD)
-        if namespace_kuryrnet_annotations != kuryrnet_crd['metadata']['name']:
-            # NOTE(ltomasbo): Ensure pool is not populated if namespace is not
-            # yet annotated with kuryrnet information
-            return
+        namespace = kuryrnet_crd['spec'].get('nsName')
+        project_id = kuryrnet_crd['spec'].get('projectId')
         # NOTE(ltomasbo): using namespace name instead of object as it is not
         # required
-        project_id = self._drv_project.get_project(namespace)
         subnets = self._drv_subnets.get_namespace_subnet(namespace, subnet_id)

         nodes = utils.get_nodes_ips()
@@ -74,7 +64,7 @@ class KuryrNetHandler(k8s_base.ResourceEventHandler):
         # the pools will not get the ports loaded (as they are not ACTIVE)
         # and new population actions may be triggered if the controller was
         # restarted before performing the populated=true patching.
-        driver_utils.patch_kuryrnet_crd(kuryrnet_crd, populated=True)
+        self._patch_kuryrnetwork_crd(kuryrnet_crd, populated=True)
         # TODO(ltomasbo): Skip the master node where pods are not usually
         # allocated.
         for node_ip in nodes:
@@ -86,5 +76,16 @@ class KuryrNetHandler(k8s_base.ResourceEventHandler):
             except exceptions.ResourceNotReady:
                 # Ensure the repopulation is retriggered if the system was not
                 # yet ready to perform the repopulation actions
-                driver_utils.patch_kuryrnet_crd(kuryrnet_crd, populated=False)
+                self._patch_kuryrnetwork_crd(kuryrnet_crd, populated=False)
                 raise
+
+    def _patch_kuryrnetwork_crd(self, kns_crd, populated=True):
+        kubernetes = clients.get_kubernetes_client()
+        crd_name = kns_crd['metadata']['name']
+        LOG.debug('Patching KuryrNetwork CRD %s' % crd_name)
+        try:
+            kubernetes.patch_crd('status', kns_crd['metadata']['selfLink'],
+                                 {'populated': populated})
+        except exceptions.K8sClientException:
+            LOG.exception('Error updating kuryrnet CRD %s', crd_name)
+            raise
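The populated flag is what makes pool prepopulation restart-safe: it is set
to True before the ports are actually loaded into the pools, and reset to
False if population hits ResourceNotReady, so a restarted controller neither
repeats completed work nor skips interrupted work. Given the patch_crd
changes later in this commit, the helper above ends up sending a JSON patch
like the following (the selfLink is illustrative):

    # kubernetes.patch_crd('status', self_link, {'populated': True}) sends:
    expected_patch = [{'op': 'replace',
                       'path': '/status/populated',
                       'value': True}]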


@@ -584,12 +584,12 @@ class LoadBalancerHandler(k8s_base.ResourceEventHandler):
         status_data = {"loadBalancer": {
             "ingress": [{"ip": lb_ip_address.format()}]}}
         k8s = clients.get_kubernetes_client()
-        svc_link = self._get_service_link(endpoints)
+        svc_status_link = self._get_service_link(endpoints) + '/status'
         try:
-            k8s.patch("status", svc_link, status_data)
+            k8s.patch("status", svc_status_link, status_data)
         except k_exc.K8sClientException:
             # REVISIT(ivc): only raise ResourceNotReady for NotFound
-            raise k_exc.ResourceNotReady(svc_link)
+            raise k_exc.ResourceNotReady(svc_status_link)

     def _get_service_link(self, endpoints):
         ep_link = endpoints['metadata']['selfLink']
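K8sClient.patch no longer appends /status itself (see the k8s_client change
later in this commit), so the caller now targets the status subresource
explicitly. For a hypothetical Service, the resulting link is:

    svc_link = '/api/v1/namespaces/default/services/demo'  # from selfLink
    svc_status_link = svc_link + '/status'
    # -> /api/v1/namespaces/default/services/demo/status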


@@ -12,18 +12,11 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-import eventlet
-import time
-
-from openstack import exceptions as os_exc
-from oslo_config import cfg as oslo_cfg
 from oslo_log import log as logging
-from oslo_serialization import jsonutils

 from kuryr_kubernetes import clients
 from kuryr_kubernetes import constants
 from kuryr_kubernetes.controller.drivers import base as drivers
-from kuryr_kubernetes.controller.drivers import utils as driver_utils
 from kuryr_kubernetes import exceptions
 from kuryr_kubernetes.handlers import k8s_base
 from kuryr_kubernetes import utils
@@ -31,9 +24,6 @@ from kuryr_kubernetes import utils
 LOG = logging.getLogger(__name__)

-DEFAULT_CLEANUP_INTERVAL = 60
-DEFAULT_CLEANUP_RETRIES = 10
-

 class NamespaceHandler(k8s_base.ResourceEventHandler):
     OBJECT_KIND = constants.K8S_OBJ_NAMESPACE
@@ -42,114 +32,116 @@ class NamespaceHandler(k8s_base.ResourceEventHandler):
     def __init__(self):
         super(NamespaceHandler, self).__init__()
         self._drv_project = drivers.NamespaceProjectDriver.get_instance()
-        self._drv_subnets = drivers.PodSubnetsDriver.get_instance()
-        self._drv_sg = drivers.PodSecurityGroupsDriver.get_instance()
-        self._drv_vif_pool = drivers.VIFPoolDriver.get_instance(
-            specific_driver='multi_pool')
-        self._drv_vif_pool.set_vif_driver()
-        if self._is_network_policy_enabled():
-            self._drv_lbaas = drivers.LBaaSDriver.get_instance()
-            self._drv_svc_sg = (
-                drivers.ServiceSecurityGroupsDriver.get_instance())
-
-        # NOTE(ltomasbo): Checks and clean up leftovers due to
-        # kuryr-controller restarts
-        eventlet.spawn(self._cleanup_namespace_leftovers)
+        self._upgrade_crds()
+
+    def _upgrade_crds(self):
+        k8s = clients.get_kubernetes_client()
+        try:
+            net_crds = k8s.get(constants.K8S_API_CRD_KURYRNETS)
+            namespaces = k8s.get(constants.K8S_API_NAMESPACES)
+        except exceptions.K8sResourceNotFound:
+            return
+        except exceptions.K8sClientException:
+            LOG.warning("Error retrieving namespace information")
+            raise
+        ns_dict = {'ns-' + ns['metadata']['name']: ns
+                   for ns in namespaces.get('items')}
+
+        for net_crd in net_crds.get('items'):
+            try:
+                ns = ns_dict[net_crd['metadata']['name']]
+            except KeyError:
+                # Note(ltomasbo): The CRD does not have an associated
+                # namespace. It must be deleted
+                LOG.debug('No namespace associated, deleting kuryrnet crd: '
+                          '%s', net_crd)
+            else:
+                try:
+                    ns_net_annotations = ns['metadata']['annotations'][
+                        constants.K8S_ANNOTATION_NET_CRD]
+                except KeyError:
+                    LOG.debug('Namespace associated is not annotated: %s', ns)
+                else:
+                    LOG.debug('Removing annotation: %s', ns_net_annotations)
+                    k8s.remove_annotations(ns['metadata']['selfLink'],
+                                           constants.K8S_ANNOTATION_NET_CRD)
+            try:
+                k8s.delete(net_crd['metadata']['selfLink'])
+            except exceptions.K8sResourceNotFound:
+                LOG.debug('Kuryrnet object already deleted: %s', net_crd)

     def on_present(self, namespace):
+        ns_labels = namespace['metadata'].get('labels', {})
         ns_name = namespace['metadata']['name']
-        current_namespace_labels = namespace['metadata'].get('labels')
-        previous_namespace_labels = driver_utils.get_annotated_labels(
-            namespace, constants.K8S_ANNOTATION_NAMESPACE_LABEL)
-        LOG.debug("Got previous namespace labels from annotation: %r",
-                  previous_namespace_labels)
-
-        project_id = self._drv_project.get_project(namespace)
-        if current_namespace_labels != previous_namespace_labels:
-            crd_selectors = self._drv_sg.update_namespace_sg_rules(namespace)
-            self._set_namespace_labels(namespace, current_namespace_labels)
-            if (self._is_network_policy_enabled() and crd_selectors and
-                    oslo_cfg.CONF.octavia_defaults.enforce_sg_rules):
-                services = driver_utils.get_services()
-                self._update_services(services, crd_selectors, project_id)
-
-        net_crd_id = self._get_net_crd_id(namespace)
-        if net_crd_id:
-            LOG.debug("CRD existing at the new namespace")
+        kns_crd = self._get_kns_crd(ns_name)
+        if kns_crd:
+            LOG.debug("Previous CRD existing at the new namespace.")
+            self._update_labels(kns_crd, ns_labels)
             return

-        net_crd_name = 'ns-' + ns_name
-        net_crd = self._get_net_crd(net_crd_name)
-        if net_crd:
-            LOG.debug("Previous CRD existing at the new namespace. "
-                      "Deleting namespace resources and retrying its creation.")
-            self.on_deleted(namespace, net_crd)
-            raise exceptions.ResourceNotReady(namespace)
-
-        # NOTE(ltomasbo): Ensure there is no previously created networks
-        # leftovers due to a kuryr-controller crash/restart
-        LOG.debug("Deleting leftovers network resources for namespace: %s",
-                  ns_name)
-        self._drv_subnets.cleanup_namespace_networks(ns_name)
-
-        LOG.debug("Creating network resources for namespace: %s", ns_name)
-        net_crd_spec = self._drv_subnets.create_namespace_network(ns_name,
-                                                                  project_id)
-        # create CRD resource for the network
         try:
-            net_crd = self._add_kuryrnet_crd(ns_name, net_crd_spec)
-            self._drv_sg.create_namespace_sg_rules(namespace)
-            self._set_net_crd(namespace, net_crd)
-        except (exceptions.K8sClientException,
-                exceptions.K8sResourceNotFound):
-            LOG.exception("Kuryrnet CRD creation failed. Rolling back "
-                          "resources created for the namespace.")
-            self._drv_subnets.rollback_network_resources(net_crd_spec, ns_name)
-            try:
-                self._del_kuryrnet_crd(net_crd_name)
-            except exceptions.K8sClientException:
-                LOG.exception("Error when trying to rollback the KuryrNet CRD "
-                              "object %s", net_crd_name)
+            self._add_kuryrnetwork_crd(ns_name, ns_labels)
+        except exceptions.K8sClientException:
+            LOG.exception("Kuryrnetwork CRD creation failed.")
             raise exceptions.ResourceNotReady(namespace)

-    def on_deleted(self, namespace, net_crd=None):
-        LOG.debug("Deleting namespace: %s", namespace)
-        if not net_crd:
-            net_crd_id = self._get_net_crd_id(namespace)
-            if not net_crd_id:
-                LOG.warning("There is no CRD annotated at the namespace %s",
-                            namespace)
-                return
-            net_crd = self._get_net_crd(net_crd_id)
-            if not net_crd:
-                LOG.warning("This should not happen. Probably this is event "
-                            "is processed twice due to a restart or etcd is "
-                            "not in sync")
-                # NOTE(ltomasbo): We should rely on etcd properly behaving, so
-                # we are returning here to prevent duplicated events processing
-                # but not to prevent etcd failures.
-                return
-
-        net_crd_name = net_crd['metadata']['name']
-
-        self._drv_vif_pool.delete_network_pools(net_crd['spec']['netId'])
-        try:
-            self._drv_subnets.delete_namespace_subnet(net_crd)
-        except exceptions.ResourceNotReady:
-            LOG.debug("Subnet is not ready to be removed.")
-            # TODO(ltomasbo): Once KuryrPort CRDs is supported, we should
-            # execute a delete network ports method here to remove the ports
-            # associated to the namespace/subnet, ensuring next retry will be
-            # successful
-            raise
-        self._del_kuryrnet_crd(net_crd_name)
-        crd_selectors = self._drv_sg.delete_namespace_sg_rules(namespace)
-
-        if (self._is_network_policy_enabled() and crd_selectors and
-                oslo_cfg.CONF.octavia_defaults.enforce_sg_rules):
-            project_id = self._drv_project.get_project(namespace)
-            services = driver_utils.get_services()
-            self._update_services(services, crd_selectors, project_id)
+    def _update_labels(self, kns_crd, ns_labels):
+        kns_status = kns_crd.get('status')
+        if kns_status:
+            kns_crd_labels = kns_crd['status'].get('nsLabels', {})
+            if kns_crd_labels == ns_labels:
+                # Labels are already up to date, nothing to do
+                return
+
+        kubernetes = clients.get_kubernetes_client()
+        LOG.debug('Patching KuryrNetwork CRD %s', kns_crd)
+        try:
+            kubernetes.patch_crd('spec', kns_crd['metadata']['selfLink'],
+                                 {'nsLabels': ns_labels})
+        except exceptions.K8sResourceNotFound:
+            LOG.debug('KuryrNetwork CRD not found %s', kns_crd)
+        except exceptions.K8sClientException:
+            LOG.exception('Error updating kuryrnetwork CRD %s', kns_crd)
+            raise
+
+    def _get_kns_crd(self, namespace):
+        k8s = clients.get_kubernetes_client()
+        try:
+            kuryrnetwork_crd = k8s.get('{}/{}/kuryrnetworks/{}'.format(
+                constants.K8S_API_CRD_NAMESPACES, namespace,
+                namespace))
+        except exceptions.K8sResourceNotFound:
+            return None
+        except exceptions.K8sClientException:
+            LOG.exception("Kubernetes Client Exception.")
+            raise
+        return kuryrnetwork_crd
+
+    def _add_kuryrnetwork_crd(self, namespace, ns_labels):
+        project_id = self._drv_project.get_project(namespace)
+        kubernetes = clients.get_kubernetes_client()
+
+        kns_crd = {
+            'apiVersion': 'openstack.org/v1',
+            'kind': 'KuryrNetwork',
+            'metadata': {
+                'name': namespace,
+                'finalizers': [constants.KURYRNETWORK_FINALIZER],
+            },
+            'spec': {
+                'nsName': namespace,
+                'projectId': project_id,
+                'nsLabels': ns_labels,
+            }
+        }
+        try:
+            kubernetes.post('{}/{}/kuryrnetworks'.format(
+                constants.K8S_API_CRD_NAMESPACES, namespace), kns_crd)
+        except exceptions.K8sClientException:
+            LOG.exception("Kubernetes Client Exception creating kuryrnetwork "
+                          "CRD.")
+            raise

     def is_ready(self, quota):
         if not utils.has_kuryr_crd(constants.K8S_API_CRD_KURYRNETS):
@@ -165,173 +157,3 @@ class NamespaceHandler(k8s_base.ResourceEventHandler):
             if not utils.is_available(resource, resource_quota):
                 return False
         return True
-
-    def _get_net_crd_id(self, namespace):
-        try:
-            annotations = namespace['metadata']['annotations']
-            net_crd_id = annotations[constants.K8S_ANNOTATION_NET_CRD]
-        except KeyError:
-            return None
-        return net_crd_id
-
-    def _get_net_crd(self, net_crd_id):
-        k8s = clients.get_kubernetes_client()
-        try:
-            kuryrnet_crd = k8s.get('%s/kuryrnets/%s' % (constants.K8S_API_CRD,
-                                                        net_crd_id))
-        except exceptions.K8sResourceNotFound:
-            return None
-        except exceptions.K8sClientException:
-            LOG.exception("Kubernetes Client Exception.")
-            raise
-        return kuryrnet_crd
-
-    def _set_net_crd(self, namespace, net_crd):
-        LOG.debug("Setting CRD annotations: %s", net_crd)
-
-        k8s = clients.get_kubernetes_client()
-        k8s.annotate(namespace['metadata']['selfLink'],
-                     {constants.K8S_ANNOTATION_NET_CRD:
-                      net_crd['metadata']['name']},
-                     resource_version=namespace['metadata']['resourceVersion'])
-
-    def _add_kuryrnet_crd(self, namespace, net_crd_spec):
-        kubernetes = clients.get_kubernetes_client()
-        net_crd_name = "ns-" + namespace
-        spec = {k: v for k, v in net_crd_spec.items()}
-        # NOTE(ltomasbo): To know if the subnet has been populated with pools.
-        # This is only needed by the kuryrnet handler to skip actions. But its
-        # addition does not have any impact if not used
-        spec['populated'] = False
-
-        net_crd = {
-            'apiVersion': 'openstack.org/v1',
-            'kind': 'KuryrNet',
-            'metadata': {
-                'name': net_crd_name,
-                'annotations': {
-                    'namespaceName': namespace,
-                }
-            },
-            'spec': spec,
-        }
-        try:
-            kubernetes.post('%s/kuryrnets' % constants.K8S_API_CRD, net_crd)
-        except exceptions.K8sClientException:
-            LOG.exception("Kubernetes Client Exception creating kuryrnet "
-                          "CRD.")
-            raise
-        return net_crd
-
-    def _del_kuryrnet_crd(self, net_crd_name):
-        kubernetes = clients.get_kubernetes_client()
-        try:
-            kubernetes.delete('%s/kuryrnets/%s' % (constants.K8S_API_CRD,
-                                                   net_crd_name))
-        except exceptions.K8sResourceNotFound:
-            LOG.debug("KuryrNetPolicy CRD not found: %s", net_crd_name)
-        except exceptions.K8sClientException:
-            LOG.exception("Kubernetes Client Exception deleting kuryrnet "
-                          "CRD.")
-            raise
-
-    def _set_namespace_labels(self, namespace, labels):
-        if not labels:
-            LOG.debug("Removing Label annotation: %r", labels)
-            annotation = None
-        else:
-            annotation = jsonutils.dumps(labels, sort_keys=True)
-            LOG.debug("Setting Labels annotation: %r", annotation)
-
-        k8s = clients.get_kubernetes_client()
-        k8s.annotate(namespace['metadata']['selfLink'],
-                     {constants.K8S_ANNOTATION_NAMESPACE_LABEL: annotation},
-                     resource_version=namespace['metadata']['resourceVersion'])
-
-    def _update_services(self, services, crd_selectors, project_id):
-        for service in services.get('items'):
-            if not driver_utils.service_matches_affected_pods(
-                    service, crd_selectors):
-                continue
-            sgs = self._drv_svc_sg.get_security_groups(service,
-                                                       project_id)
-            self._drv_lbaas.update_lbaas_sg(service, sgs)
-
-    def _is_network_policy_enabled(self):
-        enabled_handlers = oslo_cfg.CONF.kubernetes.enabled_handlers
-        svc_sg_driver = oslo_cfg.CONF.kubernetes.service_security_groups_driver
-        return ('policy' in enabled_handlers and svc_sg_driver == 'policy')
-
-    def _cleanup_namespace_leftovers(self):
-        k8s = clients.get_kubernetes_client()
-        for i in range(DEFAULT_CLEANUP_RETRIES):
-            retry = False
-            try:
-                net_crds = k8s.get(constants.K8S_API_CRD_KURYRNETS)
-                namespaces = k8s.get(constants.K8S_API_NAMESPACES)
-            except exceptions.K8sClientException:
-                LOG.warning("Error retrieving namespace information")
-                return
-            ns_dict = {'ns-' + ns['metadata']['name']: ns
-                       for ns in namespaces.get('items')}
-
-            for net_crd in net_crds.get('items'):
-                try:
-                    ns_dict[net_crd['metadata']['name']]
-                except KeyError:
-                    # Note(ltomasbo): The CRD does not have an associated
-                    # namespace. It must be deleted
-                    LOG.debug("Removing namespace leftovers associated to: "
-                              "%s", net_crd)
-                    # removing the 'ns-' preceding the namespace name on the
-                    # net CRDs
-                    ns_name = net_crd['metadata']['name'][3:]
-                    # only namespace name is needed for on_deleted, faking the
-                    # nonexistent object
-                    ns_to_delete = {'metadata': {'name': ns_name}}
-                    try:
-                        self.on_deleted(ns_to_delete, net_crd)
-                    except exceptions.ResourceNotReady:
-                        LOG.debug("Cleanup of namespace %s failed. A retry "
-                                  "will be triggered.", ns_name)
-                        retry = True
-                        continue
-            if not retry:
-                break
-            # Leave time between retries to help Neutron to complete actions
-            time.sleep(DEFAULT_CLEANUP_INTERVAL)
-
-        # NOTE(ltomasbo): to ensure we don't miss created network resources
-        # without associated kuryrnet objects, we do a second search
-        os_net = clients.get_network_client()
-        tags = oslo_cfg.CONF.neutron_defaults.resource_tags
-        if not tags:
-            return
-        for i in range(DEFAULT_CLEANUP_RETRIES):
-            retry = False
-            subnets = os_net.subnets(tags=tags)
-            namespaces = k8s.get(constants.K8S_API_NAMESPACES)
-            ns_nets = ['ns/' + ns['metadata']['name'] + '-subnet'
-                       for ns in namespaces.get('items')]
-            for subnet in subnets:
-                # NOTE(ltomasbo): subnet name is ns/NAMESPACE_NAME-subnet
-                if subnet.name not in ns_nets:
-                    if (subnet.subnet_pool_id !=
-                            oslo_cfg.CONF.namespace_subnet.pod_subnet_pool):
-                        # Not a kuryr generated network
-                        continue
-                    try:
-                        self._drv_subnets._delete_namespace_network_resources(
-                            subnet.id, subnet.network_id)
-                    except (os_exc.SDKException, exceptions.ResourceNotReady):
-                        LOG.debug("Cleanup of network namespace resources %s "
-                                  "failed. A retry will be triggered.",
-                                  subnet.network_id)
-                        retry = True
-                        continue
-            if not retry:
-                break
-            # Leave time between retries to help Neutron to complete actions
-            time.sleep(DEFAULT_CLEANUP_INTERVAL)


@@ -145,16 +145,16 @@ class NetworkPolicyHandler(k8s_base.ResourceEventHandler):
     def _get_policy_net_id(self, policy):
         policy_ns = policy['metadata']['namespace']
-        kuryrnet_name = 'ns-' + str(policy_ns)

         kubernetes = clients.get_kubernetes_client()
         try:
-            net_crd = kubernetes.get('{}/{}'.format(
-                k_const.K8S_API_CRD_KURYRNETS, kuryrnet_name))
+            path = (f'{k_const.K8S_API_CRD_NAMESPACES}/{policy_ns}/'
+                    f'kuryrnetworks/{policy_ns}')
+            net_crd = kubernetes.get(path)
         except exceptions.K8sClientException:
             LOG.exception("Kubernetes Client Exception.")
             raise
-        return net_crd['spec']['netId']
+        return net_crd['status']['netId']

     def _is_egress_only_policy(self, policy):
         policy_types = policy['spec'].get('policyTypes', [])


@@ -177,10 +177,10 @@ class VIFHandler(k8s_base.ResourceEventHandler):
         except k_exc.ResourceNotReady:
             # NOTE(ltomasbo): If the namespace object gets deleted first the
             # namespace security group driver will raise a ResourceNotReady
-            # exception as it cannot access anymore the kuryrnet CRD annotated
-            # on the namespace object. In such case we set security groups to
-            # empty list so that if pools are enabled they will be properly
-            # released.
+            # exception as it cannot access anymore the kuryrnetwork CRD
+            # annotated on the namespace object. In such case we set security
+            # groups to empty list so that if pools are enabled they will be
+            # properly released.
             security_groups = []

         state = driver_utils.get_pod_state(pod)


@@ -34,10 +34,6 @@ class K8sResourceNotFound(K8sClientException):
                                             "found: %r" % resource)


-class InvalidKuryrNetCRD(Exception):
-    pass
-
-
 class InvalidKuryrNetworkAnnotation(Exception):
     pass


@@ -68,6 +68,14 @@ class ResourceEventHandler(dispatch.EventConsumer, health.HealthHandler):
         event_type = event.get('type')
         obj = event.get('object')
         if 'MODIFIED' == event_type:
+            deletion_timestamp = None
+            try:
+                deletion_timestamp = obj['metadata']['deletionTimestamp']
+            except (KeyError, TypeError):
+                pass
+            if deletion_timestamp:
+                self.on_finalize(obj)
+                return
             self.on_modified(obj)
             self.on_present(obj)
         elif 'ADDED' == event_type:
@@ -87,3 +95,6 @@ class ResourceEventHandler(dispatch.EventConsumer, health.HealthHandler):
     def on_deleted(self, obj):
         pass
+
+    def on_finalize(self, obj):
+        pass
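With this change, a finalizer-protected object is never seen as DELETED by
the watcher while cleanup is pending: the API server only sets
deletionTimestamp and emits a MODIFIED event, which is now routed to
on_finalize. A sketch of such an event (fields abridged, values invented):

    event = {
        'type': 'MODIFIED',
        'object': {
            'kind': 'KuryrNetwork',
            'metadata': {
                'name': 'test1',
                'deletionTimestamp': '2020-02-10T08:46:03Z',
                'finalizers': ['kuryrnetwork.finalizers.kuryr.openstack.org'],
            },
        },
    }
    # Dispatch calls on_finalize(obj) and returns; once the handler empties
    # the finalizer list, the API server deletes the object for real.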


@@ -109,8 +109,6 @@ class K8sClient(object):
     def patch(self, field, path, data):
         LOG.debug("Patch %(path)s: %(data)s", {
             'path': path, 'data': data})
-        if field == 'status':
-            path = path + '/' + str(field)
         content_type = 'application/merge-patch+json'
         url, header = self._get_url_and_header(path, content_type)
         response = self.session.patch(url, json={field: data},
@@ -119,14 +117,18 @@ class K8sClient(object):
         self._raise_from_response(response)
         return response.json().get('status')

-    def patch_crd(self, field, path, data):
+    def patch_crd(self, field, path, data, action='replace'):
         content_type = 'application/json-patch+json'
         url, header = self._get_url_and_header(path, content_type)

-        data = [{'op': 'replace',
-                 'path': '/{}/{}'.format(field, np_field),
-                 'value': value}
-                for np_field, value in data.items()]
+        if action == 'remove':
+            data = [{'op': action,
+                     'path': f'/{field}/{data}'}]
+        else:
+            data = [{'op': action,
+                     'path': f'/{field}/{crd_field}',
+                     'value': value}
+                    for crd_field, value in data.items()]

         LOG.debug("Patch %(path)s: %(data)s", {
             'path': path, 'data': data})
@@ -167,6 +169,21 @@ class K8sClient(object):
         self._raise_from_response(response)
         return response.json().get('status')

+    def remove_annotations(self, path, annotation_name):
+        content_type = 'application/json-patch+json'
+        url, header = self._get_url_and_header(path, content_type)
+
+        data = [{'op': 'remove',
+                 'path': '/metadata/annotations',
+                 'value': annotation_name}]
+        response = self.session.patch(url, data=jsonutils.dumps(data),
+                                      headers=header, cert=self.cert,
+                                      verify=self.verify_server)
+        if response.ok:
+            return response.json().get('status')
+        raise exc.K8sClientException(response.text)
+
     def post(self, path, body):
         LOG.debug("Post %(path)s: %(body)s", {'path': path, 'body': body})
         url = self._base_url + path
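A sketch of the JSON-patch payloads the extended patch_crd now builds, for a
K8sClient instance k8s and an illustrative selfLink self_link:

    # k8s.patch_crd('status', self_link, {'netId': 'network-uuid'})
    # -> [{'op': 'replace', 'path': '/status/netId',
    #      'value': 'network-uuid'}]

    # k8s.patch_crd('metadata', self_link, 'finalizers', action='remove')
    # -> [{'op': 'remove', 'path': '/metadata/finalizers'}]

Note that with action='remove' the data argument is treated as a single
field name rather than a dict of field/value pairs.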


@@ -19,7 +19,6 @@ import munch
 from openstack import exceptions as os_exc
 from oslo_config import cfg as oslo_cfg

-from kuryr_kubernetes import constants
 from kuryr_kubernetes.controller.drivers import namespace_subnet as subnet_drv
 from kuryr_kubernetes import exceptions as k_exc
 from kuryr_kubernetes.tests import base as test_base
@@ -54,16 +53,6 @@ def get_pod_obj():
         }}

-
-def get_namespace_obj():
-    return {
-        'metadata': {
-            'annotations': {
-                constants.K8S_ANNOTATION_NET_CRD: 'net_crd_url_sample'
-            }
-        }
-    }
-

 class TestNamespacePodSubnetDriver(test_base.TestCase):

     @mock.patch('kuryr_kubernetes.utils.get_subnet')
@@ -110,21 +99,20 @@ class TestNamespacePodSubnetDriver(test_base.TestCase):
         namespace = mock.sentinel.namespace
         subnet_id = mock.sentinel.subnet_id
-        ns = get_namespace_obj()
         crd = {
-            'spec': {
+            'status': {
                 'subnetId': subnet_id
             }
         }

         kubernetes = self.useFixture(k_fix.MockK8sClient()).client
-        kubernetes.get.side_effect = [ns, crd]
+        kubernetes.get.return_value = crd

         subnet_id_resp = cls._get_namespace_subnet_id(m_driver, namespace)
         kubernetes.get.assert_called()
         self.assertEqual(subnet_id, subnet_id_resp)

-    def test__get_namespace_subnet_id_get_namespace_exception(self):
+    def test__get_namespace_subnet_id_get_crd_exception(self):
         cls = subnet_drv.NamespacePodSubnetDriver
         m_driver = mock.MagicMock(spec=cls)
@@ -133,41 +121,6 @@ class TestNamespacePodSubnetDriver(test_base.TestCase):
         kubernetes = self.useFixture(k_fix.MockK8sClient()).client
         kubernetes.get.side_effect = k_exc.K8sClientException

-        self.assertRaises(k_exc.ResourceNotReady,
-                          cls._get_namespace_subnet_id, m_driver, namespace)
-        kubernetes.get.assert_called_once()
-
-    def test__get_namespace_subnet_id_missing_annotation(self):
-        cls = subnet_drv.NamespacePodSubnetDriver
-        m_driver = mock.MagicMock(spec=cls)
-
-        namespace = mock.sentinel.namespace
-        subnet_id = mock.sentinel.subnet_id
-        ns = get_namespace_obj()
-        del ns['metadata']['annotations'][constants.K8S_ANNOTATION_NET_CRD]
-        crd = {
-            'spec': {
-                'subnetId': subnet_id
-            }
-        }
-
-        kubernetes = self.useFixture(k_fix.MockK8sClient()).client
-        kubernetes.get.side_effect = [ns, crd]
-
-        self.assertRaises(k_exc.ResourceNotReady,
-                          cls._get_namespace_subnet_id, m_driver, namespace)
-        kubernetes.get.assert_called_once()
-
-    def test__get_namespace_subnet_id_get_crd_exception(self):
-        cls = subnet_drv.NamespacePodSubnetDriver
-        m_driver = mock.MagicMock(spec=cls)
-
-        namespace = mock.sentinel.namespace
-        ns = get_namespace_obj()
-
-        kubernetes = self.useFixture(k_fix.MockK8sClient()).client
-        kubernetes.get.side_effect = [ns, k_exc.K8sClientException]
-
         self.assertRaises(k_exc.K8sClientException,
                           cls._get_namespace_subnet_id, m_driver, namespace)
         kubernetes.get.assert_called()
@@ -206,7 +159,24 @@ class TestNamespacePodSubnetDriver(test_base.TestCase):
         os_net.delete_network.assert_called_once_with(net_id)
         os_net.ports.assert_called_with(status='DOWN', network_id=net_id)

-    def test_create_namespace_network(self):
+    def test_create_network(self):
+        cls = subnet_drv.NamespacePodSubnetDriver
+        m_driver = mock.MagicMock(spec=cls)
+
+        namespace = 'test'
+        project_id = mock.sentinel.project_id
+        os_net = self.useFixture(k_fix.MockNetworkClient()).client
+        os_net.networks.return_value = iter([])
+        net = munch.Munch({'id': mock.sentinel.net})
+        os_net.create_network.return_value = net
+
+        net_id_resp = cls.create_network(m_driver, namespace, project_id)
+        self.assertEqual(net_id_resp, net['id'])
+        os_net.create_network.assert_called_once()
+        os_net.networks.assert_called_once()
+
+    def test_create_network_existing(self):
         cls = subnet_drv.NamespacePodSubnetDriver
         m_driver = mock.MagicMock(spec=cls)
@@ -214,126 +184,101 @@ class TestNamespacePodSubnetDriver(test_base.TestCase):
         project_id = mock.sentinel.project_id
         os_net = self.useFixture(k_fix.MockNetworkClient()).client
         net = munch.Munch({'id': mock.sentinel.net})
-        os_net.create_network.return_value = net
+        os_net.networks.return_value = iter([net])
+
+        net_id_resp = cls.create_network(m_driver, namespace, project_id)
+        self.assertEqual(net_id_resp, net['id'])
+        os_net.create_network.assert_not_called()
+        os_net.networks.assert_called_once()
+
+    def test_create_subnet(self):
+        cls = subnet_drv.NamespacePodSubnetDriver
+        m_driver = mock.MagicMock(spec=cls)
+
+        namespace = 'test'
+        project_id = mock.sentinel.project_id
+        net_id = mock.sentinel.net_id
         subnet = munch.Munch({'id': mock.sentinel.subnet,
                               'cidr': mock.sentinel.cidr})
+        os_net = self.useFixture(k_fix.MockNetworkClient()).client
+        os_net.subnets.return_value = iter([])
         os_net.create_subnet.return_value = subnet
+
+        subnet_id, subnet_cidr = cls.create_subnet(m_driver, namespace,
+                                                   project_id, net_id)
+        self.assertEqual(subnet_id, subnet['id'])
+        self.assertEqual(subnet_cidr, subnet['cidr'])
+        os_net.create_subnet.assert_called_once()
+        os_net.subnets.assert_called_once()
+
+    def test_create_subnet_existing(self):
+        cls = subnet_drv.NamespacePodSubnetDriver
+        m_driver = mock.MagicMock(spec=cls)
+
+        namespace = 'test'
+        project_id = mock.sentinel.project_id
+        net_id = mock.sentinel.net_id
+        subnet = munch.Munch({'id': mock.sentinel.subnet,
+                              'cidr': mock.sentinel.cidr})
+        os_net = self.useFixture(k_fix.MockNetworkClient()).client
+        os_net.subnets.return_value = iter([subnet])
+
+        subnet_id, subnet_cidr = cls.create_subnet(m_driver, namespace,
+                                                   project_id, net_id)
+        self.assertEqual(subnet_id, subnet['id'])
+        self.assertEqual(subnet_cidr, subnet['cidr'])
+        os_net.create_subnet.assert_not_called()
+        os_net.subnets.assert_called_once()
+
+    def test_add_subnet_to_router(self):
+        cls = subnet_drv.NamespacePodSubnetDriver
+        m_driver = mock.MagicMock(spec=cls)
+
+        subnet_id = mock.sentinel.subnet_id
+        os_net = self.useFixture(k_fix.MockNetworkClient()).client
         os_net.add_interface_to_router.return_value = {}
         router_id = 'router1'
         oslo_cfg.CONF.set_override('pod_router',
                                    router_id,
                                    group='namespace_subnet')
-        net_crd = {'netId': net['id'],
-                   'routerId': router_id,
-                   'subnetId': subnet['id'],
-                   'subnetCIDR': subnet['cidr']}

-        net_crd_resp = cls.create_namespace_network(m_driver, namespace,
-                                                    project_id)
+        router_id_resp = cls.add_subnet_to_router(m_driver, subnet_id)
+        self.assertEqual(router_id_resp, router_id)

-        self.assertEqual(net_crd_resp, net_crd)
-        os_net.create_network.assert_called_once()
-        os_net.create_subnet.assert_called_once()
         os_net.add_interface_to_router.assert_called_once()

-    def test_create_namespace_network_net_exception(self):
+    def test_add_subnet_to_router_already_connected(self):
         cls = subnet_drv.NamespacePodSubnetDriver
         m_driver = mock.MagicMock(spec=cls)

-        namespace = 'test'
-        project_id = mock.sentinel.project_id
+        subnet_id = mock.sentinel.subnet_id
         os_net = self.useFixture(k_fix.MockNetworkClient()).client
os_net.create_network.side_effect = os_exc.SDKException os_net.add_interface_to_router.side_effect = (
os_exc.BadRequestException)
router_id = 'router1'
oslo_cfg.CONF.set_override('pod_router',
router_id,
group='namespace_subnet')
self.assertRaises(os_exc.SDKException, router_id_resp = cls.add_subnet_to_router(m_driver, subnet_id)
cls.create_namespace_network, m_driver, namespace, self.assertEqual(router_id_resp, router_id)
project_id)
os_net.create_network.assert_called_once()
os_net.create_subnet.assert_not_called()
os_net.add_interface_to_router.assert_not_called()
def test_create_namespace_network_subnet_exception(self):
cls = subnet_drv.NamespacePodSubnetDriver
m_driver = mock.MagicMock(spec=cls)
namespace = 'test'
project_id = mock.sentinel.project_id
os_net = self.useFixture(k_fix.MockNetworkClient()).client
net = munch.Munch({'id': mock.sentinel.net})
os_net.create_network.return_value = net
os_net.create_subnet.side_effect = os_exc.SDKException
self.assertRaises(os_exc.SDKException,
cls.create_namespace_network, m_driver, namespace,
project_id)
os_net.create_network.assert_called_once()
os_net.create_subnet.assert_called_once()
os_net.add_interface_to_router.assert_not_called()
def test_create_namespace_network_router_exception(self):
cls = subnet_drv.NamespacePodSubnetDriver
m_driver = mock.MagicMock(spec=cls)
namespace = 'test'
project_id = mock.sentinel.project_id
os_net = self.useFixture(k_fix.MockNetworkClient()).client
net = munch.Munch({'id': mock.sentinel.net})
os_net.create_network.return_value = net
subnet = munch.Munch({'id': mock.sentinel.subnet})
os_net.create_subnet.return_value = subnet
os_net.add_interface_to_router.side_effect = os_exc.SDKException
self.assertRaises(os_exc.SDKException,
cls.create_namespace_network, m_driver, namespace,
project_id)
os_net.create_network.assert_called_once()
os_net.create_subnet.assert_called_once()
os_net.add_interface_to_router.assert_called_once() os_net.add_interface_to_router.assert_called_once()
def test_rollback_network_resources(self): def test_add_subnet_to_router_exception(self):
cls = subnet_drv.NamespacePodSubnetDriver cls = subnet_drv.NamespacePodSubnetDriver
m_driver = mock.MagicMock(spec=cls) m_driver = mock.MagicMock(spec=cls)
router_id = mock.sentinel.router_id
net_id = mock.sentinel.net_id
subnet_id = mock.sentinel.subnet_id subnet_id = mock.sentinel.subnet_id
crd_spec = {
'subnetId': subnet_id,
'routerId': router_id,
'netId': net_id,
}
namespace = mock.sentinel.namespace
os_net = self.useFixture(k_fix.MockNetworkClient()).client os_net = self.useFixture(k_fix.MockNetworkClient()).client
os_net.remove_interface_from_router.return_value = {} os_net.add_interface_to_router.side_effect = (
cls.rollback_network_resources(m_driver, crd_spec, namespace)
os_net.remove_interface_from_router.assert_called_with(
router_id, subnet_id=subnet_id)
os_net.delete_network.assert_called_with(net_id)
def test_rollback_network_resources_router_exception(self):
cls = subnet_drv.NamespacePodSubnetDriver
m_driver = mock.MagicMock(spec=cls)
router_id = mock.sentinel.router_id
net_id = mock.sentinel.net_id
subnet_id = mock.sentinel.subnet_id
crd_spec = {
'subnetId': subnet_id,
'routerId': router_id,
'netId': net_id,
}
namespace = mock.sentinel.namespace
os_net = self.useFixture(k_fix.MockNetworkClient()).client
os_net.remove_interface_from_router.side_effect = (
os_exc.SDKException) os_exc.SDKException)
router_id = 'router1'
oslo_cfg.CONF.set_override('pod_router',
router_id,
group='namespace_subnet')
cls.rollback_network_resources(m_driver, crd_spec, namespace) self.assertRaises(os_exc.SDKException,
os_net.remove_interface_from_router.assert_called_with( cls.add_subnet_to_router, m_driver, subnet_id)
router_id, subnet_id=subnet_id) os_net.add_interface_to_router.assert_called_once()
os_net.delete_network.assert_not_called()
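
For context, the reworked driver tests above (test_create_network, test_create_subnet and their _existing variants) all exercise the same check-before-create pattern: query Neutron for a resource matching the namespace first, and only create it when the query comes back empty. A minimal sketch of that pattern, assuming openstacksdk-style proxy calls; the naming scheme and helper shape are illustrative, not the driver's actual code:

def create_network(os_net, namespace, project_id):
    # Illustrative name derivation; the real driver's scheme may differ.
    net_name = 'ns/' + namespace + '-net'
    # networks() yields matching resources; an empty iterator means nothing
    # exists yet (the tests stub this with iter([]) / iter([net])).
    existing = next(os_net.networks(name=net_name,
                                    project_id=project_id), None)
    if existing is not None:
        return existing.id  # idempotent: reuse the network already there
    return os_net.create_network(name=net_name, project_id=project_id).id
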


@ -0,0 +1,281 @@
# Copyright 2020 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
from oslo_config import cfg as oslo_cfg
from kuryr_kubernetes.controller.drivers import base as drivers
from kuryr_kubernetes.controller.drivers import namespace_subnet as subnet_drv
from kuryr_kubernetes.controller.drivers import utils as driver_utils
from kuryr_kubernetes.controller.drivers import vif_pool
from kuryr_kubernetes.controller.handlers import kuryrnetwork
from kuryr_kubernetes import exceptions as k_exc
from kuryr_kubernetes.tests import base as test_base
from kuryr_kubernetes.tests.unit import kuryr_fixtures as k_fix
class TestKuryrNetworkHandler(test_base.TestCase):
def setUp(self):
super(TestKuryrNetworkHandler, self).setUp()
self._project_id = mock.sentinel.project_id
self._subnets = mock.sentinel.subnets
self._kuryrnet_crd = {
'metadata': {
'name': 'ns-test-namespace',
'selfLink': 'test-selfLink',
},
'spec': {
'nsName': 'test-namespace',
'projectId': 'test-project',
'nsLabels': {},
},
'status': {
}
}
self._handler = mock.MagicMock(
spec=kuryrnetwork.KuryrNetworkHandler)
self._handler._drv_project = mock.Mock(spec=drivers.PodProjectDriver)
# NOTE(ltomasbo): The KuryrNetwork handler is tied to the use of the
# namespace subnet driver.
self._handler._drv_subnets = mock.Mock(
spec=subnet_drv.NamespacePodSubnetDriver)
self._handler._drv_sg = mock.Mock(spec=drivers.PodSecurityGroupsDriver)
self._handler._drv_vif_pool = mock.MagicMock(
spec=vif_pool.MultiVIFPool)
self._get_project = self._handler._drv_project.get_project
self._set_vif_driver = self._handler._drv_vif_pool.set_vif_driver
self._create_network = self._handler._drv_subnets.create_network
self._create_subnet = self._handler._drv_subnets.create_subnet
self._delete_namespace_subnet = (
self._handler._drv_subnets.delete_namespace_subnet)
self._add_subnet_to_router = (
self._handler._drv_subnets.add_subnet_to_router)
self._delete_ns_sg_rules = (
self._handler._drv_sg.delete_namespace_sg_rules)
self._update_ns_sg_rules = (
self._handler._drv_sg.update_namespace_sg_rules)
self._delete_network_pools = (
self._handler._drv_vif_pool.delete_network_pools)
self._get_project.return_value = self._project_id
@mock.patch.object(drivers.LBaaSDriver, 'get_instance')
@mock.patch.object(drivers.VIFPoolDriver, 'get_instance')
@mock.patch.object(drivers.PodSecurityGroupsDriver, 'get_instance')
@mock.patch.object(drivers.PodSubnetsDriver, 'get_instance')
@mock.patch.object(drivers.NamespaceProjectDriver, 'get_instance')
def test_init(self, m_get_project_driver, m_get_subnet_driver,
m_get_sg_driver, m_get_vif_pool_driver, m_get_lbaas_driver):
project_driver = mock.sentinel.project_driver
subnet_driver = mock.sentinel.subnet_driver
sg_driver = mock.sentinel.sg_driver
vif_pool_driver = mock.Mock(spec=vif_pool.MultiVIFPool)
lbaas_driver = mock.sentinel.lbaas_driver
m_get_project_driver.return_value = project_driver
m_get_subnet_driver.return_value = subnet_driver
m_get_sg_driver.return_value = sg_driver
m_get_vif_pool_driver.return_value = vif_pool_driver
m_get_lbaas_driver.return_value = lbaas_driver
handler = kuryrnetwork.KuryrNetworkHandler()
self.assertEqual(project_driver, handler._drv_project)
self.assertEqual(subnet_driver, handler._drv_subnets)
self.assertEqual(sg_driver, handler._drv_sg)
self.assertEqual(vif_pool_driver, handler._drv_vif_pool)
@mock.patch.object(driver_utils, 'get_services')
@mock.patch.object(driver_utils, 'get_namespace')
def test_on_present(self, m_get_ns, m_get_svc):
net_id = mock.sentinel.net_id
subnet_id = mock.sentinel.subnet_id
subnet_cidr = mock.sentinel.subnet_cidr
router_id = mock.sentinel.router_id
ns = mock.sentinel.namespace
self._create_network.return_value = net_id
self._create_subnet.return_value = (subnet_id, subnet_cidr)
self._add_subnet_to_router.return_value = router_id
m_get_ns.return_value = ns
m_get_svc.return_value = []
kuryrnetwork.KuryrNetworkHandler.on_present(self._handler,
self._kuryrnet_crd)
self._handler._patch_kuryrnetwork_crd.assert_called()
self._create_network.assert_called_once_with(
self._kuryrnet_crd['spec']['nsName'],
self._kuryrnet_crd['spec']['projectId'])
self._create_subnet.assert_called_once_with(
self._kuryrnet_crd['spec']['nsName'],
self._kuryrnet_crd['spec']['projectId'],
net_id)
self._add_subnet_to_router.assert_called_once_with(subnet_id)
m_get_ns.assert_called_once_with(self._kuryrnet_crd['spec']['nsName'])
self._update_ns_sg_rules.assert_called_once_with(ns)
m_get_svc.assert_called_once()
self._handler._update_services.assert_called_once()
@mock.patch.object(driver_utils, 'get_services')
@mock.patch.object(driver_utils, 'get_namespace')
def test_on_present_no_sg_enforce(self, m_get_ns, m_get_svc):
net_id = mock.sentinel.net_id
subnet_id = mock.sentinel.subnet_id
subnet_cidr = mock.sentinel.subnet_cidr
router_id = mock.sentinel.router_id
ns = mock.sentinel.namespace
self._create_network.return_value = net_id
self._create_subnet.return_value = (subnet_id, subnet_cidr)
self._add_subnet_to_router.return_value = router_id
m_get_ns.return_value = ns
oslo_cfg.CONF.set_override('enforce_sg_rules',
False,
group='octavia_defaults')
self.addCleanup(oslo_cfg.CONF.clear_override, 'enforce_sg_rules',
group='octavia_defaults')
kuryrnetwork.KuryrNetworkHandler.on_present(self._handler,
self._kuryrnet_crd)
self._handler._patch_kuryrnetwork_crd.assert_called()
self._create_network.assert_called_once_with(
self._kuryrnet_crd['spec']['nsName'],
self._kuryrnet_crd['spec']['projectId'])
self._create_subnet.assert_called_once_with(
self._kuryrnet_crd['spec']['nsName'],
self._kuryrnet_crd['spec']['projectId'],
net_id)
self._add_subnet_to_router.assert_called_once_with(subnet_id)
m_get_ns.assert_called_once_with(self._kuryrnet_crd['spec']['nsName'])
self._update_ns_sg_rules.assert_called_once_with(ns)
m_get_svc.assert_not_called()
self._handler._update_services.assert_not_called()
@mock.patch.object(driver_utils, 'get_namespace')
def test_on_present_existing(self, m_get_ns):
net_id = mock.sentinel.net_id
subnet_id = mock.sentinel.subnet_id
subnet_cidr = mock.sentinel.subnet_cidr
router_id = mock.sentinel.router_id
kns_crd = self._kuryrnet_crd.copy()
kns_crd['status'] = {
'netId': net_id,
'subnetId': subnet_id,
'subnetCIDR': subnet_cidr,
'routerId': router_id}
kuryrnetwork.KuryrNetworkHandler.on_present(self._handler, kns_crd)
self._handler._patch_kuryrnetwork_crd.assert_not_called()
self._create_network.assert_not_called()
self._create_subnet.assert_not_called()
self._add_subnet_to_router.assert_not_called()
m_get_ns.assert_not_called()
@mock.patch.object(driver_utils, 'get_services')
def test_on_finalize(self, m_get_svc):
net_id = mock.sentinel.net_id
kns_crd = self._kuryrnet_crd.copy()
kns_crd['status'] = {'netId': net_id}
crd_selector = mock.sentinel.crd_selector
self._delete_ns_sg_rules.return_value = [crd_selector]
m_get_svc.return_value = []
kubernetes = self.useFixture(k_fix.MockK8sClient()).client
kuryrnetwork.KuryrNetworkHandler.on_finalize(self._handler, kns_crd)
self._delete_network_pools.assert_called_once_with(net_id)
self._delete_namespace_subnet.assert_called_once_with(kns_crd)
self._delete_ns_sg_rules.assert_called_once()
m_get_svc.assert_called_once()
self._handler._update_services.assert_called_once()
kubernetes.patch_crd.assert_called_once()
@mock.patch.object(driver_utils, 'get_services')
def test_on_finalize_no_network(self, m_get_svc):
crd_selector = mock.sentinel.crd_selector
self._delete_ns_sg_rules.return_value = [crd_selector]
m_get_svc.return_value = []
kubernetes = self.useFixture(k_fix.MockK8sClient()).client
kuryrnetwork.KuryrNetworkHandler.on_finalize(self._handler,
self._kuryrnet_crd)
self._delete_network_pools.assert_not_called()
self._delete_namespace_subnet.assert_not_called()
self._delete_ns_sg_rules.assert_called_once()
m_get_svc.assert_called_once()
self._handler._update_services.assert_called_once()
kubernetes.patch_crd.assert_called_once()
@mock.patch.object(driver_utils, 'get_services')
def test_on_finalize_no_sg_enforce(self, m_get_svc):
net_id = mock.sentinel.net_id
kns_crd = self._kuryrnet_crd.copy()
kns_crd['status'] = {'netId': net_id}
crd_selector = mock.sentinel.crd_selector
self._delete_ns_sg_rules.return_value = [crd_selector]
m_get_svc.return_value = []
kubernetes = self.useFixture(k_fix.MockK8sClient()).client
oslo_cfg.CONF.set_override('enforce_sg_rules',
False,
group='octavia_defaults')
self.addCleanup(oslo_cfg.CONF.clear_override, 'enforce_sg_rules',
group='octavia_defaults')
kuryrnetwork.KuryrNetworkHandler.on_finalize(
self._handler, kns_crd)
self._delete_network_pools.assert_called_once_with(net_id)
self._delete_namespace_subnet.assert_called_once_with(kns_crd)
self._delete_ns_sg_rules.assert_called_once()
m_get_svc.assert_not_called()
self._handler._update_services.assert_not_called()
kubernetes.patch_crd.assert_called_once()
@mock.patch.object(driver_utils, 'get_services')
def test_on_finalize_finalizer_exception(self, m_get_svc):
net_id = mock.sentinel.net_id
kns_crd = self._kuryrnet_crd.copy()
kns_crd['status'] = {'netId': net_id}
crd_selector = mock.sentinel.crd_selector
self._delete_ns_sg_rules.return_value = [crd_selector]
m_get_svc.return_value = []
kubernetes = self.useFixture(k_fix.MockK8sClient()).client
kubernetes.patch_crd.side_effect = k_exc.K8sClientException
self.assertRaises(
k_exc.K8sClientException,
kuryrnetwork.KuryrNetworkHandler.on_finalize,
self._handler, kns_crd)
self._delete_network_pools.assert_called_once_with(net_id)
self._delete_namespace_subnet.assert_called_once_with(kns_crd)
self._delete_ns_sg_rules.assert_called_once()
m_get_svc.assert_called_once()
self._handler._update_services.assert_called_once()
kubernetes.patch_crd.assert_called_once()
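
Taken together, the expectations above pin down a status-driven reconciliation flow for on_present. The following is a hedged sketch only: the spec/status field names and driver methods come from the test fixtures, while the _patch_kuryrnetwork_crd signature and the exact ordering are assumptions:

def on_present(self, kuryrnet_crd):
    if kuryrnet_crd['status'].get('subnetId'):
        return  # resources already created; nothing left to reconcile
    ns_name = kuryrnet_crd['spec']['nsName']
    project_id = kuryrnet_crd['spec']['projectId']
    net_id = self._drv_subnets.create_network(ns_name, project_id)
    self._patch_kuryrnetwork_crd(kuryrnet_crd, {'netId': net_id})
    subnet_id, subnet_cidr = self._drv_subnets.create_subnet(
        ns_name, project_id, net_id)
    self._patch_kuryrnetwork_crd(kuryrnet_crd, {'subnetId': subnet_id,
                                                'subnetCIDR': subnet_cidr})
    router_id = self._drv_subnets.add_subnet_to_router(subnet_id)
    self._patch_kuryrnetwork_crd(kuryrnet_crd, {'routerId': router_id})
    # Security group rules (and, when Octavia enforces SG rules, the
    # affected services) are refreshed for the backing namespace.
    ns = driver_utils.get_namespace(ns_name)
    self._drv_sg.update_namespace_sg_rules(ns)

Recording each ID in the CRD status as soon as it exists is what makes the handler safe to retry: a crash between steps leaves enough state behind to resume without duplicating OpenStack resources.
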


@ -1,4 +1,4 @@
# Copyright 2019, Inc. # Copyright 2020, Inc.
# #
# Licensed under the Apache License, Version 2.0 (the "License"); # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License. # you may not use this file except in compliance with the License.
@ -18,31 +18,34 @@ from kuryr_kubernetes.controller.drivers import base as drivers
from kuryr_kubernetes.controller.drivers import namespace_subnet as subnet_drv from kuryr_kubernetes.controller.drivers import namespace_subnet as subnet_drv
from kuryr_kubernetes.controller.drivers import utils as driver_utils from kuryr_kubernetes.controller.drivers import utils as driver_utils
from kuryr_kubernetes.controller.drivers import vif_pool from kuryr_kubernetes.controller.drivers import vif_pool
from kuryr_kubernetes.controller.handlers import kuryrnet from kuryr_kubernetes.controller.handlers import kuryrnetwork_population
from kuryr_kubernetes.tests import base as test_base from kuryr_kubernetes.tests import base as test_base
from kuryr_kubernetes import utils from kuryr_kubernetes import utils
class TestKuryrNetHandler(test_base.TestCase): class TestKuryrNetworkPopulationHandler(test_base.TestCase):
def setUp(self): def setUp(self):
super(TestKuryrNetHandler, self).setUp() super(TestKuryrNetworkPopulationHandler, self).setUp()
self._project_id = mock.sentinel.project_id self._project_id = mock.sentinel.project_id
self._subnets = mock.sentinel.subnets self._subnets = mock.sentinel.subnets
self._kuryrnet_crd = { self._kuryrnet_crd = {
'metadata': { 'metadata': {
'name': 'test-namespace', 'name': 'test-namespace',
'annotations': { },
'namespaceName': 'test-namespace'
}},
'spec': { 'spec': {
'nsName': 'test-namespace',
'projectId': 'test-project',
'nsLabels': {},
},
'status': {
'subnetId': 'test-subnet' 'subnetId': 'test-subnet'
} }
} }
self._handler = mock.MagicMock(spec=kuryrnet.KuryrNetHandler) self._handler = mock.MagicMock(
self._handler._drv_project = mock.Mock(spec=drivers.PodProjectDriver) spec=kuryrnetwork_population.KuryrNetworkPopulationHandler)
# NOTE(ltomasbo): The KuryrNet handler is associated to the usage of # NOTE(ltomasbo): The KuryrNet handler is associated to the usage of
# namespace subnet driver, # namespace subnet driver,
self._handler._drv_subnets = mock.Mock( self._handler._drv_subnets = mock.Mock(
@ -50,64 +53,57 @@ class TestKuryrNetHandler(test_base.TestCase):
self._handler._drv_vif_pool = mock.MagicMock( self._handler._drv_vif_pool = mock.MagicMock(
spec=vif_pool.MultiVIFPool) spec=vif_pool.MultiVIFPool)
self._get_project = self._handler._drv_project.get_project
self._get_namespace_subnet = ( self._get_namespace_subnet = (
self._handler._drv_subnets.get_namespace_subnet) self._handler._drv_subnets.get_namespace_subnet)
self._set_vif_driver = self._handler._drv_vif_pool.set_vif_driver self._set_vif_driver = self._handler._drv_vif_pool.set_vif_driver
self._populate_pool = self._handler._drv_vif_pool.populate_pool self._populate_pool = self._handler._drv_vif_pool.populate_pool
self._patch_kuryrnetwork_crd = self._handler._patch_kuryrnetwork_crd
self._get_project.return_value = self._project_id
self._get_namespace_subnet.return_value = self._subnets self._get_namespace_subnet.return_value = self._subnets
@mock.patch.object(drivers.VIFPoolDriver, 'get_instance') @mock.patch.object(drivers.VIFPoolDriver, 'get_instance')
@mock.patch.object(drivers.PodSubnetsDriver, 'get_instance') @mock.patch.object(drivers.PodSubnetsDriver, 'get_instance')
@mock.patch.object(drivers.NamespaceProjectDriver, 'get_instance') def test_init(self, m_get_subnet_driver, m_get_vif_pool_driver):
def test_init(self, m_get_project_driver, m_get_subnet_driver,
m_get_vif_pool_driver):
project_driver = mock.sentinel.project_driver
subnet_driver = mock.sentinel.subnet_driver subnet_driver = mock.sentinel.subnet_driver
vif_pool_driver = mock.Mock(spec=vif_pool.MultiVIFPool) vif_pool_driver = mock.Mock(spec=vif_pool.MultiVIFPool)
m_get_project_driver.return_value = project_driver
m_get_subnet_driver.return_value = subnet_driver m_get_subnet_driver.return_value = subnet_driver
m_get_vif_pool_driver.return_value = vif_pool_driver m_get_vif_pool_driver.return_value = vif_pool_driver
handler = kuryrnet.KuryrNetHandler() handler = kuryrnetwork_population.KuryrNetworkPopulationHandler()
self.assertEqual(project_driver, handler._drv_project)
self.assertEqual(subnet_driver, handler._drv_subnets) self.assertEqual(subnet_driver, handler._drv_subnets)
self.assertEqual(vif_pool_driver, handler._drv_vif_pool) self.assertEqual(vif_pool_driver, handler._drv_vif_pool)
@mock.patch.object(driver_utils, 'get_annotations') @mock.patch.object(driver_utils, 'get_annotations')
@mock.patch.object(driver_utils, 'get_namespace') @mock.patch.object(driver_utils, 'get_namespace')
@mock.patch.object(driver_utils, 'patch_kuryrnet_crd')
@mock.patch.object(utils, 'get_nodes_ips') @mock.patch.object(utils, 'get_nodes_ips')
def test_on_added(self, m_get_nodes_ips, m_patch_kn_crd, m_get_ns, def test_on_added(self, m_get_nodes_ips, m_get_ns, m_get_ann):
m_get_ann):
m_get_nodes_ips.return_value = ['node-ip'] m_get_nodes_ips.return_value = ['node-ip']
m_get_ns.return_value = mock.sentinel.ns m_get_ns.return_value = mock.sentinel.ns
m_get_ann.return_value = self._kuryrnet_crd['metadata']['name'] m_get_ann.return_value = self._kuryrnet_crd['metadata']['name']
kuryrnet.KuryrNetHandler.on_added(self._handler, self._kuryrnet_crd) kuryrnetwork_population.KuryrNetworkPopulationHandler.on_added(
self._handler, self._kuryrnet_crd)
self._get_project.assert_called_once()
self._get_namespace_subnet.assert_called_once_with( self._get_namespace_subnet.assert_called_once_with(
self._kuryrnet_crd['metadata']['annotations']['namespaceName'], self._kuryrnet_crd['spec']['nsName'],
self._kuryrnet_crd['spec']['subnetId']) self._kuryrnet_crd['status']['subnetId'])
self._populate_pool.assert_called_once_with('node-ip', self._populate_pool.assert_called_once_with(
self._project_id, 'node-ip', self._kuryrnet_crd['spec']['projectId'], self._subnets,
self._subnets,
[]) [])
m_patch_kn_crd.assert_called_once() self._patch_kuryrnetwork_crd.assert_called_once()
@mock.patch.object(driver_utils, 'get_annotations') def test_on_added_no_subnet(self):
@mock.patch.object(driver_utils, 'get_namespace') kns = self._kuryrnet_crd.copy()
def test_on_added_no_namespace(self, m_get_ns, m_get_ann): kns['status'] = {}
m_get_ns.return_value = None kuryrnetwork_population.KuryrNetworkPopulationHandler.on_added(
ns_name = self._kuryrnet_crd['metadata']['annotations'].get( self._handler, kns)
'namespaceName') self._get_namespace_subnet.assert_not_called()
kuryrnet.KuryrNetHandler.on_added(self._handler, self._kuryrnet_crd) def test_on_added_populated(self):
kns = self._kuryrnet_crd.copy()
m_get_ns.assert_called_once_with(ns_name) kns['status'] = {'populated': True}
m_get_ann.assert_not_called() kuryrnetwork_population.KuryrNetworkPopulationHandler.on_added(
self._handler, kns)
self._get_namespace_subnet.assert_not_called()
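
The population handler's tests likewise imply a small, guarded on_added flow; here is a hedged sketch (field names per the fixture above, the _patch_kuryrnetwork_crd call and the loop shape are assumptions):

def on_added(self, kuryrnet_crd):
    subnet_id = kuryrnet_crd['status'].get('subnetId')
    if not subnet_id or kuryrnet_crd['status'].get('populated'):
        return  # no subnet yet, or the pools were already pre-populated
    ns_name = kuryrnet_crd['spec']['nsName']
    subnets = self._drv_subnets.get_namespace_subnet(ns_name, subnet_id)
    # Pre-create ports on every node so pods scheduled into the
    # namespace get their VIFs from a warm pool.
    for node_ip in utils.get_nodes_ips():
        self._drv_vif_pool.populate_pool(
            node_ip, kuryrnet_crd['spec']['projectId'], subnets, [])
    self._patch_kuryrnetwork_crd(kuryrnet_crd, {'populated': True})
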


@ -15,10 +15,7 @@
import mock import mock
from openstack import exceptions as o_exc
from kuryr_kubernetes.controller.drivers import base as drivers from kuryr_kubernetes.controller.drivers import base as drivers
from kuryr_kubernetes.controller.drivers import vif_pool
from kuryr_kubernetes.controller.handlers import namespace from kuryr_kubernetes.controller.handlers import namespace
from kuryr_kubernetes import exceptions as k_exc from kuryr_kubernetes import exceptions as k_exc
from kuryr_kubernetes.tests import base as test_base from kuryr_kubernetes.tests import base as test_base
@ -48,268 +45,66 @@ class TestNamespaceHandler(test_base.TestCase):
self._handler._drv_project = mock.Mock( self._handler._drv_project = mock.Mock(
spec=drivers.NamespaceProjectDriver) spec=drivers.NamespaceProjectDriver)
self._handler._drv_subnets = mock.Mock(spec=drivers.PodSubnetsDriver)
self._handler._drv_sg = mock.Mock(spec=drivers.PodSecurityGroupsDriver)
self._handler._drv_vif_pool = mock.MagicMock(
spec=vif_pool.MultiVIFPool)
self._get_project = self._handler._drv_project.get_project self._get_project = self._handler._drv_project.get_project
self._get_subnets = self._handler._drv_subnets.get_subnets self._update_labels = self._handler._update_labels
self._get_kns_crd = self._handler._get_kns_crd
self._create_namespace_network = ( self._add_kuryrnetwork_crd = self._handler._add_kuryrnetwork_crd
self._handler._drv_subnets.create_namespace_network)
self._delete_namespace_subnet = (
self._handler._drv_subnets.delete_namespace_subnet)
self._delete_namespace_sg_rules = (
self._handler._drv_sg.delete_namespace_sg_rules)
self._cleanup_namespace_networks = (
self._handler._drv_subnets.cleanup_namespace_networks)
self._get_net_crd = self._handler._get_net_crd
self._set_net_crd = self._handler._set_net_crd
self._get_net_crd_id = self._handler._get_net_crd_id
self._add_kuryrnet_crd = self._handler._add_kuryrnet_crd
self._del_kuryrnet_crd = self._handler._del_kuryrnet_crd
self._rollback_network_resources = (
self._handler._drv_subnets.rollback_network_resources)
self._delete_network_pools = (
self._handler._drv_vif_pool.delete_network_pools)
self._get_project.return_value = self._project_id self._get_project.return_value = self._project_id
self._get_subnets.return_value = self._subnets
def _get_crd(self): def _get_crd(self):
crd = { crd = {
'kind': 'KuryrNet', 'kind': 'KuryrNet',
'metadata': { 'metadata': {
'selfLink': mock.sentinel.self_link, 'selfLink': mock.sentinel.self_link,
'name': 'ns-' + self._namespace_name, 'name': self._namespace_name,
'namespace': self._namespace_name,
}, },
'spec': { 'spec': {}
'routerId': mock.sentinel.router_id,
'netId': mock.sentinel.net_id,
'subnetId': mock.sentinel.subnet_id,
}
} }
return crd return crd
@mock.patch.object(drivers.LBaaSDriver, 'get_instance') @mock.patch.object(namespace.NamespaceHandler, '_upgrade_crds')
@mock.patch.object(drivers.VIFPoolDriver, 'get_instance')
@mock.patch.object(drivers.PodSecurityGroupsDriver, 'get_instance')
@mock.patch.object(drivers.PodSubnetsDriver, 'get_instance')
@mock.patch.object(drivers.NamespaceProjectDriver, 'get_instance') @mock.patch.object(drivers.NamespaceProjectDriver, 'get_instance')
def test_init(self, m_get_project_driver, m_get_subnets_driver, def test_init(self, m_get_project_driver, m_upgrade_crds):
m_get_sg_driver, m_get_vif_pool_driver, m_get_lbaas_driver):
project_driver = mock.sentinel.project_driver project_driver = mock.sentinel.project_driver
subnets_driver = mock.sentinel.subnets_driver
sg_driver = mock.sentinel.sg_driver
vif_pool_driver = mock.Mock(spec=vif_pool.MultiVIFPool)
lbaas_driver = mock.sentinel.lbaas_driver
m_get_project_driver.return_value = project_driver m_get_project_driver.return_value = project_driver
m_get_subnets_driver.return_value = subnets_driver
m_get_sg_driver.return_value = sg_driver
m_get_vif_pool_driver.return_value = vif_pool_driver
m_get_lbaas_driver.return_value = lbaas_driver
handler = namespace.NamespaceHandler() handler = namespace.NamespaceHandler()
self.assertEqual(project_driver, handler._drv_project) self.assertEqual(project_driver, handler._drv_project)
self.assertEqual(subnets_driver, handler._drv_subnets) m_upgrade_crds.assert_called_once()
self.assertEqual(sg_driver, handler._drv_sg)
self.assertEqual(vif_pool_driver, handler._drv_vif_pool)
def test_on_present(self): def test_on_present(self):
net_crd = self._get_crd() self._get_kns_crd.return_value = None
self._get_net_crd_id.return_value = None
self._get_net_crd.return_value = None
self._create_namespace_network.return_value = {'test_net': 'uuid'}
net_crd_spec = {'test_net': 'uuid'}
self._add_kuryrnet_crd.return_value = net_crd
namespace.NamespaceHandler.on_present(self._handler, self._namespace) namespace.NamespaceHandler.on_present(self._handler, self._namespace)
self._get_net_crd_id.assert_called_once_with(self._namespace) self._get_kns_crd.assert_called_once_with(
self._get_net_crd.assert_called_once_with(self._crd_id) self._namespace['metadata']['name'])
self._cleanup_namespace_networks.assert_called_once_with( self._add_kuryrnetwork_crd.assert_called_once_with(
self._namespace_name) self._namespace['metadata']['name'], {})
self._create_namespace_network.assert_called_once_with(
self._namespace_name, self._project_id)
self._add_kuryrnet_crd.assert_called_once_with(self._namespace_name,
net_crd_spec)
self._set_net_crd.assert_called_once_with(self._namespace, net_crd)
self._rollback_network_resources.assert_not_called()
def test_on_present_existing(self): def test_on_present_existing(self):
net_crd_id = mock.sentinel.net_crd_id net_crd = self._get_crd()
self._get_net_crd_id.return_value = net_crd_id self._get_kns_crd.return_value = net_crd
namespace.NamespaceHandler.on_present(self._handler, self._namespace) namespace.NamespaceHandler.on_present(self._handler, self._namespace)
self._get_net_crd_id.assert_called_once_with(self._namespace) self._get_kns_crd.assert_called_once_with(
self._cleanup_namespace_networks.assert_not_called() self._namespace['metadata']['name'])
self._create_namespace_network.assert_not_called() self._update_labels.assert_called_once_with(net_crd, {})
self._add_kuryrnetwork_crd.assert_not_called()
def test_on_present_create_network_exception(self):
self._get_net_crd_id.return_value = None
self._get_net_crd.return_value = None
self._create_namespace_network.side_effect = (
o_exc.SDKException)
self.assertRaises(o_exc.SDKException,
namespace.NamespaceHandler.on_present,
self._handler, self._namespace)
self._get_net_crd_id.assert_called_once_with(self._namespace)
self._get_net_crd.assert_called_once_with(self._crd_id)
self._cleanup_namespace_networks.assert_called_once_with(
self._namespace_name)
self._create_namespace_network.assert_called_once_with(
self._namespace_name, self._project_id)
self._set_net_crd.assert_not_called()
def test_on_present_add_kuryrnet_crd_exception(self): def test_on_present_add_kuryrnet_crd_exception(self):
self._get_net_crd_id.return_value = None self._get_kns_crd.return_value = None
self._get_net_crd.return_value = None self._add_kuryrnetwork_crd.side_effect = k_exc.K8sClientException
self._create_namespace_network.return_value = {'test_net': 'uuid'}
net_crd_spec = {'test_net': 'uuid'}
self._add_kuryrnet_crd.side_effect = k_exc.K8sClientException
self.assertRaises(k_exc.ResourceNotReady, self.assertRaises(k_exc.ResourceNotReady,
namespace.NamespaceHandler.on_present, namespace.NamespaceHandler.on_present,
self._handler, self._namespace) self._handler, self._namespace)
self._get_net_crd_id.assert_called_once_with(self._namespace) self._get_kns_crd.assert_called_once_with(
self._get_net_crd.assert_called_once_with(self._crd_id) self._namespace['metadata']['name'])
self._cleanup_namespace_networks.assert_called_once_with( self._add_kuryrnetwork_crd.assert_called_once_with(
self._namespace_name) self._namespace['metadata']['name'], {})
self._create_namespace_network.assert_called_once_with(
self._namespace_name, self._project_id)
self._add_kuryrnet_crd.assert_called_once_with(self._namespace_name,
net_crd_spec)
self._set_net_crd.assert_not_called()
self._rollback_network_resources.assert_called_once()
def test_on_present_set_crd_exception(self):
net_crd = self._get_crd()
self._get_net_crd_id.return_value = None
self._get_net_crd.return_value = None
self._create_namespace_network.return_value = {'test_net': 'uuid'}
net_crd_spec = {'test_net': 'uuid'}
self._add_kuryrnet_crd.return_value = net_crd
self._set_net_crd.side_effect = k_exc.K8sClientException
self.assertRaises(k_exc.ResourceNotReady,
namespace.NamespaceHandler.on_present,
self._handler, self._namespace)
self._get_net_crd_id.assert_called_once_with(self._namespace)
self._get_net_crd.assert_called_once_with(self._crd_id)
self._cleanup_namespace_networks.assert_called_once_with(
self._namespace_name)
self._cleanup_namespace_networks.assert_called_once_with(
self._namespace_name)
self._create_namespace_network.assert_called_once_with(
self._namespace_name, self._project_id)
self._add_kuryrnet_crd.assert_called_once_with(self._namespace_name,
net_crd_spec)
self._set_net_crd.assert_called_once_with(self._namespace, net_crd)
self._rollback_network_resources.assert_called_once()
def test_on_present_rollback_exception(self):
net_crd = self._get_crd()
self._get_net_crd_id.return_value = None
self._get_net_crd.return_value = None
self._create_namespace_network.return_value = {'test_net': 'uuid'}
net_crd_spec = {'test_net': 'uuid'}
self._add_kuryrnet_crd.return_value = net_crd
self._set_net_crd.side_effect = k_exc.K8sClientException
self._rollback_network_resources.side_effect = (
o_exc.SDKException)
self.assertRaises(o_exc.SDKException,
namespace.NamespaceHandler.on_present,
self._handler, self._namespace)
self._get_net_crd_id.assert_called_once_with(self._namespace)
self._get_net_crd.assert_called_once_with(self._crd_id)
self._cleanup_namespace_networks.assert_called_once_with(
self._namespace_name)
self._create_namespace_network.assert_called_once_with(
self._namespace_name, self._project_id)
self._add_kuryrnet_crd.assert_called_once_with(self._namespace_name,
net_crd_spec)
self._set_net_crd.assert_called_once_with(self._namespace, net_crd)
self._rollback_network_resources.assert_called_once()
def test_on_present_del_kuryrnet_exception(self):
net_crd = self._get_crd()
self._get_net_crd_id.return_value = None
self._get_net_crd.return_value = None
self._create_namespace_network.return_value = {'test_net': 'uuid'}
net_crd_spec = {'test_net': 'uuid'}
self._add_kuryrnet_crd.return_value = net_crd
self._set_net_crd.side_effect = k_exc.K8sClientException
self._del_kuryrnet_crd.side_effect = k_exc.K8sClientException
self.assertRaises(k_exc.ResourceNotReady,
namespace.NamespaceHandler.on_present,
self._handler, self._namespace)
self._get_net_crd_id.assert_called_once_with(self._namespace)
self._get_net_crd.assert_called_once_with(self._crd_id)
self._cleanup_namespace_networks.assert_called_once_with(
self._namespace_name)
self._create_namespace_network.assert_called_once_with(
self._namespace_name, self._project_id)
self._add_kuryrnet_crd.assert_called_once_with(self._namespace_name,
net_crd_spec)
self._set_net_crd.assert_called_once_with(self._namespace, net_crd)
self._rollback_network_resources.assert_called_once()
self._del_kuryrnet_crd.assert_called_once()
def test_on_deleted(self):
net_crd_id = mock.sentinel.net_crd_id
net_crd = self._get_crd()
self._get_net_crd_id.return_value = net_crd_id
self._get_net_crd.return_value = net_crd
self._delete_namespace_sg_rules.return_value = []
namespace.NamespaceHandler.on_deleted(self._handler, self._namespace)
self._get_net_crd_id.assert_called_once_with(self._namespace)
self._get_net_crd.assert_called_once_with(net_crd_id)
self._delete_network_pools.assert_called_once_with(
net_crd['spec']['netId'])
self._delete_namespace_subnet.assert_called_once_with(net_crd)
self._del_kuryrnet_crd.assert_called_once_with(self._crd_id)
def test_on_deleted_missing_crd_annotation(self):
self._get_net_crd_id.return_value = None
namespace.NamespaceHandler.on_deleted(self._handler, self._namespace)
self._get_net_crd_id.assert_called_once_with(self._namespace)
self._get_net_crd.assert_not_called()
self._delete_network_pools.assert_not_called()
self._delete_namespace_subnet.assert_not_called()
self._del_kuryrnet_crd.assert_not_called()
def test_on_deleted_k8s_exception(self):
net_crd_id = mock.sentinel.net_crd_id
self._get_net_crd_id.return_value = net_crd_id
self._get_net_crd.side_effect = k_exc.K8sClientException
self.assertRaises(k_exc.K8sClientException,
namespace.NamespaceHandler.on_deleted,
self._handler, self._namespace)
self._get_net_crd_id.assert_called_once_with(self._namespace)
self._get_net_crd.assert_called_once_with(net_crd_id)
self._delete_network_pools.assert_not_called()
self._delete_namespace_subnet.assert_not_called()
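
With the network plumbing moved into the KuryrNetwork handler, the namespace tests above reduce on_present to ensuring a KuryrNetwork CRD exists for the namespace. A hedged sketch consistent with those expectations (the labels plumbing and the exception mapping are partly assumed):

def on_present(self, namespace):
    ns_name = namespace['metadata']['name']
    ns_labels = namespace['metadata'].get('labels', {})
    kns_crd = self._get_kns_crd(ns_name)
    if kns_crd:
        # CRD already exists: only reconcile the namespace labels.
        self._update_labels(kns_crd, ns_labels)
        return
    try:
        self._add_kuryrnetwork_crd(ns_name, ns_labels)
    except k_exc.K8sClientException:
        # Creating the CRD failed; ask the framework to retry later.
        raise k_exc.ResourceNotReady(namespace)
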


@ -101,7 +101,8 @@ kuryr_kubernetes.controller.handlers =
policy = kuryr_kubernetes.controller.handlers.policy:NetworkPolicyHandler policy = kuryr_kubernetes.controller.handlers.policy:NetworkPolicyHandler
pod_label = kuryr_kubernetes.controller.handlers.pod_label:PodLabelHandler pod_label = kuryr_kubernetes.controller.handlers.pod_label:PodLabelHandler
kuryrnetpolicy = kuryr_kubernetes.controller.handlers.kuryrnetpolicy:KuryrNetPolicyHandler kuryrnetpolicy = kuryr_kubernetes.controller.handlers.kuryrnetpolicy:KuryrNetPolicyHandler
kuryrnet = kuryr_kubernetes.controller.handlers.kuryrnet:KuryrNetHandler kuryrnetwork = kuryr_kubernetes.controller.handlers.kuryrnetwork:KuryrNetworkHandler
kuryrnetwork_population = kuryr_kubernetes.controller.handlers.kuryrnetwork_population:KuryrNetworkPopulationHandler
test_handler = kuryr_kubernetes.tests.unit.controller.handlers.test_fake_handler:TestHandler test_handler = kuryr_kubernetes.tests.unit.controller.handlers.test_fake_handler:TestHandler
kuryr_kubernetes.controller.drivers.multi_vif = kuryr_kubernetes.controller.drivers.multi_vif =
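
For reference, the entry points registered above are how the controller discovers its handler classes at runtime; a hedged stevedore-based loading sketch (the project's actual wiring may differ):

from stevedore import driver

def load_handler(name):
    # Resolve e.g. 'kuryrnetwork' or 'kuryrnetwork_population' to the
    # handler class registered under the entry-point namespace above.
    mgr = driver.DriverManager(
        namespace='kuryr_kubernetes.controller.handlers',
        name=name,
        invoke_on_load=True)
    return mgr.driver
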


@ -34,6 +34,7 @@ sudo chown ${USER}:${USER} ${HOME}/.kube/config
/usr/local/bin/kubectl --kubeconfig=${HOME}/.kube/config get ingress -o yaml --all-namespaces >> ${K8S_LOG_DIR}/ingress.txt /usr/local/bin/kubectl --kubeconfig=${HOME}/.kube/config get ingress -o yaml --all-namespaces >> ${K8S_LOG_DIR}/ingress.txt
/usr/local/bin/kubectl --kubeconfig=${HOME}/.kube/config get namespaces -o yaml >> ${K8S_LOG_DIR}/namespaces.txt /usr/local/bin/kubectl --kubeconfig=${HOME}/.kube/config get namespaces -o yaml >> ${K8S_LOG_DIR}/namespaces.txt
/usr/local/bin/kubectl --kubeconfig=${HOME}/.kube/config get kuryrnets -o yaml --all-namespaces >> ${K8S_LOG_DIR}/kuryrnets_crds.txt /usr/local/bin/kubectl --kubeconfig=${HOME}/.kube/config get kuryrnets -o yaml --all-namespaces >> ${K8S_LOG_DIR}/kuryrnets_crds.txt
/usr/local/bin/kubectl --kubeconfig=${HOME}/.kube/config get kuryrnetworks -o yaml --all-namespaces >> ${K8S_LOG_DIR}/kuryrnetworks_crds.txt
/usr/local/bin/kubectl --kubeconfig=${HOME}/.kube/config get endpoints -o yaml --all-namespaces >> ${K8S_LOG_DIR}/endpoints.txt /usr/local/bin/kubectl --kubeconfig=${HOME}/.kube/config get endpoints -o yaml --all-namespaces >> ${K8S_LOG_DIR}/endpoints.txt
# Kubernetes pods logs # Kubernetes pods logs
mkdir -p ${K8S_LOG_DIR}/pod_logs mkdir -p ${K8S_LOG_DIR}/pod_logs