Browse Source

Namespace event handling through KuryrNet CRD

This patch moves the namespace handling to be more aligned
with the k8s style.

Depends-on: If0aaf748d13027b3d660aa0f74c4f6653e911250

Change-Id: Ia2811d743f6c4791321b05977118d0b4276787b5
changes/44/706744/38
Luis Tomas Bolivar 2 years ago
parent
commit
780c4dfa09
  1. 6
      .zuul.d/octavia.yaml
  2. 1
      devstack/lib/kuryr_kubernetes
  3. 5
      devstack/plugin.sh
  4. 1
      doc/source/installation/manual.rst
  5. 15
      doc/source/installation/network_namespace.rst
  6. 8
      doc/source/installation/network_policy.rst
  7. 4
      doc/source/installation/ports-pool.rst
  8. 0
      kubernetes_crds/kuryr_crds/kuryrloadbalancer.yaml
  9. 0
      kubernetes_crds/kuryr_crds/kuryrnet.yaml
  10. 0
      kubernetes_crds/kuryr_crds/kuryrnetpolicy.yaml
  11. 59
      kubernetes_crds/kuryr_crds/kuryrnetwork.yaml
  12. 4
      kuryr_kubernetes/constants.py
  13. 2
      kuryr_kubernetes/controller/drivers/base.py
  14. 160
      kuryr_kubernetes/controller/drivers/namespace_subnet.py
  15. 33
      kuryr_kubernetes/controller/drivers/utils.py
  16. 161
      kuryr_kubernetes/controller/handlers/kuryrnetwork.py
  17. 55
      kuryr_kubernetes/controller/handlers/kuryrnetwork_population.py
  18. 6
      kuryr_kubernetes/controller/handlers/lbaas.py
  19. 358
      kuryr_kubernetes/controller/handlers/namespace.py
  20. 8
      kuryr_kubernetes/controller/handlers/policy.py
  21. 8
      kuryr_kubernetes/controller/handlers/vif.py
  22. 4
      kuryr_kubernetes/exceptions.py
  23. 11
      kuryr_kubernetes/handlers/k8s_base.py
  24. 31
      kuryr_kubernetes/k8s_client.py
  25. 199
      kuryr_kubernetes/tests/unit/controller/drivers/test_namespace_subnet.py
  26. 281
      kuryr_kubernetes/tests/unit/controller/handlers/test_kuryrnetwork.py
  27. 80
      kuryr_kubernetes/tests/unit/controller/handlers/test_kuryrnetwork_population.py
  28. 257
      kuryr_kubernetes/tests/unit/controller/handlers/test_namespace.py
  29. 3
      setup.cfg
  30. 1
      tools/gate/copy_k8s_logs.sh

6
.zuul.d/octavia.yaml

@ -99,7 +99,7 @@
vars:
devstack_localrc:
DOCKER_CGROUP_DRIVER: "systemd"
KURYR_ENABLED_HANDLERS: vif,lb,lbaasspec,namespace,pod_label,policy,kuryrnetpolicy
KURYR_ENABLED_HANDLERS: vif,lb,lbaasspec,namespace,pod_label,policy,kuryrnetpolicy,kuryrnetwork
KURYR_SG_DRIVER: policy
KURYR_SUBNET_DRIVER: namespace
devstack_services:
@ -120,7 +120,7 @@
vars:
devstack_localrc:
KURYR_SUBNET_DRIVER: namespace
KURYR_ENABLED_HANDLERS: vif,lb,lbaasspec,namespace,pod_label,policy,kuryrnetpolicy
KURYR_ENABLED_HANDLERS: vif,lb,lbaasspec,namespace,pod_label,policy,kuryrnetpolicy,kuryrnetwork
KURYR_SG_DRIVER: policy
KURYR_USE_PORT_POOLS: true
KURYR_POD_VIF_DRIVER: neutron-vif
@ -134,7 +134,7 @@
parent: kuryr-kubernetes-tempest-containerized
vars:
devstack_localrc:
KURYR_ENABLED_HANDLERS: vif,lb,lbaasspec,namespace,pod_label,policy,kuryrnetpolicy
KURYR_ENABLED_HANDLERS: vif,lb,lbaasspec,namespace,pod_label,policy,kuryrnetpolicy,kuryrnetwork
KURYR_SG_DRIVER: policy
KURYR_SUBNET_DRIVER: namespace

1
devstack/lib/kuryr_kubernetes

@ -450,6 +450,7 @@ rules:
verbs: ["*"]
resources:
- kuryrnets
- kuryrnetworks
- kuryrnetpolicies
- kuryrloadbalancers
- apiGroups: ["networking.k8s.io"]

5
devstack/plugin.sh

@ -940,6 +940,7 @@ function update_tempest_conf_file {
iniset $TEMPEST_CONFIG kuryr_kubernetes ipv6 True
fi
iniset $TEMPEST_CONFIG kuryr_kubernetes validate_crd True
iniset $TEMPEST_CONFIG kuryr_kubernetes kuryrnetworks True
}
source $DEST/kuryr-kubernetes/devstack/lib/kuryr_kubernetes
@ -1078,9 +1079,7 @@ if [[ "$1" == "stack" && "$2" == "extra" ]]; then
fi
if is_service_enabled kuryr-kubernetes; then
/usr/local/bin/kubectl apply -f ${KURYR_HOME}/kubernetes_crds/kuryrnet.yaml
/usr/local/bin/kubectl apply -f ${KURYR_HOME}/kubernetes_crds/kuryrnetpolicy.yaml
/usr/local/bin/kubectl apply -f ${KURYR_HOME}/kubernetes_crds/kuryrloadbalancer.yaml
/usr/local/bin/kubectl apply -f ${KURYR_HOME}/kubernetes_crds/kuryr_crds/
if [ "$KURYR_K8S_CONTAINERIZED_DEPLOYMENT" == "True" ]; then
generate_containerized_kuryr_resources
fi

1
doc/source/installation/manual.rst

@ -93,6 +93,7 @@ Edit ``kuryr.conf``:
verbs: ["*"]
resources:
- kuryrnets
- kuryrnetworks
- kuryrnetpolicies
- kuryrloadbalancers
- apiGroups: ["networking.k8s.io"]

15
doc/source/installation/network_namespace.rst

@ -13,16 +13,16 @@ the next steps are needed:
.. code-block:: ini
[kubernetes]
enabled_handlers=vif,lb,lbaasspec,namespace
enabled_handlers=vif,lb,lbaasspec,namespace,kuryrnetwork
Note that if you also want to enable prepopulation of ports pools upon new
namespace creation, you need to add the kuryrnet handler (more details on
:doc:`./ports-pool`):
namespace creation, you need to also add the kuryrnetwork_population
handler (more details on :doc:`./ports-pool`):
.. code-block:: ini
[kubernetes]
enabled_handlers=vif,lb,lbaasspec,namespace,kuryrnet
enabled_handlers=vif,lb,lbaasspec,namespace,kuryrnetwork,kuryrnetwork_population
#. Enable the namespace subnet driver by modifying the default
pod_subnet_driver option at kuryr.conf:
@ -73,7 +73,7 @@ to add the namespace handler and state the namespace subnet driver with:
.. code-block:: console
KURYR_SUBNET_DRIVER=namespace
KURYR_ENABLED_HANDLERS=vif,lb,lbaasspec,namespace
KURYR_ENABLED_HANDLERS=vif,lb,lbaasspec,namespace,kuryrnetwork
.. note::
@ -103,7 +103,7 @@ Testing the network per namespace functionality
test2 Active 5s
... ... ...
$ kubectl get kuryrnets
$ kubectl get kuryrnetworks -A
NAME AGE
ns-test1 1m
ns-test2 1m
@ -152,7 +152,8 @@ Testing the network per namespace functionality
demo-5995548848-lmmjc: HELLO! I AM ALIVE!!!
#. And finally, to remove the namespace and all its resources, including
openstack networks, kuryrnet CRD, svc, pods, you just need to do:
openstack networks, kuryrnetwork CRD, svc, pods, you just need to
do:
.. code-block:: console

8
doc/source/installation/network_policy.rst

@ -10,16 +10,16 @@ be found at :doc:`./devstack/containerized`):
.. code-block:: ini
[kubernetes]
enabled_handlers=vif,lb,lbaasspec,policy,pod_label,namespace,kuryrnetpolicy
enabled_handlers=vif,lb,lbaasspec,policy,pod_label,namespace,kuryrnetwork,kuryrnetpolicy
Note that if you also want to enable prepopulation of ports pools upon new
namespace creation, you need to add the kuryrnet handler (more details on
:doc:`./ports-pool`):
namespace creation, you need to also add the kuryrnetwork_population handler
(more details on :doc:`./ports-pool`):
.. code-block:: ini
[kubernetes]
enabled_handlers=vif,lb,lbaasspec,policy,pod_label,namespace,kuryrnetpolicy,kuryrnet
enabled_handlers=vif,lb,lbaasspec,policy,pod_label,namespace,kuryrnetpolicy,kuryrnetwork,kuryrnetwork_population
After that, enable also the security group drivers for policies:

4
doc/source/installation/ports-pool.rst

@ -169,7 +169,7 @@ subnet), the next handler needs to be enabled:
.. code-block:: ini
[kubernetes]
enabled_handlers=vif,lb,lbaasspec,namespace,*kuryrnet*
enabled_handlers=vif,lb,lbaasspec,namespace,*kuryrnetwork*
This can be enabled at devstack deployment time to by adding the next to the
@ -177,4 +177,4 @@ local.conf:
.. code-block:: bash
KURYR_ENABLED_HANDLERS=vif,lb,lbaasspec,namespace,*kuryrnet*
KURYR_ENABLED_HANDLERS=vif,lb,lbaasspec,namespace,*kuryrnetwork*

0
kubernetes_crds/kuryrloadbalancer.yaml → kubernetes_crds/kuryr_crds/kuryrloadbalancer.yaml

0
kubernetes_crds/kuryrnet.yaml → kubernetes_crds/kuryr_crds/kuryrnet.yaml

0
kubernetes_crds/kuryrnetpolicy.yaml → kubernetes_crds/kuryr_crds/kuryrnetpolicy.yaml

59
kubernetes_crds/kuryr_crds/kuryrnetwork.yaml

@ -0,0 +1,59 @@
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: kuryrnetworks.openstack.org
spec:
group: openstack.org
scope: Namespaced
names:
plural: kuryrnetworks
singular: kuryrnetwork
kind: KuryrNetwork
shortNames:
- kns
versions:
- name: v1
served: true
storage: true
additionalPrinterColumns:
- name: SUBNET-CIDR
type: string
description: The subnet CIDR allocated to the namespace
jsonPath: .status.subnetCIDR
- name: Age
type: date
jsonPath: .metadata.creationTimestamp
schema:
openAPIV3Schema:
type: object
properties:
spec:
type: object
required:
- nsName
- projectId
- nsLabels
properties:
nsName:
type: string
projectId:
type: string
nsLabels:
x-kubernetes-preserve-unknown-fields: true
type: object
status:
type: object
properties:
netId:
type: string
populated:
type: boolean
routerId:
type: string
subnetCIDR:
type: string
subnetId:
type: string
nsLabels:
x-kubernetes-preserve-unknown-fields: true
type: object

4
kuryr_kubernetes/constants.py

@ -18,6 +18,7 @@ K8S_API_NAMESPACES = K8S_API_BASE + '/namespaces'
K8S_API_CRD = '/apis/openstack.org/v1'
K8S_API_CRD_NAMESPACES = K8S_API_CRD + '/namespaces'
K8S_API_CRD_KURYRNETS = K8S_API_CRD + '/kuryrnets'
K8S_API_CRD_KURYRNETWORKS = K8S_API_CRD + '/kuryrnetworks'
K8S_API_CRD_KURYRNETPOLICIES = K8S_API_CRD + '/kuryrnetpolicies'
K8S_API_CRD_KURYRLOADBALANCERS = K8S_API_CRD + '/kuryrloadbalancers'
K8S_API_POLICIES = '/apis/networking.k8s.io/v1/networkpolicies'
@ -30,6 +31,7 @@ K8S_OBJ_SERVICE = 'Service'
K8S_OBJ_ENDPOINTS = 'Endpoints'
K8S_OBJ_POLICY = 'NetworkPolicy'
K8S_OBJ_KURYRNET = 'KuryrNet'
K8S_OBJ_KURYRNETWORK = 'KuryrNetwork'
K8S_OBJ_KURYRNETPOLICY = 'KuryrNetPolicy'
K8S_OBJ_KURYRLOADBALANCER = 'KuryrLoadBalancer'
@ -55,6 +57,8 @@ K8S_ANNOTATION_OLD_DRIVER = 'old_driver'
K8S_ANNOTATION_CURRENT_DRIVER = 'current_driver'
K8S_ANNOTATION_NEUTRON_PORT = 'neutron_id'
KURYRNETWORK_FINALIZER = 'kuryrnetwork.finalizers.kuryr.openstack.org'
K8S_OS_VIF_NOOP_PLUGIN = "noop"
CNI_EXCEPTION_CODE = 100

2
kuryr_kubernetes/controller/drivers/base.py

@ -170,7 +170,7 @@ class PodSubnetsDriver(DriverBase, metaclass=abc.ABCMeta):
def delete_namespace_subnet(self, kuryr_net_crd):
"""Delete network resources associated to a namespace.
:param kuryr_net_crd: kuryrnet CRD obj dict that contains Neutron's
:param kuryr_net_crd: kuryrnetwork CRD obj dict that contains Neutron's
network resources associated to a namespace
"""
raise NotImplementedError()

160
kuryr_kubernetes/controller/drivers/namespace_subnet.py

@ -54,40 +54,30 @@ class NamespacePodSubnetDriver(default_subnet.DefaultPodSubnetDriver):
def _get_namespace_subnet_id(self, namespace):
kubernetes = clients.get_kubernetes_client()
try:
ns = kubernetes.get('%s/namespaces/%s' % (constants.K8S_API_BASE,
namespace))
net_crd_path = (f"{constants.K8S_API_CRD_NAMESPACES}/"
f"{namespace}/kuryrnetworks/{namespace}")
net_crd = kubernetes.get(net_crd_path)
except exceptions.K8sResourceNotFound:
LOG.warning("Namespace %s not found", namespace)
raise
LOG.debug("Kuryrnetwork resource not yet created, retrying...")
raise exceptions.ResourceNotReady(namespace)
except exceptions.K8sClientException:
LOG.exception("Kubernetes Client Exception.")
raise exceptions.ResourceNotReady(namespace)
raise
try:
annotations = ns['metadata']['annotations']
net_crd_name = annotations[constants.K8S_ANNOTATION_NET_CRD]
subnet_id = net_crd['status']['subnetId']
except KeyError:
LOG.debug("Namespace missing CRD annotations for selecting "
"the corresponding subnet.")
LOG.debug("Subnet for namespace %s not yet created, retrying.",
namespace)
raise exceptions.ResourceNotReady(namespace)
try:
net_crd = kubernetes.get('%s/kuryrnets/%s' % (
constants.K8S_API_CRD, net_crd_name))
except exceptions.K8sResourceNotFound:
LOG.debug("Kuryrnet resource not yet created, retrying...")
raise exceptions.ResourceNotReady(net_crd_name)
except exceptions.K8sClientException:
LOG.exception("Kubernetes Client Exception.")
raise
return net_crd['spec']['subnetId']
return subnet_id
def delete_namespace_subnet(self, net_crd):
subnet_id = net_crd['spec']['subnetId']
net_id = net_crd['spec']['netId']
subnet_id = net_crd['status'].get('subnetId')
net_id = net_crd['status'].get('netId')
self._delete_namespace_network_resources(subnet_id, net_id)
if net_id:
self._delete_namespace_network_resources(subnet_id, net_id)
def _delete_namespace_network_resources(self, subnet_id, net_id):
os_net = clients.get_network_client()
@ -147,83 +137,77 @@ class NamespacePodSubnetDriver(default_subnet.DefaultPodSubnetDriver):
LOG.exception("Error deleting network %s.", net_id)
raise
def create_namespace_network(self, namespace, project_id):
def create_network(self, ns_name, project_id):
os_net = clients.get_network_client()
net_name = 'ns/' + ns_name + '-net'
tags = oslo_cfg.CONF.neutron_defaults.resource_tags
if tags:
networks = os_net.networks(name=net_name, tags=tags)
else:
networks = os_net.networks(name=net_name)
router_id = oslo_cfg.CONF.namespace_subnet.pod_router
subnet_pool_id = oslo_cfg.CONF.namespace_subnet.pod_subnet_pool
try:
# NOTE(ltomasbo): only one network must exists
return next(networks).id
except StopIteration:
LOG.debug('Network does not exist. Creating.')
# create network with namespace as name
network_name = "ns/" + namespace + "-net"
subnet_name = "ns/" + namespace + "-subnet"
try:
neutron_net = os_net.create_network(name=network_name,
neutron_net = os_net.create_network(name=net_name,
project_id=project_id)
c_utils.tag_neutron_resources([neutron_net])
# create a subnet within that network
try:
neutron_subnet = (os_net
.create_subnet(network_id=neutron_net.id,
ip_version=4,
name=subnet_name,
enable_dhcp=False,
subnetpool_id=subnet_pool_id,
project_id=project_id))
except os_exc.ConflictException:
LOG.debug("Max number of retries on neutron side achieved, "
"raising ResourceNotReady to retry subnet creation "
"for %s", subnet_name)
raise exceptions.ResourceNotReady(subnet_name)
c_utils.tag_neutron_resources([neutron_subnet])
# connect the subnet to the router
clients.handle_neutron_errors(os_net.add_interface_to_router,
router_id,
subnet_id=neutron_subnet.id)
except os_exc.SDKException:
LOG.exception("Error creating neutron resources for the namespace "
"%s", namespace)
"%s", ns_name)
raise
return {'netId': neutron_net.id,
'routerId': router_id,
'subnetId': neutron_subnet.id,
'subnetCIDR': neutron_subnet.cidr}
return neutron_net.id
def rollback_network_resources(self, net_crd_spec, namespace):
def create_subnet(self, ns_name, project_id, net_id):
os_net = clients.get_network_client()
try:
try:
clients.handle_neutron_errors(
os_net.remove_interface_from_router,
net_crd_spec['routerId'],
subnet_id=net_crd_spec['subnetId'])
except os_exc.NotFoundException:
# Nothing to worry about, either router or subnet is no more,
# or subnet is already detached.
pass
os_net.delete_network(net_crd_spec['netId'])
except os_exc.SDKException:
LOG.exception("Failed to clean up network resources associated to "
"%(net_id)s, created for the namespace: "
"%(namespace)s." % {'net_id': net_crd_spec['netId'],
'namespace': namespace})
def cleanup_namespace_networks(self, namespace):
os_net = clients.get_network_client()
net_name = 'ns/' + namespace + '-net'
subnet_name = "ns/" + ns_name + "-subnet"
tags = oslo_cfg.CONF.neutron_defaults.resource_tags
if tags:
networks = os_net.networks(name=net_name, tags=tags)
subnets = os_net.subnets(name=subnet_name, tags=tags)
else:
networks = os_net.networks(name=net_name)
subnets = os_net.subnets(name=subnet_name)
for net in networks:
net_id = net.id
subnets = net.subnet_ids
subnet_id = None
if subnets:
# NOTE(ltomasbo): Each network created by kuryr only has
# one subnet
subnet_id = subnets[0]
self._delete_namespace_network_resources(subnet_id, net_id)
try:
# NOTE(ltomasbo): only one subnet must exists
subnet = next(subnets)
return subnet.id, subnet.cidr
except StopIteration:
LOG.debug('Subnet does not exist. Creating.')
# create subnet with namespace as name
subnet_pool_id = oslo_cfg.CONF.namespace_subnet.pod_subnet_pool
try:
neutron_subnet = (os_net
.create_subnet(network_id=net_id,
ip_version=4,
name=subnet_name,
enable_dhcp=False,
subnetpool_id=subnet_pool_id,
project_id=project_id))
except os_exc.ConflictException:
LOG.debug("Max number of retries on neutron side achieved, "
"raising ResourceNotReady to retry subnet creation "
"for %s", subnet_name)
raise exceptions.ResourceNotReady(subnet_name)
c_utils.tag_neutron_resources([neutron_subnet])
return neutron_subnet.id, neutron_subnet.cidr
def add_subnet_to_router(self, subnet_id):
os_net = clients.get_network_client()
router_id = oslo_cfg.CONF.namespace_subnet.pod_router
try:
# connect the subnet to the router
os_net.add_interface_to_router(router_id, subnet_id=subnet_id)
except os_exc.BadRequestException:
LOG.debug("Subnet %s already connected to the router", subnet_id)
except os_exc.SDKException:
LOG.exception("Error attaching the subnet %s to the router",
subnet_id)
raise
return router_id

33
kuryr_kubernetes/controller/drivers/utils.py

@ -221,18 +221,6 @@ def delete_security_group_rule(security_group_rule_id):
raise
def patch_kuryrnet_crd(crd, populated=True):
kubernetes = clients.get_kubernetes_client()
crd_name = crd['metadata']['name']
LOG.debug('Patching KuryrNet CRD %s' % crd_name)
try:
kubernetes.patch_crd('spec', crd['metadata']['selfLink'],
{'populated': populated})
except k_exc.K8sClientException:
LOG.exception('Error updating kuryrnet CRD %s', crd_name)
raise
def patch_kuryrnetworkpolicy_crd(crd, i_rules, e_rules, pod_selector,
np_spec=None):
kubernetes = clients.get_kubernetes_client()
@ -394,19 +382,22 @@ def match_selector(selector, labels):
def get_namespace_subnet_cidr(namespace):
kubernetes = clients.get_kubernetes_client()
try:
ns_annotations = namespace['metadata']['annotations']
ns_name = ns_annotations[constants.K8S_ANNOTATION_NET_CRD]
except KeyError:
LOG.exception('Namespace handler must be enabled to support '
'Network Policies with namespaceSelector')
net_crd_path = (f"{constants.K8S_API_CRD_NAMESPACES}/"
f"{namespace['metadata']['name']}/kuryrnetworks/"
f"{namespace['metadata']['name']}")
net_crd = kubernetes.get(net_crd_path)
except k_exc.K8sResourceNotFound:
LOG.exception('Namespace not yet ready')
raise k_exc.ResourceNotReady(namespace)
try:
net_crd = kubernetes.get('{}/kuryrnets/{}'.format(
constants.K8S_API_CRD, ns_name))
except k_exc.K8sClientException:
LOG.exception("Kubernetes Client Exception.")
raise
return net_crd['spec']['subnetCIDR']
try:
subnet_cidr = net_crd['status']['subnetCIDR']
except KeyError:
LOG.exception('Namespace not yet ready')
raise k_exc.ResourceNotReady(namespace)
return subnet_cidr
def tag_neutron_resources(resources):

161
kuryr_kubernetes/controller/handlers/kuryrnetwork.py

@ -0,0 +1,161 @@
# Copyright 2020 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo_config import cfg as oslo_cfg
from oslo_log import log as logging
from kuryr_kubernetes import clients
from kuryr_kubernetes import constants
from kuryr_kubernetes.controller.drivers import base as drivers
from kuryr_kubernetes.controller.drivers import utils as driver_utils
from kuryr_kubernetes import exceptions as k_exc
from kuryr_kubernetes.handlers import k8s_base
LOG = logging.getLogger(__name__)
class KuryrNetworkHandler(k8s_base.ResourceEventHandler):
    """Controller side of KuryrNetwork process for Kubernetes pods.

    `KuryrNetworkHandler` runs on the Kuryr-Kubernetes controller and is
    responsible for creating the OpenStack resources associated to the
    newly created namespaces, and update the KuryrNetwork CRD status with
    them.
    """
    OBJECT_KIND = constants.K8S_OBJ_KURYRNETWORK
    OBJECT_WATCH_PATH = constants.K8S_API_CRD_KURYRNETWORKS

    def __init__(self):
        super(KuryrNetworkHandler, self).__init__()
        self._drv_project = drivers.NamespaceProjectDriver.get_instance()
        self._drv_subnets = drivers.PodSubnetsDriver.get_instance()
        self._drv_sg = drivers.PodSecurityGroupsDriver.get_instance()
        self._drv_vif_pool = drivers.VIFPoolDriver.get_instance(
            specific_driver='multi_pool')
        self._drv_vif_pool.set_vif_driver()
        # LBaaS/service-SG drivers are only needed to reconcile service
        # security groups, which is only relevant with network policies on.
        if self._is_network_policy_enabled():
            self._drv_lbaas = drivers.LBaaSDriver.get_instance()
            self._drv_svc_sg = (
                drivers.ServiceSecurityGroupsDriver.get_instance())

    def on_present(self, kuryrnet_crd):
        """Create (or resume creating) Neutron resources for the namespace.

        Each step (network, subnet, router attachment) is patched into the
        CRD status as soon as it completes, so a restarted controller
        resumes from the last recorded step instead of re-creating
        everything from scratch.

        :param kuryrnet_crd: KuryrNetwork CRD object dict.
        """
        ns_name = kuryrnet_crd['spec']['nsName']
        project_id = kuryrnet_crd['spec']['projectId']
        kns_status = kuryrnet_crd.get('status', {})

        # crd_creation tracks whether any earlier step ran in this pass;
        # once one step runs, every later step is forced to run too.
        crd_creation = False
        net_id = kns_status.get('netId')
        if not net_id:
            net_id = self._drv_subnets.create_network(ns_name, project_id)
            status = {'netId': net_id}
            self._patch_kuryrnetwork_crd(kuryrnet_crd, status)
            crd_creation = True
        subnet_id = kns_status.get('subnetId')
        if not subnet_id or crd_creation:
            subnet_id, subnet_cidr = self._drv_subnets.create_subnet(
                ns_name, project_id, net_id)
            status = {'subnetId': subnet_id, 'subnetCIDR': subnet_cidr}
            self._patch_kuryrnetwork_crd(kuryrnet_crd, status)
            crd_creation = True
        if not kns_status.get('routerId') or crd_creation:
            router_id = self._drv_subnets.add_subnet_to_router(subnet_id)
            # populated=False lets the population handler know the new
            # subnet's port pools still need to be filled.
            status = {'routerId': router_id, 'populated': False}
            self._patch_kuryrnetwork_crd(kuryrnet_crd, status)
            crd_creation = True

        # check labels to create sg rules
        ns_labels = kns_status.get('nsLabels', {})
        if (crd_creation or
                ns_labels != kuryrnet_crd['spec']['nsLabels']):
            # update SG and svc SGs
            namespace = driver_utils.get_namespace(ns_name)
            crd_selectors = self._drv_sg.update_namespace_sg_rules(namespace)
            if (self._is_network_policy_enabled() and crd_selectors and
                    oslo_cfg.CONF.octavia_defaults.enforce_sg_rules):
                services = driver_utils.get_services()
                self._update_services(services, crd_selectors, project_id)
            # update status
            status = {'nsLabels': kuryrnet_crd['spec']['nsLabels']}
            self._patch_kuryrnetwork_crd(kuryrnet_crd, status, labels=True)

    def on_finalize(self, kuryrnet_crd):
        """Release the namespace's Neutron resources, then drop the finalizer.

        :param kuryrnet_crd: KuryrNetwork CRD object dict being deleted.
        :raises ResourceNotReady: if the subnet cannot be removed yet, so
            the event is retried.
        """
        LOG.debug("Deleting kuryrnetwork CRD resources: %s", kuryrnet_crd)

        net_id = kuryrnet_crd['status'].get('netId')
        if net_id:
            # Drain the port pools of the network before removing it.
            self._drv_vif_pool.delete_network_pools(
                kuryrnet_crd['status']['netId'])
            try:
                self._drv_subnets.delete_namespace_subnet(kuryrnet_crd)
            except k_exc.ResourceNotReady:
                LOG.debug("Subnet is not ready to be removed.")
                # TODO(ltomasbo): Once KuryrPort CRDs is supported, we should
                # execute a delete network ports method here to remove the
                # ports associated to the namespace/subnet, ensuring next
                # retry will be successful
                raise

        # Minimal namespace dict: the SG driver only needs the name here.
        namespace = {
            'metadata': {'name': kuryrnet_crd['spec']['nsName']}}
        crd_selectors = self._drv_sg.delete_namespace_sg_rules(namespace)

        if (self._is_network_policy_enabled() and crd_selectors and
                oslo_cfg.CONF.octavia_defaults.enforce_sg_rules):
            project_id = kuryrnet_crd['spec']['projectId']
            services = driver_utils.get_services()
            self._update_services(services, crd_selectors, project_id)

        # Removing the finalizer lets Kubernetes complete the deletion.
        kubernetes = clients.get_kubernetes_client()
        LOG.debug('Removing finalizer for KuryrNet CRD %s', kuryrnet_crd)
        try:
            kubernetes.patch_crd('metadata',
                                 kuryrnet_crd['metadata']['selfLink'],
                                 'finalizers',
                                 action='remove')
        except k_exc.K8sClientException:
            LOG.exception('Error removing kuryrnetwork CRD finalizer for %s',
                          kuryrnet_crd)
            raise

    def _is_network_policy_enabled(self):
        # Policy support requires both the policy handler and the
        # policy-based service security groups driver to be configured.
        enabled_handlers = oslo_cfg.CONF.kubernetes.enabled_handlers
        svc_sg_driver = oslo_cfg.CONF.kubernetes.service_security_groups_driver
        return ('policy' in enabled_handlers and svc_sg_driver == 'policy')

    def _update_services(self, services, crd_selectors, project_id):
        # Refresh load balancer SGs only for services whose pods are
        # affected by the changed namespace selectors.
        for service in services.get('items'):
            if not driver_utils.service_matches_affected_pods(
                    service, crd_selectors):
                continue
            sgs = self._drv_svc_sg.get_security_groups(service,
                                                       project_id)
            self._drv_lbaas.update_lbaas_sg(service, sgs)

    def _patch_kuryrnetwork_crd(self, kuryrnet_crd, status, labels=False):
        """Merge *status* fields into the CRD's status.

        NOTE(review): with labels=True this goes through patch_crd and
        otherwise through patch — presumably one merges a single field
        while the other replaces the status object; confirm against
        k8s_client before relying on the distinction.
        """
        kubernetes = clients.get_kubernetes_client()
        LOG.debug('Patching KuryrNetwork CRD %s', kuryrnet_crd)
        try:
            if labels:
                kubernetes.patch_crd('status',
                                     kuryrnet_crd['metadata']['selfLink'],
                                     status)
            else:
                kubernetes.patch('status',
                                 kuryrnet_crd['metadata']['selfLink'],
                                 status)
        except k_exc.K8sResourceNotFound:
            LOG.debug('KuryrNetwork CRD not found %s', kuryrnet_crd)
        except k_exc.K8sClientException:
            LOG.exception('Error updating kuryrNetwork CRD %s', kuryrnet_crd)
            raise

55
kuryr_kubernetes/controller/handlers/kuryrnet.py → kuryr_kubernetes/controller/handlers/kuryrnetwork_population.py

@ -1,4 +1,4 @@
# Copyright 2019 Red Hat, Inc.
# Copyright 2020 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@ -14,9 +14,9 @@
from oslo_log import log as logging
from kuryr_kubernetes import clients
from kuryr_kubernetes import constants
from kuryr_kubernetes.controller.drivers import base as drivers
from kuryr_kubernetes.controller.drivers import utils as driver_utils
from kuryr_kubernetes import exceptions
from kuryr_kubernetes.handlers import k8s_base
from kuryr_kubernetes import utils
@ -24,45 +24,35 @@ from kuryr_kubernetes import utils
LOG = logging.getLogger(__name__)
class KuryrNetHandler(k8s_base.ResourceEventHandler):
"""Controller side of KuryrNet process for Kubernetes pods.
class KuryrNetworkPopulationHandler(k8s_base.ResourceEventHandler):
"""Controller side of KuryrNetwork process for Kubernetes pods.
`KuryrNetHandler` runs on the Kuryr-Kubernetes controller and is
responsible for populating pools for newly created namespaces.
`KuryrNetworkPopulationHandler` runs on the Kuryr-Kubernetes controller
and is responsible for populating pools for newly created namespaces.
"""
OBJECT_KIND = constants.K8S_OBJ_KURYRNET
OBJECT_WATCH_PATH = constants.K8S_API_CRD_KURYRNETS
OBJECT_KIND = constants.K8S_OBJ_KURYRNETWORK
OBJECT_WATCH_PATH = constants.K8S_API_CRD_KURYRNETWORKS
def __init__(self):
super(KuryrNetHandler, self).__init__()
self._drv_project = drivers.NamespaceProjectDriver.get_instance()
super(KuryrNetworkPopulationHandler, self).__init__()
self._drv_subnets = drivers.PodSubnetsDriver.get_instance()
self._drv_vif_pool = drivers.VIFPoolDriver.get_instance(
specific_driver='multi_pool')
self._drv_vif_pool.set_vif_driver()
def on_added(self, kuryrnet_crd):
subnet_id = kuryrnet_crd['spec'].get('subnetId')
if kuryrnet_crd['spec'].get('populated'):
LOG.debug("Subnet %s already populated", subnet_id)
subnet_id = kuryrnet_crd['status'].get('subnetId')
if not subnet_id:
return
namespace = kuryrnet_crd['metadata']['annotations'].get(
'namespaceName')
namespace_obj = driver_utils.get_namespace(namespace)
if not namespace_obj:
LOG.debug("Skipping Kuryrnet addition. Inexistent namespace.")
return
namespace_kuryrnet_annotations = driver_utils.get_annotations(
namespace_obj, constants.K8S_ANNOTATION_NET_CRD)
if namespace_kuryrnet_annotations != kuryrnet_crd['metadata']['name']:
# NOTE(ltomasbo): Ensure pool is not populated if namespace is not
# yet annotated with kuryrnet information
if kuryrnet_crd['status'].get('populated'):
LOG.debug("Subnet %s already populated", subnet_id)
return
namespace = kuryrnet_crd['spec'].get('nsName')
project_id = kuryrnet_crd['spec'].get('projectId')
# NOTE(ltomasbo): using namespace name instead of object as it is not
# required
project_id = self._drv_project.get_project(namespace)
subnets = self._drv_subnets.get_namespace_subnet(namespace, subnet_id)
nodes = utils.get_nodes_ips()
@ -74,7 +64,7 @@ class KuryrNetHandler(k8s_base.ResourceEventHandler):
# the pools will not get the ports loaded (as they are not ACTIVE)
# and new population actions may be triggered if the controller was
# restarted before performing the populated=true patching.
driver_utils.patch_kuryrnet_crd(kuryrnet_crd, populated=True)
self._patch_kuryrnetwork_crd(kuryrnet_crd, populated=True)
# TODO(ltomasbo): Skip the master node where pods are not usually
# allocated.
for node_ip in nodes:
@ -86,5 +76,16 @@ class KuryrNetHandler(k8s_base.ResourceEventHandler):
except exceptions.ResourceNotReady:
# Ensure the repopulation is retriggered if the system was not
# yet ready to perform the repopulation actions
driver_utils.patch_kuryrnet_crd(kuryrnet_crd, populated=False)
self._patch_kuryrnetwork_crd(kuryrnet_crd, populated=False)
raise
def _patch_kuryrnetwork_crd(self, kns_crd, populated=True):
    """Record in the CRD status whether the port pools were populated.

    :param kns_crd: KuryrNetwork CRD object dict.
    :param populated: True once the subnet's port pools have been filled.
    :raises K8sClientException: if the status patch fails, so the caller
        can retry.
    """
    kubernetes = clients.get_kubernetes_client()
    crd_name = kns_crd['metadata']['name']
    LOG.debug('Patching KuryrNetwork CRD %s' % crd_name)
    try:
        # Only the 'populated' flag of the status subresource is touched.
        kubernetes.patch_crd('status', kns_crd['metadata']['selfLink'],
                             {'populated': populated})
    except exceptions.K8sClientException:
        LOG.exception('Error updating kuryrnet CRD %s', crd_name)
        raise

6
kuryr_kubernetes/controller/handlers/lbaas.py

@ -584,12 +584,12 @@ class LoadBalancerHandler(k8s_base.ResourceEventHandler):
status_data = {"loadBalancer": {
"ingress": [{"ip": lb_ip_address.format()}]}}
k8s = clients.get_kubernetes_client()
svc_link = self._get_service_link(endpoints)
svc_status_link = self._get_service_link(endpoints) + '/status'
try:
k8s.patch("status", svc_link, status_data)
k8s.patch("status", svc_status_link, status_data)
except k_exc.K8sClientException:
# REVISIT(ivc): only raise ResourceNotReady for NotFound
raise k_exc.ResourceNotReady(svc_link)
raise k_exc.ResourceNotReady(svc_status_link)
def _get_service_link(self, endpoints):
ep_link = endpoints['metadata']['selfLink']

358
kuryr_kubernetes/controller/handlers/namespace.py

@ -12,18 +12,11 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import eventlet
import time
from openstack import exceptions as os_exc
from oslo_config import cfg as oslo_cfg
from oslo_log import log as logging
from oslo_serialization import jsonutils
from kuryr_kubernetes import clients
from kuryr_kubernetes import constants
from kuryr_kubernetes.controller.drivers import base as drivers
from kuryr_kubernetes.controller.drivers import utils as driver_utils
from kuryr_kubernetes import exceptions
from kuryr_kubernetes.handlers import k8s_base
from kuryr_kubernetes import utils
@ -31,9 +24,6 @@ from kuryr_kubernetes import utils
LOG = logging.getLogger(__name__)
DEFAULT_CLEANUP_INTERVAL = 60
DEFAULT_CLEANUP_RETRIES = 10
class NamespaceHandler(k8s_base.ResourceEventHandler):
OBJECT_KIND = constants.K8S_OBJ_NAMESPACE
@ -42,296 +32,128 @@ class NamespaceHandler(k8s_base.ResourceEventHandler):
def __init__(self):
super(NamespaceHandler, self).__init__()
self._drv_project = drivers.NamespaceProjectDriver.get_instance()
self._drv_subnets = drivers.PodSubnetsDriver.get_instance()
self._drv_sg = drivers.PodSecurityGroupsDriver.get_instance()
self._drv_vif_pool = drivers.VIFPoolDriver.get_instance(
specific_driver='multi_pool')
self._drv_vif_pool.set_vif_driver()
if self._is_network_policy_enabled():
self._drv_lbaas = drivers.LBaaSDriver.get_instance()
self._drv_svc_sg = (
drivers.ServiceSecurityGroupsDriver.get_instance())
self._upgrade_crds()
# NOTE(ltomasbo): Checks and clean up leftovers due to
# kuryr-controller restarts
eventlet.spawn(self._cleanup_namespace_leftovers)
def _upgrade_crds(self):
k8s = clients.get_kubernetes_client()
try:
net_crds = k8s.get(constants.K8S_API_CRD_KURYRNETS)
namespaces = k8s.get(constants.K8S_API_NAMESPACES)
except exceptions.K8sResourceNotFound:
return
except exceptions.K8sClientException:
LOG.warning("Error retriving namespace information")
raise
def on_present(self, namespace):
ns_name = namespace['metadata']['name']
current_namespace_labels = namespace['metadata'].get('labels')
previous_namespace_labels = driver_utils.get_annotated_labels(
namespace, constants.K8S_ANNOTATION_NAMESPACE_LABEL)
LOG.debug("Got previous namespace labels from annotation: %r",
previous_namespace_labels)
ns_dict = {'ns-' + ns['metadata']['name']: ns
for ns in namespaces.get('items')}
project_id = self._drv_project.get_project(namespace)
if current_namespace_labels != previous_namespace_labels:
crd_selectors = self._drv_sg.update_namespace_sg_rules(namespace)
self._set_namespace_labels(namespace, current_namespace_labels)
if (self._is_network_policy_enabled() and crd_selectors and
oslo_cfg.CONF.octavia_defaults.enforce_sg_rules):
services = driver_utils.get_services()
self._update_services(services, crd_selectors, project_id)
for net_crd in net_crds.get('items'):
try:
ns = ns_dict[net_crd['metadata']['name']]
except KeyError:
# Note(ltomasbo): The CRD does not have an associated
# namespace. It must be deleted
LOG.debug('No namespace associated, deleting kuryrnet crd: '
'%s', net_crd)
else:
try:
ns_net_annotations = ns['metadata']['annotations'][
constants.K8S_ANNOTATION_NET_CRD]
except KeyError:
LOG.debug('Namespace associated is not annotated: %s', ns)
else:
LOG.debug('Removing annotation: %', ns_net_annotations)
k8s.remove_annotations(ns['metadata']['selfLink'],
constants.K8S_ANNOTATION_NET_CRD)
try:
k8s.delete(net_crd['metadata']['selfLink'])
except exceptions.K8sResourceNotFound:
LOG.debug('Kuryrnet object already deleted: %s', net_crd)
net_crd_id = self._get_net_crd_id(namespace)
if net_crd_id:
LOG.debug("CRD existing at the new namespace")
def on_present(self, namespace):
ns_labels = namespace['metadata'].get('labels', {})
ns_name = namespace['metadata']['name']
kns_crd = self._get_kns_crd(ns_name)
if kns_crd:
LOG.debug("Previous CRD existing at the new namespace.")
self._update_labels(kns_crd, ns_labels)
return
net_crd_name = 'ns-' + ns_name
net_crd = self._get_net_crd(net_crd_name)
if net_crd:
LOG.debug("Previous CRD existing at the new namespace. "
"Deleting namespace resources and retying its creation.")
self.on_deleted(namespace, net_crd)
raise exceptions.ResourceNotReady(namespace)
# NOTE(ltomasbo): Ensure there is no previously created networks
# leftovers due to a kuryr-controller crash/restart
LOG.debug("Deleting leftovers network resources for namespace: %s",
ns_name)
self._drv_subnets.cleanup_namespace_networks(ns_name)
LOG.debug("Creating network resources for namespace: %s", ns_name)
net_crd_spec = self._drv_subnets.create_namespace_network(ns_name,
project_id)
# create CRD resource for the network
try:
net_crd = self._add_kuryrnet_crd(ns_name, net_crd_spec)
self._drv_sg.create_namespace_sg_rules(namespace)
self._set_net_crd(namespace, net_crd)
except (exceptions.K8sClientException,
exceptions.K8sResourceNotFound):
LOG.exception("Kuryrnet CRD creation failed. Rolling back "
"resources created for the namespace.")
self._drv_subnets.rollback_network_resources(net_crd_spec, ns_name)
try:
self._del_kuryrnet_crd(net_crd_name)
except exceptions.K8sClientException:
LOG.exception("Error when trying to rollback the KuryrNet CRD "
"object %s", net_crd_name)
self._add_kuryrnetwork_crd(ns_name, ns_labels)
except exceptions.K8sClientException:
LOG.exception("Kuryrnetwork CRD creation failed.")
raise exceptions.ResourceNotReady(namespace)
def on_deleted(self, namespace, net_crd=None):
LOG.debug("Deleting namespace: %s", namespace)
if not net_crd:
net_crd_id = self._get_net_crd_id(namespace)
if not net_crd_id:
LOG.warning("There is no CRD annotated at the namespace %s",
namespace)
return
net_crd = self._get_net_crd(net_crd_id)
if not net_crd:
LOG.warning("This should not happen. Probably this is event "
"is processed twice due to a restart or etcd is "
"not in sync")
# NOTE(ltomasbo): We should rely on etcd properly behaving, so
# we are returning here to prevent duplicated events processing
# but not to prevent etcd failures.
def _update_labels(self, kns_crd, ns_labels):
kns_status = kns_crd.get('status')
if kns_status:
kns_crd_labels = kns_crd['status'].get('nsLabels', {})
if kns_crd_labels == ns_labels:
# Labels are already up to date, nothing to do
return
net_crd_name = net_crd['metadata']['name']
self._drv_vif_pool.delete_network_pools(net_crd['spec']['netId'])
kubernetes = clients.get_kubernetes_client()
LOG.debug('Patching KuryrNetwork CRD %s', kns_crd)
try:
self._drv_subnets.delete_namespace_subnet(net_crd)
except exceptions.ResourceNotReady:
LOG.debug("Subnet is not ready to be removed.")
# TODO(ltomasbo): Once KuryrPort CRDs is supported, we should
# execute a delete network ports method here to remove the ports
# associated to the namespace/subnet, ensuring next retry will be
# successful
kubernetes.patch_crd('spec', kns_crd['metadata']['selfLink'],
{'nsLabels': ns_labels})
except exceptions.K8sResourceNotFound:
LOG.debug('KuryrNetwork CRD not found %s', kns_crd)
except exceptions.K8sClientException:
LOG.exception('Error updating kuryrnetwork CRD %s', kns_crd)
raise
self._del_kuryrnet_crd(net_crd_name)
crd_selectors = self._drv_sg.delete_namespace_sg_rules(namespace)
if (self._is_network_policy_enabled() and crd_selectors and
oslo_cfg.CONF.octavia_defaults.enforce_sg_rules):
project_id = self._drv_project.get_project(namespace)
services = driver_utils.get_services()
self._update_services(services, crd_selectors, project_id)
def is_ready(self, quota):
    """Report whether the handler is able to operate.

    Requires the KuryrNet CRD to be registered in the API and enough
    Neutron quota headroom for the namespace resources.
    """
    if utils.has_kuryr_crd(constants.K8S_API_CRD_KURYRNETS):
        return self._check_quota(quota)
    return False
def _check_quota(self, quota):
    """Return False when any limited Neutron resource has no headroom."""
    for resource in ('subnets', 'networks', 'security_groups'):
        resource_quota = quota[resource]
        # Only resources with an actual limit can run out.
        if (utils.has_limit(resource_quota)
                and not utils.is_available(resource, resource_quota)):
            return False
    return True
def _get_net_crd_id(self, namespace):
    """Return the KuryrNet CRD name annotated on the namespace, or None."""
    try:
        return namespace['metadata']['annotations'][
            constants.K8S_ANNOTATION_NET_CRD]
    except KeyError:
        # Namespace was never annotated with a KuryrNet CRD.
        return None
def _get_net_crd(self, net_crd_id):
def _get_kns_crd(self, namespace):
k8s = clients.get_kubernetes_client()
try:
kuryrnet_crd = k8s.get('%s/kuryrnets/%s' % (constants.K8S_API_CRD,
net_crd_id))
kuryrnetwork_crd = k8s.get('{}/{}/kuryrnetworks/{}'.format(
constants.K8S_API_CRD_NAMESPACES, namespace,
namespace))
except exceptions.K8sResourceNotFound:
return None
except exceptions.K8sClientException:
LOG.exception("Kubernetes Client Exception.")
raise
return kuryrnet_crd
def _set_net_crd(self, namespace, net_crd):
LOG.debug("Setting CRD annotations: %s", net_crd)
return kuryrnetwork_crd
k8s = clients.get_kubernetes_client()
k8s.annotate(namespace['metadata']['selfLink'],
{constants.K8S_ANNOTATION_NET_CRD:
net_crd['metadata']['name']},
resource_version=namespace['metadata']['resourceVersion'])
def _add_kuryrnet_crd(self, namespace, net_crd_spec):
def _add_kuryrnetwork_crd(self, namespace, ns_labels):
project_id = self._drv_project.get_project(namespace)
kubernetes = clients.get_kubernetes_client()
net_crd_name = "ns-" + namespace
spec = {k: v for k, v in net_crd_spec.items()}
# NOTE(ltomasbo): To know if the subnet has bee populated with pools.
# This is only needed by the kuryrnet handler to skip actions. But its
# addition does not have any impact if not used
spec['populated'] = False
net_crd = {
kns_crd = {
'apiVersion': 'openstack.org/v1',
'kind': 'KuryrNet',
'kind': 'KuryrNetwork',
'metadata': {
'name': net_crd_name,
'annotations': {
'namespaceName': namespace,
}
'name': namespace,
'finalizers': [constants.KURYRNETWORK_FINALIZER],
},
'spec': spec,
'spec': {
'nsName': namespace,
'projectId': project_id,
'nsLabels': ns_labels,
}
}
try:
kubernetes.post('%s/kuryrnets' % constants.K8S_API_CRD, net_crd)
kubernetes.post('{}/{}/kuryrnetworks'.format(
constants.K8S_API_CRD_NAMESPACES, namespace), kns_crd)
except exceptions.K8sClientException:
LOG.exception("Kubernetes Client Exception creating kuryrnet "
LOG.exception("Kubernetes Client Exception creating kuryrnetwork "
"CRD.")
raise
return net_crd
def _del_kuryrnet_crd(self, net_crd_name):
    """Delete the KuryrNet CRD object named *net_crd_name*.

    A missing object is treated as already deleted (debug log only);
    any other API failure is logged and re-raised so the caller can
    roll back or retry.

    :param net_crd_name: name of the KuryrNet CRD object to delete.
    :raises exceptions.K8sClientException: on non-404 API errors.
    """
    kubernetes = clients.get_kubernetes_client()
    try:
        kubernetes.delete('%s/kuryrnets/%s' % (constants.K8S_API_CRD,
                                               net_crd_name))
    except exceptions.K8sResourceNotFound:
        # Fixed copy-paste defect: this method deletes KuryrNet (not
        # KuryrNetPolicy) objects, so log the right kind.
        LOG.debug("KuryrNet CRD not found: %s", net_crd_name)
    except exceptions.K8sClientException:
        LOG.exception("Kubernetes Client Exception deleting kuryrnet "
                      "CRD.")
        raise
def _set_namespace_labels(self, namespace, labels):
    """Persist the current namespace labels as an annotation.

    Stores a JSON-serialized, key-sorted copy of *labels* on the
    namespace object; an empty/missing label set clears the annotation.
    """
    if labels:
        annotation = jsonutils.dumps(labels, sort_keys=True)
        LOG.debug("Setting Labels annotation: %r", annotation)
    else:
        annotation = None
        LOG.debug("Removing Label annotation: %r", labels)
    k8s = clients.get_kubernetes_client()
    k8s.annotate(
        namespace['metadata']['selfLink'],
        {constants.K8S_ANNOTATION_NAMESPACE_LABEL: annotation},
        resource_version=namespace['metadata']['resourceVersion'])
def _update_services(self, services, crd_selectors, project_id):
    """Refresh load-balancer SGs on services matched by the selectors."""
    for svc in services.get('items'):
        # Skip services whose pods are unaffected by the CRD selectors.
        if driver_utils.service_matches_affected_pods(svc, crd_selectors):
            svc_sgs = self._drv_svc_sg.get_security_groups(svc, project_id)
            self._drv_lbaas.update_lbaas_sg(svc, svc_sgs)
def _is_network_policy_enabled(self):
    """Return True when both the policy handler and SG driver are active."""
    k8s_conf = oslo_cfg.CONF.kubernetes
    handler_enabled = 'policy' in k8s_conf.enabled_handlers
    driver_enabled = k8s_conf.service_security_groups_driver == 'policy'
    return handler_enabled and driver_enabled
def _cleanup_namespace_leftovers(self):
k8s = clients.get_kubernetes_client()
for i in range(DEFAULT_CLEANUP_RETRIES):
retry = False
try:
net_crds = k8s.get(constants.K8S_API_CRD_KURYRNETS)
namespaces = k8s.get(constants.K8S_API_NAMESPACES)
except exceptions.K8sClientException:
LOG.warning("Error retriving namespace information")
return
ns_dict = {'ns-' + ns['metadata']['name']: ns
for ns in namespaces.get('items')}
for net_crd in net_crds.get('items'):
try:
ns_dict[net_crd['metadata']['name']]
except KeyError:
# Note(ltomasbo): The CRD does not have an associated
# namespace. It must be deleted
LOG.debug("Removing namespace leftovers associated to: "
"%s", net_crd)
# removing the 'ns-' preceding the namespace name on the
# net CRDs
ns_name = net_crd['metadata']['name'][3:]
# only namespace name is needed for on_deleted, faking the
# nonexistent object
ns_to_delete = {'metadata': {'name': ns_name}}
try:
self.on_deleted(ns_to_delete, net_crd)
except exceptions.ResourceNotReady:
LOG.debug("Cleanup of namespace %s failed. A retry "
"will be triggered.", ns_name)
retry = True
continue
if not retry:
break
# Leave time between retries to help Neutron to complete actions
time.sleep(DEFAULT_CLEANUP_INTERVAL)
def is_ready(self, quota):
if not utils.has_kuryr_crd(constants.K8S_API_CRD_KURYRNETS):
return False
return self._check_quota(quota)
# NOTE(ltomasbo): to ensure we don't miss created network resources
# without associated kuryrnet objects, we do a second search
os_net = clients.get_network_client()
tags = oslo_cfg.CONF.neutron_defaults.resource_tags
if not tags:
return
def _check_quota(self, quota):
resources = ('subnets', 'networks', 'security_groups')
for i in range(DEFAULT_CLEANUP_RETRIES):
retry = False
subnets = os_net.subnets(tags=tags)
namespaces = k8s.get(constants.K8S_API_NAMESPACES)
ns_nets = ['ns/' + ns['metadata']['name'] + '-subnet'
for ns in namespaces.get('items')]
for subnet in subnets:
# NOTE(ltomasbo): subnet name is ns/NAMESPACE_NAME-subnet
if subnet.name not in ns_nets:
if (subnet.subnet_pool_id !=
oslo_cfg.CONF.namespace_subnet.pod_subnet_pool):
# Not a kuryr generated network
continue
try:
self._drv_subnets._delete_namespace_network_resources(
subnet.id, subnet.network_id)
except (os_exc.SDKException, exceptions.ResourceNotReady):
LOG.debug("Cleanup of network namespace resources %s "
"failed. A retry will be triggered.",
subnet.network_id)
retry = True
continue
if not retry:
break
# Leave time between retries to help Neutron to complete actions
time.sleep(DEFAULT_CLEANUP_INTERVAL)
for resource in resources:
resource_quota = quota[resource]
if utils.has_limit(resource_quota):
if not utils.is_available(resource, resource_quota):
return False
return True

8
kuryr_kubernetes/controller/handlers/policy.py

@ -145,16 +145,16 @@ class NetworkPolicyHandler(k8s_base.ResourceEventHandler):
def _get_policy_net_id(self, policy):
policy_ns = policy['metadata']['namespace']
kuryrnet_name = 'ns-' + str(policy_ns)
kubernetes = clients.get_kubernetes_client()
try:
net_crd = kubernetes.get('{}/{}'.format(
k_const.K8S_API_CRD_KURYRNETS, kuryrnet_name))
path = (f'{k_const.K8S_API_CRD_NAMESPACES}/{policy_ns}/'
f'kuryrnetworks/{policy_ns}')
net_crd = kubernetes.get(path)
except exceptions.K8sClientException:
LOG.exception("Kubernetes Client Exception.")
raise
return net_crd['spec']['netId']
return net_crd['status']['netId']
def _is_egress_only_policy(self, policy):
policy_types = policy['spec'].get('policyTypes', [])

8
kuryr_kubernetes/controller/handlers/vif.py

@ -177,10 +177,10 @@ class VIFHandler(k8s_base.ResourceEventHandler):
except k_exc.ResourceNotReady:
# NOTE(ltomasbo): If the namespace object gets deleted first the
# namespace security group driver will raise a ResourceNotReady
# exception as it cannot access anymore the kuryrnet CRD annotated
# on the namespace object. In such case we set security groups to
# empty list so that if pools are enabled they will be properly
# released.
# exception as it cannot access anymore the kuryrnetwork CRD
# annotated on the namespace object. In such case we set security
# groups to empty list so that if pools are enabled they will be
# properly released.
security_groups = []
state = driver_utils.get_pod_state(pod)

4
kuryr_kubernetes/exceptions.py

@ -34,10 +34,6 @@ class K8sResourceNotFound(K8sClientException):
"found: %r" % resource)
class InvalidKuryrNetCRD(Exception):
    """Exception for an invalid KuryrNet CRD object."""
    pass


class InvalidKuryrNetworkAnnotation(Exception):
    """Exception for an invalid kuryr network annotation."""
    pass

11
kuryr_kubernetes/handlers/k8s_base.py

@ -68,6 +68,14 @@ class ResourceEventHandler(dispatch.EventConsumer, health.HealthHandler):
event_type = event.get('type')
obj = event.get('object')
if 'MODIFIED' == event_type:
deletion_timestamp = None
try:
deletion_timestamp = obj['metadata']['deletionTimestamp']
except (KeyError, TypeError):
pass
if deletion_timestamp:
self.on_finalize(obj)
return
self.on_modified(obj)
self.on_present(obj)
elif 'ADDED' == event_type:
@ -87,3 +95,6 @@ class ResourceEventHandler(dispatch.EventConsumer, health.HealthHandler):
def on_deleted(self, obj):
    """Hook for DELETED events; no-op by default, subclasses override."""
    pass

def on_finalize(self, obj):
    """Hook for objects carrying a deletionTimestamp; no-op by default."""
    pass

31
kuryr_kubernetes/k8s_client.py

@ -109,8 +109,6 @@ class K8sClient(object):
def patch(self, field, path, data):
LOG.debug("Patch %(path)s: %(data)s", {
'path': path, 'data': data})
if field == 'status':
path = path + '/' + str(field)
content_type = 'application/merge-patch+json'
url, header = self._get_url_and_header(path, content_type)
response = self.session.patch(url, json={field: data},
@ -119,14 +117,18 @@ class K8sClient(object):
self._raise_from_response(response)
return response.json().get('status')
def patch_crd(self, field, path, data):
def patch_crd(self, field, path, data, action='replace'):
content_type = 'application/json-patch+json'
url, header = self._get_url_and_header(path, content_type)
data = [{'op': 'replace',
'path': '/{}/{}'.format(field, np_field),
'value': value}
for np_field, value in data.items()]
if action == 'remove':
data = [{'op': action,
'path': f'/{field}/{data}'}]
else:
data = [{'op': action,
'path': f'/{field}/{crd_field}',
'value': value}
for crd_field, value in data.items()]
LOG.debug("Patch %(path)s: %(data)s", {
'path': path, 'data': data})
@ -167,6 +169,21 @@ class K8sClient(object):
self._raise_from_response(response)
return response.json().get('status')
def remove_annotations(self, path, annotation_name):
    """Remove a single annotation from the resource at *path*.

    Issues an RFC 6902 JSON-Patch ``remove`` operation targeting only
    the named annotation key. The previous version removed the whole
    ``/metadata/annotations`` map (dropping unrelated annotations) and
    carried a ``value`` member, which the ``remove`` op does not take.

    :param path: API path (selfLink) of the resource to patch.
    :param annotation_name: annotation key to remove.
    :returns: the ``status`` field of the API response, if any.
    :raises exc.K8sClientException: on any non-2xx API response.
    """
    content_type = 'application/json-patch+json'
    url, header = self._get_url_and_header(path, content_type)
    # RFC 6901 JSON-Pointer token escaping: '~' -> '~0', '/' -> '~1'.
    # Needed because annotation keys (e.g. 'openstack.org/...') may
    # contain '/'.
    token = annotation_name.replace('~', '~0').replace('/', '~1')
    data = [{'op': 'remove',
             'path': '/metadata/annotations/%s' % token}]
    response = self.session.patch(url, data=jsonutils.dumps(data),
                                  headers=header, cert=self.cert,
                                  verify=self.verify_server)
    if response.ok:
        return response.json().get('status')
    raise exc.K8sClientException(response.text)
def post(self, path, body):
LOG.debug("Post %(path)s: %(body)s", {'path': path, 'body': body})
url = self._base_url + path

199
kuryr_kubernetes/tests/unit/controller/drivers/test_namespace_subnet.py

@ -19,7 +19,6 @@ import munch
from openstack import exceptions as os_exc
from oslo_config import cfg as oslo_cfg
from kuryr_kubernetes import constants
from kuryr_kubernetes.controller.drivers import namespace_subnet as subnet_drv
from kuryr_kubernetes import exceptions as k_exc
from kuryr_kubernetes.tests import base as test_base
@ -54,16 +53,6 @@ def get_pod_obj():
}}
def get_namespace_obj():
    """Build a minimal namespace dict annotated with a KuryrNet CRD ref."""
    annotations = {constants.K8S_ANNOTATION_NET_CRD: 'net_crd_url_sample'}
    return {'metadata': {'annotations': annotations}}
class TestNamespacePodSubnetDriver(test_base.TestCase):
@mock.patch('kuryr_kubernetes.utils.get_subnet')
@ -110,63 +99,27 @@ class TestNamespacePodSubnetDriver(test_base.TestCase):
namespace = mock.sentinel.namespace
subnet_id = mock.sentinel.subnet_id
ns = get_namespace_obj()
crd = {
'spec': {
'status': {
'subnetId': subnet_id
}
}
kubernetes = self.useFixture(k_fix.MockK8sClient()).client
kubernetes.get.side_effect = [ns, crd]
kubernetes.get.return_value = crd
subnet_id_resp = cls._get_namespace_subnet_id(m_driver, namespace)
kubernetes.get.assert_called()
self.assertEqual(subnet_id, subnet_id_resp)
def test__get_namespace_subnet_id_get_namespace_exception(self):
cls = subnet_drv.NamespacePodSubnetDriver
m_driver = mock.MagicMock(spec=cls)
namespace = mock.sentinel.namespace
kubernetes = self.useFixture(k_fix.MockK8sClient()).client
kubernetes.get.side_effect = k_exc.K8sClientException