Browse Source

KuryrNetworkPolicy CRD

This commit is a huge refactoring of how we handle network policies. In
general:

* KuryrNetPolicy is replaced by KuryrNetworkPolicy. The upgrade path
  is handled in the constructor of KuryrNetworkPolicyHandler.
* New CRD has spec and status properties. spec is always populated by
  NetworkPolicyHandler. status is handled by KuryrNetworkPolicyHandler.
  This means that in order to trigger SG rules recalculation on Pod and
  Service events, the NetworkPolicy is "bumped" with a dummy annotation.
* NetworkPolicyHandler injects finalizers onto NetworkPolicy and
  KuryrNetworkPolicy objects, so that objects cannot get removed before
  KuryrNetworkPolicyHandler has finished processing their deletion.

Depends-On: https://review.opendev.org/742209
Change-Id: Iafc982e590ada0cd9d82e922c103583e4304e9ce
changes/54/720454/22
Michał Dulko 6 months ago
parent
commit
a1708e1c76
26 changed files with 1286 additions and 1574 deletions
  1. +3
    -3
      .zuul.d/octavia.yaml
  2. +2
    -2
      .zuul.d/sdn.yaml
  3. +1
    -0
      devstack/lib/kuryr_kubernetes
  4. +1
    -0
      devstack/plugin.sh
  5. +26
    -35
      doc/source/devref/network_policy.rst
  6. +1
    -0
      doc/source/installation/manual.rst
  7. +26
    -79
      doc/source/installation/network_policy.rst
  8. +0
    -2
      kubernetes_crds/kuryr_crds/kuryrnetpolicy.yaml
  9. +158
    -0
      kubernetes_crds/kuryr_crds/kuryrnetworkpolicy.yaml
  10. +6
    -0
      kuryr_kubernetes/constants.py
  11. +2
    -26
      kuryr_kubernetes/controller/drivers/base.py
  12. +26
    -9
      kuryr_kubernetes/controller/drivers/lbaasv2.py
  13. +178
    -219
      kuryr_kubernetes/controller/drivers/network_policy.py
  14. +117
    -297
      kuryr_kubernetes/controller/drivers/network_policy_security_groups.py
  15. +64
    -33
      kuryr_kubernetes/controller/drivers/utils.py
  16. +0
    -37
      kuryr_kubernetes/controller/handlers/kuryrnetpolicy.py
  17. +307
    -0
      kuryr_kubernetes/controller/handlers/kuryrnetworkpolicy.py
  18. +24
    -18
      kuryr_kubernetes/controller/handlers/pod_label.py
  19. +11
    -115
      kuryr_kubernetes/controller/handlers/policy.py
  20. +103
    -248
      kuryr_kubernetes/tests/unit/controller/drivers/test_network_policy.py
  21. +75
    -245
      kuryr_kubernetes/tests/unit/controller/drivers/test_network_policy_security_groups.py
  22. +112
    -0
      kuryr_kubernetes/tests/unit/controller/handlers/test_kuryrnetworkpolicy.py
  23. +16
    -14
      kuryr_kubernetes/tests/unit/controller/handlers/test_pod_label.py
  24. +25
    -191
      kuryr_kubernetes/tests/unit/controller/handlers/test_policy.py
  25. +1
    -1
      setup.cfg
  26. +1
    -0
      tools/gate/copy_k8s_logs.sh

+ 3
- 3
.zuul.d/octavia.yaml View File

@@ -99,7 +99,7 @@
vars:
devstack_localrc:
DOCKER_CGROUP_DRIVER: "systemd"
KURYR_ENABLED_HANDLERS: vif,endpoints,service,namespace,pod_label,policy,kuryrnetpolicy,kuryrnetwork,kuryrport,kuryrloadbalancer
KURYR_ENABLED_HANDLERS: vif,endpoints,service,namespace,pod_label,policy,kuryrnetworkpolicy,kuryrnetwork,kuryrport,kuryrloadbalancer
KURYR_SG_DRIVER: policy
KURYR_SUBNET_DRIVER: namespace
devstack_services:
@@ -120,7 +120,7 @@
vars:
devstack_localrc:
KURYR_SUBNET_DRIVER: namespace
KURYR_ENABLED_HANDLERS: vif,endpoints,service,namespace,pod_label,policy,kuryrnetpolicy,kuryrnetwork,kuryrport,kuryrloadbalancer
KURYR_ENABLED_HANDLERS: vif,endpoints,service,namespace,pod_label,policy,kuryrnetworkpolicy,kuryrnetwork,kuryrport,kuryrloadbalancer
KURYR_SG_DRIVER: policy
KURYR_USE_PORT_POOLS: true
KURYR_POD_VIF_DRIVER: neutron-vif
@@ -134,7 +134,7 @@
parent: kuryr-kubernetes-tempest-containerized
vars:
devstack_localrc:
KURYR_ENABLED_HANDLERS: vif,endpoints,service,namespace,pod_label,policy,kuryrnetpolicy,kuryrnetwork,kuryrport,kuryrloadbalancer
KURYR_ENABLED_HANDLERS: vif,endpoints,service,namespace,pod_label,policy,kuryrnetworkpolicy,kuryrnetwork,kuryrport,kuryrloadbalancer
KURYR_SG_DRIVER: policy
KURYR_SUBNET_DRIVER: namespace



+ 2
- 2
.zuul.d/sdn.yaml View File

@@ -98,7 +98,7 @@
KURYR_LB_ALGORITHM: SOURCE_IP_PORT
KURYR_SUBNET_DRIVER: namespace
KURYR_SG_DRIVER: policy
KURYR_ENABLED_HANDLERS: vif,endpoints,service,namespace,pod_label,policy,kuryrnetpolicy,kuryrnetwork,kuryrport,kuryrloadbalancer
KURYR_ENABLED_HANDLERS: vif,endpoints,service,namespace,pod_label,policy,kuryrnetworkpolicy,kuryrnetwork,kuryrport,kuryrloadbalancer
voting: false

- job:
@@ -144,7 +144,7 @@
KURYR_ENFORCE_SG_RULES: false
KURYR_LB_ALGORITHM: SOURCE_IP_PORT
KURYR_HYPERKUBE_VERSION: v1.16.0
KURYR_ENABLED_HANDLERS: vif,endpoints,service,namespace,pod_label,policy,kuryrnetpolicy,kuryrnetwork,kuryrport,kuryrloadbalancer
KURYR_ENABLED_HANDLERS: vif,endpoints,service,namespace,pod_label,policy,kuryrnetworkpolicy,kuryrnetwork,kuryrport,kuryrloadbalancer
KURYR_SG_DRIVER: policy
KURYR_SUBNET_DRIVER: namespace
KURYR_K8S_CONTAINERIZED_DEPLOYMENT: true


+ 1
- 0
devstack/lib/kuryr_kubernetes View File

@@ -452,6 +452,7 @@ rules:
- kuryrnets
- kuryrnetworks
- kuryrnetpolicies
- kuryrnetworkpolicies
- kuryrloadbalancers
- kuryrports
- apiGroups: ["networking.k8s.io"]


+ 1
- 0
devstack/plugin.sh View File

@@ -975,6 +975,7 @@ function update_tempest_conf_file {
iniset $TEMPEST_CONFIG kuryr_kubernetes kuryrnetworks True
iniset $TEMPEST_CONFIG kuryr_kubernetes kuryrports True
iniset $TEMPEST_CONFIG kuryr_kubernetes kuryrloadbalancers True
iniset $TEMPEST_CONFIG kuryr_kubernetes new_kuryrnetworkpolicy_crd True
}

source $DEST/kuryr-kubernetes/devstack/lib/kuryr_kubernetes


+ 26
- 35
doc/source/devref/network_policy.rst View File

@@ -47,22 +47,22 @@ The network policy CRD has the following format:
.. code-block:: yaml

apiVersion: openstack.org/v1
kind: KuryrNetPolicy
kind: KuryrNetworkPolicy
metadata:
...
spec:
egressSgRules:
- security_group_rule:
- sgRule:
...
ingressSgRules:
- security_group_rule:
...
networkpolicy_spec:
- sgRule:
...
podSelector:
...
status:
securityGroupId: ...
securityGroupName: ...
podSelector: ...
securityGroupRules: ...

A new handler has been added to react to Network Policy events, and the existing
ones, for instance service/pod handlers, have been modified to account for the
@@ -201,26 +201,25 @@ are assumed to affect Ingress.
.. code-block:: yaml

apiVersion: openstack.org/v1
kind: KuryrNetPolicy
kind: KuryrNetworkPolicy
metadata:
name: np-default-deny
name: default-deny
namespace: default
...
spec:
egressSgRules:
- security_group_rule:
- sgRule:
description: Kuryr-Kubernetes NetPolicy SG rule
direction: egress
ethertype: IPv4
id: 60a0d59c-2102-43e0-b025-75c98b7d9315
security_group_id: 20d9b623-f1e0-449d-95c1-01624cb3e315
ingressSgRules: []
networkpolicy_spec:
...
podSelector:
...
status:
securityGroupId: 20d9b623-f1e0-449d-95c1-01624cb3e315
securityGroupName: sg-default-deny
securityGroupRules: ...
podSelector: ...


Allow traffic from pod
@@ -263,37 +262,33 @@ restriction was enforced.
.. code-block:: yaml

apiVersion: openstack.org/v1
kind: KuryrNetPolicy
kind: KuryrNetworkPolicy
metadata:
name: np-allow-monitoring-via-pod-selector
name: allow-monitoring-via-pod-selector
namespace: default
...
spec:
egressSgRules:
- security_group_rule:
- sgRule:
description: Kuryr-Kubernetes NetPolicy SG rule
direction: egress
ethertype: IPv4
id: 203a14fe-1059-4eff-93ed-a42bd957145d
security_group_id: 7f0ef8c2-4846-4d8c-952f-94a9098fff17
ingressSgRules:
- namespace: default
security_group_rule:
sgRule:
description: Kuryr-Kubernetes NetPolicy SG rule
direction: ingress
ethertype: IPv4
id: 7987c382-f2a9-47f7-b6e8-1a3a1bcb7d95
port_range_max: 8080
port_range_min: 8080
protocol: tcp
remote_ip_prefix: 10.0.1.143
security_group_id: 7f0ef8c2-4846-4d8c-952f-94a9098fff17
networkpolicy_spec:
...
podSelector:
...
status:
securityGroupId: 7f0ef8c2-4846-4d8c-952f-94a9098fff17
securityGroupName: sg-allow-monitoring-via-pod-selector
securityGroupRules: ...
podSelector: ...


Allow traffic from namespace
@@ -337,36 +332,32 @@ egress rule allowing traffic to everywhere.
.. code-block:: yaml

apiVersion: openstack.org/v1
kind: KuryrNetPolicy
name: np-allow-test-via-ns-selector
kind: KuryrNetworkPolicy
name: allow-test-via-ns-selector
namespace: default
...
spec:
egressSgRules:
- security_group_rule:
- sgRule:
description: Kuryr-Kubernetes NetPolicy SG rule
direction: egress
ethertype: IPv4
id: 8c21bf42-c8b9-4628-b0a1-bd0dbb192e6b
security_group_id: c480327c-2db4-4eb6-af1e-eeb0ce9b46c9
ingressSgRules:
- namespace: dev
security_group_rule:
sgRule:
description: Kuryr-Kubernetes NetPolicy SG rule
direction: ingress
ethertype: IPv4
id: 2a33b802-56ad-430a-801d-690f653198ef
port_range_max: 8080
port_range_min: 8080
protocol: tcp
remote_ip_prefix: 10.0.1.192/26
security_group_id: c480327c-2db4-4eb6-af1e-eeb0ce9b46c9
networkpolicy_spec:
...
podSelector:
...
status:
securityGroupId: c480327c-2db4-4eb6-af1e-eeb0ce9b46c9
securityGroupName: sg-allow-test-via-ns-selector
securityGroupRules: ...
podSelector: ...

.. note::



+ 1
- 0
doc/source/installation/manual.rst View File

@@ -95,6 +95,7 @@ Edit ``kuryr.conf``:
- kuryrnets
- kuryrnetworks
- kuryrnetpolicies
- kuryrnetworkpolicies
- kuryrloadbalancers
- apiGroups: ["networking.k8s.io"]
resources:


+ 26
- 79
doc/source/installation/network_policy.rst View File

@@ -10,7 +10,7 @@ be found at :doc:`./devstack/containerized`):
.. code-block:: ini

[kubernetes]
enabled_handlers=vif,lb,lbaasspec,policy,pod_label,namespace,kuryrnetwork,kuryrnetpolicy
enabled_handlers=vif,lb,lbaasspec,policy,pod_label,namespace,kuryrnetwork,kuryrnetworkpolicy

Note that if you also want to enable prepopulation of ports pools upon new
namespace creation, you need to also add the kuryrnetwork_population handler
@@ -19,7 +19,7 @@ namespace creation, you need to also dd the kuryrnetwork_population handler
.. code-block:: ini

[kubernetes]
enabled_handlers=vif,lb,lbaasspec,policy,pod_label,namespace,kuryrnetpolicy,kuryrnetwork,kuryrnetwork_population
enabled_handlers=vif,lb,lbaasspec,policy,pod_label,namespace,kuryrnetworkpolicy,kuryrnetwork,kuryrnetwork_population

After that, enable also the security group drivers for policies:

@@ -82,7 +82,7 @@ to add the policy, pod_label and namespace handler and drivers with:

.. code-block:: bash

KURYR_ENABLED_HANDLERS=vif,lb,lbaasspec,policy,pod_label,namespace,kuryrnetpolicy
KURYR_ENABLED_HANDLERS=vif,lb,lbaasspec,policy,pod_label,namespace,kuryrnetworkpolicy
KURYR_SG_DRIVER=policy
KURYR_SUBNET_DRIVER=namespace

@@ -143,9 +143,9 @@ Testing the network policy support functionality

.. code-block:: console

$ kubectl get kuryrnetpolicies
$ kubectl get kuryrnetworkpolicies
NAME AGE
np-test-network-policy 2s
test-network-policy 2s

$ kubectl get networkpolicies
NAME POD-SELECTOR AGE
@@ -158,69 +158,42 @@ Testing the network policy support functionality

.. code-block:: console

$ kubectl get kuryrnetpolicy np-test-network-policy -o yaml
$ kubectl get kuryrnetworkpolicy test-network-policy -o yaml

apiVersion: openstack.org/v1
kind: KuryrNetPolicy
kind: KuryrNetworkPolicy
metadata:
annotations:
networkpolicy_name: test-network-policy
networkpolicy_namespace: default
networkpolicy_uid: aee1c59f-c634-11e8-b63d-002564fdd760
networkPolicyLink: <link>
clusterName: ""
creationTimestamp: 2018-10-02T11:17:02Z
generation: 0
name: np-test-network-policy
name: test-network-policy
namespace: default
resourceVersion: "2117"
selfLink: /apis/openstack.org/v1/namespaces/default/kuryrnetpolicies/np-test-network-policy
selfLink: /apis/openstack.org/v1/namespaces/default/kuryrnetworkpolicies/test-network-policy
uid: afb99326-c634-11e8-b63d-002564fdd760
spec:
egressSgRules:
- security_group_rule:
- sgRule:
description: Kuryr-Kubernetes NetPolicy SG rule
direction: egress
ethertype: IPv4
id: 6297c198-b385-44f3-8b43-29951f933a8f
port_range_max: 5978
port_range_min: 5978
protocol: tcp
security_group_id: cdee7815-3b49-4a3e-abc8-31e384ab75c5
ingressSgRules:
- security_group_rule:
- sgRule:
description: Kuryr-Kubernetes NetPolicy SG rule
direction: ingress
ethertype: IPv4
id: f4e11e73-81c6-4c1b-9760-714eedff417b
port_range_max: 6379
port_range_min: 6379
protocol: tcp
security_group_id: cdee7815-3b49-4a3e-abc8-31e384ab75c5
status:
securityGroupId: cdee7815-3b49-4a3e-abc8-31e384ab75c5
securityGroupName: sg-test-network-policy
networkpolicy_spec:
egress:
- to:
- namespaceSelector:
matchLabels:
project: default
ports:
- port: 5978
protocol: TCP
ingress:
- from:
- namespaceSelector:
matchLabels:
project: default
ports:
- port: 6379
protocol: TCP
podSelector:
matchLabels:
project: default
policyTypes:
- Ingress
- Egress
securityGroupRules:

$ openstack security group rule list sg-test-network-policy --protocol tcp -c "IP Protocol" -c "Port Range" -c "Direction" --long
+-------------+------------+-----------+
@@ -273,67 +246,41 @@ Testing the network policy support functionality
$ kubectl patch networkpolicy test-network-policy -p '{"spec":{"ingress":[{"ports":[{"port": 8080,"protocol": "TCP"}]}]}}'
networkpolicy "test-network-policy" patched

$ kubectl get knp np-test-network-policy -o yaml
$ kubectl get knp test-network-policy -o yaml
apiVersion: openstack.org/v1
kind: KuryrNetPolicy
kind: KuryrNetworkPolicy
metadata:
annotations:
networkpolicy_name: test-network-policy
networkpolicy_namespace: default
networkpolicy_uid: aee1c59f-c634-11e8-b63d-002564fdd760
networkPolicyLink: <link>
clusterName: ""
creationTimestamp: 2018-10-02T11:17:02Z
generation: 0
name: np-test-network-policy
name: test-network-policy
namespace: default
resourceVersion: "1546"
selfLink: /apis/openstack.org/v1/namespaces/default/kuryrnetpolicies/np-test-network-policy
selfLink: /apis/openstack.org/v1/namespaces/default/kuryrnetworkpolicies/test-network-policy
uid: afb99326-c634-11e8-b63d-002564fdd760
spec:
egressSgRules:
- security_group_rule:
- sgRule:
description: Kuryr-Kubernetes NetPolicy SG rule
direction: egress
ethertype: IPv4
id: 1969a0b3-55e1-43d7-ba16-005b4ed4cbb7
port_range_max: 5978
port_range_min: 5978
protocol: tcp
security_group_id: cdee7815-3b49-4a3e-abc8-31e384ab75c5
ingressSgRules:
- security_group_rule:
- sgRule:
description: Kuryr-Kubernetes NetPolicy SG rule
direction: ingress
ethertype: IPv4
id: 6598aa1f-4f94-4fb2-81ce-d3649ba28f33
port_range_max: 8080
port_range_min: 8080
protocol: tcp
security_group_id: cdee7815-3b49-4a3e-abc8-31e384ab75c5
status:
securityGroupId: cdee7815-3b49-4a3e-abc8-31e384ab75c5
networkpolicy_spec:
egress:
- ports:
- port: 5978
protocol: TCP
to:
- namespaceSelector:
matchLabels:
project: default
ingress:
- ports:
- port: 8080
protocol: TCP
from:
- namespaceSelector:
matchLabels:
project: default
podSelector:
matchLabels:
project: default
policyTypes:
- Ingress
- Egress
securityGroupRules:

$ openstack security group rule list sg-test-network-policy -c "IP Protocol" -c "Port Range" -c "Direction" --long
+-------------+------------+-----------+
@@ -388,6 +335,6 @@ Testing the network policy support functionality
.. code-block:: console

$ kubectl delete -f network_policy.yml
$ kubectl get kuryrnetpolicies
$ kubectl get kuryrnetworkpolicies
$ kubectl get networkpolicies
$ openstack security group list | grep sg-test-network-policy

+ 0
- 2
kubernetes_crds/kuryr_crds/kuryrnetpolicy.yaml View File

@@ -9,8 +9,6 @@ spec:
plural: kuryrnetpolicies
singular: kuryrnetpolicy
kind: KuryrNetPolicy
shortNames:
- knp
versions:
- name: v1
served: true


+ 158
- 0
kubernetes_crds/kuryr_crds/kuryrnetworkpolicy.yaml View File

@@ -0,0 +1,158 @@
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: kuryrnetworkpolicies.openstack.org
spec:
group: openstack.org
scope: Namespaced
names:
plural: kuryrnetworkpolicies
singular: kuryrnetworkpolicy
kind: KuryrNetworkPolicy
shortNames:
- knp
versions:
- name: v1
served: true
storage: true
additionalPrinterColumns:
- name: SG-ID
type: string
description: The ID of the SG associated to the policy
jsonPath: .status.securityGroupId
- name: Age
type: date
jsonPath: .metadata.creationTimestamp
schema:
openAPIV3Schema:
type: object
required:
- status
- spec
properties:
spec:
type: object
required:
- egressSgRules
- ingressSgRules
- podSelector
- policyTypes
properties:
egressSgRules:
type: array
items:
type: object
required:
- sgRule
properties:
affectedPods:
type: array
items:
type: object
properties:
podIP:
type: string
podNamespace:
type: string
required:
- podIP
- podNamespace
namespace:
type: string
sgRule:
type: object
properties:
description:
type: string
direction:
type: string
ethertype:
type: string
port_range_max:
type: integer
port_range_min:
type: integer
protocol:
type: string
remote_ip_prefix:
type: string
ingressSgRules:
type: array
items:
type: object
required:
- sgRule
properties:
affectedPods:
type: array
items:
type: object
properties:
podIP:
type: string
podNamespace:
type: string
required:
- podIP
- podNamespace
namespace:
type: string
sgRule:
type: object
properties:
description:
type: string
direction:
type: string
ethertype:
type: string
port_range_max:
type: integer
port_range_min:
type: integer
protocol:
type: string
remote_ip_prefix:
type: string
podSelector:
x-kubernetes-preserve-unknown-fields: true
type: object
policyTypes:
type: array
items:
type: string
status:
type: object
required:
- securityGroupRules
properties:
securityGroupId:
type: string
securityGroupRules:
type: array
items:
type: object
required:
- id
properties:
id:
type: string
description:
type: string
direction:
type: string
ethertype:
type: string
port_range_max:
type: integer
port_range_min:
type: integer
protocol:
type: string
remote_ip_prefix:
type: string
security_group_id:
type: string
podSelector:
x-kubernetes-preserve-unknown-fields: true
type: object

+ 6
- 0
kuryr_kubernetes/constants.py View File

@@ -23,9 +23,11 @@ K8S_API_CRD_NAMESPACES = K8S_API_CRD + '/namespaces'
K8S_API_CRD_KURYRNETS = K8S_API_CRD + '/kuryrnets'
K8S_API_CRD_KURYRNETWORKS = K8S_API_CRD + '/kuryrnetworks'
K8S_API_CRD_KURYRNETPOLICIES = K8S_API_CRD + '/kuryrnetpolicies'
K8S_API_CRD_KURYRNETWORKPOLICIES = K8S_API_CRD + '/kuryrnetworkpolicies'
K8S_API_CRD_KURYRLOADBALANCERS = K8S_API_CRD + '/kuryrloadbalancers'
K8S_API_CRD_KURYRPORTS = K8S_API_CRD + '/kuryrports'
K8S_API_POLICIES = '/apis/networking.k8s.io/v1/networkpolicies'
K8S_API_NETWORKING = '/apis/networking.k8s.io/v1'

K8S_API_NPWG_CRD = '/apis/k8s.cni.cncf.io/v1'

@@ -37,6 +39,7 @@ K8S_OBJ_POLICY = 'NetworkPolicy'
K8S_OBJ_KURYRNET = 'KuryrNet'
K8S_OBJ_KURYRNETWORK = 'KuryrNetwork'
K8S_OBJ_KURYRNETPOLICY = 'KuryrNetPolicy'
K8S_OBJ_KURYRNETWORKPOLICY = 'KuryrNetworkPolicy'
K8S_OBJ_KURYRLOADBALANCER = 'KuryrLoadBalancer'
K8S_OBJ_KURYRPORT = 'KuryrPort'

@@ -47,11 +50,13 @@ K8S_POD_STATUS_FAILED = 'Failed'
K8S_ANNOTATION_PREFIX = 'openstack.org/kuryr'
K8S_ANNOTATION_VIF = K8S_ANNOTATION_PREFIX + '-vif'
K8S_ANNOTATION_LABEL = K8S_ANNOTATION_PREFIX + '-pod-label'
K8S_ANNOTATION_IP = K8S_ANNOTATION_PREFIX + '-pod-ip'
K8S_ANNOTATION_NAMESPACE_LABEL = K8S_ANNOTATION_PREFIX + '-namespace-label'
K8S_ANNOTATION_LBAAS_SPEC = K8S_ANNOTATION_PREFIX + '-lbaas-spec'
K8S_ANNOTATION_LBAAS_STATE = K8S_ANNOTATION_PREFIX + '-lbaas-state'
K8S_ANNOTATION_NET_CRD = K8S_ANNOTATION_PREFIX + '-net-crd'
K8S_ANNOTATION_NETPOLICY_CRD = K8S_ANNOTATION_PREFIX + '-netpolicy-crd'
K8S_ANNOTATION_POLICY = K8S_ANNOTATION_PREFIX + '-counter'

K8S_ANNOTATION_NPWG_PREFIX = 'k8s.v1.cni.cncf.io'
K8S_ANNOTATION_NPWG_NETWORK = K8S_ANNOTATION_NPWG_PREFIX + '/networks'
@@ -68,6 +73,7 @@ POD_FINALIZER = KURYR_FQDN + '/pod-finalizer'
KURYRNETWORK_FINALIZER = 'kuryrnetwork.finalizers.kuryr.openstack.org'
KURYRLB_FINALIZER = 'kuryr.openstack.org/kuryrloadbalancer-finalizers'
SERVICE_FINALIZER = 'kuryr.openstack.org/service-finalizer'
NETWORKPOLICY_FINALIZER = 'kuryr.openstack.org/networkpolicy-finalizer'

KURYRPORT_FINALIZER = KURYR_FQDN + '/kuryrport-finalizer'
KURYRPORT_LABEL = KURYR_FQDN + '/nodeName'


+ 2
- 26
kuryr_kubernetes/controller/drivers/base.py View File

@@ -697,13 +697,10 @@ class NetworkPolicyDriver(DriverBase, metaclass=abc.ABCMeta):
ALIAS = 'network_policy'

@abc.abstractmethod
def ensure_network_policy(self, policy, project_id):
def ensure_network_policy(self, policy):
"""Policy created or updated

:param policy: dict containing Kubernetes NP object
:param project_id: openstack project_id
:returns: list of Pod objects affected by the network policy
creation or its podSelector modification
"""
raise NotImplementedError()

@@ -711,7 +708,7 @@ class NetworkPolicyDriver(DriverBase, metaclass=abc.ABCMeta):
def release_network_policy(self, kuryrnetpolicy):
"""Delete a network policy

:param kuryrnetpolicy: dict containing Kuryrnetpolicy CRD object
:param kuryrnetpolicy: dict containing NetworkPolicy object
"""
raise NotImplementedError()

@@ -729,18 +726,6 @@ class NetworkPolicyDriver(DriverBase, metaclass=abc.ABCMeta):
"""
raise NotImplementedError()

@abc.abstractmethod
def knps_on_namespace(self, namespace):
"""Check if there si kuryr network policy CRDs on the namespace

This method returns true if there are knps on the specified namespace
or false otherwise

:param namespace: namespace name where the knps CRDs should be
:returns: true if knps CRDs on the namespace, false otherwise
"""
raise NotImplementedError()

@abc.abstractmethod
def namespaced_pods(self, policy):
"""Return pods on the policy namespace
@@ -752,15 +737,6 @@ class NetworkPolicyDriver(DriverBase, metaclass=abc.ABCMeta):
"""
raise NotImplementedError()

@abc.abstractmethod
def get_kuryrnetpolicy_crd(self, policy):
"""Return kuryrnetpolicy CRD object associated to the policy

:param policy: dict containing Kubernetes NP object
:returns: kuryrnetpolicy CRD object associated to the policy
"""
raise NotImplementedError()


class NetworkPolicyProjectDriver(DriverBase, metaclass=abc.ABCMeta):
"""Get an OpenStack project id for K8s network policies"""


+ 26
- 9
kuryr_kubernetes/controller/drivers/lbaasv2.py View File

@@ -749,24 +749,41 @@ class LBaaSv2Driver(base.LBaaSDriver):
endpoints_link = utils.get_endpoints_link(service)
k8s = clients.get_kubernetes_client()
try:
endpoint = k8s.get(endpoints_link)
k8s.get(endpoints_link)
except k_exc.K8sResourceNotFound:
LOG.debug("Endpoint not Found. Skipping LB SG update for "
"%s as the LB resources are not present", lbaas_name)
return

lbaas = utils.get_lbaas_state(endpoint)
if not lbaas:
LOG.debug('Endpoint not yet annotated with lbaas state.')
try:
klb = k8s.get(f'{k_const.K8S_API_CRD_NAMESPACES}/{svc_namespace}/'
f'kuryrloadbalancers/{svc_name}')
except k_exc.K8sResourceNotFound:
LOG.debug('No KuryrLoadBalancer for service %s created yet.',
lbaas_name)
raise k_exc.ResourceNotReady(svc_name)

if (not klb.get('status', {}).get('loadbalancer') or
klb.get('status', {}).get('listeners') is None):
LOG.debug('KuryrLoadBalancer for service %s not populated yet.',
lbaas_name)
raise k_exc.ResourceNotReady(svc_name)

lbaas_obj = lbaas.loadbalancer
lbaas_obj.security_groups = sgs
klb['status']['loadbalancer']['security_groups'] = sgs

utils.set_lbaas_state(endpoint, lbaas)
lb = klb['status']['loadbalancer']
try:
k8s.patch_crd('status/loadbalancer', klb['metadata']['selfLink'],
{'security_groups': sgs})
except k_exc.K8sResourceNotFound:
LOG.debug('KuryrLoadBalancer CRD not found %s', lbaas_name)
return
except k_exc.K8sClientException:
LOG.exception('Error updating KuryrLoadBalancer CRD %s', lbaas_name)
raise

lsnr_ids = {(listener['protocol'], listener['port']): listener['id']
for listener in lbaas.listeners}
for listener in klb['status']['listeners']}

for port in svc_ports:
port_protocol = port['protocol']
@@ -779,6 +796,6 @@ class LBaaSv2Driver(base.LBaaSDriver):
"%s and port %s. Skipping", port_protocol,
lbaas_port)
continue
self._apply_members_security_groups(lbaas_obj, lbaas_port,
self._apply_members_security_groups(lb, lbaas_port,
target_port, port_protocol,
sg_rule_name, listener_id, sgs)

+ 178
- 219
kuryr_kubernetes/controller/drivers/network_policy.py View File

@@ -38,91 +38,94 @@ class NetworkPolicyDriver(base.NetworkPolicyDriver):
self.os_net = clients.get_network_client()
self.kubernetes = clients.get_kubernetes_client()

def ensure_network_policy(self, policy, project_id):
def ensure_network_policy(self, policy):
"""Create security group rules out of network policies

Triggered by events from network policies, this method ensures that
security groups and security group rules are created or updated in
reaction to kubernetes network policies events.

In addition it returns the pods affected by the policy:
- Creation: pods on the namespace of the created policy
- Update: pods that needs to be updated in case of PodSelector
modification, i.e., the pods that were affected by the previous
PodSelector
KuryrNetworkPolicy object is created with the security group rules
definitions required to represent the NetworkPolicy.
"""
LOG.debug("Creating network policy %s", policy['metadata']['name'])

if self.get_kuryrnetpolicy_crd(policy):
previous_selector = (
self.update_security_group_rules_from_network_policy(policy))
if previous_selector or previous_selector == {}:
return self.affected_pods(policy, previous_selector)
if previous_selector is None:
return self.namespaced_pods(policy)
i_rules, e_rules = self._get_security_group_rules_from_network_policy(
policy)

knp = self._get_knp_crd(policy)
if not knp:
self._create_knp_crd(policy, i_rules, e_rules)
else:
self.create_security_group_rules_from_network_policy(policy,
project_id)
self._patch_knp_crd(policy, i_rules, e_rules, knp)

def _convert_old_sg_rule(self, rule):
del rule['security_group_rule']['id']
del rule['security_group_rule']['security_group_id']
result = {
'sgRule': rule['security_group_rule'],
}

if 'namespace' in rule:
result['namespace'] = rule['namespace']

if 'remote_ip_prefixes' in rule:
result['affectedPods'] = []
for ip, namespace in rule['remote_ip_prefixes']:
result['affectedPods'].append({
'podIP': ip,
'podNamespace': namespace,
})

return result

def get_from_old_crd(self, netpolicy):
name = netpolicy['metadata']['name'][3:] # Remove 'np-'
namespace = netpolicy['metadata']['namespace']
link = (f'{constants.K8S_API_NETWORKING}/namespaces/{namespace}/'
f'networkpolicies/{name}')
knp = {
'apiVersion': constants.K8S_API_CRD_VERSION,
'kind': constants.K8S_OBJ_KURYRNETWORKPOLICY,
'metadata': {
'namespace': namespace,
'name': name,
'annotations': {
'networkPolicyLink': link,
},
'finalizers': [constants.NETWORKPOLICY_FINALIZER],
},
'spec': {
'podSelector':
netpolicy['spec']['networkpolicy_spec']['podSelector'],
'egressSgRules': [self._convert_old_sg_rule(r) for r in
netpolicy['spec']['egressSgRules']],
'ingressSgRules': [self._convert_old_sg_rule(r) for r in
netpolicy['spec']['ingressSgRules']],
'policyTypes':
netpolicy['spec']['networkpolicy_spec']['policyTypes'],
},
'status': {
'podSelector': netpolicy['spec']['podSelector'],
'securityGroupId': netpolicy['spec']['securityGroupId'],
# We'll just let KuryrNetworkPolicyHandler figure out if rules
# are created on its own.
'securityGroupRules': [],
},
}

return knp

def update_security_group_rules_from_network_policy(self, policy):
"""Update security group rules
def _get_security_group_rules_from_network_policy(self, policy):
"""Get security group rules required to represent an NP

This method updates security group rules based on CRUD events gotten
from a configuration or patch to an existing network policy
This method creates the security group rules bodies coming out of a
network policies' parsing.
"""
crd = self.get_kuryrnetpolicy_crd(policy)
crd_name = crd['metadata']['name']
LOG.debug("Already existing CRD %s", crd_name)
sg_id = crd['spec']['securityGroupId']
# Fetch existing SG rules from kuryrnetpolicy CRD
existing_sg_rules = []
existing_i_rules = crd['spec'].get('ingressSgRules')
existing_e_rules = crd['spec'].get('egressSgRules')
if existing_i_rules or existing_e_rules:
existing_sg_rules = existing_i_rules + existing_e_rules
existing_pod_selector = crd['spec'].get('podSelector')
# Parse network policy update and get new ruleset
i_rules, e_rules = self.parse_network_policy_rules(policy, sg_id)
current_sg_rules = i_rules + e_rules
# Get existing security group rules ids
sgr_ids = [x['security_group_rule'].pop('id') for x in
existing_sg_rules]
# SG rules that are meant to be kept get their id back
sg_rules_to_keep = [existing_sg_rules.index(rule) for rule in
existing_sg_rules if rule in current_sg_rules]
for sg_rule in sg_rules_to_keep:
sgr_id = sgr_ids[sg_rule]
existing_sg_rules[sg_rule]['security_group_rule']['id'] = sgr_id
# Delete SG rules that are no longer in the updated policy
sg_rules_to_delete = [existing_sg_rules.index(rule) for rule in
existing_sg_rules if rule not in
current_sg_rules]
for sg_rule in sg_rules_to_delete:
driver_utils.delete_security_group_rule(sgr_ids[sg_rule])
# Create new rules that weren't already on the security group
sg_rules_to_add = [rule for rule in current_sg_rules if rule not in
existing_sg_rules]
for sg_rule in sg_rules_to_add:
sgr_id = driver_utils.create_security_group_rule(sg_rule)
if sg_rule['security_group_rule'].get('direction') == 'ingress':
for i_rule in i_rules:
if sg_rule == i_rule:
i_rule["security_group_rule"]["id"] = sgr_id
else:
for e_rule in e_rules:
if sg_rule == e_rule:
e_rule["security_group_rule"]["id"] = sgr_id
# Annotate kuryrnetpolicy CRD with current policy and ruleset
pod_selector = policy['spec'].get('podSelector')
driver_utils.patch_kuryrnetworkpolicy_crd(crd, i_rules, e_rules,
pod_selector,
np_spec=policy['spec'])
i_rules, e_rules = self.parse_network_policy_rules(policy)
# Add default rules to allow traffic from host and svc subnet
i_rules += self._get_default_np_rules()

if existing_pod_selector != pod_selector:
return existing_pod_selector
return False
return i_rules, e_rules

def _add_default_np_rules(self, sg_id):
def _get_default_np_rules(self):
"""Return extra SG rules to allow traffic from svcs and host.

This method adds the base security group rules for the NP security
@@ -130,6 +133,7 @@ class NetworkPolicyDriver(base.NetworkPolicyDriver):
- Ensure traffic is allowed from the services subnet
- Ensure traffic is allowed from the host
"""
rules = []
default_cidrs = []
if CONF.octavia_defaults.enforce_sg_rules:
default_cidrs.append(utils.get_subnet_cidr(
@@ -141,27 +145,21 @@ class NetworkPolicyDriver(base.NetworkPolicyDriver):
ethertype = constants.IPv4
if ipaddress.ip_network(cidr).version == constants.IP_VERSION_6:
ethertype = constants.IPv6
default_rule = {
'security_group_rule': {
rules.append({
'sgRule': {
'ethertype': ethertype,
'security_group_id': sg_id,
'direction': 'ingress',
'description': 'Kuryr-Kubernetes NetPolicy SG rule',
'remote_ip_prefix': cidr
}}
driver_utils.create_security_group_rule(default_rule)
'remote_ip_prefix': cidr,
}})

def create_security_group_rules_from_network_policy(self, policy,
project_id):
"""Create initial security group and rules
return rules

This method creates the initial security group for hosting security
group rules coming out of network policies' parsing.
"""
sg_name = ("sg-" + policy['metadata']['namespace'] + "-" +
policy['metadata']['name'])
desc = "Kuryr-Kubernetes NetPolicy SG"
sg = None
def create_security_group(self, knp, project_id):
sg_name = ("sg-" + knp['metadata']['namespace'] + "-" +
knp['metadata']['name'])
desc = ("Kuryr-Kubernetes Network Policy %s SG" %
utils.get_res_unique_name(knp))
try:
# Create initial security group
sg = self.os_net.create_security_group(name=sg_name,
@@ -176,46 +174,14 @@ class NetworkPolicyDriver(base.NetworkPolicyDriver):
# rules just after creation.
for sgr in sg.security_group_rules:
self.os_net.delete_security_group_rule(sgr['id'])

i_rules, e_rules = self.parse_network_policy_rules(policy, sg.id)
for i_rule in i_rules:
sgr_id = driver_utils.create_security_group_rule(i_rule)
i_rule['security_group_rule']['id'] = sgr_id

for e_rule in e_rules:
sgr_id = driver_utils.create_security_group_rule(e_rule)
e_rule['security_group_rule']['id'] = sgr_id

# Add default rules to allow traffic from host and svc subnet
self._add_default_np_rules(sg.id)
except (os_exc.SDKException, exceptions.ResourceNotReady):
LOG.exception("Error creating security group for network policy "
" %s", policy['metadata']['name'])
# If there's any issue creating sg rules, remove them
if sg:
self.os_net.delete_security_group(sg.id)
" %s", knp['metadata']['name'])
raise

try:
self._add_kuryrnetpolicy_crd(policy, project_id, sg.id, i_rules,
e_rules)
except exceptions.K8sClientException:
LOG.exception("Rolling back security groups")
# Same with CRD creation
self.os_net.delete_security_group(sg.id)
raise

try:
crd = self.get_kuryrnetpolicy_crd(policy)
self.kubernetes.annotate(policy['metadata']['selfLink'],
{"kuryrnetpolicy_selfLink":
crd['metadata']['selfLink']})
except exceptions.K8sClientException:
LOG.exception('Error annotating network policy')
raise
return sg.id

def _get_pods(self, pod_selector, namespace=None,
namespace_selector=None):
def _get_pods(self, pod_selector, namespace=None, namespace_selector=None):
matching_pods = {"items": []}
if namespace_selector:
matching_namespaces = driver_utils.get_namespaces(
@@ -232,7 +198,6 @@ class NetworkPolicyDriver(base.NetworkPolicyDriver):
if not namespace_selector and namespace:
matching_namespaces.append(self.kubernetes.get(
'{}/namespaces/{}'.format(constants.K8S_API_BASE, namespace)))

else:
matching_namespaces.extend(driver_utils.get_namespaces(
namespace_selector).get('items'))
@@ -285,7 +250,7 @@ class NetworkPolicyDriver(base.NetworkPolicyDriver):

def _create_sg_rules_with_container_ports(
self, container_ports, allow_all, resource, matched_pods,
crd_rules, sg_id, direction, port, pod_selector=None,
crd_rules, direction, port, pod_selector=None,
policy_namespace=None):
cidr, ns = self._get_resource_details(resource)
for pod, container_port in container_ports:
@@ -308,18 +273,18 @@ class NetworkPolicyDriver(base.NetworkPolicyDriver):
if not allow_all and matched_pods and cidr:
for container_port, pods in matched_pods.items():
sg_rule = driver_utils.create_security_group_rule_body(
sg_id, direction, container_port,
direction, container_port,
protocol=port.get('protocol'),
cidr=cidr, pods=pods)
if sg_rule not in crd_rules:
crd_rules.append(sg_rule)
if direction == 'egress':
self._create_svc_egress_sg_rule(
sg_id, policy_namespace, crd_rules,
policy_namespace, crd_rules,
resource=resource, port=container_port,
protocol=port.get('protocol'))

def _create_sg_rule_body_on_text_port(self, sg_id, direction, port,
def _create_sg_rule_body_on_text_port(self, direction, port,
resources, crd_rules, pod_selector,
policy_namespace, allow_all=False):
"""Create SG rules when named port is used in the NP rule
@@ -352,7 +317,7 @@ class NetworkPolicyDriver(base.NetworkPolicyDriver):
for resource in resources:
self._create_sg_rules_with_container_ports(
container_ports, allow_all, resource, matched_pods,
crd_rules, sg_id, direction, port)
crd_rules, direction, port)
elif direction == "egress":
for resource in resources:
# NOTE(maysams) Skipping objects that refers to ipblocks
@@ -364,24 +329,24 @@ class NetworkPolicyDriver(base.NetworkPolicyDriver):
container_ports = driver_utils.get_ports(resource, port)
self._create_sg_rules_with_container_ports(
container_ports, allow_all, resource, matched_pods,
crd_rules, sg_id, direction, port, pod_selector,
crd_rules, direction, port, pod_selector,
policy_namespace)
if allow_all:
container_port = None
for container_port, pods in matched_pods.items():
for ethertype in (constants.IPv4, constants.IPv6):
sg_rule = driver_utils.create_security_group_rule_body(
sg_id, direction, container_port,
direction, container_port,
protocol=port.get('protocol'),
ethertype=ethertype,
pods=pods)
crd_rules.append(sg_rule)
if direction == 'egress':
self._create_svc_egress_sg_rule(
sg_id, policy_namespace, crd_rules,
policy_namespace, crd_rules,
port=container_port, protocol=port.get('protocol'))

def _create_sg_rule_on_number_port(self, allowed_resources, sg_id,
def _create_sg_rule_on_number_port(self, allowed_resources,
direction, port, sg_rule_body_list,
policy_namespace):
for resource in allowed_resources:
@@ -393,52 +358,51 @@ class NetworkPolicyDriver(base.NetworkPolicyDriver):
continue
sg_rule = (
driver_utils.create_security_group_rule_body(
sg_id, direction, port.get('port'),
direction, port.get('port'),
protocol=port.get('protocol'),
cidr=cidr,
namespace=ns))
sg_rule_body_list.append(sg_rule)
if direction == 'egress':
self._create_svc_egress_sg_rule(
sg_id, policy_namespace, sg_rule_body_list,
policy_namespace, sg_rule_body_list,
resource=resource, port=port.get('port'),
protocol=port.get('protocol'))

def _create_all_pods_sg_rules(self, port, sg_id, direction,
def _create_all_pods_sg_rules(self, port, direction,
sg_rule_body_list, pod_selector,
policy_namespace):
if type(port.get('port')) is not int:
all_pods = driver_utils.get_namespaced_pods().get('items')
self._create_sg_rule_body_on_text_port(
sg_id, direction, port, all_pods,
direction, port, all_pods,
sg_rule_body_list, pod_selector, policy_namespace,
allow_all=True)
else:
for ethertype in (constants.IPv4, constants.IPv6):
sg_rule = (
driver_utils.create_security_group_rule_body(
sg_id, direction, port.get('port'),
direction, port.get('port'),
ethertype=ethertype,
protocol=port.get('protocol')))
sg_rule_body_list.append(sg_rule)
if direction == 'egress':
self._create_svc_egress_sg_rule(
sg_id, policy_namespace, sg_rule_body_list,
policy_namespace, sg_rule_body_list,
port=port.get('port'),
protocol=port.get('protocol'))

def _create_default_sg_rule(self, sg_id, direction, sg_rule_body_list):
def _create_default_sg_rule(self, direction, sg_rule_body_list):
for ethertype in (constants.IPv4, constants.IPv6):
default_rule = {
'security_group_rule': {
'sgRule': {
'ethertype': ethertype,
'security_group_id': sg_id,
'direction': direction,
'description': 'Kuryr-Kubernetes NetPolicy SG rule',
}}
sg_rule_body_list.append(default_rule)

def _parse_sg_rules(self, sg_rule_body_list, direction, policy, sg_id):
def _parse_sg_rules(self, sg_rule_body_list, direction, policy):
"""Parse policy into security group rules.

This method inspects the policy object and create the equivalent
@@ -460,16 +424,14 @@ class NetworkPolicyDriver(base.NetworkPolicyDriver):
# traffic as NP policy is not affecting ingress
LOG.debug('Applying default all open for ingress for '
'policy %s', policy['metadata']['selfLink'])
self._create_default_sg_rule(
sg_id, direction, sg_rule_body_list)
self._create_default_sg_rule(direction, sg_rule_body_list)
elif direction == 'egress':
if policy_types and 'Egress' not in policy_types:
# NOTE(ltomasbo): add default rule to enable all egress
# traffic as NP policy is not affecting egress
LOG.debug('Applying default all open for egress for '
'policy %s', policy['metadata']['selfLink'])
self._create_default_sg_rule(
sg_id, direction, sg_rule_body_list)
self._create_default_sg_rule(direction, sg_rule_body_list)
else:
LOG.warning('Not supported policyType at network policy %s',
policy['metadata']['selfLink'])
@@ -487,7 +449,7 @@ class NetworkPolicyDriver(base.NetworkPolicyDriver):
policy['metadata']['selfLink'])
for ethertype in (constants.IPv4, constants.IPv6):
rule = driver_utils.create_security_group_rule_body(
sg_id, direction, ethertype=ethertype)
direction, ethertype=ethertype)
sg_rule_body_list.append(rule)

for rule_block in rule_list:
@@ -519,20 +481,20 @@ class NetworkPolicyDriver(base.NetworkPolicyDriver):
if allowed_resources or allow_all or selectors:
if type(port.get('port')) is not int:
self._create_sg_rule_body_on_text_port(
sg_id, direction, port, allowed_resources,
direction, port, allowed_resources,
sg_rule_body_list, pod_selector,
policy_namespace)
else:
self._create_sg_rule_on_number_port(
allowed_resources, sg_id, direction, port,
allowed_resources, direction, port,
sg_rule_body_list, policy_namespace)
if allow_all:
self._create_all_pods_sg_rules(
port, sg_id, direction, sg_rule_body_list,
port, direction, sg_rule_body_list,
pod_selector, policy_namespace)
else:
self._create_all_pods_sg_rules(
port, sg_id, direction, sg_rule_body_list,
port, direction, sg_rule_body_list,
pod_selector, policy_namespace)
elif allowed_resources or allow_all or selectors:
for resource in allowed_resources:
@@ -543,27 +505,27 @@ class NetworkPolicyDriver(base.NetworkPolicyDriver):
if not cidr:
continue
rule = driver_utils.create_security_group_rule_body(
sg_id, direction,
direction,
port_range_min=1,
port_range_max=65535,
cidr=cidr,
namespace=namespace)
sg_rule_body_list.append(rule)
if direction == 'egress':
rule = self._create_svc_egress_sg_rule(
sg_id, policy_namespace, sg_rule_body_list,
self._create_svc_egress_sg_rule(
policy_namespace, sg_rule_body_list,
resource=resource)
if allow_all:
for ethertype in (constants.IPv4, constants.IPv6):
rule = driver_utils.create_security_group_rule_body(
sg_id, direction,
direction,
port_range_min=1,
port_range_max=65535,
ethertype=ethertype)
sg_rule_body_list.append(rule)
if direction == 'egress':
self._create_svc_egress_sg_rule(
sg_id, policy_namespace, sg_rule_body_list)
self._create_svc_egress_sg_rule(policy_namespace,
sg_rule_body_list)
else:
LOG.debug('This network policy specifies no %(direction)s '
'%(rule_direction)s and no ports: %(policy)s',
@@ -571,15 +533,14 @@ class NetworkPolicyDriver(base.NetworkPolicyDriver):
'rule_direction': rule_direction,
'policy': policy['metadata']['selfLink']})

def _create_svc_egress_sg_rule(self, sg_id, policy_namespace,
sg_rule_body_list, resource=None,
port=None, protocol=None):
def _create_svc_egress_sg_rule(self, policy_namespace, sg_rule_body_list,
resource=None, port=None, protocol=None):
services = driver_utils.get_services()
if not resource:
svc_subnet = utils.get_subnet_cidr(
CONF.neutron_defaults.service_subnet)
rule = driver_utils.create_security_group_rule_body(
sg_id, 'egress', port, protocol=protocol, cidr=svc_subnet)
'egress', port, protocol=protocol, cidr=svc_subnet)
if rule not in sg_rule_body_list:
sg_rule_body_list.append(rule)
return
@@ -613,7 +574,7 @@ class NetworkPolicyDriver(base.NetworkPolicyDriver):
if not cluster_ip:
continue
rule = driver_utils.create_security_group_rule_body(
sg_id, 'egress', port, protocol=protocol,
'egress', port, protocol=protocol,
cidr=cluster_ip)
if rule not in sg_rule_body_list:
sg_rule_body_list.append(rule)
@@ -626,7 +587,7 @@ class NetworkPolicyDriver(base.NetworkPolicyDriver):
return True
return False

def parse_network_policy_rules(self, policy, sg_id):
def parse_network_policy_rules(self, policy):
"""Create security group rule bodies out of network policies.

Whenever a notification from the handler 'on-present' method is
@@ -637,10 +598,8 @@ class NetworkPolicyDriver(base.NetworkPolicyDriver):
ingress_sg_rule_body_list = []
egress_sg_rule_body_list = []

self._parse_sg_rules(ingress_sg_rule_body_list, 'ingress', policy,
sg_id)
self._parse_sg_rules(egress_sg_rule_body_list, 'egress', policy,
sg_id)
self._parse_sg_rules(ingress_sg_rule_body_list, 'ingress', policy)
self._parse_sg_rules(egress_sg_rule_body_list, 'egress', policy)

return ingress_sg_rule_body_list, egress_sg_rule_body_list

@@ -657,19 +616,15 @@ class NetworkPolicyDriver(base.NetworkPolicyDriver):
LOG.exception("Error deleting security group %s.", sg_id)
raise

def release_network_policy(self, netpolicy_crd):
if netpolicy_crd is not None:
self.delete_np_sg(netpolicy_crd['spec']['securityGroupId'])
self._del_kuryrnetpolicy_crd(
netpolicy_crd['metadata']['name'],
netpolicy_crd['metadata']['namespace'])
def release_network_policy(self, policy):
return self._del_knp_crd(policy)

def get_kuryrnetpolicy_crd(self, policy):
netpolicy_crd_name = "np-" + policy['metadata']['name']
def _get_knp_crd(self, policy):
netpolicy_crd_name = policy['metadata']['name']
netpolicy_crd_namespace = policy['metadata']['namespace']
try:
netpolicy_crd = self.kubernetes.get(
'{}/{}/kuryrnetpolicies/{}'.format(
'{}/{}/kuryrnetworkpolicies/{}'.format(
constants.K8S_API_CRD_NAMESPACES, netpolicy_crd_namespace,
netpolicy_crd_name))
except exceptions.K8sResourceNotFound:
@@ -679,77 +634,81 @@ class NetworkPolicyDriver(base.NetworkPolicyDriver):
raise
return netpolicy_crd

def knps_on_namespace(self, namespace):
try:
netpolicy_crds = self.kubernetes.get(
'{}/{}/kuryrnetpolicies'.format(
constants.K8S_API_CRD_NAMESPACES,
namespace))
except exceptions.K8sClientException:
LOG.exception("Kubernetes Client Exception.")
raise
if netpolicy_crds.get('items'):
return True
return False

def _add_kuryrnetpolicy_crd(self, policy, project_id, sg_id, i_rules,
e_rules):
def _create_knp_crd(self, policy, i_rules, e_rules):
networkpolicy_name = policy['metadata']['name']
netpolicy_crd_name = "np-" + networkpolicy_name
namespace = policy['metadata']['namespace']
pod_selector = policy['spec'].get('podSelector')
policy_types = policy['spec'].get('policyTypes', [])
netpolicy_crd = {
'apiVersion': 'openstack.org/v1',
'kind': constants.K8S_OBJ_KURYRNETPOLICY,
'kind': constants.K8S_OBJ_KURYRNETWORKPOLICY,
'metadata': {
'name': netpolicy_crd_name,
'name': networkpolicy_name,
'namespace': namespace,
'annotations': {
'networkpolicy_name': networkpolicy_name,
'networkpolicy_namespace': namespace,
'networkpolicy_uid': policy['metadata']['uid'],
'networkPolicyLink': policy['metadata']['selfLink'],
},
'finalizers': [constants.NETWORKPOLICY_FINALIZER],
},
'spec': {
'securityGroupName': "sg-" + networkpolicy_name,
'securityGroupId': sg_id,
'ingressSgRules': i_rules,
'egressSgRules': e_rules,
'podSelector': pod_selector,
'networkpolicy_spec': policy['spec']
'policyTypes': policy_types,
},
'status': {
'securityGroupRules': [],
},
}

try:
LOG.debug("Creating KuryrNetPolicy CRD %s" % netpolicy_crd)
kubernetes_post = '{}/{}/kuryrnetpolicies'.format(
LOG.debug("Creating KuryrNetworkPolicy CRD %s" % netpolicy_crd)
url = '{}/{}/kuryrnetworkpolicies'.format(
constants.K8S_API_CRD_NAMESPACES,
namespace)
self.kubernetes.post(kubernetes_post, netpolicy_crd)
netpolicy_crd = self.kubernetes.post(url, netpolicy_crd)
except exceptions.K8sClientException:
LOG.exception("Kubernetes Client Exception creating kuryrnetpolicy"
" CRD. %s" % exceptions.K8sClientException)
LOG.exception("Kubernetes Client Exception creating "
"KuryrNetworkPolicy CRD.")
raise
return netpolicy_crd

def _del_kuryrnetpolicy_crd(self, netpolicy_crd_name,
netpolicy_crd_namespace):
def _patch_knp_crd(self, policy, i_rules, e_rules, knp):
networkpolicy_name = policy['metadata']['name']
namespace = policy['metadata']['namespace']
pod_selector = policy['spec'].get('podSelector')
url = (f'{constants.K8S_API_CRD_NAMESPACES}/{namespace}'
f'/kuryrnetworkpolicies/{networkpolicy_name}')

# FIXME(dulek): Rules should be hashable objects, not dict so that
# we could compare them easily here.
data = {
'ingressSgRules': i_rules,
'egressSgRules': e_rules,
}
if knp['spec'].get('podSelector') != pod_selector:
data['podSelector'] = pod_selector

self.kubernetes.patch_crd('spec', url, data)

def _del_knp_crd(self, policy):
try:
LOG.debug("Deleting KuryrNetPolicy CRD %s" % netpolicy_crd_name)
self.kubernetes.delete('{}/{}/kuryrnetpolicies/{}'.format(
constants.K8S_API_CRD_NAMESPACES,
netpolicy_crd_namespace,
netpolicy_crd_name))
ns = policy['metadata']['namespace']
name = policy['metadata']['name']
LOG.debug("Deleting KuryrNetworkPolicy CRD %s" % name)
self.kubernetes.delete('{}/{}/kuryrnetworkpolicies/{}'.format(
constants.K8S_API_CRD_NAMESPACES, ns, name))
return True
except exceptions.K8sResourceNotFound:
LOG.debug("KuryrNetPolicy CRD Object not found: %s",
netpolicy_crd_name)
LOG.debug("KuryrNetworkPolicy CRD Object not found: %s", name)
return False
except exceptions.K8sClientException:
LOG.exception("Kubernetes Client Exception deleting kuryrnetpolicy"
" CRD.")
LOG.exception("Kubernetes Client Exception deleting "
"KuryrNetworkPolicy CRD %s." % name)
raise

def affected_pods(self, policy, selector=None):
if selector or selector == {}:
if selector is not None:
pod_selector = selector
else:
pod_selector = policy['spec'].get('podSelector')


+ 117
- 297
kuryr_kubernetes/controller/drivers/network_policy_security_groups.py View File

@@ -12,6 +12,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.

import uuid

from oslo_config import cfg
from oslo_log import log as logging

@@ -21,6 +23,7 @@ from kuryr_kubernetes import constants
from kuryr_kubernetes.controller.drivers import base
from kuryr_kubernetes.controller.drivers import utils as driver_utils
from kuryr_kubernetes import exceptions
from kuryr_kubernetes import utils

LOG = logging.getLogger(__name__)

@@ -29,9 +32,7 @@ def _get_namespace_labels(namespace):
kubernetes = clients.get_kubernetes_client()

try:
path = '{}/{}'.format(
constants.K8S_API_NAMESPACES, namespace)
LOG.debug("K8s API Query %s", path)
path = '{}/{}'.format(constants.K8S_API_NAMESPACES, namespace)
namespaces = kubernetes.get(path)
LOG.debug("Return Namespace: %s", namespaces)
except exceptions.K8sResourceNotFound:
@@ -43,107 +44,41 @@ def _get_namespace_labels(namespace):
return namespaces['metadata'].get('labels')


def _create_sg_rule(sg_id, direction, cidr, port=None, namespace=None):
if port:
sg_rule = driver_utils.create_security_group_rule_body(
sg_id, direction, port.get('port'),
protocol=port.get('protocol'), cidr=cidr, namespace=namespace)
else:
sg_rule = driver_utils.create_security_group_rule_body(
sg_id, direction, port_range_min=1,
port_range_max=65535, cidr=cidr, namespace=namespace)

sgr_id = driver_utils.create_security_group_rule(sg_rule)
def _bump_networkpolicy(knp):
kubernetes = clients.get_kubernetes_client()

sg_rule['security_group_rule']['id'] = sgr_id
return sg_rule
try:
kubernetes.annotate(
knp['metadata']['annotations']['networkPolicyLink'],
{constants.K8S_ANNOTATION_POLICY: str(uuid.uuid4())})
except exceptions.K8sResourceNotFound:
LOG.exception("NetworkPolicy not found")
raise
except exceptions.K8sClientException:
LOG.exception("Kubernetes Client Exception")
raise


def _get_crd_rule(crd_rules, container_port):
"""Returns a CRD rule that matches a container port
def _create_sg_rules_with_container_ports(container_ports, matched):
"""Checks if security group rules based on container ports will be updated

Retrieves the CRD rule that contains a given port in
the range of the rule ports.
"""
for crd_rule in crd_rules:
remote_ip_prefixes = crd_rule.get('remote_ip_prefixes')
min_port = crd_rule['security_group_rule'].get('port_range_min')
max_port = crd_rule['security_group_rule'].get('port_range_max')
if (remote_ip_prefixes and (
min_port >= container_port and
container_port <= max_port)):
return crd_rule


def _create_sg_rules_with_container_ports(matched_pods, container_ports,
allow_all, namespace, matched,
crd_rules, sg_id, direction,
port, rule_selected_pod):
"""Create security group rules based on container ports

If it's an allow from/to everywhere rule or a rule with a
NamespaceSelector, updates a sg rule that might already exist
and match the named port or creates a new one with the
remote_ip_prefixes field containing the matched pod info.
Otherwise, creates rules for each container port without
a remote_ip_prefixes field.

param matched_pods: List of dicts where the key is a container
port and value is the pods that have the port
param container_ports: List of tuples with pods and port values
param allow_all: True is it's an allow from/to everywhere rule,
False otherwise.
param namespace: Namespace name
param matched: If a sg rule was created for the NP rule
param crd_rules: List of sg rules to update when patching the CRD
param sg_id: ID of the security group
param direction: String representing rule direction, ingress or egress
param port: Dict containing port and protocol
param rule_selected_pod: K8s Pod object selected by the rules selectors

return: True if a sg rule was created, False otherwise.
return: True if a sg rule needs to be created, False otherwise.
"""
for pod, container_port in container_ports:
pod_namespace = pod['metadata']['namespace']
pod_ip = driver_utils.get_pod_ip(pod)
if not pod_ip:
LOG.debug("Skipping SG rule creation for pod %s due to "
"no IP assigned", pod['metadata']['name'])
continue
return matched
return False

pod_info = {pod_ip: pod_namespace}
matched = True
if allow_all or namespace:
crd_rule = _get_crd_rule(crd_rules, container_port)
if crd_rule:
crd_rule['remote_ip_prefixes'].update(pod_info)
else:
if container_port in matched_pods:
matched_pods[container_port].update(pod_info)
else:
matched_pods[container_port] = pod_info
else:
pod_ip = driver_utils.get_pod_ip(rule_selected_pod)
if not pod_ip:
LOG.debug("Skipping SG rule creation for pod %s due to no IP "
"assigned", rule_selected_pod['metadata']['name'])
continue
sg_rule = driver_utils.create_security_group_rule_body(
sg_id, direction, container_port,
protocol=port.get('protocol'),
cidr=pod_ip, pods=pod_info)
sgr_id = driver_utils.create_security_group_rule(sg_rule)
sg_rule['security_group_rule']['id'] = sgr_id
if sg_rule not in crd_rules:
crd_rules.append(sg_rule)
return matched


def _create_sg_rule_on_text_port(sg_id, direction, port, rule_selected_pods,
crd_rules, matched, crd,
allow_all=False, namespace=None):
matched_pods = {}

def _create_sg_rule_on_text_port(direction, port, rule_selected_pods, matched,
crd):
spec_pod_selector = crd['spec'].get('podSelector')
policy_namespace = crd['metadata']['namespace']
spec_pods = driver_utils.get_pods(
@@ -151,11 +86,8 @@ def _create_sg_rule_on_text_port(sg_id, direction, port, rule_selected_pods,
if direction == 'ingress':
for spec_pod in spec_pods:
container_ports = driver_utils.get_ports(spec_pod, port)
for rule_selected_pod in rule_selected_pods:
matched = _create_sg_rules_with_container_ports(
matched_pods, container_ports, allow_all, namespace,
matched, crd_rules, sg_id, direction, port,
rule_selected_pod)
matched = _create_sg_rules_with_container_ports(
container_ports, matched)
elif direction == 'egress':
for rule_selected_pod in rule_selected_pods:
pod_label = rule_selected_pod['metadata'].get('labels')
@@ -168,51 +100,11 @@ def _create_sg_rule_on_text_port(sg_id, direction, port, rule_selected_pods,
container_ports = driver_utils.get_ports(
rule_selected_pod, port)
matched = _create_sg_rules_with_container_ports(
matched_pods, container_ports, allow_all,
namespace, matched, crd_rules, sg_id, direction,
port, rule_selected_pod)

_apply_sg_rules_on_matched_pods(matched_pods, sg_id, direction, namespace,
port, crd_rules, allow_all)

container_ports, matched)
return matched


def _apply_sg_rules_on_matched_pods(matched_pods, sg_id, direction, namespace,
port, crd_rules, allow_all=False):
for container_port, pods in matched_pods.items():
if allow_all:
for ethertype in (constants.IPv4, constants.IPv6):
sg_rule = driver_utils.create_security_group_rule_body(
sg_id, direction, container_port,
protocol=port.get('protocol'),
ethertype=ethertype,
pods=pods)
sgr_id = driver_utils.create_security_group_rule(sg_rule)
sg_rule['security_group_rule']['id'] = sgr_id
if sg_rule not in crd_rules:
crd_rules.append(sg_rule)
else:
namespace_obj = driver_utils.get_namespace(namespace)
if not namespace_obj:
LOG.debug("Skipping SG rule creation. Inexistent"
" namespace.")
continue
namespace_cidr = driver_utils.get_namespace_subnet_cidr(
namespace_obj)
sg_rule = driver_utils.create_security_group_rule_body(
sg_id, direction, container_port,
protocol=port.get('protocol'), cidr=namespace_cidr,
pods=pods)
sgr_id = driver_utils.create_security_group_rule(sg_rule)
sg_rule['security_group_rule']['id'] = sgr_id
if sg_rule not in crd_rules:
crd_rules.append(sg_rule)


def _create_sg_rules(crd, pod, pod_selector, rule_block,
crd_rules, direction, matched, namespace=None,
allow_all=False):
def _create_sg_rules(crd, pod, pod_selector, rule_block, direction, matched):
pod_labels = pod['metadata'].get('labels')
pod_ip = driver_utils.get_pod_ip(pod)
if not pod_ip:
@@ -224,73 +116,52 @@ def _create_sg_rules(crd, pod, pod_selector, rule_block,
# with empty value or with '{}', as they have same result in here.
if pod_selector:
if driver_utils.match_selector(pod_selector, pod_labels):
sg_id = crd['spec']['securityGroupId']
if 'ports' in rule_block:
for port in rule_block['ports']:
if type(port.get('port')) is not int:
matched = _create_sg_rule_on_text_port(
sg_id, direction, port, [pod],
crd_rules, matched, crd)
direction, port, [pod], matched, crd)
else:
matched = True
sg_rule = _create_sg_rule(
sg_id, direction, cidr=pod_ip, port=port,
namespace=namespace)
if sg_rule not in crd_rules:
crd_rules.append(sg_rule)
else:
matched = True
sg_rule = _create_sg_rule(
sg_id, direction, cidr=pod_ip, namespace=namespace)
if sg_rule not in crd_rules:
crd_rules.append(sg_rule)
else:
# NOTE (maysams) When a policy with namespaceSelector and text port
# is applied the port on the pods needs to be retrieved.
sg_id = crd['spec']['securityGroupId']
if 'ports' in rule_block:
for port in rule_block['ports']:
if type(port.get('port')) is not int:
matched = (
_create_sg_rule_on_text_port(
sg_id, direction, port, [pod],
crd_rules, matched, crd,
allow_all=allow_all, namespace=namespace))
matched = _create_sg_rule_on_text_port(
direction, port, [pod], matched, crd)
return matched


def _parse_selectors_on_pod(crd, pod, pod_selector, namespace_selector,
rule_block, crd_rules, direction, matched):
rule_block, direction, matched):
pod_namespace = pod['metadata']['namespace']
pod_namespace_labels = _get_namespace_labels(pod_namespace)
policy_namespace = crd['metadata']['namespace']

if namespace_selector == {}:
matched = _create_sg_rules(crd, pod, pod_selector, rule_block,
crd_rules, direction, matched,
allow_all=True)
direction, matched)
elif namespace_selector:
if (pod_namespace_labels and
driver_utils.match_selector(namespace_selector,
pod_namespace_labels)):
matched = _create_sg_rules(crd, pod, pod_selector,
rule_block, crd_rules,
direction, matched,
namespace=pod_namespace)
rule_block, direction, matched)
else:
if pod_namespace == policy_namespace:
matched = _create_sg_rules(crd, pod, pod_selector, rule_block,
crd_rules, direction, matched,
namespace=pod_namespace)
return matched, crd_rules
direction, matched)
return matched


def _parse_selectors_on_namespace(crd, direction, pod_selector,
ns_selector, rule_block, crd_rules,
namespace, matched):
ns_selector, rule_block, namespace, matched):
ns_name = namespace['metadata'].get('name')
ns_labels = namespace['metadata'].get('labels')
sg_id = crd['spec']['securityGroupId']

if (ns_selector and ns_labels and
driver_utils.match_selector(ns_selector, ns_labels)):
@@ -301,10 +172,8 @@ def _parse_selectors_on_namespace(crd, direction, pod_selector,
if type(port.get('port')) is not int:
matched = (
_create_sg_rule_on_text_port(
sg_id, direction, port, pods,
crd_rules, matched, crd))
direction, port, pods, matched, crd))
else:
matched = True
for pod in pods:
pod_ip = driver_utils.get_pod_ip(pod)
if not pod_ip:
@@ -312,11 +181,7 @@ def _parse_selectors_on_namespace(crd, direction, pod_selector,
LOG.debug("Skipping SG rule creation for pod "
"%s due to no IP assigned", pod_name)
continue
sg_rule = _create_sg_rule(
sg_id, direction, pod_ip, port=port,
namespace=ns_name)
if sg_rule not in crd_rules:
crd_rules.append(sg_rule)
matched = True
else:
for pod in pods:
pod_ip = driver_utils.get_pod_ip(pod)
@@ -326,45 +191,25 @@ def _parse_selectors_on_namespace(crd, direction, pod_selector,
" to no IP assigned", pod_name)
continue
matched = True
sg_rule = _create_sg_rule(
sg_id, direction, pod_ip,
namespace=ns_name)
if sg_rule not in crd_rules:
crd_rules.append(sg_rule)
else:
ns_pods = driver_utils.get_pods(ns_selector)['items']
ns_cidr = driver_utils.get_namespace_subnet_cidr(namespace)
if 'ports' in rule_block:
for port in rule_block['ports']:
if type(port.get('port')) is not int:
matched = (
_create_sg_rule_on_text_port(
sg_id, direction, port, ns_pods,
crd_rules, matched, crd))
direction, port, ns_pods, matched, crd))
else:
matched = True
sg_rule = _create_sg_rule(
sg_id, direction, ns_cidr,
port=port, namespace=ns_name)
if sg_rule not in crd_rules:
crd_rules.append(sg_rule)
else:
matched = True
sg_rule = _create_sg_rule(
sg_id, direction, ns_cidr,
namespace=ns_name)
if sg_rule not in crd_rules:
crd_rules.append(sg_rule)
return matched, crd_rules
return matched


def _parse_rules(direction, crd, pod=None, namespace=None):
policy = crd['spec']['networkpolicy_spec']
def _parse_rules(direction, crd, policy, pod=None, namespace=None):
rule_direction = 'from'
crd_rules = crd['spec'].get('ingressSgRules')
if direction == 'egress':
rule_direction = 'to'
crd_rules = crd['spec'].get('egressSgRules')

matched = False
rule_list = policy.get(direction, [])
@@ -373,13 +218,13 @@ def _parse_rules(direction, crd, pod=None, namespace=None):
namespace_selector = rule.get('namespaceSelector')
pod_selector = rule.get('podSelector')
if pod:
matched, crd_rules = _parse_selectors_on_pod(
matched = _parse_selectors_on_pod(
crd, pod, pod_selector, namespace_selector,
rule_block, crd_rules, direction, matched)
rule_block, direction, matched)
elif namespace:
matched, crd_rules = _parse_selectors_on_namespace(
matched = _parse_selectors_on_namespace(
crd, direction, pod_selector, namespace_selector,
rule_block, crd_rules, namespace, matched)
rule_block, namespace, matched)

# NOTE(maysams): Cover the case of a network policy that allows
# from everywhere on a named port, e.g., when there is no 'from'
@@ -387,84 +232,62 @@ def _parse_rules(direction, crd, pod=None, namespace=None):
if pod and not matched:
for port in rule_block.get('ports', []):
if type(port.get('port')) is not int:
sg_id = crd['spec']['securityGroupId']
if (not rule_block.get(rule_direction, [])
or direction == "ingress"):
matched = (_create_sg_rule_on_text_port(
sg_id, direction, port, [pod],
crd_rules, matched, crd,
allow_all=True))
return matched, crd_rules
matched = _create_sg_rule_on_text_port(
direction, port, [pod], matched, crd)
return matched


def _parse_rules_on_delete_namespace(rule_list, direction, ns_name):
matched = False
rules = []
for rule in rule_list:
LOG.debug('Parsing %(dir)s Rule %(r)s', {'dir': direction,
'r': rule})
LOG.debug('Parsing %(dir)s Rule %(r)s', {'dir': direction, 'r': rule})
rule_namespace = rule.get('namespace', None)
remote_ip_prefixes = rule.get('remote_ip_prefixes', {})
affectedPods = rule.get('affectedPods', [])
if rule_namespace and rule_namespace == ns_name:
matched = True
driver_utils.delete_security_group_rule(
rule['security_group_rule']['id'])
elif remote_ip_prefixes:
for remote_ip, namespace in list(remote_ip_prefixes.items()):
if namespace == ns_name:
matched = True
remote_ip_prefixes.pop(remote_ip)
if remote_ip_prefixes:
rule['remote_ip_prefixes'] = remote_ip_prefixes
rules.append(rule)
else:
rules.append(rule)
return matched, rules
return True
elif affectedPods:
for pod_info in affectedPods:
if pod_info['podNamespace'] == ns_name:
return True
return False


def _parse_rules_on_delete_pod(rule_list, direction, pod_ip):
matched = False
rules = []
for rule in rule_list:
LOG.debug('Parsing %(dir)s Rule %(r)s', {'dir': direction,
'r': rule})
remote_ip_prefix = rule['security_group_rule'].get(
'remote_ip_prefix')
remote_ip_prefixes = rule.get('remote_ip_prefixes', {})
LOG.debug('Parsing %(dir)s Rule %(r)s', {'dir': direction, 'r': rule})
remote_ip_prefix = rule['sgRule'].get('remote_ip_prefix')
affectedPods = rule.get('affectedPods', [])
if remote_ip_prefix and remote_ip_prefix == pod_ip:
matched = True
driver_utils.delete_security_group_rule(
rule['security_group_rule']['id'])
elif remote_ip_prefixes:
if pod_ip in remote_ip_prefixes:
matched = True
remote_ip_prefixes.pop(pod_ip)
if remote_ip_prefixes:
rule['remote_ip_prefixes'] = remote_ip_prefixes
rules.append(rule)
else: