
Add Network Policy support to services

This patch adds support for Network Policy on services. It
applies the pods' security groups onto the services in front of them.
It makes the following assumptions:
- All the pods pointed to by one svc have the same labels, and thus
  the same SGs are enforced on them
- Only the SG rules with the same protocol and direction as the
  listener being created are copied
- A default rule is added to the NP security group to allow traffic
  from the services subnet CIDR

Partially Implements: blueprint k8s-network-policies
Change-Id: Ibd4b51ff40b69af26ab7e7b81d18e63abddf775b
tags/0.6.1 · Luis Tomas Bolivar · 6 months ago · parent commit b200d368cd
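
For orientation, a worked example of the end result (all names, labels and CIDRs here are hypothetical, not taken from the patch): suppose a NetworkPolicy allows ingress TCP 8080 from 10.10.0.0/24 to pods labelled app=demo, and a Service selecting app=demo is exposed through an Octavia listener on port 80. The rule that _apply_members_security_groups (added below) copies onto the listener's security group would then look roughly like:

    # Illustrative only; values below are hypothetical.
    copied_rule = {
        'security_group_rule': {
            'direction': 'ingress',
            'protocol': 'TCP',                      # pool.protocol
            'port_range_min': 80,                   # the listener port, not the pod port
            'port_range_max': 80,
            'remote_ip_prefix': '10.10.0.0/24',     # taken from the NP rule
            'security_group_id': 'LISTENER_SG_ID',  # listener SG or VIP port SG
            'description': 'POOL_NAME',             # sg_rule_name (pool.name)
        },
    }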

kuryr_kubernetes/controller/drivers/lbaasv2.py (+68, -16)

@@ -31,6 +31,7 @@ from kuryr_kubernetes import constants as const
 from kuryr_kubernetes.controller.drivers import base
 from kuryr_kubernetes import exceptions as k_exc
 from kuryr_kubernetes.objects import lbaas as obj_lbaas
+from kuryr_kubernetes import utils
 
 CONF = cfg.CONF
 LOG = logging.getLogger(__name__)
@@ -199,6 +200,53 @@ class LBaaSv2Driver(base.LBaaSDriver):
                 LOG.exception('Failed when creating security group rule '
                               'for listener %s.', listener.name)
 
+    def _apply_members_security_groups(self, loadbalancer, port, target_port,
+                                       protocol, sg_rule_name):
+        neutron = clients.get_neutron_client()
+        if CONF.octavia_defaults.sg_mode == 'create':
+            sg_id = self._find_listeners_sg(loadbalancer)
+        else:
+            sg_id = self._get_vip_port(loadbalancer).get('security_groups')[0]
+
+        # Check if Network Policy allows listener on the pods
+        for sg in loadbalancer.security_groups:
+            if sg != sg_id:
+                rules = neutron.list_security_group_rules(
+                    security_group_id=sg)
+                for rule in rules['security_group_rules']:
+                    # copying ingress rules with same protocol onto the
+                    # loadbalancer sg rules
+                    # NOTE(ltomasbo): NP security groups only have
+                    # remote_ip_prefix, not remote_group_id, therefore only
+                    # applying the ones with remote_ip_prefix
+                    if (rule['protocol'] == protocol.lower() and
+                            rule['direction'] == 'ingress' and
+                            rule['remote_ip_prefix']):
+                        # If listener port not in allowed range, skip
+                        min_port = rule.get('port_range_min')
+                        max_port = rule.get('port_range_max')
+                        if (min_port and target_port not in range(min_port,
+                                                                  max_port+1)):
+                            continue
+                        try:
+                            neutron.create_security_group_rule({
+                                'security_group_rule': {
+                                    'direction': 'ingress',
+                                    'port_range_min': port,
+                                    'port_range_max': port,
+                                    'protocol': protocol,
+                                    'remote_ip_prefix': rule[
+                                        'remote_ip_prefix'],
+                                    'security_group_id': sg_id,
+                                    'description': sg_rule_name,
+                                },
+                            })
+                        except n_exc.NeutronClientException as ex:
+                            if ex.status_code != requests.codes.conflict:
+                                LOG.exception('Failed when creating security '
+                                              'group rule for listener %s.',
+                                              sg_rule_name)
+
     def _extend_lb_security_group_rules(self, loadbalancer, listener):
         neutron = clients.get_neutron_client()
 
@@ -242,7 +290,7 @@ class LBaaSv2Driver(base.LBaaSDriver):
                                       'rule for listener %s.', listener.name)
 
         # ensure routes have access to the services
-        service_subnet_cidr = self._get_subnet_cidr(loadbalancer.subnet_id)
+        service_subnet_cidr = utils.get_subnet_cidr(loadbalancer.subnet_id)
         try:
             # add access from service subnet
             neutron.create_security_group_rule({
@@ -261,7 +309,7 @@ class LBaaSv2Driver(base.LBaaSDriver):
             # support
             worker_subnet_id = CONF.pod_vif_nested.worker_nodes_subnet
             if worker_subnet_id:
-                worker_subnet_cidr = self._get_subnet_cidr(worker_subnet_id)
+                worker_subnet_cidr = utils.get_subnet_cidr(worker_subnet_id)
                 neutron.create_security_group_rule({
                     'security_group_rule': {
                         'direction': 'ingress',
@@ -321,7 +369,10 @@ class LBaaSv2Driver(base.LBaaSDriver):
                       lbaas.delete_listener,
                       listener.id)
 
-        sg_id = self._find_listeners_sg(loadbalancer)
+        if CONF.octavia_defaults.sg_mode == 'create':
+            sg_id = self._find_listeners_sg(loadbalancer)
+        else:
+            sg_id = self._get_vip_port(loadbalancer).get('security_groups')[0]
         if sg_id:
             rules = neutron.list_security_group_rules(
                 security_group_id=sg_id, description=listener.name)
@@ -363,7 +414,7 @@ class LBaaSv2Driver(base.LBaaSDriver):
 
     def ensure_member(self, loadbalancer, pool,
                       subnet_id, ip, port, target_ref_namespace,
-                      target_ref_name):
+                      target_ref_name, listener_port=None):
         name = ("%s/%s" % (target_ref_namespace, target_ref_name))
         name += ":%s" % port
         member = obj_lbaas.LBaaSMember(name=name,
@@ -372,9 +423,19 @@ class LBaaSv2Driver(base.LBaaSDriver):
                                        subnet_id=subnet_id,
                                        ip=ip,
                                        port=port)
-        return self._ensure_provisioned(loadbalancer, member,
-                                        self._create_member,
-                                        self._find_member)
+        result = self._ensure_provisioned(loadbalancer, member,
+                                          self._create_member,
+                                          self._find_member)
+
+        network_policy = (
+            'policy' in CONF.kubernetes.enabled_handlers and
+            CONF.kubernetes.service_security_groups_driver == 'policy')
+        if network_policy and listener_port:
+            protocol = pool.protocol
+            sg_rule_name = pool.name
+            self._apply_members_security_groups(loadbalancer, listener_port,
+                                                port, protocol, sg_rule_name)
+        return result
 
     def release_member(self, loadbalancer, member):
         lbaas = clients.get_loadbalancer_client()
@@ -397,15 +458,6 @@ class LBaaSv2Driver(base.LBaaSDriver):
 
         return None
 
-    def _get_subnet_cidr(self, subnet_id):
-        neutron = clients.get_neutron_client()
-        try:
-            subnet_obj = neutron.show_subnet(subnet_id)
-        except n_exc.NeutronClientException:
-            LOG.exception("Subnet %s CIDR not found!", subnet_id)
-            raise
-        return subnet_obj.get('subnet')['cidr']
-
     def _create_loadbalancer(self, loadbalancer):
         lbaas = clients.get_loadbalancer_client()
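
A note on the port-range guard inside _apply_members_security_groups above: a policy rule is only copied when the member's target port falls within the rule's port range (rules without a range always match). A minimal sketch of that check in isolation:

    def rule_allows_target(rule, target_port):
        # Mirrors the guard in _apply_members_security_groups.
        min_port = rule.get('port_range_min')
        max_port = rule.get('port_range_max')
        if min_port and target_port not in range(min_port, max_port + 1):
            return False
        return True

    assert rule_allows_target({'port_range_min': 8000, 'port_range_max': 8080}, 8080)
    assert not rule_allows_target({'port_range_min': 8000, 'port_range_max': 8080}, 9090)
    assert rule_allows_target({}, 9090)  # no range set: any target port is accepted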
 

kuryr_kubernetes/controller/drivers/network_policy.py (+43, -24)

@@ -17,10 +17,12 @@ from oslo_log import log as logging
 from neutronclient.common import exceptions as n_exc
 
 from kuryr_kubernetes import clients
+from kuryr_kubernetes import config
 from kuryr_kubernetes import constants
 from kuryr_kubernetes.controller.drivers import base
-from kuryr_kubernetes.controller.drivers import utils
+from kuryr_kubernetes.controller.drivers import utils as driver_utils
 from kuryr_kubernetes import exceptions
+from kuryr_kubernetes import utils
 
 LOG = logging.getLogger(__name__)
 
@@ -93,14 +95,14 @@ class NetworkPolicyDriver(base.NetworkPolicyDriver):
                               current_sg_rules]
         for sg_rule in sg_rules_to_delete:
             try:
-                utils.delete_security_group_rule(sgr_ids[sg_rule])
+                driver_utils.delete_security_group_rule(sgr_ids[sg_rule])
             except n_exc.NotFound:
                 LOG.debug('Trying to delete non existing sg_rule %s', sg_rule)
         # Create new rules that weren't already on the security group
         sg_rules_to_add = [rule for rule in current_sg_rules if rule not in
                            existing_sg_rules]
         for sg_rule in sg_rules_to_add:
-            sgr_id = utils.create_security_group_rule(sg_rule)
+            sgr_id = driver_utils.create_security_group_rule(sg_rule)
             if sg_rule['security_group_rule'].get('direction') == 'ingress':
                 for i_rule in i_rules:
                     if sg_rule == i_rule:
@@ -111,8 +113,8 @@ class NetworkPolicyDriver(base.NetworkPolicyDriver):
                         e_rule["security_group_rule"]["id"] = sgr_id
         # Annotate kuryrnetpolicy CRD with current policy and ruleset
         pod_selector = policy['spec'].get('podSelector')
-        utils.patch_kuryr_crd(crd, i_rules, e_rules, pod_selector,
-                              np_spec=policy['spec'])
+        driver_utils.patch_kuryr_crd(crd, i_rules, e_rules, pod_selector,
+                                     np_spec=policy['spec'])
 
         if existing_pod_selector != pod_selector:
             return existing_pod_selector
@@ -142,13 +144,26 @@ class NetworkPolicyDriver(base.NetworkPolicyDriver):
             sg_id = sg['security_group']['id']
             i_rules, e_rules = self.parse_network_policy_rules(policy, sg_id)
             for i_rule in i_rules:
-                sgr_id = utils.create_security_group_rule(i_rule)
+                sgr_id = driver_utils.create_security_group_rule(i_rule)
                 i_rule['security_group_rule']['id'] = sgr_id
 
             for e_rule in e_rules:
-                sgr_id = utils.create_security_group_rule(e_rule)
+                sgr_id = driver_utils.create_security_group_rule(e_rule)
                 e_rule['security_group_rule']['id'] = sgr_id
 
+            # NOTE(ltomasbo): Add extra SG rule to allow traffic from services
+            # subnet
+            svc_cidr = utils.get_subnet_cidr(
+                config.CONF.neutron_defaults.service_subnet)
+            svc_rule = {
+                u'security_group_rule': {
+                    u'ethertype': 'IPv4',
+                    u'security_group_id': sg_id,
+                    u'direction': 'ingress',
+                    u'description': 'Kuryr-Kubernetes NetPolicy SG rule',
+                    u'remote_ip_prefix': svc_cidr
+                }}
+            driver_utils.create_security_group_rule(svc_rule)
         except (n_exc.NeutronClientException, exceptions.ResourceNotReady):
             LOG.exception("Error creating security group for network policy "
                           " %s", policy['metadata']['name'])
@@ -179,12 +194,13 @@ class NetworkPolicyDriver(base.NetworkPolicyDriver):
         ips = []
         matching_pods = []
         if namespace_selector:
-            matching_namespaces = utils.get_namespaces(namespace_selector)
+            matching_namespaces = driver_utils.get_namespaces(
+                namespace_selector)
             for ns in matching_namespaces.get('items'):
-                matching_pods = utils.get_pods(pod_selector,
-                                               ns['metadata']['name'])
+                matching_pods = driver_utils.get_pods(pod_selector,
+                                                      ns['metadata']['name'])
         else:
-            matching_pods = utils.get_pods(pod_selector, namespace)
+            matching_pods = driver_utils.get_pods(pod_selector, namespace)
         for pod in matching_pods.get('items'):
             if pod['status']['podIP']:
                 ips.append(pod['status']['podIP'])
@@ -214,7 +230,8 @@ class NetworkPolicyDriver(base.NetworkPolicyDriver):
             ns_cidr = self._get_namespace_subnet_cidr(ns)
             cidrs.append(ns_cidr)
         else:
-            matching_namespaces = utils.get_namespaces(namespace_selector)
+            matching_namespaces = driver_utils.get_namespaces(
+                namespace_selector)
             for ns in matching_namespaces.get('items'):
                 # NOTE(ltomasbo): This requires the namespace handler to be
                 # also enabled
@@ -280,7 +297,7 @@ class NetworkPolicyDriver(base.NetworkPolicyDriver):
         if rule_list[0] == {}:
             LOG.debug('Applying default all open policy from %s',
                       policy['metadata']['selfLink'])
-            rule = utils.create_security_group_rule_body(
+            rule = driver_utils.create_security_group_rule_body(
                 sg_id, direction, port_range_min=1, port_range_max=65535)
             sg_rule_body_list.append(rule)
 
@@ -294,31 +311,33 @@ class NetworkPolicyDriver(base.NetworkPolicyDriver):
                 for port in rule_block['ports']:
                     if allowed_cidrs or allow_all or selectors:
                         for cidr in allowed_cidrs:
-                            rule = utils.create_security_group_rule_body(
-                                sg_id, direction, port.get('port'),
-                                protocol=port.get('protocol'),
-                                cidr=cidr)
+                            rule = (
+                                driver_utils.create_security_group_rule_body(
+                                    sg_id, direction, port.get('port'),
+                                    protocol=port.get('protocol'),
+                                    cidr=cidr))
                             sg_rule_body_list.append(rule)
                         if allow_all:
-                            rule = utils.create_security_group_rule_body(
-                                sg_id, direction, port.get('port'),
-                                protocol=port.get('protocol'))
+                            rule = (
+                                driver_utils.create_security_group_rule_body(
+                                    sg_id, direction, port.get('port'),
+                                    protocol=port.get('protocol')))
                             sg_rule_body_list.append(rule)
                     else:
-                        rule = utils.create_security_group_rule_body(
+                        rule = driver_utils.create_security_group_rule_body(
                             sg_id, direction, port.get('port'),
                             protocol=port.get('protocol'))
                         sg_rule_body_list.append(rule)
             elif allowed_cidrs or allow_all or selectors:
                 for cidr in allowed_cidrs:
-                    rule = utils.create_security_group_rule_body(
+                    rule = driver_utils.create_security_group_rule_body(
                         sg_id, direction,
                         port_range_min=1,
                         port_range_max=65535,
                         cidr=cidr)
                     sg_rule_body_list.append(rule)
                 if allow_all:
-                    rule = utils.create_security_group_rule_body(
+                    rule = driver_utils.create_security_group_rule_body(
                         sg_id, direction,
                         port_range_min=1,
                         port_range_max=65535)
@@ -456,7 +475,7 @@ class NetworkPolicyDriver(base.NetworkPolicyDriver):
             pod_selector = policy['spec'].get('podSelector')
         if pod_selector:
             policy_namespace = policy['metadata']['namespace']
-            pods = utils.get_pods(pod_selector, policy_namespace)
+            pods = driver_utils.get_pods(pod_selector, policy_namespace)
             return pods.get('items')
         else:
             # NOTE(ltomasbo): It affects all the pods on the namespace
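
The extra rule added in create_security_group_rules_from_network_policy above is the "default rule" mentioned in the commit message: it keeps traffic coming from the services subnet (where the load balancer VIPs live) allowed once a policy SG is applied to the pods. Assuming a hypothetical services subnet of 10.0.0.64/26, the rule created on each policy SG is equivalent to:

    # svc_cidr resolved via utils.get_subnet_cidr(service_subnet); CIDR hypothetical.
    svc_rule = {
        'security_group_rule': {
            'ethertype': 'IPv4',
            'direction': 'ingress',
            'description': 'Kuryr-Kubernetes NetPolicy SG rule',
            'remote_ip_prefix': '10.0.0.64/26',
            'security_group_id': sg_id,   # the SG created for this network policy
        }}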

kuryr_kubernetes/controller/drivers/network_policy_security_groups.py (+41, -56)

@@ -192,36 +192,40 @@ def _parse_rules(direction, crd, pod):
     return matched, crd_rules
 
 
-class NetworkPolicySecurityGroupsDriver(base.PodSecurityGroupsDriver):
-    """Provides security groups for pods based on network policies"""
-
-    def get_security_groups(self, pod, project_id):
-        sg_list = []
+def _get_pod_sgs(pod, project_id):
+    sg_list = []
 
-        pod_labels = pod['metadata'].get('labels')
-        pod_namespace = pod['metadata']['namespace']
+    pod_labels = pod['metadata'].get('labels')
+    pod_namespace = pod['metadata']['namespace']
 
-        knp_crds = _get_kuryrnetpolicy_crds(namespace=pod_namespace)
-        for crd in knp_crds.get('items'):
-            pod_selector = crd['spec'].get('podSelector')
-            if pod_selector:
-                if _match_selector(pod_selector, pod_labels):
-                    LOG.debug("Appending %s",
-                              str(crd['spec']['securityGroupId']))
-                    sg_list.append(str(crd['spec']['securityGroupId']))
-            else:
-                LOG.debug("Appending %s", str(crd['spec']['securityGroupId']))
+    knp_crds = _get_kuryrnetpolicy_crds(namespace=pod_namespace)
+    for crd in knp_crds.get('items'):
+        pod_selector = crd['spec'].get('podSelector')
+        if pod_selector:
+            if _match_selector(pod_selector, pod_labels):
+                LOG.debug("Appending %s",
+                          str(crd['spec']['securityGroupId']))
                 sg_list.append(str(crd['spec']['securityGroupId']))
+        else:
+            LOG.debug("Appending %s", str(crd['spec']['securityGroupId']))
+            sg_list.append(str(crd['spec']['securityGroupId']))
 
-        # NOTE(maysams) Pods that are not selected by any Networkpolicy
-        # are fully accessible. Thus, the default security group is associated.
+    # NOTE(maysams) Pods that are not selected by any Networkpolicy
+    # are fully accessible. Thus, the default security group is associated.
+    if not sg_list:
+        sg_list = config.CONF.neutron_defaults.pod_security_groups
         if not sg_list:
-            sg_list = config.CONF.neutron_defaults.pod_security_groups
-            if not sg_list:
-                raise cfg.RequiredOptError('pod_security_groups',
-                                           cfg.OptGroup('neutron_defaults'))
+            raise cfg.RequiredOptError('pod_security_groups',
+                                       cfg.OptGroup('neutron_defaults'))
+
+    return sg_list[:]
 
-        return sg_list[:]
+
+class NetworkPolicySecurityGroupsDriver(base.PodSecurityGroupsDriver):
+    """Provides security groups for pods based on network policies"""
+
+    def get_security_groups(self, pod, project_id):
+        return _get_pod_sgs(pod, project_id)
 
     def create_sg_rules(self, pod):
         LOG.debug("Creating sg rule for pod: %s", pod['metadata']['name'])
@@ -297,36 +301,17 @@ class NetworkPolicyServiceSecurityGroupsDriver(
     def get_security_groups(self, service, project_id):
         sg_list = []
         svc_namespace = service['metadata']['namespace']
-        svc_labels = service['metadata'].get('labels')
-        LOG.debug("Using labels %s", svc_labels)
-
-        knp_crds = _get_kuryrnetpolicy_crds(namespace=svc_namespace)
-        for crd in knp_crds.get('items'):
-            pod_selector = crd['spec'].get('podSelector')
-            if pod_selector:
-                crd_labels = pod_selector.get('matchLabels', None)
-                crd_expressions = pod_selector.get('matchExpressions', None)
-
-                match_exp = match_lb = True
-                if crd_expressions:
-                    match_exp = _match_expressions(crd_expressions,
-                                                   svc_labels)
-                if crd_labels and svc_labels:
-                    match_lb = _match_labels(crd_labels, svc_labels)
-                if match_exp and match_lb:
-                    LOG.debug("Appending %s",
-                              str(crd['spec']['securityGroupId']))
-                    sg_list.append(str(crd['spec']['securityGroupId']))
-            else:
-                LOG.debug("Appending %s", str(crd['spec']['securityGroupId']))
-                sg_list.append(str(crd['spec']['securityGroupId']))
-
-        # NOTE(maysams) Pods that are not selected by any Networkpolicy
-        # are fully accessible. Thus, the default security group is associated.
-        if not sg_list:
-            sg_list = config.CONF.neutron_defaults.pod_security_groups
-            if not sg_list:
-                raise cfg.RequiredOptError('pod_security_groups',
-                                           cfg.OptGroup('neutron_defaults'))
-
+        svc_selector = service['spec'].get('selector')
+
+        # skip if no selector
+        if svc_selector:
+            # get affected pods by svc selector
+            pods = driver_utils.get_pods({'selector': svc_selector},
+                                         svc_namespace).get('items')
+            # NOTE(ltomasbo): We assume all the pods pointed by a service
+            # have the same labels, and the same policy will be applied to
+            # all of them. Hence only considering the security groups applied
+            # to the first one.
+            if pods:
+                return _get_pod_sgs(pods[0], project_id)
        return sg_list[:]
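
In short, the service SG driver no longer matches the Service's own labels against each policy's podSelector; it resolves the pods behind spec.selector and reuses the security groups of the first matched pod via _get_pod_sgs. A small usage sketch with a hypothetical Service:

    service = {
        'metadata': {'namespace': 'default'},
        'spec': {'selector': {'app': 'demo'}},   # hypothetical selector
    }
    # get_security_groups(service, project_id) now roughly does:
    #   pods = driver_utils.get_pods({'selector': {'app': 'demo'}}, 'default')['items']
    #   return _get_pod_sgs(pods[0], project_id)  # first pod only, per the
    #                                             # same-labels assumption
    # and falls back to an empty list when the Service has no selector or no pods.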

kuryr_kubernetes/controller/drivers/utils.py (+18, -13)

@@ -102,21 +102,26 @@ def get_pods(selector, namespace=None):
 
     """
     kubernetes = clients.get_kubernetes_client()
-    labels = selector.get('matchLabels', None)
-    if labels:
-        # Removing pod-template-hash as pods will not have it and
-        # otherwise there will be no match
-        labels.pop('pod-template-hash', None)
-        labels = replace_encoded_characters(labels)
 
-    exps = selector.get('matchExpressions', None)
-    if exps:
-        exps = ', '.join(format_expression(exp) for exp in exps)
+    svc_selector = selector.get('selector')
+    if svc_selector:
+        labels = replace_encoded_characters(svc_selector)
+    else:
+        labels = selector.get('matchLabels', None)
         if labels:
-            expressions = parse.quote("," + exps)
-            labels += expressions
-        else:
-            labels = parse.quote(exps)
+            # Removing pod-template-hash as pods will not have it and
+            # otherwise there will be no match
+            labels.pop('pod-template-hash', None)
+            labels = replace_encoded_characters(labels)
+
+        exps = selector.get('matchExpressions', None)
+        if exps:
+            exps = ', '.join(format_expression(exp) for exp in exps)
+            if labels:
+                expressions = parse.quote("," + exps)
+                labels += expressions
+            else:
+                labels = parse.quote(exps)
 
     if namespace:
         pods = kubernetes.get(
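
get_pods() therefore accepts two selector shapes: the NetworkPolicy podSelector style it already handled (matchLabels/matchExpressions) and a plain Service selector wrapped in a {'selector': ...} dict, as passed by the service SG driver above. Usage sketch (labels hypothetical):

    from kuryr_kubernetes.controller.drivers.utils import get_pods

    # NetworkPolicy podSelector style (pre-existing behaviour):
    get_pods({'matchLabels': {'app': 'demo'}}, namespace='default')

    # Service spec.selector style (new in this patch):
    get_pods({'selector': {'app': 'demo'}}, namespace='default')

    # Both end up as a labelSelector query against the Kubernetes API.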

kuryr_kubernetes/controller/handlers/lbaas.py (+15, -4)

@@ -364,7 +364,8 @@ class LoadBalancerHandler(k8s_base.ResourceEventHandler):
                                                              p.port]
             except KeyError:
                 continue
-        current_targets = {(str(m.ip), m.port) for m in lbaas_state.members}
+        current_targets = {(str(m.ip), m.port, m.pool_id)
+                           for m in lbaas_state.members}
 
         for subset in endpoints.get('subsets', []):
             subset_ports = subset.get('ports', [])
@@ -380,14 +381,14 @@
                     continue
                 for subset_port in subset_ports:
                     target_port = subset_port['port']
-                    if (target_ip, target_port) in current_targets:
-                        continue
                     port_name = subset_port.get('name')
                     try:
                         pool = pool_by_tgt_name[port_name]
                     except KeyError:
                         LOG.debug("No pool found for port: %r", port_name)
                         continue
+                    if (target_ip, target_port, pool.id) in current_targets:
+                        continue
                     # TODO(apuimedo): Do not pass subnet_id at all when in
                     # L3 mode once old neutron-lbaasv2 is not supported, as
                     # octavia does not require it
@@ -400,6 +401,15 @@
                         # from VIP to pods happens in layer 3 mode, i.e.,
                         # routed.
                         member_subnet_id = lbaas_state.loadbalancer.subnet_id
+                    first_member_of_the_pool = True
+                    for member in lbaas_state.members:
+                        if pool.id == member.pool_id:
+                            first_member_of_the_pool = False
+                            break
+                    if first_member_of_the_pool:
+                        listener_port = lsnr_by_id[pool.listener_id].port
+                    else:
+                        listener_port = None
                     member = self._drv_lbaas.ensure_member(
                         loadbalancer=lbaas_state.loadbalancer,
                         pool=pool,
@@ -407,7 +417,8 @@
                         ip=target_ip,
                         port=target_port,
                         target_ref_namespace=target_ref['namespace'],
-                        target_ref_name=target_ref['name'])
+                        target_ref_name=target_ref['name'],
+                        listener_port=listener_port)
                     lbaas_state.members.append(member)
                     changed = True
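
Two related changes above: the de-duplication key for already-provisioned members now includes the pool id, so the same endpoint ip/port can legitimately be added to more than one pool; and listener_port is only passed for the first member of each pool, so the NP-derived SG rules for a listener are created once rather than per member. A sketch of the new key (values hypothetical):

    # (ip, port, pool_id) instead of (ip, port):
    current_targets = {('10.0.0.5', 8080, 'pool-http'),
                       ('10.0.0.5', 8080, 'pool-https')}
    # ('10.0.0.5', 8080, 'pool-http') is skipped as already provisioned,
    # while the same ip/port for a third pool would still be added.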
 

kuryr_kubernetes/tests/unit/controller/drivers/test_lbaasv2.py (+2, -0)

@@ -159,6 +159,8 @@ class TestLBaaSv2Driver(test_base.TestCase):
             'security_group_rules': []}
         cls = d_lbaasv2.LBaaSv2Driver
         m_driver = mock.Mock(spec=d_lbaasv2.LBaaSv2Driver)
+        m_driver._get_vip_port.return_value = {
+            'security_groups': [mock.sentinel.sg_id]}
         loadbalancer = mock.Mock()
         listener = mock.Mock()
 

kuryr_kubernetes/tests/unit/controller/drivers/test_network_policy.py (+14, -3)

@@ -19,6 +19,7 @@ from kuryr_kubernetes.controller.drivers import network_policy
 from kuryr_kubernetes import exceptions
 from kuryr_kubernetes.tests import base as test_base
 from kuryr_kubernetes.tests.unit import kuryr_fixtures as k_fix
+from kuryr_kubernetes import utils
 
 from neutronclient.common import exceptions as n_exc
 
@@ -185,11 +186,15 @@ class TestNetworkPolicyDriver(test_base.TestCase):
                        '_add_kuryrnetpolicy_crd')
     @mock.patch.object(network_policy.NetworkPolicyDriver,
                        'parse_network_policy_rules')
-    def test_create_security_group_rules_from_network_policy(self, m_parse,
+    @mock.patch.object(utils, 'get_subnet_cidr')
+    def test_create_security_group_rules_from_network_policy(self, m_utils,
+                                                             m_parse,
                                                              m_add_crd,
                                                              m_get_crd):
         self._driver.neutron.create_security_group.return_value = {
             'security_group': {'id': mock.sentinel.id}}
+        m_utils.get_subnet_cidr.return_value = {
+            'subnet': {'cidr': mock.sentinel.cidr}}
         m_parse.return_value = (self._i_rules, self._e_rules)
         self._driver.neutron.create_security_group_rule.return_value = {
             'security_group_rule': {'id': mock.sentinel.id}}
@@ -204,10 +209,13 @@
                        '_add_kuryrnetpolicy_crd')
     @mock.patch.object(network_policy.NetworkPolicyDriver,
                        'parse_network_policy_rules')
-    def test_create_security_group_rules_with_k8s_exc(self, m_parse,
+    @mock.patch.object(utils, 'get_subnet_cidr')
+    def test_create_security_group_rules_with_k8s_exc(self, m_utils, m_parse,
                                                       m_add_crd, m_get_crd):
         self._driver.neutron.create_security_group.return_value = {
             'security_group': {'id': mock.sentinel.id}}
+        m_utils.get_subnet_cidr.return_value = {
+            'subnet': {'cidr': mock.sentinel.cidr}}
         m_parse.return_value = (self._i_rules, self._e_rules)
         m_get_crd.side_effect = exceptions.K8sClientException
         self._driver.neutron.create_security_group_rule.return_value = {
@@ -224,10 +232,13 @@
                        '_add_kuryrnetpolicy_crd')
     @mock.patch.object(network_policy.NetworkPolicyDriver,
                        'parse_network_policy_rules')
-    def test_create_security_group_rules_error_add_crd(self, m_parse,
+    @mock.patch.object(utils, 'get_subnet_cidr')
+    def test_create_security_group_rules_error_add_crd(self, m_utils, m_parse,
                                                        m_add_crd, m_get_crd):
         self._driver.neutron.create_security_group.return_value = {
             'security_group': {'id': mock.sentinel.id}}
+        m_utils.get_subnet_cidr.return_value = {
+            'subnet': {'cidr': mock.sentinel.cidr}}
         m_parse.return_value = (self._i_rules, self._e_rules)
         m_add_crd.side_effect = exceptions.K8sClientException
         self._driver.neutron.create_security_group_rule.return_value = {

kuryr_kubernetes/tests/unit/controller/handlers/test_lbaas.py (+1, -1)

@@ -382,7 +382,7 @@ class FakeLBaaSDriver(drv_base.LBaaSDriver):
                                    id=str(uuid.uuid4()))
 
     def ensure_member(self, loadbalancer, pool, subnet_id, ip, port,
-                      target_ref_namespace, target_ref_name
+                      target_ref_namespace, target_ref_name, listener_port=None
                       ):
         name = "%s:%s:%s" % (loadbalancer.name, ip, port)
         return obj_lbaas.LBaaSMember(name=name,

kuryr_kubernetes/utils.py (+12, -0)

@@ -16,6 +16,7 @@ import time
 
 import requests
 
+from neutronclient.common import exceptions as n_exc
 from os_vif import objects
 from oslo_cache import core as cache
 from oslo_config import cfg
@@ -161,6 +162,17 @@ def get_subnet(subnet_id):
     return network
 
 
+@MEMOIZE
+def get_subnet_cidr(subnet_id):
+    neutron = clients.get_neutron_client()
+    try:
+        subnet_obj = neutron.show_subnet(subnet_id)
+    except n_exc.NeutronClientException:
+        LOG.exception("Subnet %s CIDR not found!", subnet_id)
+        raise
+    return subnet_obj.get('subnet')['cidr']
+
+
 def extract_pod_annotation(annotation):
     obj = objects.base.VersionedObject.obj_from_primitive(annotation)
     # FIXME(dulek): This is code to maintain compatibility with Queens. We can
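
_get_subnet_cidr moves here (from LBaaSv2Driver) so both the LBaaS and network-policy drivers can share it, and it gains @MEMOIZE so repeated lookups of the same subnet do not hit Neutron every time. Usage sketch (subnet id and CIDR hypothetical):

    from kuryr_kubernetes import utils

    cidr = utils.get_subnet_cidr('a1b2c3d4-0000-4000-8000-000000000000')
    # -> e.g. '10.0.0.64/26'; a later call with the same id is answered
    #    from the MEMOIZE cache instead of a new show_subnet() request.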
