Browse Source

Fix LBaaS sg rules update on deployment scale

When a service is created with a Network Policy applied and
deployments are scaled up or down, the LBaaS SG rules should be
updated accordingly. Right now, the LBaaS/Service do not react to
deployment scale events.
This commit fixes the issue by ensuring that the LBaaS SG is updated
on pod events.

Also, when Pods, Network Policies and SVCs are created together it might
happen that the LBaaS SG remains with default SG rules, even though
the policy is being enforced. This commit ensures the right SG rules
are applied on an LBaaS regardless of the order of k8s resource creation.
This happens by setting the LBaaS Spec annotation whenever a request
to update the SG rules has been made and retrieving the Spec again
whenever an LBaaS member is created.

Change-Id: I1c54d17a5fcff5387ffae2b132f5036ee9bf07ca
Closes-Bug: 1816015
Maysa Macedo 2 months ago
parent
commit
ba89bd027f

+ 19
- 2
kuryr_kubernetes/controller/drivers/lbaasv2.py View File

@@ -148,6 +148,7 @@ class LBaaSv2Driver(base.LBaaSDriver):
148 148
                                        protocol, sg_rule_name, new_sgs=None):
149 149
         LOG.debug("Applying members security groups.")
150 150
         neutron = clients.get_neutron_client()
151
+        lb_sg = None
151 152
         if CONF.octavia_defaults.sg_mode == 'create':
152 153
             if new_sgs:
153 154
                 lb_name = sg_rule_name.split(":")[0]
@@ -155,7 +156,15 @@ class LBaaSv2Driver(base.LBaaSDriver):
155 156
             else:
156 157
                 lb_sg = self._find_listeners_sg(loadbalancer)
157 158
         else:
158
-            lb_sg = self._get_vip_port(loadbalancer).get('security_groups')[0]
159
+            vip_port = self._get_vip_port(loadbalancer)
160
+            if vip_port:
161
+                lb_sg = vip_port.get('security_groups')[0]
162
+
163
+        # NOTE (maysams) It might happen that the update of LBaaS SG
164
+        # has been triggered and the LBaaS SG was not created yet.
165
+        # This update is skipped until the LBaaS members are created.
166
+        if not lb_sg:
167
+            return
159 168
 
160 169
         lbaas_sg_rules = neutron.list_security_group_rules(
161 170
             security_group_id=lb_sg)
@@ -747,7 +756,12 @@ class LBaaSv2Driver(base.LBaaSDriver):
747 756
             # NOTE(ltomasbo): lb_name parameter is only passed when sg_mode
748 757
             # is 'create' and in that case there is only one sg associated
749 758
             # to the loadbalancer
750
-            return sgs['security_groups'][0]['id']
759
+            try:
760
+                sg_id = sgs['security_groups'][0]['id']
761
+            except IndexError:
762
+                sg_id = None
763
+                LOG.debug("Security Group not created yet for LBaaS.")
764
+            return sg_id
751 765
         try:
752 766
             sgs = neutron.list_security_groups(
753 767
                 name=loadbalancer.name, project_id=loadbalancer.project_id)
@@ -917,6 +931,9 @@ class LBaaSv2Driver(base.LBaaSDriver):
917 931
         if not lbaas:
918 932
             return
919 933
 
934
+        lbaas.security_groups_ids = sgs
935
+        utils.set_lbaas_spec(service, lbaas)
936
+
920 937
         for port in svc_ports:
921 938
             port_protocol = port['protocol']
922 939
             lbaas_port = port['port']

+ 13
- 0
kuryr_kubernetes/controller/drivers/utils.py View File

@@ -392,3 +392,16 @@ def tag_neutron_resources(resource, res_ids):
392 392
                 LOG.warning("Failed to tag %s %s with %s. Ignoring, but this "
393 393
                             "is still unexpected.", resource, res_id, tags,
394 394
                             exc_info=True)
395
+
396
+
397
+def get_services(namespace):
398
+    kubernetes = clients.get_kubernetes_client()
399
+    try:
400
+        services = kubernetes.get(
401
+            '{}/namespaces/{}/services'.format(constants.K8S_API_BASE,
402
+                                               namespace))
403
+    except k_exc.K8sClientException:
404
+        LOG.exception('Exception when getting K8s services in '
405
+                      'namespace %s', namespace)
406
+        raise
407
+    return services

+ 24
- 60
kuryr_kubernetes/controller/handlers/lbaas.py View File

@@ -24,6 +24,7 @@ from kuryr_kubernetes.controller.drivers import base as drv_base
24 24
 from kuryr_kubernetes import exceptions as k_exc
25 25
 from kuryr_kubernetes.handlers import k8s_base
26 26
 from kuryr_kubernetes.objects import lbaas as obj_lbaas
27
+from kuryr_kubernetes import utils
27 28
 
28 29
 LOG = logging.getLogger(__name__)
29 30
 
@@ -47,7 +48,7 @@ class LBaaSSpecHandler(k8s_base.ResourceEventHandler):
47 48
         self._drv_sg = drv_base.ServiceSecurityGroupsDriver.get_instance()
48 49
 
49 50
     def on_present(self, service):
50
-        lbaas_spec = self._get_lbaas_spec(service)
51
+        lbaas_spec = utils.get_lbaas_spec(service)
51 52
 
52 53
         if self._should_ignore(service):
53 54
             LOG.debug("Skipping Kubernetes service %s of an unsupported kind "
@@ -58,7 +59,7 @@ class LBaaSSpecHandler(k8s_base.ResourceEventHandler):
58 59
 
59 60
         if self._has_lbaas_spec_changes(service, lbaas_spec):
60 61
             lbaas_spec = self._generate_lbaas_spec(service)
61
-            self._set_lbaas_spec(service, lbaas_spec)
62
+            utils.set_lbaas_spec(service, lbaas_spec)
62 63
 
63 64
     def _is_supported_type(self, service):
64 65
         spec = service['spec']
@@ -167,55 +168,6 @@ class LBaaSSpecHandler(k8s_base.ResourceEventHandler):
167 168
         return [obj_lbaas.LBaaSPortSpec(**port)
168 169
                 for port in self._get_service_ports(service)]
169 170
 
170
-    def _get_endpoints_link(self, service):
171
-        svc_link = service['metadata']['selfLink']
172
-        link_parts = svc_link.split('/')
173
-
174
-        if link_parts[-2] != 'services':
175
-            raise k_exc.IntegrityError(_(
176
-                "Unsupported service link: %(link)s") % {
177
-                'link': svc_link})
178
-        link_parts[-2] = 'endpoints'
179
-
180
-        return "/".join(link_parts)
181
-
182
-    def _set_lbaas_spec(self, service, lbaas_spec):
183
-        # TODO(ivc): extract annotation interactions
184
-        if lbaas_spec is None:
185
-            LOG.debug("Removing LBaaSServiceSpec annotation: %r", lbaas_spec)
186
-            annotation = None
187
-        else:
188
-            lbaas_spec.obj_reset_changes(recursive=True)
189
-            LOG.debug("Setting LBaaSServiceSpec annotation: %r", lbaas_spec)
190
-            annotation = jsonutils.dumps(lbaas_spec.obj_to_primitive(),
191
-                                         sort_keys=True)
192
-        svc_link = service['metadata']['selfLink']
193
-        ep_link = self._get_endpoints_link(service)
194
-        k8s = clients.get_kubernetes_client()
195
-
196
-        try:
197
-            k8s.annotate(ep_link,
198
-                         {k_const.K8S_ANNOTATION_LBAAS_SPEC: annotation})
199
-        except k_exc.K8sClientException:
200
-            # REVISIT(ivc): only raise ResourceNotReady for NotFound
201
-            raise k_exc.ResourceNotReady(ep_link)
202
-
203
-        k8s.annotate(svc_link,
204
-                     {k_const.K8S_ANNOTATION_LBAAS_SPEC: annotation},
205
-                     resource_version=service['metadata']['resourceVersion'])
206
-
207
-    def _get_lbaas_spec(self, service):
208
-        # TODO(ivc): same as '_set_lbaas_spec'
209
-        try:
210
-            annotations = service['metadata']['annotations']
211
-            annotation = annotations[k_const.K8S_ANNOTATION_LBAAS_SPEC]
212
-        except KeyError:
213
-            return None
214
-        obj_dict = jsonutils.loads(annotation)
215
-        obj = obj_lbaas.LBaaSServiceSpec.obj_from_primitive(obj_dict)
216
-        LOG.debug("Got LBaaSServiceSpec from annotation: %r", obj)
217
-        return obj
218
-
219 171
 
220 172
 class LoadBalancerHandler(k8s_base.ResourceEventHandler):
221 173
     """LoadBalancerHandler handles K8s Endpoints events.
@@ -355,9 +307,30 @@ class LoadBalancerHandler(k8s_base.ResourceEventHandler):
355 307
 
356 308
         return changed
357 309
 
310
+    def _sync_lbaas_sgs(self, endpoints, lbaas_state, lbaas_spec):
311
+        # NOTE (maysams) Need to retrieve the LBaaS Spec again due to
312
+        # the possibility of it being updated after the LBaaS creation
313
+        # process has started.
314
+        svc_link = self._get_service_link(endpoints)
315
+        k8s = clients.get_kubernetes_client()
316
+        service = k8s.get(svc_link)
317
+        lbaas_spec = utils.get_lbaas_spec(service)
318
+
319
+        lb = lbaas_state.loadbalancer
320
+        default_sgs = config.CONF.neutron_defaults.pod_security_groups
321
+        lbaas_spec_sgs = lbaas_spec.security_groups_ids
322
+        if lb.security_groups and lb.security_groups != lbaas_spec_sgs:
323
+            sgs = [lb_sg for lb_sg in lb.security_groups
324
+                   if lb_sg not in default_sgs]
325
+            if lbaas_spec_sgs != default_sgs:
326
+                sgs.extend(lbaas_spec_sgs)
327
+            lb.security_groups = sgs
328
+
358 329
     def _add_new_members(self, endpoints, lbaas_state, lbaas_spec):
359 330
         changed = False
360 331
 
332
+        self._sync_lbaas_sgs(endpoints, lbaas_state, lbaas_spec)
333
+
361 334
         lsnr_by_id = {l.id: l for l in lbaas_state.listeners}
362 335
         pool_by_lsnr_port = {(lsnr_by_id[p.listener_id].protocol,
363 336
                               lsnr_by_id[p.listener_id].port): p
@@ -649,15 +622,6 @@ class LoadBalancerHandler(k8s_base.ResourceEventHandler):
649 622
                 lbaas_state.service_pub_ip_info = None
650 623
                 changed = True
651 624
 
652
-        default_sgs = config.CONF.neutron_defaults.pod_security_groups
653
-        lbaas_spec_sgs = lbaas_spec.security_groups_ids
654
-        if lb.security_groups and lb.security_groups != lbaas_spec_sgs:
655
-            sgs = [lb_sg for lb_sg in lb.security_groups
656
-                   if lb_sg not in default_sgs]
657
-            if lbaas_spec_sgs != default_sgs:
658
-                sgs.extend(lbaas_spec_sgs)
659
-            lb.security_groups = sgs
660
-
661 625
         lbaas_state.loadbalancer = lb
662 626
         return changed
663 627
 

+ 4
- 15
kuryr_kubernetes/controller/handlers/policy.py View File

@@ -20,7 +20,6 @@ from kuryr_kubernetes import clients
20 20
 from kuryr_kubernetes import constants as k_const
21 21
 from kuryr_kubernetes.controller.drivers import base as drivers
22 22
 from kuryr_kubernetes.controller.drivers import utils as driver_utils
23
-from kuryr_kubernetes import exceptions
24 23
 from kuryr_kubernetes.handlers import k8s_base
25 24
 from kuryr_kubernetes import utils
26 25
 
@@ -81,7 +80,8 @@ class NetworkPolicyHandler(k8s_base.ResourceEventHandler):
81 80
         if pods_to_update:
82 81
             # NOTE(ltomasbo): only need to change services if the pods that
83 82
             # they point to are updated
84
-            services = self._get_services(policy['metadata']['namespace'])
83
+            services = driver_utils.get_services(
84
+                policy['metadata']['namespace'])
85 85
             for service in services.get('items'):
86 86
                 # TODO(ltomasbo): Skip other services that are not affected
87 87
                 # by the policy
@@ -116,7 +116,8 @@ class NetworkPolicyHandler(k8s_base.ResourceEventHandler):
116 116
 
117 117
             self._drv_policy.release_network_policy(netpolicy_crd)
118 118
 
119
-            services = self._get_services(policy['metadata']['namespace'])
119
+            services = driver_utils.get_services(
120
+                policy['metadata']['namespace'])
120 121
             for service in services.get('items'):
121 122
                 if service['metadata']['name'] == 'kubernetes':
122 123
                     continue
@@ -137,15 +138,3 @@ class NetworkPolicyHandler(k8s_base.ResourceEventHandler):
137 138
         if utils.has_limit(sg_quota):
138 139
             return utils.is_available('security_groups', sg_quota, sg_func)
139 140
         return True
140
-
141
-    def _get_services(self, namespace):
142
-        kubernetes = clients.get_kubernetes_client()
143
-        services = {"items": []}
144
-        try:
145
-            services = kubernetes.get(
146
-                '{}/namespaces/{}/services'.format(k_const.K8S_API_BASE,
147
-                                                   namespace))
148
-        except exceptions.K8sClientException:
149
-            LOG.exception("Kubernetes Client Exception.")
150
-            raise
151
-        return services

+ 24
- 2
kuryr_kubernetes/controller/handlers/vif.py View File

@@ -73,6 +73,8 @@ class VIFHandler(k8s_base.ResourceEventHandler):
73 73
             specific_driver='multi_pool')
74 74
         self._drv_vif_pool.set_vif_driver()
75 75
         self._drv_multi_vif = drivers.MultiVIFDriver.get_enabled_drivers()
76
+        self._drv_lbaas = drivers.LBaaSDriver.get_instance()
77
+        self._drv_svc_sg = drivers.ServiceSecurityGroupsDriver.get_instance()
76 78
 
77 79
     def on_present(self, pod):
78 80
         if driver_utils.is_host_network(pod) or not self._is_pending_node(pod):
@@ -83,9 +85,8 @@ class VIFHandler(k8s_base.ResourceEventHandler):
83 85
             return
84 86
         state = driver_utils.get_pod_state(pod)
85 87
         LOG.debug("Got VIFs from annotation: %r", state)
86
-
88
+        project_id = self._drv_project.get_project(pod)
87 89
         if not state:
88
-            project_id = self._drv_project.get_project(pod)
89 90
             security_groups = self._drv_sg.get_security_groups(pod, project_id)
90 91
             subnets = self._drv_subnets.get_subnets(pod, project_id)
91 92
 
@@ -127,10 +128,16 @@ class VIFHandler(k8s_base.ResourceEventHandler):
127 128
                 self._set_pod_state(pod, state)
128 129
                 self._drv_sg.create_sg_rules(pod)
129 130
 
131
+                if self._is_network_policy_enabled():
132
+                    services = driver_utils.get_services(
133
+                        pod['metadata']['namespace'])
134
+                    self._update_services(services, project_id)
135
+
130 136
     def on_deleted(self, pod):
131 137
         if driver_utils.is_host_network(pod):
132 138
             return
133 139
 
140
+        services = driver_utils.get_services(pod['metadata']['namespace'])
134 141
         project_id = self._drv_project.get_project(pod)
135 142
         self._drv_sg.delete_sg_rules(pod)
136 143
         try:
@@ -150,6 +157,8 @@ class VIFHandler(k8s_base.ResourceEventHandler):
150 157
             for ifname, vif in state.vifs.items():
151 158
                 self._drv_vif_pool.release_vif(pod, vif, project_id,
152 159
                                                security_groups)
160
+        if self._is_network_policy_enabled():
161
+            self._update_services(services, project_id)
153 162
 
154 163
     @MEMOIZE
155 164
     def is_ready(self, quota):
@@ -198,3 +207,16 @@ class VIFHandler(k8s_base.ResourceEventHandler):
198 207
                      {constants.K8S_ANNOTATION_VIF: annotation,
199 208
                       constants.K8S_ANNOTATION_LABEL: labels_annotation},
200 209
                      resource_version=pod['metadata']['resourceVersion'])
210
+
211
+    def _update_services(self, services, project_id):
212
+        for service in services.get('items'):
213
+            if service['metadata']['name'] == 'kubernetes':
214
+                continue
215
+            sgs = self._drv_svc_sg.get_security_groups(service,
216
+                                                       project_id)
217
+            self._drv_lbaas.update_lbaas_sg(service, sgs)
218
+
219
+    def _is_network_policy_enabled(self):
220
+        enabled_handlers = oslo_cfg.CONF.kubernetes.enabled_handlers
221
+        svc_sg_driver = oslo_cfg.CONF.kubernetes.service_security_groups_driver
222
+        return ('policy' in enabled_handlers and svc_sg_driver == 'policy')

+ 35
- 32
kuryr_kubernetes/tests/unit/controller/handlers/test_lbaas.py View File

@@ -48,7 +48,9 @@ class TestLBaaSSpecHandler(test_base.TestCase):
48 48
         self.assertEqual(mock.sentinel.drv_subnets, handler._drv_subnets)
49 49
         self.assertEqual(mock.sentinel.drv_sg, handler._drv_sg)
50 50
 
51
-    def test_on_present(self):
51
+    @mock.patch('kuryr_kubernetes.utils.set_lbaas_spec')
52
+    @mock.patch('kuryr_kubernetes.utils.get_lbaas_spec')
53
+    def test_on_present(self, m_get_lbaas_spec, m_set_lbaas_spec):
52 54
         svc_event = mock.sentinel.svc_event
53 55
         old_spec = mock.sentinel.old_spec
54 56
         new_spec = mock.sentinel.new_spec
@@ -58,7 +60,7 @@ class TestLBaaSSpecHandler(test_base.TestCase):
58 60
         m_drv_project.get_project.return_value = project_id
59 61
 
60 62
         m_handler = mock.Mock(spec=h_lbaas.LBaaSSpecHandler)
61
-        m_handler._get_lbaas_spec.return_value = old_spec
63
+        m_get_lbaas_spec.return_value = old_spec
62 64
         m_handler._has_lbaas_spec_changes.return_value = True
63 65
         m_handler._generate_lbaas_spec.return_value = new_spec
64 66
         m_handler._should_ignore.return_value = False
@@ -66,43 +68,49 @@ class TestLBaaSSpecHandler(test_base.TestCase):
66 68
 
67 69
         h_lbaas.LBaaSSpecHandler.on_present(m_handler, svc_event)
68 70
 
69
-        m_handler._get_lbaas_spec.assert_called_once_with(svc_event)
71
+        m_get_lbaas_spec.assert_called_once_with(svc_event)
70 72
         m_handler._has_lbaas_spec_changes.assert_called_once_with(svc_event,
71 73
                                                                   old_spec)
72 74
         m_handler._generate_lbaas_spec.assert_called_once_with(svc_event)
73
-        m_handler._set_lbaas_spec.assert_called_once_with(svc_event, new_spec)
75
+        m_set_lbaas_spec.assert_called_once_with(svc_event, new_spec)
74 76
 
75
-    def test_on_present_no_changes(self):
77
+    @mock.patch('kuryr_kubernetes.utils.set_lbaas_spec')
78
+    @mock.patch('kuryr_kubernetes.utils.get_lbaas_spec')
79
+    def test_on_present_no_changes(self, m_get_lbaas_spec,
80
+                                   m_set_lbaas_spec):
76 81
         svc_event = mock.sentinel.svc_event
77 82
         old_spec = mock.sentinel.old_spec
78 83
 
79 84
         m_handler = mock.Mock(spec=h_lbaas.LBaaSSpecHandler)
80
-        m_handler._get_lbaas_spec.return_value = old_spec
85
+        m_get_lbaas_spec.return_value = old_spec
81 86
         m_handler._has_lbaas_spec_changes.return_value = False
82 87
         m_handler._should_ignore.return_value = False
83 88
 
84 89
         h_lbaas.LBaaSSpecHandler.on_present(m_handler, svc_event)
85 90
 
86
-        m_handler._get_lbaas_spec.assert_called_once_with(svc_event)
91
+        m_get_lbaas_spec.assert_called_once_with(svc_event)
87 92
         m_handler._has_lbaas_spec_changes.assert_called_once_with(svc_event,
88 93
                                                                   old_spec)
89 94
         m_handler._generate_lbaas_spec.assert_not_called()
90
-        m_handler._set_lbaas_spec.assert_not_called()
95
+        m_set_lbaas_spec.assert_not_called()
91 96
 
92
-    def test_on_present_no_selector(self):
97
+    @mock.patch('kuryr_kubernetes.utils.set_lbaas_spec')
98
+    @mock.patch('kuryr_kubernetes.utils.get_lbaas_spec')
99
+    def test_on_present_no_selector(self, m_get_lbaas_spec,
100
+                                    m_set_lbaas_spec):
93 101
         svc_event = {'metadata': {'name': 'dummy_name'}}
94 102
         old_spec = mock.sentinel.old_spec
95 103
 
96 104
         m_handler = mock.Mock(spec=h_lbaas.LBaaSSpecHandler)
97
-        m_handler._get_lbaas_spec.return_value = old_spec
105
+        m_get_lbaas_spec.return_value = old_spec
98 106
         m_handler._should_ignore.return_value = True
99 107
 
100 108
         h_lbaas.LBaaSSpecHandler.on_present(m_handler, svc_event)
101 109
 
102
-        m_handler._get_lbaas_spec.assert_called_once_with(svc_event)
110
+        m_get_lbaas_spec.assert_called_once_with(svc_event)
103 111
         m_handler._has_lbaas_spec_changes.assert_not_called()
104 112
         m_handler._generate_lbaas_spec.assert_not_called()
105
-        m_handler._set_lbaas_spec.assert_not_called()
113
+        m_set_lbaas_spec.assert_not_called()
106 114
 
107 115
     def test_get_service_ip(self):
108 116
         svc_body = {'spec': {'type': 'ClusterIP',
@@ -328,22 +336,6 @@ class TestLBaaSSpecHandler(test_base.TestCase):
328 336
         m_handler._get_service_ports.assert_called_once_with(
329 337
             mock.sentinel.service)
330 338
 
331
-    def test_get_endpoints_link(self):
332
-        m_handler = mock.Mock(spec=h_lbaas.LBaaSSpecHandler)
333
-        service = {'metadata': {
334
-            'selfLink': "/api/v1/namespaces/default/services/test"}}
335
-        ret = h_lbaas.LBaaSSpecHandler._get_endpoints_link(m_handler, service)
336
-        expected_link = "/api/v1/namespaces/default/endpoints/test"
337
-        self.assertEqual(expected_link, ret)
338
-
339
-    def test_get_endpoints_link__integrity_error(self):
340
-        m_handler = mock.Mock(spec=h_lbaas.LBaaSSpecHandler)
341
-        service = {'metadata': {
342
-            'selfLink': "/api/v1/namespaces/default/not-services/test"}}
343
-        self.assertRaises(k_exc.IntegrityError,
344
-                          h_lbaas.LBaaSSpecHandler._get_endpoints_link,
345
-                          m_handler, service)
346
-
347 339
     def test_set_lbaas_spec(self):
348 340
         self.skipTest("skipping until generalised annotation handling is "
349 341
                       "implemented")
@@ -821,6 +813,8 @@ class TestLoadBalancerHandler(test_base.TestCase):
821 813
             for member in state.members)
822 814
         return observed_targets
823 815
 
816
+    @mock.patch('kuryr_kubernetes.controller.handlers.lbaas.'
817
+                'LoadBalancerHandler._sync_lbaas_sgs')
824 818
     @mock.patch('kuryr_kubernetes.controller.drivers.base'
825 819
                 '.PodSubnetsDriver.get_instance')
826 820
     @mock.patch('kuryr_kubernetes.controller.drivers.base'
@@ -828,7 +822,7 @@ class TestLoadBalancerHandler(test_base.TestCase):
828 822
     @mock.patch('kuryr_kubernetes.controller.drivers.base'
829 823
                 '.LBaaSDriver.get_instance')
830 824
     def test_sync_lbaas_members(self, m_get_drv_lbaas, m_get_drv_project,
831
-                                m_get_drv_subnets):
825
+                                m_get_drv_subnets, m_sync_lbaas_sgs):
832 826
         # REVISIT(ivc): test methods separately and verify ensure/release
833 827
         project_id = str(uuid.uuid4())
834 828
         subnet_id = str(uuid.uuid4())
@@ -855,6 +849,8 @@ class TestLoadBalancerHandler(test_base.TestCase):
855 849
         self.assertEqual(sorted(expected_targets.items()), observed_targets)
856 850
         self.assertEqual(expected_ip, str(state.loadbalancer.ip))
857 851
 
852
+    @mock.patch('kuryr_kubernetes.controller.handlers.lbaas.'
853
+                'LoadBalancerHandler._sync_lbaas_sgs')
858 854
     @mock.patch('kuryr_kubernetes.controller.drivers.base'
859 855
                 '.PodSubnetsDriver.get_instance')
860 856
     @mock.patch('kuryr_kubernetes.controller.drivers.base'
@@ -862,7 +858,8 @@ class TestLoadBalancerHandler(test_base.TestCase):
862 858
     @mock.patch('kuryr_kubernetes.controller.drivers.base'
863 859
                 '.LBaaSDriver.get_instance')
864 860
     def test_sync_lbaas_members_udp(self, m_get_drv_lbaas,
865
-                                    m_get_drv_project, m_get_drv_subnets):
861
+                                    m_get_drv_project, m_get_drv_subnets,
862
+                                    m_sync_lbaas_sgs):
866 863
         # REVISIT(ivc): test methods separately and verify ensure/release
867 864
         project_id = str(uuid.uuid4())
868 865
         subnet_id = str(uuid.uuid4())
@@ -889,6 +886,8 @@ class TestLoadBalancerHandler(test_base.TestCase):
889 886
         self.assertEqual([], observed_targets)
890 887
         self.assertEqual(expected_ip, str(state.loadbalancer.ip))
891 888
 
889
+    @mock.patch('kuryr_kubernetes.controller.handlers.lbaas.'
890
+                'LoadBalancerHandler._sync_lbaas_sgs')
892 891
     @mock.patch('kuryr_kubernetes.controller.drivers.base'
893 892
                 '.PodSubnetsDriver.get_instance')
894 893
     @mock.patch('kuryr_kubernetes.controller.drivers.base'
@@ -896,7 +895,8 @@ class TestLoadBalancerHandler(test_base.TestCase):
896 895
     @mock.patch('kuryr_kubernetes.controller.drivers.base'
897 896
                 '.LBaaSDriver.get_instance')
898 897
     def test_sync_lbaas_members_svc_listener_port_edit(
899
-            self, m_get_drv_lbaas, m_get_drv_project, m_get_drv_subnets):
898
+            self, m_get_drv_lbaas, m_get_drv_project, m_get_drv_subnets,
899
+            m_sync_lbaas_sgs):
900 900
         # REVISIT(ivc): test methods separately and verify ensure/release
901 901
         project_id = str(uuid.uuid4())
902 902
         subnet_id = str(uuid.uuid4())
@@ -943,6 +943,8 @@ class TestLoadBalancerHandler(test_base.TestCase):
943 943
         self.skipTest("skipping until generalised annotation handling is "
944 944
                       "implemented")
945 945
 
946
+    @mock.patch('kuryr_kubernetes.controller.handlers.lbaas.'
947
+                'LoadBalancerHandler._sync_lbaas_sgs')
946 948
     @mock.patch('kuryr_kubernetes.controller.drivers.base'
947 949
                 '.PodSubnetsDriver.get_instance')
948 950
     @mock.patch('kuryr_kubernetes.controller.drivers.base'
@@ -950,7 +952,8 @@ class TestLoadBalancerHandler(test_base.TestCase):
950 952
     @mock.patch('kuryr_kubernetes.controller.drivers.base'
951 953
                 '.LBaaSDriver.get_instance')
952 954
     def test_add_new_members_udp(self, m_get_drv_lbaas,
953
-                                 m_get_drv_project, m_get_drv_subnets):
955
+                                 m_get_drv_project, m_get_drv_subnets,
956
+                                 m_sync_lbaas_sgs):
954 957
         project_id = str(uuid.uuid4())
955 958
         subnet_id = str(uuid.uuid4())
956 959
         current_ip = '1.1.1.1'

+ 13
- 8
kuryr_kubernetes/tests/unit/controller/handlers/test_policy.py View File

@@ -115,8 +115,9 @@ class TestPolicyHandler(test_base.TestCase):
115 115
                          handler._drv_project)
116 116
         self.assertEqual(m_get_policy_driver.return_value, handler._drv_policy)
117 117
 
118
+    @mock.patch('kuryr_kubernetes.controller.drivers.utils.get_services')
118 119
     @mock.patch('kuryr_kubernetes.controller.drivers.utils.is_host_network')
119
-    def test_on_present(self, m_host_network):
120
+    def test_on_present(self, m_host_network, m_get_services):
120 121
         modified_pod = mock.sentinel.modified_pod
121 122
         match_pod = mock.sentinel.match_pod
122 123
         m_host_network.return_value = False
@@ -131,7 +132,7 @@ class TestPolicyHandler(test_base.TestCase):
131 132
         sg1 = [mock.sentinel.sg1]
132 133
         sg2 = [mock.sentinel.sg2]
133 134
         self._get_security_groups.side_effect = [sg1, sg2]
134
-        self._handler._get_services.return_value = {'items': []}
135
+        m_get_services.return_value = {'items': []}
135 136
 
136 137
         policy.NetworkPolicyHandler.on_present(self._handler, self._policy)
137 138
         namespaced_pods.assert_not_called()
@@ -147,8 +148,10 @@ class TestPolicyHandler(test_base.TestCase):
147 148
         self._update_vif_sgs.assert_has_calls(calls)
148 149
         self._update_lbaas_sg.assert_not_called()
149 150
 
151
+    @mock.patch('kuryr_kubernetes.controller.drivers.utils.get_services')
150 152
     @mock.patch('kuryr_kubernetes.controller.drivers.utils.is_host_network')
151
-    def test_on_present_without_knps_on_namespace(self, m_host_network):
153
+    def test_on_present_without_knps_on_namespace(self, m_host_network,
154
+                                                  m_get_services):
152 155
         modified_pod = mock.sentinel.modified_pod
153 156
         match_pod = mock.sentinel.match_pod
154 157
         m_host_network.return_value = False
@@ -160,7 +163,7 @@ class TestPolicyHandler(test_base.TestCase):
160 163
         sg2 = [mock.sentinel.sg2]
161 164
         sg3 = [mock.sentinel.sg3]
162 165
         self._get_security_groups.side_effect = [sg2, sg3]
163
-        self._handler._get_services.return_value = {'items': []}
166
+        m_get_services.return_value = {'items': []}
164 167
 
165 168
         policy.NetworkPolicyHandler.on_present(self._handler, self._policy)
166 169
         ensure_nw_policy.assert_called_once_with(self._policy,
@@ -176,8 +179,9 @@ class TestPolicyHandler(test_base.TestCase):
176 179
         self._update_vif_sgs.assert_has_calls(calls)
177 180
         self._update_lbaas_sg.assert_not_called()
178 181
 
182
+    @mock.patch('kuryr_kubernetes.controller.drivers.utils.get_services')
179 183
     @mock.patch('kuryr_kubernetes.controller.drivers.utils.is_host_network')
180
-    def test_on_present_with_services(self, m_host_network):
184
+    def test_on_present_with_services(self, m_host_network, m_get_services):
181 185
         modified_pod = mock.sentinel.modified_pod
182 186
         match_pod = mock.sentinel.match_pod
183 187
         m_host_network.return_value = False
@@ -193,7 +197,7 @@ class TestPolicyHandler(test_base.TestCase):
193 197
         sg2 = [mock.sentinel.sg2]
194 198
         self._get_security_groups.side_effect = [sg1, sg2]
195 199
         service = {'metadata': {'name': 'service-test'}}
196
-        self._handler._get_services.return_value = {'items': [service]}
200
+        m_get_services.return_value = {'items': [service]}
197 201
 
198 202
         policy.NetworkPolicyHandler.on_present(self._handler, self._policy)
199 203
         namespaced_pods.assert_not_called()
@@ -209,8 +213,9 @@ class TestPolicyHandler(test_base.TestCase):
209 213
         self._update_vif_sgs.assert_has_calls(calls)
210 214
         self._update_lbaas_sg.assert_called_once()
211 215
 
216
+    @mock.patch('kuryr_kubernetes.controller.drivers.utils.get_services')
212 217
     @mock.patch('kuryr_kubernetes.controller.drivers.utils.is_host_network')
213
-    def test_on_deleted(self, m_host_network):
218
+    def test_on_deleted(self, m_host_network, m_get_services):
214 219
         namespace_pod = mock.sentinel.namespace_pod
215 220
         match_pod = mock.sentinel.match_pod
216 221
         m_host_network.return_value = False
@@ -222,7 +227,7 @@ class TestPolicyHandler(test_base.TestCase):
222 227
         sg1 = [mock.sentinel.sg1]
223 228
         sg2 = [mock.sentinel.sg2]
224 229
         self._get_security_groups.side_effect = [sg1, sg2]
225
-        self._handler._get_services.return_value = {'items': []}
230
+        m_get_services.return_value = {'items': []}
226 231
         release_nw_policy = self._handler._drv_policy.release_network_policy
227 232
         knp_on_ns = self._handler._drv_policy.knps_on_namespace
228 233
         knp_on_ns.return_value = False

+ 17
- 5
kuryr_kubernetes/tests/unit/controller/handlers/test_vif.py View File

@@ -42,9 +42,11 @@ class TestVIFHandler(test_base.TestCase):
42 42
 
43 43
         self._pod_version = mock.sentinel.pod_version
44 44
         self._pod_link = mock.sentinel.pod_link
45
+        self._pod_namespace = mock.sentinel.namespace
45 46
         self._pod = {
46 47
             'metadata': {'resourceVersion': self._pod_version,
47
-                         'selfLink': self._pod_link},
48
+                         'selfLink': self._pod_link,
49
+                         'namespace': self._pod_namespace},
48 50
             'status': {'phase': k_const.K8S_POD_STATUS_PENDING},
49 51
             'spec': {'hostNetwork': False,
50 52
                      'nodeName': 'hostname'}
@@ -168,11 +170,14 @@ class TestVIFHandler(test_base.TestCase):
168 170
         self._activate_vif.assert_not_called()
169 171
         self._set_pod_state.assert_not_called()
170 172
 
173
+    @mock.patch('kuryr_kubernetes.controller.drivers.utils.get_services')
171 174
     @mock.patch('kuryr_kubernetes.controller.drivers.utils.is_host_network')
172 175
     @mock.patch('kuryr_kubernetes.controller.drivers.utils.get_pod_state')
173
-    def test_on_present_activate(self, m_get_pod_state, m_host_network):
176
+    def test_on_present_activate(self, m_get_pod_state, m_host_network,
177
+                                 m_get_services):
174 178
         m_get_pod_state.return_value = self._state
175 179
         m_host_network.return_value = False
180
+        m_get_services.return_value = {"items": []}
176 181
         self._vif.active = False
177 182
 
178 183
         h_vif.VIFHandler.on_present(self._handler, self._pod)
@@ -239,11 +244,13 @@ class TestVIFHandler(test_base.TestCase):
239 244
                                                   self._security_groups)
240 245
         self._activate_vif.assert_not_called()
241 246
 
247
+    @mock.patch('kuryr_kubernetes.controller.drivers.utils.get_services')
242 248
     @mock.patch('kuryr_kubernetes.controller.drivers.utils.is_host_network')
243 249
     @mock.patch('kuryr_kubernetes.controller.drivers.utils.get_pod_state')
244
-    def test_on_deleted(self, m_get_pod_state, m_host_network):
250
+    def test_on_deleted(self, m_get_pod_state, m_host_network, m_get_services):
245 251
         m_get_pod_state.return_value = self._state
246 252
         m_host_network.return_value = False
253
+        m_get_services.return_value = {"items": []}
247 254
         h_vif.VIFHandler.on_deleted(self._handler, self._pod)
248 255
 
249 256
         m_get_pod_state.assert_called_once_with(self._pod)
@@ -251,14 +258,16 @@ class TestVIFHandler(test_base.TestCase):
251 258
                                                   self._project_id,
252 259
                                                   self._security_groups)
253 260
 
261
+    @mock.patch('kuryr_kubernetes.controller.drivers.utils.get_services')
254 262
     @mock.patch('kuryr_kubernetes.controller.drivers.utils.is_host_network')
255 263
     @mock.patch('kuryr_kubernetes.controller.drivers.utils.get_pod_state')
256 264
     def test_on_deleted_with_additional_vifs(self, m_get_pod_state,
257
-                                             m_host_network):
265
+                                             m_host_network, m_get_services):
258 266
         additional_vif = os_obj.vif.VIFBase()
259 267
         self._state.additional_vifs = {'eth1': additional_vif}
260 268
         m_get_pod_state.return_value = self._state
261 269
         m_host_network.return_value = False
270
+        m_get_services.return_value = {"items": []}
262 271
 
263 272
         h_vif.VIFHandler.on_deleted(self._handler, self._pod)
264 273
 
@@ -280,11 +289,14 @@ class TestVIFHandler(test_base.TestCase):
280 289
         m_get_pod_state.assert_not_called()
281 290
         self._release_vif.assert_not_called()
282 291
 
292
+    @mock.patch('kuryr_kubernetes.controller.drivers.utils.get_services')
283 293
     @mock.patch('kuryr_kubernetes.controller.drivers.utils.is_host_network')
284 294
     @mock.patch('kuryr_kubernetes.controller.drivers.utils.get_pod_state')
285
-    def test_on_deleted_no_annotation(self, m_get_pod_state, m_host_network):
295
+    def test_on_deleted_no_annotation(self, m_get_pod_state, m_host_network,
296
+                                      m_get_services):
286 297
         m_get_pod_state.return_value = None
287 298
         m_host_network.return_value = False
299
+        m_get_services.return_value = {"items": []}
288 300
 
289 301
         h_vif.VIFHandler.on_deleted(self._handler, self._pod)
290 302
 

+ 7
- 0
kuryr_kubernetes/tests/unit/test_utils.py View File

@@ -157,3 +157,10 @@ class TestUtils(test_base.TestCase):
157 157
             self.assertEqual(resp, False)
158 158
 
159 159
             kubernetes.get.assert_called_once()
160
+
161
+    def test_get_endpoints_link(self):
162
+        service = {'metadata': {
163
+            'selfLink': "/api/v1/namespaces/default/services/test"}}
164
+        ret = utils.get_endpoints_link(service)
165
+        expected_link = "/api/v1/namespaces/default/endpoints/test"
166
+        self.assertEqual(expected_link, ret)

+ 40
- 0
kuryr_kubernetes/utils.py View File

@@ -223,3 +223,43 @@ def get_lbaas_spec(service):
223 223
     obj = obj_lbaas.LBaaSServiceSpec.obj_from_primitive(obj_dict)
224 224
     LOG.debug("Got LBaaSServiceSpec from annotation: %r", obj)
225 225
     return obj
226
+
227
+
228
def set_lbaas_spec(service, lbaas_spec):
    """Persist *lbaas_spec* as an annotation on the Service and its Endpoints.

    Serializes the spec (or clears the annotation when ``lbaas_spec`` is
    ``None``) and writes it under the LBaaS-spec annotation key on both
    the Endpoints object and the Service object, so later events can
    retrieve an up-to-date spec.

    :param service: dict representing the K8s Service object
    :param lbaas_spec: LBaaSServiceSpec versioned object, or None to clear
    :raises ResourceNotReady: when the Endpoints object is not created yet
    """
    # TODO(ivc): extract annotation interactions
    if lbaas_spec is None:
        LOG.debug("Removing LBaaSServiceSpec annotation: %r", lbaas_spec)
        annotation = None
    else:
        # Reset change tracking so the primitive dump is clean/minimal.
        lbaas_spec.obj_reset_changes(recursive=True)
        LOG.debug("Setting LBaaSServiceSpec annotation: %r", lbaas_spec)
        annotation = jsonutils.dumps(lbaas_spec.obj_to_primitive(),
                                     sort_keys=True)

    k8s = clients.get_kubernetes_client()
    ep_link = get_endpoints_link(service)

    # Annotate the Endpoints first; if they are missing, signal the caller
    # to retry once the resource shows up.
    try:
        k8s.annotate(ep_link,
                     {constants.K8S_ANNOTATION_LBAAS_SPEC: annotation})
    except exceptions.K8sResourceNotFound:
        raise exceptions.ResourceNotReady(ep_link)
    except exceptions.K8sClientException:
        LOG.debug("Failed to annotate endpoint %r", ep_link)
        raise

    svc_meta = service['metadata']
    k8s.annotate(svc_meta['selfLink'],
                 {constants.K8S_ANNOTATION_LBAAS_SPEC: annotation},
                 resource_version=svc_meta['resourceVersion'])
253
+
254
+
255
def get_endpoints_link(service):
    """Derive the Endpoints selfLink from a Service's selfLink.

    The Endpoints object of a Service lives at the same API path with the
    'services' segment replaced by 'endpoints'.

    :param service: dict representing the K8s Service object
    :returns: selfLink string of the matching Endpoints object
    :raises IntegrityError: when the service link does not end in
        ``.../services/<name>``
    """
    svc_link = service['metadata']['selfLink']
    parts = svc_link.split('/')

    # Sanity-check that this really is a Service link before rewriting it.
    if parts[-2] != 'services':
        raise exceptions.IntegrityError(_(
            "Unsupported service link: %(link)s") % {
            'link': svc_link})

    return "/".join(parts[:-2] + ['endpoints', parts[-1]])

Loading…
Cancel
Save