diff --git a/kuryr_kubernetes/controller/drivers/base.py b/kuryr_kubernetes/controller/drivers/base.py
index ec1a3009c..9f698f508 100644
--- a/kuryr_kubernetes/controller/drivers/base.py
+++ b/kuryr_kubernetes/controller/drivers/base.py
@@ -305,7 +305,7 @@ class LBaaSDriver(DriverBase):
 
     @abc.abstractmethod
     def ensure_loadbalancer(self, endpoints, project_id, subnet_id, ip,
-                            security_groups_ids):
+                            security_groups_ids, service_type):
         """Get or create load balancer.
 
         :param endpoints: dict containing K8s Endpoints object
@@ -314,6 +314,7 @@ class LBaaSDriver(DriverBase):
         :param ip: IP of the load balancer
         :param security_groups_ids: security groups that should be allowed
                                     access to the load balancer
+        :param service_type: K8s service type (ClusterIP or LoadBalancer)
         """
         raise NotImplementedError()
 
diff --git a/kuryr_kubernetes/controller/drivers/lbaasv2.py b/kuryr_kubernetes/controller/drivers/lbaasv2.py
index a7562439e..e8643c9ba 100644
--- a/kuryr_kubernetes/controller/drivers/lbaasv2.py
+++ b/kuryr_kubernetes/controller/drivers/lbaasv2.py
@@ -16,6 +16,8 @@
 import random
 import time
 
+import requests
+
 from neutronclient.common import exceptions as n_exc
 from oslo_log import log as logging
 from oslo_utils import excutils
@@ -35,36 +37,27 @@ class LBaaSv2Driver(base.LBaaSDriver):
     """LBaaSv2Driver implements LBaaSDriver for Neutron LBaaSv2 API."""
 
     def ensure_loadbalancer(self, endpoints, project_id, subnet_id, ip,
-                            security_groups_ids):
+                            security_groups_ids, service_type):
         name = "%(namespace)s/%(name)s" % endpoints['metadata']
-        request = obj_lbaas.LBaaSLoadBalancer(name=name,
-                                              project_id=project_id,
-                                              subnet_id=subnet_id,
-                                              ip=ip)
-        response = self._ensure(request,
-                                self._create_loadbalancer,
+        request = obj_lbaas.LBaaSLoadBalancer(
+            name=name, project_id=project_id, subnet_id=subnet_id, ip=ip,
+            security_groups=security_groups_ids)
+        response = self._ensure(request, self._create_loadbalancer,
                                 self._find_loadbalancer)
         if not response:
             # NOTE(ivc): load balancer was present before 'create', but got
             # deleted externally between 'create' and 'find'
             raise k_exc.ResourceNotReady(request)
 
-        # We only handle SGs for legacy LBaaSv2, Octavia handles it dynamically
-        # according to listener ports.
-        if response.provider == const.NEUTRON_LBAAS_HAPROXY_PROVIDER:
-            vip_port_id = response.port_id
-            neutron = clients.get_neutron_client()
-            try:
-                neutron.update_port(
-                    vip_port_id,
-                    {'port': {'security_groups': security_groups_ids}})
-            except n_exc.NeutronClientException:
-                LOG.exception('Failed to set SG for LBaaS v2 VIP port %s.',
-                              vip_port_id)
-                # NOTE(dulek): `endpoints` arguments on release_loadbalancer()
-                #              is ignored for some reason, so just pass None.
-                self.release_loadbalancer(None, response)
-                raise
+        try:
+            self.ensure_security_groups(endpoints, response,
+                                        security_groups_ids, service_type)
+        except n_exc.NeutronClientException:
+            # NOTE(dulek): `endpoints` arguments on release_loadbalancer()
+            #              is ignored for some reason, so just pass None.
+            self.release_loadbalancer(None, response)
+            raise
+
         return response
 
     def release_loadbalancer(self, endpoints, loadbalancer):
@@ -72,6 +65,68 @@ class LBaaSv2Driver(base.LBaaSDriver):
         self._release(loadbalancer, loadbalancer, neutron.delete_loadbalancer,
                       loadbalancer.id)
 
+        sg_id = self._find_listeners_sg(loadbalancer)
+        if sg_id:
+            try:
+                neutron.delete_security_group(sg_id)
+            except n_exc.NeutronClientException:
+                LOG.exception('Error when deleting loadbalancer security '
+                              'group. Leaving it orphaned.')
+
+    def ensure_security_groups(self, endpoints, loadbalancer,
+                               security_groups_ids, service_type):
+        # We only handle SGs for legacy LBaaSv2, Octavia handles it dynamically
+        # according to listener ports.
+        if loadbalancer.provider == const.NEUTRON_LBAAS_HAPROXY_PROVIDER:
+            neutron = clients.get_neutron_client()
+            sg_id = None
+            try:
+                # NOTE(dulek): We're creating another security group to
+                #              overcome LBaaS v2 limitations and handle SGs
+                #              ourselves.
+                if service_type == 'LoadBalancer':
+                    sg_id = self._find_listeners_sg(loadbalancer)
+                    if not sg_id:
+                        sg = neutron.create_security_group({
+                            'security_group': {
+                                'name': loadbalancer.name,
+                                'project_id': loadbalancer.project_id,
+                            },
+                        })
+                        sg_id = sg['security_group']['id']
+                    loadbalancer.security_groups.append(sg_id)
+
+                neutron.update_port(
+                    loadbalancer.port_id,
+                    {'port': {
+                        'security_groups': loadbalancer.security_groups}})
+            except n_exc.NeutronClientException:
+                LOG.exception('Failed to set SG for LBaaS v2 VIP port %s.',
+                              loadbalancer.port_id)
+                if sg_id:
+                    neutron.delete_security_group(sg_id)
+                raise
+
+    def ensure_security_group_rules(self, endpoints, loadbalancer, listener):
+        sg_id = self._find_listeners_sg(loadbalancer)
+        if sg_id:
+            try:
+                neutron = clients.get_neutron_client()
+                neutron.create_security_group_rule({
+                    'security_group_rule': {
+                        'direction': 'ingress',
+                        'port_range_min': listener.port,
+                        'port_range_max': listener.port,
+                        'protocol': listener.protocol,
+                        'security_group_id': sg_id,
+                        'description': listener.name,
+                    },
+                })
+            except n_exc.NeutronClientException as ex:
+                if ex.status_code != requests.codes.conflict:
+                    LOG.exception('Failed when creating security group rule '
+                                  'for listener %s.', listener.name)
+
     def ensure_listener(self, endpoints, loadbalancer, protocol, port):
         name = "%(namespace)s/%(name)s" % endpoints['metadata']
         name += ":%s:%s" % (protocol, port)
@@ -80,9 +135,13 @@
                                           loadbalancer_id=loadbalancer.id,
                                           protocol=protocol,
                                           port=port)
-        return self._ensure_provisioned(loadbalancer, listener,
-                                        self._create_listener,
-                                        self._find_listener)
+        result = self._ensure_provisioned(loadbalancer, listener,
+                                          self._create_listener,
+                                          self._find_listener)
+
+        self.ensure_security_group_rules(endpoints, loadbalancer, result)
+
+        return result
 
     def release_listener(self, endpoints, loadbalancer, listener):
         neutron = clients.get_neutron_client()
@@ -90,6 +149,17 @@
                       neutron.delete_listener,
                       listener.id)
 
+        sg_id = self._find_listeners_sg(loadbalancer)
+        if sg_id:
+            rules = neutron.list_security_group_rules(
+                security_group_id=sg_id, description=listener.name)
+            rules = rules['security_group_rules']
+            if len(rules):
+                neutron.delete_security_group_rule(rules[0]['id'])
+            else:
+                LOG.warning('Cannot find SG rule for %s (%s) listener.',
+                            listener.id, listener.name)
+
     def ensure_pool(self, endpoints, loadbalancer, listener):
         pool = obj_lbaas.LBaaSPool(name=listener.name,
                                    project_id=loadbalancer.project_id,
@@ -352,3 +422,18 @@
             interval = min(interval, timer.leftover())
             if interval:
                 time.sleep(interval)
+
+    def _find_listeners_sg(self, loadbalancer):
+        neutron = clients.get_neutron_client()
+        try:
+            sgs = neutron.list_security_groups(
+                name=loadbalancer.name, project_id=loadbalancer.project_id)
+            for sg in sgs['security_groups']:
+                sg_id = sg['id']
+                if sg_id in loadbalancer.security_groups:
+                    return sg_id
+        except n_exc.NeutronClientException:
+            LOG.exception('Cannot list security groups for loadbalancer %s.',
+                          loadbalancer.name)
+
+        return None
diff --git a/kuryr_kubernetes/controller/handlers/lbaas.py b/kuryr_kubernetes/controller/handlers/lbaas.py
index 64cf06038..882a139a6 100644
--- a/kuryr_kubernetes/controller/handlers/lbaas.py
+++ b/kuryr_kubernetes/controller/handlers/lbaas.py
@@ -519,7 +519,8 @@ class LoadBalancerHandler(k8s_base.ResourceEventHandler):
                 project_id=lbaas_spec.project_id,
                 subnet_id=lbaas_spec.subnet_id,
                 ip=lbaas_spec.ip,
-                security_groups_ids=lbaas_spec.security_groups_ids)
+                security_groups_ids=lbaas_spec.security_groups_ids,
+                service_type=lbaas_spec.type)
         if lbaas_state.service_pub_ip_info is None:
             service_pub_ip_info = (
                 self._drv_service_pub_ip.acquire_service_pub_ip_info(
diff --git a/kuryr_kubernetes/objects/lbaas.py b/kuryr_kubernetes/objects/lbaas.py
index bd12fd7ee..6f359fef6 100644
--- a/kuryr_kubernetes/objects/lbaas.py
+++ b/kuryr_kubernetes/objects/lbaas.py
@@ -23,7 +23,7 @@ from kuryr_kubernetes.objects import fields as k_fields
 @obj_base.VersionedObjectRegistry.register
 class LBaaSLoadBalancer(k_obj.KuryrK8sObjectBase):
     # Version 1.0: Initial version
-    # Version 1.1: Added provider field
+    # Version 1.1: Added provider field and security_groups field.
     VERSION = '1.1'
 
     fields = {
@@ -34,6 +34,7 @@ class LBaaSLoadBalancer(k_obj.KuryrK8sObjectBase):
         'subnet_id': obj_fields.UUIDField(),
         'port_id': obj_fields.UUIDField(),
         'provider': obj_fields.StringField(),
+        'security_groups': k_fields.ListOfUUIDField(),
     }
 
 
diff --git a/kuryr_kubernetes/tests/unit/controller/drivers/test_lbaasv2.py b/kuryr_kubernetes/tests/unit/controller/drivers/test_lbaasv2.py
index 0170feb20..f8249df62 100644
--- a/kuryr_kubernetes/tests/unit/controller/drivers/test_lbaasv2.py
+++ b/kuryr_kubernetes/tests/unit/controller/drivers/test_lbaasv2.py
@@ -17,7 +17,6 @@ import mock
 
 from neutronclient.common import exceptions as n_exc
 
-from kuryr_kubernetes import constants as const
 from kuryr_kubernetes.controller.drivers import lbaasv2 as d_lbaasv2
 from kuryr_kubernetes import exceptions as k_exc
 from kuryr_kubernetes.objects import lbaas as obj_lbaas
@@ -31,7 +30,8 @@ class TestLBaaSv2Driver(test_base.TestCase):
         cls = d_lbaasv2.LBaaSv2Driver
         m_driver = mock.Mock(spec=d_lbaasv2.LBaaSv2Driver)
         expected_resp = obj_lbaas.LBaaSLoadBalancer(
-            provider='octavia', port_id='D3FA400A-F543-4B91-9CD3-047AF0CE42E2')
+            provider='octavia', port_id='D3FA400A-F543-4B91-9CD3-047AF0CE42E2',
+            security_groups=[])
         namespace = 'TEST_NAMESPACE'
         name = 'TEST_NAME'
         project_id = 'TEST_PROJECT'
@@ -43,7 +43,7 @@
         m_driver._ensure.return_value = expected_resp
         neutron.update_port = mock.Mock()
         resp = cls.ensure_loadbalancer(m_driver, endpoints, project_id,
-                                       subnet_id, ip, sg_ids)
+                                       subnet_id, ip, sg_ids, 'ClusterIP')
         m_driver._ensure.assert_called_once_with(mock.ANY,
                                                  m_driver._create_loadbalancer,
                                                  m_driver._find_loadbalancer)
@@ -55,73 +55,6 @@
         self.assertEqual(expected_resp, resp)
         neutron.update_port.assert_not_called()
 
-    def test_ensure_loadbalancer_sg_updated(self):
-        neutron = self.useFixture(k_fix.MockNeutronClient()).client
-        cls = d_lbaasv2.LBaaSv2Driver
-        m_driver = mock.Mock(spec=d_lbaasv2.LBaaSv2Driver)
-        expected_resp = obj_lbaas.LBaaSLoadBalancer(
-            provider=const.NEUTRON_LBAAS_HAPROXY_PROVIDER,
-            port_id='D3FA400A-F543-4B91-9CD3-047AF0CE42E2')
-        namespace = 'TEST_NAMESPACE'
-        name = 'TEST_NAME'
-        project_id = 'TEST_PROJECT'
-        subnet_id = 'D3FA400A-F543-4B91-9CD3-047AF0CE42D1'
-        ip = '1.2.3.4'
-        sg_ids = ['foo', 'bar']
-        endpoints = {'metadata': {'namespace': namespace, 'name': name}}
-
-        m_driver._ensure.return_value = expected_resp
-        neutron.update_port = mock.Mock()
-        resp = cls.ensure_loadbalancer(m_driver, endpoints, project_id,
-                                       subnet_id, ip, sg_ids)
-        m_driver._ensure.assert_called_once_with(mock.ANY,
-                                                 m_driver._create_loadbalancer,
-                                                 m_driver._find_loadbalancer)
-        req = m_driver._ensure.call_args[0][0]
-        self.assertEqual("%s/%s" % (namespace, name), req.name)
-        self.assertEqual(project_id, req.project_id)
-        self.assertEqual(subnet_id, req.subnet_id)
-        self.assertEqual(ip, str(req.ip))
-        self.assertEqual(expected_resp, resp)
-        neutron.update_port.assert_called_once_with(
-            'D3FA400A-F543-4B91-9CD3-047AF0CE42E2',
-            {'port': {'security_groups': ['foo', 'bar']}})
-
-    def test_ensure_loadbalancer_neutron_error(self):
-        neutron = self.useFixture(k_fix.MockNeutronClient()).client
-        cls = d_lbaasv2.LBaaSv2Driver
-        m_driver = mock.Mock(spec=d_lbaasv2.LBaaSv2Driver)
-        expected_resp = obj_lbaas.LBaaSLoadBalancer(
-            provider=const.NEUTRON_LBAAS_HAPROXY_PROVIDER,
-            port_id='D3FA400A-F543-4B91-9CD3-047AF0CE42E2')
-        namespace = 'TEST_NAMESPACE'
-        name = 'TEST_NAME'
-        project_id = 'TEST_PROJECT'
-        subnet_id = 'D3FA400A-F543-4B91-9CD3-047AF0CE42D1'
-        ip = '1.2.3.4'
-        sg_ids = ['foo', 'bar']
-        endpoints = {'metadata': {'namespace': namespace, 'name': name}}
-
-        m_driver._ensure.return_value = expected_resp
-        neutron.update_port = mock.Mock(
-            side_effect=n_exc.NeutronClientException)
-        self.assertRaises(n_exc.NeutronClientException,
-                          cls.ensure_loadbalancer, m_driver, endpoints,
-                          project_id, subnet_id, ip, sg_ids)
-        m_driver._ensure.assert_called_once_with(mock.ANY,
-                                                 m_driver._create_loadbalancer,
-                                                 m_driver._find_loadbalancer)
-        req = m_driver._ensure.call_args[0][0]
-        self.assertEqual("%s/%s" % (namespace, name), req.name)
-        self.assertEqual(project_id, req.project_id)
-        self.assertEqual(subnet_id, req.subnet_id)
-        self.assertEqual(ip, str(req.ip))
-        neutron.update_port.assert_called_once_with(
-            'D3FA400A-F543-4B91-9CD3-047AF0CE42E2',
-            {'port': {'security_groups': ['foo', 'bar']}})
-        m_driver.release_loadbalancer.assert_called_once_with(None,
-                                                              expected_resp)
-
     def test_ensure_loadbalancer_not_ready(self):
         cls = d_lbaasv2.LBaaSv2Driver
         m_driver = mock.Mock(spec=d_lbaasv2.LBaaSv2Driver)
@@ -137,7 +70,7 @@
         m_driver._ensure.return_value = None
         self.assertRaises(k_exc.ResourceNotReady, cls.ensure_loadbalancer,
                           m_driver, endpoints, project_id, subnet_id, ip,
-                          sg_ids)
+                          sg_ids, 'ClusterIP')
 
     def test_release_loadbalancer(self):
         neutron = self.useFixture(k_fix.MockNeutronClient()).client
@@ -188,6 +121,8 @@
 
     def test_release_listener(self):
         neutron = self.useFixture(k_fix.MockNeutronClient()).client
+        neutron.list_security_group_rules.return_value = {
+            'security_group_rules': []}
         cls = d_lbaasv2.LBaaSv2Driver
         m_driver = mock.Mock(spec=d_lbaasv2.LBaaSv2Driver)
         endpoints = mock.sentinel.endpoints
@@ -291,7 +226,8 @@
         m_driver = mock.Mock(spec=d_lbaasv2.LBaaSv2Driver)
         loadbalancer = obj_lbaas.LBaaSLoadBalancer(
             name='TEST_NAME', project_id='TEST_PROJECT', ip='1.2.3.4',
-            subnet_id='D3FA400A-F543-4B91-9CD3-047AF0CE42D1')
+            subnet_id='D3FA400A-F543-4B91-9CD3-047AF0CE42D1',
+            security_groups=[])
         loadbalancer_id = '00EE9E11-91C2-41CF-8FD4-7970579E5C4C'
         req = {'loadbalancer': {
             'name': loadbalancer.name,
@@ -317,7 +253,7 @@ class TestLBaaSv2Driver(test_base.TestCase):
         loadbalancer = obj_lbaas.LBaaSLoadBalancer(
             name='TEST_NAME', project_id='TEST_PROJECT', ip='1.2.3.4',
             subnet_id='D3FA400A-F543-4B91-9CD3-047AF0CE42D1',
-            provider='haproxy')
+            provider='haproxy', security_groups=[])
         loadbalancer_id = '00EE9E11-91C2-41CF-8FD4-7970579E5C4C'
         resp = {'loadbalancers': [{'id': loadbalancer_id,
                                    'provider': 'haproxy'}]}
diff --git a/kuryr_kubernetes/tests/unit/controller/handlers/test_lbaas.py b/kuryr_kubernetes/tests/unit/controller/handlers/test_lbaas.py
index 794fba1c2..a40dba22a 100644
--- a/kuryr_kubernetes/tests/unit/controller/handlers/test_lbaas.py
+++ b/kuryr_kubernetes/tests/unit/controller/handlers/test_lbaas.py
@@ -331,7 +331,7 @@ class TestLBaaSSpecHandler(test_base.TestCase):
 
 class FakeLBaaSDriver(drv_base.LBaaSDriver):
     def ensure_loadbalancer(self, endpoints, project_id, subnet_id, ip,
-                            security_groups_ids):
+                            security_groups_ids, service_type):
         name = str(ip)
         return obj_lbaas.LBaaSLoadBalancer(name=name,
                                            project_id=project_id,
@@ -510,7 +510,7 @@ class TestLoadBalancerHandler(test_base.TestCase):
         endpoints = mock.sentinel.endpoints
         drv = FakeLBaaSDriver()
         lb = drv.ensure_loadbalancer(
-            endpoints, project_id, subnet_id, vip, None)
+            endpoints, project_id, subnet_id, vip, None, 'ClusterIP')
         listeners = {}
         pools = {}
         members = {}
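Illustrative usage sketch (not part of the patch; assumes a deployment with the
Neutron LBaaS v2 driver configured). It shows how a handler is expected to
drive the updated driver API after this change; the endpoints dict, IP and all
IDs below are hypothetical placeholders, while the method names and arguments
come from the diff above.

    from kuryr_kubernetes.controller.drivers import base as drv_base

    # Resolves to LBaaSv2Driver when that driver is configured.
    driver = drv_base.LBaaSDriver.get_instance()
    endpoints = {'metadata': {'namespace': 'default', 'name': 'my-svc'}}

    # For the legacy 'haproxy' provider and service_type='LoadBalancer',
    # ensure_security_groups() creates a dedicated listeners SG and attaches
    # it to the VIP port; Octavia manages SGs itself, so it is skipped there.
    lb = driver.ensure_loadbalancer(
        endpoints, project_id='<project-uuid>', subnet_id='<subnet-uuid>',
        ip='10.0.0.10', security_groups_ids=['<sg-uuid>'],
        service_type='LoadBalancer')

    # ensure_listener() now also calls ensure_security_group_rules(), which
    # opens the listener port on the listeners SG (conflicts on re-runs are
    # tolerated), and release_listener()/release_loadbalancer() clean the
    # rule and the SG up again.
    listener = driver.ensure_listener(endpoints, lb, protocol='TCP', port=80)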