Ensure the SGs in the LB state annotation match the SGs on the LB
As soon as the service is created, it is possible that the backend pods are not yet created, resulting in an lbaas_spec annotation with no security groups defined; consequently, security group rules can end up being removed from the load balancer's security group. This commit ensures the lbaas_state annotation contains the updated security groups. Closes-bug: 1872962 Change-Id: I296d16a627e39e6534ad9c1dff18b4564624d35d
This commit is contained in:
parent
c4e47c169d
commit
20f7d24ed1
@ -163,6 +163,8 @@ class LoadBalancerHandler(k8s_base.ResourceEventHandler):
|
|||||||
self._drv_pod_project = drv_base.PodProjectDriver.get_instance()
|
self._drv_pod_project = drv_base.PodProjectDriver.get_instance()
|
||||||
self._drv_pod_subnets = drv_base.PodSubnetsDriver.get_instance()
|
self._drv_pod_subnets = drv_base.PodSubnetsDriver.get_instance()
|
||||||
self._drv_service_pub_ip = drv_base.ServicePubIpDriver.get_instance()
|
self._drv_service_pub_ip = drv_base.ServicePubIpDriver.get_instance()
|
||||||
|
self._drv_project = drv_base.ServiceProjectDriver.get_instance()
|
||||||
|
self._drv_sg = drv_base.ServiceSecurityGroupsDriver.get_instance()
|
||||||
# Note(yboaron) LBaaS driver supports 'provider' parameter in
|
# Note(yboaron) LBaaS driver supports 'provider' parameter in
|
||||||
# Load Balancer creation flow.
|
# Load Balancer creation flow.
|
||||||
# We need to set the requested load balancer provider
|
# We need to set the requested load balancer provider
|
||||||
@ -281,38 +283,25 @@ class LoadBalancerHandler(k8s_base.ResourceEventHandler):
|
|||||||
|
|
||||||
return changed
|
return changed
|
||||||
|
|
||||||
def _sync_lbaas_sgs(self, endpoints, lbaas_state, lbaas_spec):
|
def _sync_lbaas_sgs(self, endpoints, lbaas_state):
|
||||||
# NOTE (maysams) Need to retrieve the LBaaS Spec again due to
|
|
||||||
# the possibility of it being updated after the LBaaS creation
|
|
||||||
# process has started.
|
|
||||||
svc_link = self._get_service_link(endpoints)
|
svc_link = self._get_service_link(endpoints)
|
||||||
k8s = clients.get_kubernetes_client()
|
k8s = clients.get_kubernetes_client()
|
||||||
service = k8s.get(svc_link)
|
service = k8s.get(svc_link)
|
||||||
lbaas_spec = utils.get_lbaas_spec(service)
|
|
||||||
|
|
||||||
lb = lbaas_state.loadbalancer
|
lb = lbaas_state.loadbalancer
|
||||||
default_sgs = config.CONF.neutron_defaults.pod_security_groups
|
# NOTE(maysams) It's possible that while the service annotation
|
||||||
# NOTE(maysams) As the endpoint and svc are annotated with the
|
# is added the backend pods on that service are not yet created
|
||||||
# 'lbaas_spec' in two separate k8s calls, it's possible that
|
# resulting in no security groups retrieved for the service.
|
||||||
# the endpoint got annotated and the svc haven't due to controller
|
# Let's retrieve again to ensure is updated.
|
||||||
# restarts. For this case, a resourceNotReady exception is raised
|
project_id = self._drv_project.get_project(service)
|
||||||
# till the svc gets annotated with a 'lbaas_spec'.
|
lb_sgs = self._drv_sg.get_security_groups(service, project_id)
|
||||||
if lbaas_spec:
|
lb.security_groups = lb_sgs
|
||||||
lbaas_spec_sgs = lbaas_spec.security_groups_ids
|
|
||||||
else:
|
|
||||||
raise k_exc.ResourceNotReady(svc_link)
|
|
||||||
if lb.security_groups and lb.security_groups != lbaas_spec_sgs:
|
|
||||||
sgs = [lb_sg for lb_sg in lb.security_groups
|
|
||||||
if lb_sg not in default_sgs]
|
|
||||||
if lbaas_spec_sgs != default_sgs:
|
|
||||||
sgs.extend(lbaas_spec_sgs)
|
|
||||||
lb.security_groups = sgs
|
|
||||||
|
|
||||||
def _add_new_members(self, endpoints, lbaas_state, lbaas_spec):
|
def _add_new_members(self, endpoints, lbaas_state, lbaas_spec):
|
||||||
changed = False
|
changed = False
|
||||||
|
|
||||||
try:
|
try:
|
||||||
self._sync_lbaas_sgs(endpoints, lbaas_state, lbaas_spec)
|
self._sync_lbaas_sgs(endpoints, lbaas_state)
|
||||||
except k_exc.K8sResourceNotFound:
|
except k_exc.K8sResourceNotFound:
|
||||||
LOG.debug("The svc has been deleted while processing the endpoints"
|
LOG.debug("The svc has been deleted while processing the endpoints"
|
||||||
" update. No need to add new members.")
|
" update. No need to add new members.")
|
||||||
|
@ -359,6 +359,10 @@ class FakeLBaaSDriver(drv_base.LBaaSDriver):
|
|||||||
|
|
||||||
class TestLoadBalancerHandler(test_base.TestCase):
|
class TestLoadBalancerHandler(test_base.TestCase):
|
||||||
|
|
||||||
|
@mock.patch('kuryr_kubernetes.controller.drivers.base.'
|
||||||
|
'ServiceProjectDriver.get_instance')
|
||||||
|
@mock.patch('kuryr_kubernetes.controller.drivers.base.'
|
||||||
|
'ServiceSecurityGroupsDriver.get_instance')
|
||||||
@mock.patch('kuryr_kubernetes.controller.handlers.lbaas'
|
@mock.patch('kuryr_kubernetes.controller.handlers.lbaas'
|
||||||
'.LoadBalancerHandler._cleanup_leftover_lbaas')
|
'.LoadBalancerHandler._cleanup_leftover_lbaas')
|
||||||
@mock.patch('kuryr_kubernetes.config.CONF')
|
@mock.patch('kuryr_kubernetes.config.CONF')
|
||||||
@ -372,11 +376,14 @@ class TestLoadBalancerHandler(test_base.TestCase):
|
|||||||
'.LBaaSDriver.get_instance')
|
'.LBaaSDriver.get_instance')
|
||||||
def test_init(self, m_get_drv_lbaas, m_get_drv_project,
|
def test_init(self, m_get_drv_lbaas, m_get_drv_project,
|
||||||
m_get_drv_subnets, m_get_drv_service_pub_ip, m_cfg,
|
m_get_drv_subnets, m_get_drv_service_pub_ip, m_cfg,
|
||||||
m_cleanup_leftover_lbaas):
|
m_cleanup_leftover_lbaas,
|
||||||
|
m_get_svc_sg_drv, m_get_svc_drv_project):
|
||||||
m_get_drv_lbaas.return_value = mock.sentinel.drv_lbaas
|
m_get_drv_lbaas.return_value = mock.sentinel.drv_lbaas
|
||||||
m_get_drv_project.return_value = mock.sentinel.drv_project
|
m_get_drv_project.return_value = mock.sentinel.drv_project
|
||||||
m_get_drv_subnets.return_value = mock.sentinel.drv_subnets
|
m_get_drv_subnets.return_value = mock.sentinel.drv_subnets
|
||||||
m_get_drv_service_pub_ip.return_value = mock.sentinel.drv_lb_ip
|
m_get_drv_service_pub_ip.return_value = mock.sentinel.drv_lb_ip
|
||||||
|
m_get_svc_drv_project.return_value = mock.sentinel.drv_svc_project
|
||||||
|
m_get_svc_sg_drv.return_value = mock.sentinel.drv_sg
|
||||||
m_cfg.kubernetes.endpoints_driver_octavia_provider = 'default'
|
m_cfg.kubernetes.endpoints_driver_octavia_provider = 'default'
|
||||||
handler = h_lbaas.LoadBalancerHandler()
|
handler = h_lbaas.LoadBalancerHandler()
|
||||||
|
|
||||||
@ -384,8 +391,14 @@ class TestLoadBalancerHandler(test_base.TestCase):
|
|||||||
self.assertEqual(mock.sentinel.drv_project, handler._drv_pod_project)
|
self.assertEqual(mock.sentinel.drv_project, handler._drv_pod_project)
|
||||||
self.assertEqual(mock.sentinel.drv_subnets, handler._drv_pod_subnets)
|
self.assertEqual(mock.sentinel.drv_subnets, handler._drv_pod_subnets)
|
||||||
self.assertEqual(mock.sentinel.drv_lb_ip, handler._drv_service_pub_ip)
|
self.assertEqual(mock.sentinel.drv_lb_ip, handler._drv_service_pub_ip)
|
||||||
|
self.assertEqual(mock.sentinel.drv_svc_project, handler._drv_project)
|
||||||
|
self.assertEqual(mock.sentinel.drv_sg, handler._drv_sg)
|
||||||
self.assertIsNone(handler._lb_provider)
|
self.assertIsNone(handler._lb_provider)
|
||||||
|
|
||||||
|
@mock.patch('kuryr_kubernetes.controller.drivers.base.'
|
||||||
|
'ServiceProjectDriver.get_instance')
|
||||||
|
@mock.patch('kuryr_kubernetes.controller.drivers.base.'
|
||||||
|
'ServiceSecurityGroupsDriver.get_instance')
|
||||||
@mock.patch('kuryr_kubernetes.controller.handlers.lbaas'
|
@mock.patch('kuryr_kubernetes.controller.handlers.lbaas'
|
||||||
'.LoadBalancerHandler._cleanup_leftover_lbaas')
|
'.LoadBalancerHandler._cleanup_leftover_lbaas')
|
||||||
@mock.patch('kuryr_kubernetes.config.CONF')
|
@mock.patch('kuryr_kubernetes.config.CONF')
|
||||||
@ -399,11 +412,14 @@ class TestLoadBalancerHandler(test_base.TestCase):
|
|||||||
'.LBaaSDriver.get_instance')
|
'.LBaaSDriver.get_instance')
|
||||||
def test_init_provider_ovn(self, m_get_drv_lbaas, m_get_drv_project,
|
def test_init_provider_ovn(self, m_get_drv_lbaas, m_get_drv_project,
|
||||||
m_get_drv_subnets, m_get_drv_service_pub_ip,
|
m_get_drv_subnets, m_get_drv_service_pub_ip,
|
||||||
m_cfg, m_cleanup_leftover_lbaas):
|
m_cfg, m_cleanup_leftover_lbaas,
|
||||||
|
m_get_svc_sg_drv, m_get_svc_drv_project):
|
||||||
m_get_drv_lbaas.return_value = mock.sentinel.drv_lbaas
|
m_get_drv_lbaas.return_value = mock.sentinel.drv_lbaas
|
||||||
m_get_drv_project.return_value = mock.sentinel.drv_project
|
m_get_drv_project.return_value = mock.sentinel.drv_project
|
||||||
m_get_drv_subnets.return_value = mock.sentinel.drv_subnets
|
m_get_drv_subnets.return_value = mock.sentinel.drv_subnets
|
||||||
m_get_drv_service_pub_ip.return_value = mock.sentinel.drv_lb_ip
|
m_get_drv_service_pub_ip.return_value = mock.sentinel.drv_lb_ip
|
||||||
|
m_get_svc_drv_project.return_value = mock.sentinel.drv_svc_project
|
||||||
|
m_get_svc_sg_drv.return_value = mock.sentinel.drv_sg
|
||||||
m_cfg.kubernetes.endpoints_driver_octavia_provider = 'ovn'
|
m_cfg.kubernetes.endpoints_driver_octavia_provider = 'ovn'
|
||||||
handler = h_lbaas.LoadBalancerHandler()
|
handler = h_lbaas.LoadBalancerHandler()
|
||||||
|
|
||||||
@ -411,6 +427,8 @@ class TestLoadBalancerHandler(test_base.TestCase):
|
|||||||
self.assertEqual(mock.sentinel.drv_project, handler._drv_pod_project)
|
self.assertEqual(mock.sentinel.drv_project, handler._drv_pod_project)
|
||||||
self.assertEqual(mock.sentinel.drv_subnets, handler._drv_pod_subnets)
|
self.assertEqual(mock.sentinel.drv_subnets, handler._drv_pod_subnets)
|
||||||
self.assertEqual(mock.sentinel.drv_lb_ip, handler._drv_service_pub_ip)
|
self.assertEqual(mock.sentinel.drv_lb_ip, handler._drv_service_pub_ip)
|
||||||
|
self.assertEqual(mock.sentinel.drv_svc_project, handler._drv_project)
|
||||||
|
self.assertEqual(mock.sentinel.drv_sg, handler._drv_sg)
|
||||||
self.assertEqual('ovn', handler._lb_provider)
|
self.assertEqual('ovn', handler._lb_provider)
|
||||||
|
|
||||||
@mock.patch('kuryr_kubernetes.utils.get_lbaas_spec')
|
@mock.patch('kuryr_kubernetes.utils.get_lbaas_spec')
|
||||||
|
Loading…
x
Reference in New Issue
Block a user