
Clean up unused methods and unneeded interactions with the k8s API.

This commit removes some method and variable definitions
that are not used. It also reorders or removes interactions
with the k8s API, so that calls are only made when their
results are actually needed.

Change-Id: I424ce0b9a9a8c9fb0940bd3b60690f14140442ed
changes/79/753279/6
Maysa Macedo committed 10 months ago
commit 5b2c399da8
4 changed files (lines changed):
  1. kuryr_kubernetes/controller/handlers/lbaas.py (11)
  2. kuryr_kubernetes/controller/handlers/loadbalancer.py (32)
  3. kuryr_kubernetes/controller/handlers/vif.py (31)
  4. kuryr_kubernetes/tests/unit/controller/handlers/test_vif.py (42)
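
The diffs below apply two recurring patterns: deleting definitions that nothing consumes, and moving k8s API calls behind the cheap early-return guards so that no request is issued for events that get discarded anyway. A minimal sketch of the second pattern, with illustrative helper names (_should_skip and _process are not the actual kuryr code):

    def on_present(self, obj):
        # Cheap, in-memory checks run first...
        if self._should_skip(obj):
            return
        # ...so the round trip to the k8s API server is only paid for
        # events that will actually be processed.
        k8s = clients.get_kubernetes_client()
        crd = k8s.get_loadbalancer_crd(obj)
        self._process(crd)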

kuryr_kubernetes/controller/handlers/lbaas.py (11 lines changed)

@@ -165,7 +165,6 @@ class ServiceHandler(k8s_base.ResourceEventHandler):
         except k_exc.K8sClientException:
             LOG.exception("Exception when creating KuryrLoadBalancer CRD.")
             raise
-        return loadbalancer_crd

     def _update_crd_spec(self, loadbalancer_crd, service):
         svc_name = service['metadata']['name']
@@ -182,7 +181,6 @@ class ServiceHandler(k8s_base.ResourceEventHandler):
         except k_exc.K8sClientException:
             LOG.exception('Error updating kuryrnet CRD %s', loadbalancer_crd)
             raise
-        return loadbalancer_crd

     def _build_kuryrloadbalancer_spec(self, service):
         svc_ip = self._get_service_ip(service)
@@ -250,9 +248,6 @@ class EndpointsHandler(k8s_base.ResourceEventHandler):
     def __init__(self):
         super(EndpointsHandler, self).__init__()
         self._drv_lbaas = drv_base.LBaaSDriver.get_instance()
-        self._drv_pod_project = drv_base.PodProjectDriver.get_instance()
-        self._drv_pod_subnets = drv_base.PodSubnetsDriver.get_instance()
-        self._drv_service_pub_ip = drv_base.ServicePubIpDriver.get_instance()
         # Note(yboaron) LBaaS driver supports 'provider' parameter in
         # Load Balancer creation flow.
         # We need to set the requested load balancer provider
@@ -269,9 +264,6 @@ class EndpointsHandler(k8s_base.ResourceEventHandler):
         if self._move_annotations_to_crd(endpoints):
             return

-        k8s = clients.get_kubernetes_client()
-        loadbalancer_crd = k8s.get_loadbalancer_crd(endpoints)
-
         if (not self._has_pods(endpoints) or
                 k_const.K8S_ANNOTATION_HEADLESS_SERVICE
                 in endpoints['metadata'].get('labels', [])):
@@ -279,6 +271,9 @@ class EndpointsHandler(k8s_base.ResourceEventHandler):
                       endpoints['metadata']['name'])
             return

+        k8s = clients.get_kubernetes_client()
+        loadbalancer_crd = k8s.get_loadbalancer_crd(endpoints)
+
         if loadbalancer_crd is None:
             try:
                 self._create_crd_spec(endpoints)
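
The last two hunks are a single move: the get_loadbalancer_crd call used to run before the _has_pods and headless-service checks, so every skipped Endpoints object still cost one GET; it now runs only once those guards have passed. Dropping the two `return loadbalancer_crd` lines is safe because the call sites never bind the result, roughly (a simplified sketch, not the verbatim handler code):

    if loadbalancer_crd is None:
        self._create_crd_spec(service)      # return value never bound
    else:
        self._update_crd_spec(loadbalancer_crd, service)  # likewise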

kuryr_kubernetes/controller/handlers/loadbalancer.py (32 lines changed)

@@ -76,17 +76,6 @@ class KuryrLoadBalancerHandler(k8s_base.ResourceEventHandler):
                 spec_lb_provider in OCTAVIA_DEFAULT_PROVIDERS):
             self._ensure_release_lbaas(loadbalancer_crd)

-        try:
-            name = loadbalancer_crd['metadata']['name']
-            namespace = loadbalancer_crd['metadata']['namespace']
-            self._get_loadbalancer_crd(name, namespace)
-        except k_exc.K8sResourceNotFound:
-            LOG.debug('KuryrLoadbalancer CRD not found %s',
-                      loadbalancer_crd)
-        except KeyError:
-            LOG.debug('KuryrLoadbalancer CRD not found')
-            raise k_exc.ResourceNotReady(loadbalancer_crd)
-
         if self._sync_lbaas_members(loadbalancer_crd):
             # Note(yboaron) For LoadBalancer services, we should allocate FIP,
             # associate it to LB VIP and update K8S service status
@@ -136,10 +125,6 @@ class KuryrLoadBalancerHandler(k8s_base.ResourceEventHandler):
     def on_finalize(self, loadbalancer_crd):
         LOG.debug("Deleting the loadbalancer CRD")

-        if not loadbalancer_crd:
-            LOG.warning("Load Balancer CRD not present")
-            return
-
         if loadbalancer_crd['status'] != {}:
             # NOTE(ivc): deleting pool deletes its members
             self._drv_lbaas.release_loadbalancer(
@@ -171,8 +156,8 @@ class KuryrLoadBalancerHandler(k8s_base.ResourceEventHandler):
             service = kubernetes.get(f"{k_const.K8S_API_NAMESPACES}"
                                      f"/{namespace}/services/{name}")
         except k_exc.K8sResourceNotFound as ex:
-            LOG.exception("Failed to get service: %s", ex)
-            raise
+            LOG.warning("Failed to get service: %s", ex)
+            return

         LOG.debug('Removing finalizer from service %s',
                   service["metadata"]["name"])
@@ -183,19 +168,6 @@ class KuryrLoadBalancerHandler(k8s_base.ResourceEventHandler):
                           'for %s', service["metadata"]["name"])
             raise

-    def _get_loadbalancer_crd(self, loadbalancer_crd_name, namespace):
-        k8s = clients.get_kubernetes_client()
-        try:
-            loadbalancer_crd = k8s.get('{}/{}/kuryrloadbalancers/{}'.format(
-                k_const.K8S_API_CRD_NAMESPACES, namespace,
-                loadbalancer_crd_name))
-        except k_exc.K8sResourceNotFound:
-            return None
-        except k_exc.K8sClientException:
-            LOG.exception("Kubernetes Client Exception.")
-            raise
-        return loadbalancer_crd
-
     def _sync_lbaas_members(self, loadbalancer_crd):
         changed = False
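
Two cleanups land in this file. The deleted _get_loadbalancer_crd helper (and its only call site, in the first hunk) re-fetched the very CRD the watcher had just delivered to the handler, so the extra GET bought nothing; schematically:

    # before: one extra GET per event, result ignored
    name = loadbalancer_crd['metadata']['name']
    namespace = loadbalancer_crd['metadata']['namespace']
    self._get_loadbalancer_crd(name, namespace)

    # after: act directly on the CRD the watcher delivered
    if self._sync_lbaas_members(loadbalancer_crd):
        ...

The error-handling change follows the same logic from the other side: if the Service is already gone while its finalizer is being removed, there is nothing left to clean up, so the handler now logs a warning and returns instead of raising.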

kuryr_kubernetes/controller/handlers/vif.py (31 lines changed)

@@ -14,13 +14,11 @@
 # under the License.

 from os_vif import objects
-from oslo_config import cfg as oslo_cfg
 from oslo_log import log as logging
 from oslo_serialization import jsonutils

 from kuryr_kubernetes import clients
 from kuryr_kubernetes import constants
-from kuryr_kubernetes.controller.drivers import base as drivers
 from kuryr_kubernetes.controller.drivers import utils as driver_utils
 from kuryr_kubernetes import exceptions as k_exc
 from kuryr_kubernetes.handlers import k8s_base
@@ -46,21 +44,6 @@ class VIFHandler(k8s_base.ResourceEventHandler):

     def __init__(self):
         super(VIFHandler, self).__init__()
-        self._drv_project = drivers.PodProjectDriver.get_instance()
-        self._drv_subnets = drivers.PodSubnetsDriver.get_instance()
-        self._drv_sg = drivers.PodSecurityGroupsDriver.get_instance()
-        # REVISIT(ltomasbo): The VIF Handler should not be aware of the pool
-        # directly. Due to the lack of a mechanism to load and set the
-        # VIFHandler driver, for now it is aware of the pool driver, but this
-        # will be reverted as soon as a mechanism is in place.
-        self._drv_vif_pool = drivers.VIFPoolDriver.get_instance(
-            specific_driver='multi_pool')
-        self._drv_vif_pool.set_vif_driver()
-        self._drv_multi_vif = drivers.MultiVIFDriver.get_enabled_drivers()
-        if self._is_network_policy_enabled():
-            self._drv_lbaas = drivers.LBaaSDriver.get_instance()
-            self._drv_svc_sg = (
-                drivers.ServiceSecurityGroupsDriver.get_instance())

     def on_present(self, pod):
         if (driver_utils.is_host_network(pod) or
@@ -179,20 +162,6 @@ class VIFHandler(k8s_base.ResourceEventHandler):
         except KeyError:
             return False

-    def _update_services(self, services, crd_pod_selectors, project_id):
-        for service in services.get('items'):
-            if not driver_utils.service_matches_affected_pods(
-                    service, crd_pod_selectors):
-                continue
-            sgs = self._drv_svc_sg.get_security_groups(service,
-                                                       project_id)
-            self._drv_lbaas.update_lbaas_sg(service, sgs)
-
-    def _is_network_policy_enabled(self):
-        enabled_handlers = oslo_cfg.CONF.kubernetes.enabled_handlers
-        svc_sg_driver = oslo_cfg.CONF.kubernetes.service_security_groups_driver
-        return ('policy' in enabled_handlers and svc_sg_driver == 'policy')
-
     def _add_kuryrport_crd(self, pod, vifs=None):
         LOG.debug('Adding CRD %s', pod["metadata"]["name"])
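
With _update_services and _is_network_policy_enabled gone and no driver instances left in __init__, VIFHandler no longer touches any LBaaS or security-group machinery; its remaining job is reacting to pod events. A rough skeleton of the class after this diff, inferred from the context lines above (abbreviated, not the full file):

    class VIFHandler(k8s_base.ResourceEventHandler):

        def __init__(self):
            super(VIFHandler, self).__init__()

        def on_present(self, pod):
            # early-outs (host networking, unscheduled pods, ...) first,
            # then the KuryrPort CRD bookkeeping (_add_kuryrport_crd, ...)
            ...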

kuryr_kubernetes/tests/unit/controller/handlers/test_vif.py (42 lines changed)

@@ -100,48 +100,6 @@ class TestVIFHandler(test_base.TestCase):
         self._set_vifs_driver.return_value = mock.Mock(
             spec=drivers.PodVIFDriver)

-    @mock.patch.object(h_vif.VIFHandler, '_is_network_policy_enabled')
-    @mock.patch.object(drivers.MultiVIFDriver, 'get_enabled_drivers')
-    @mock.patch.object(drivers.VIFPoolDriver, 'set_vif_driver')
-    @mock.patch.object(drivers.VIFPoolDriver, 'get_instance')
-    @mock.patch.object(drivers.PodVIFDriver, 'get_instance')
-    @mock.patch.object(drivers.PodSecurityGroupsDriver, 'get_instance')
-    @mock.patch.object(drivers.PodSubnetsDriver, 'get_instance')
-    @mock.patch.object(drivers.PodProjectDriver, 'get_instance')
-    @mock.patch.object(drivers.LBaaSDriver, 'get_instance')
-    @mock.patch.object(drivers.ServiceSecurityGroupsDriver, 'get_instance')
-    def test_init(self, m_get_svc_sg_driver, m_get_lbaas_driver,
-                  m_get_project_driver, m_get_subnets_driver, m_get_sg_driver,
-                  m_get_vif_driver, m_get_vif_pool_driver, m_set_vifs_driver,
-                  m_get_multi_vif_drivers, m_is_network_policy_enabled):
-        project_driver = mock.sentinel.project_driver
-        subnets_driver = mock.sentinel.subnets_driver
-        sg_driver = mock.sentinel.sg_driver
-        vif_driver = mock.sentinel.vif_driver
-        vif_pool_driver = mock.Mock(spec=drivers.VIFPoolDriver)
-        multi_vif_drivers = [mock.MagicMock(spec=drivers.MultiVIFDriver)]
-        lbaas_driver = mock.sentinel.lbaas_driver
-        svc_sg_driver = mock.Mock(spec=drivers.ServiceSecurityGroupsDriver)
-
-        m_get_project_driver.return_value = project_driver
-        m_get_subnets_driver.return_value = subnets_driver
-        m_get_sg_driver.return_value = sg_driver
-        m_get_vif_driver.return_value = vif_driver
-        m_get_vif_pool_driver.return_value = vif_pool_driver
-        m_get_multi_vif_drivers.return_value = multi_vif_drivers
-        m_get_lbaas_driver.return_value = lbaas_driver
-        m_get_svc_sg_driver.return_value = svc_sg_driver
-        m_is_network_policy_enabled.return_value = True
-
-        handler = h_vif.VIFHandler()
-
-        self.assertEqual(project_driver, handler._drv_project)
-        self.assertEqual(subnets_driver, handler._drv_subnets)
-        self.assertEqual(sg_driver, handler._drv_sg)
-        self.assertEqual(vif_pool_driver, handler._drv_vif_pool)
-        self.assertEqual(multi_vif_drivers, handler._drv_multi_vif)
-        self.assertEqual(lbaas_driver, handler._drv_lbaas)
-        self.assertEqual(svc_sg_driver, handler._drv_svc_sg)

     def test_is_pod_scheduled(self):
         self.assertTrue(h_vif.VIFHandler._is_pod_scheduled(self._pod))
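
Since __init__ no longer wires up any drivers, test_init had nothing left to assert and is removed outright rather than rewritten. Were a constructor smoke test still wanted, a minimal form (hypothetical, not part of this commit, and assuming the test module imports k8s_base) could be:

    def test_init(self):
        handler = h_vif.VIFHandler()
        self.assertIsInstance(handler, k8s_base.ResourceEventHandler)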
