Adding support for vif pool driver

Every time a container is created or deleted, Kuryr calls Neutron to
create or remove the port used by that container. To speed up both
container creation and deletion, a vif pool driver is added, making it
possible to perform the Neutron resource management actions before or
after the container creation/deletion process.

This patch introduces the basic structure for the driver to trigger
port creation and cleanup as part of the vif pool management. Note that
it will be followed by extended versions of the drivers that support
the extra ports-pool functionality.
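
As a purely illustrative sketch (not part of this patch), a follow-up
pooling driver built on the VIFPoolDriver interface below could hand out
pre-created ports and recycle released ones. The PooledVIFDriver name and
the single shared list are hypothetical simplifications; real pool drivers
would likely keep separate pools per host and security group:

    from kuryr_kubernetes.controller.drivers import base


    class PooledVIFDriver(base.VIFPoolDriver):
        """Hypothetical pool: reuse pre-created VIFs, fall back to Neutron."""

        def __init__(self):
            self._available = []   # pre-created, currently unused VIFs
            self._drv_vif = None

        def set_vif_driver(self, driver):
            self._drv_vif = driver

        def request_vif(self, pod, project_id, subnets, security_groups):
            if self._available:
                # Reuse a port created ahead of time instead of calling Neutron.
                return self._available.pop()
            return self._drv_vif.request_vif(pod, project_id, subnets,
                                             security_groups)

        def release_vif(self, pod, vif):
            # Keep the port around for the next pod instead of deleting it.
            self._available.append(vif)

        def activate_vif(self, pod, vif):
            self._drv_vif.activate_vif(pod, vif)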

Partially Implements blueprint ports-pool
Change-Id: I441aa8f8ef567414f38d40365e3799de33de5b8c
Luis Tomas Bolivar 2017-02-22 10:18:37 +01:00
parent 07c8879542
commit 961dfdcabb
6 changed files with 81 additions and 10 deletions


@@ -68,6 +68,9 @@ k8s_opts = [
               help=_("The driver that provides LoadBalancers for Kubernetes "
                      "Endpoints"),
               default='lbaasv2'),
    cfg.StrOpt('vif_pool_driver',
               help=_("The driver that manages VIFs pools for Kubernetes Pods."),
               default='noop'),
]
neutron_defaults = [
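
With this option added, the pool driver becomes selectable from kuryr.conf.
A minimal example, assuming k8s_opts is registered under the [kubernetes]
section; 'noop' is the only driver provided by this patch and is also the
default:

    [kubernetes]
    # Delegate straight to the configured pod VIF driver; no pooling yet.
    vif_pool_driver = noop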


@@ -360,3 +360,19 @@ class LBaaSDriver(DriverBase):
        :param member: `LBaaSMember` object
        """
        raise NotImplementedError()


@six.add_metaclass(abc.ABCMeta)
class VIFPoolDriver(PodVIFDriver):
    """Manages Pool of Neutron ports to provide VIFs for Kubernetes Pods."""

    ALIAS = 'vif_pool'

    @abc.abstractmethod
    def set_vif_driver(self, driver):
        """Sets the driver the Pool should use to manage resources

        The driver will be used for acquiring, releasing and updating the
        vif resources.
        """
        raise NotImplementedError()


@ -0,0 +1,33 @@
# Copyright (c) 2017 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from kuryr_kubernetes.controller.drivers import base


class NoopVIFPool(base.VIFPoolDriver):
    """No pool VIFs for Kubernetes Pods"""

    def set_vif_driver(self, driver):
        self._drv_vif = driver

    def request_vif(self, pod, project_id, subnets, security_groups):
        return self._drv_vif.request_vif(pod, project_id, subnets,
                                         security_groups)

    def release_vif(self, pod, vif):
        self._drv_vif.release_vif(pod, vif)

    def activate_vif(self, pod, vif):
        self._drv_vif.activate_vif(pod, vif)


@@ -44,6 +44,12 @@ class VIFHandler(k8s_base.ResourceEventHandler):
        self._drv_subnets = drivers.PodSubnetsDriver.get_instance()
        self._drv_sg = drivers.PodSecurityGroupsDriver.get_instance()
        self._drv_vif = drivers.PodVIFDriver.get_instance()
        # REVISIT(ltomasbo): The VIF Handler should not be aware of the pool
        # directly. Due to the lack of a mechanism to load and set the
        # VIFHandler driver, for now it is aware of the pool driver, but this
        # will be reverted as soon as a mechanism is in place.
        self._drv_vif_pool = drivers.VIFPoolDriver.get_instance()
        self._drv_vif_pool.set_vif_driver(self._drv_vif)

    def on_present(self, pod):
        if self._is_host_network(pod) or not self._is_pending(pod):
@@ -59,17 +65,17 @@ class VIFHandler(k8s_base.ResourceEventHandler):
            project_id = self._drv_project.get_project(pod)
            security_groups = self._drv_sg.get_security_groups(pod, project_id)
            subnets = self._drv_subnets.get_subnets(pod, project_id)
-           vif = self._drv_vif.request_vif(pod, project_id, subnets,
-                                           security_groups)
+           vif = self._drv_vif_pool.request_vif(pod, project_id, subnets,
+                                                security_groups)
            try:
                self._set_vif(pod, vif)
            except k_exc.K8sClientException as ex:
                LOG.debug("Failed to set annotation: %s", ex)
                # FIXME(ivc): improve granularity of K8sClient exceptions:
                # only resourceVersion conflict should be ignored
-               self._drv_vif.release_vif(pod, vif)
+               self._drv_vif_pool.release_vif(pod, vif)
        elif not vif.active:
-           self._drv_vif.activate_vif(pod, vif)
+           self._drv_vif_pool.activate_vif(pod, vif)
            self._set_vif(pod, vif)

    def on_deleted(self, pod):
@@ -79,7 +85,7 @@ class VIFHandler(k8s_base.ResourceEventHandler):
        vif = self._get_vif(pod)

        if vif:
-           self._drv_vif.release_vif(pod, vif)
+           self._drv_vif_pool.release_vif(pod, vif)

    @staticmethod
    def _is_host_network(pod):


@@ -49,13 +49,16 @@ class TestVIFHandler(test_base.TestCase):
        self._handler._drv_subnets = mock.Mock(spec=drivers.PodSubnetsDriver)
        self._handler._drv_sg = mock.Mock(spec=drivers.PodSecurityGroupsDriver)
        self._handler._drv_vif = mock.Mock(spec=drivers.PodVIFDriver)
        self._handler._drv_vif_pool = mock.MagicMock(
            spec=drivers.VIFPoolDriver)
        self._get_project = self._handler._drv_project.get_project
        self._get_subnets = self._handler._drv_subnets.get_subnets
        self._get_security_groups = self._handler._drv_sg.get_security_groups
-       self._request_vif = self._handler._drv_vif.request_vif
-       self._release_vif = self._handler._drv_vif.release_vif
-       self._activate_vif = self._handler._drv_vif.activate_vif
+       self._set_vif_driver = self._handler._drv_vif_pool.set_vif_driver
+       self._request_vif = self._handler._drv_vif_pool.request_vif
+       self._release_vif = self._handler._drv_vif_pool.release_vif
+       self._activate_vif = self._handler._drv_vif_pool.activate_vif
        self._get_vif = self._handler._get_vif
        self._set_vif = self._handler._set_vif
        self._is_host_network = self._handler._is_host_network
@@ -68,28 +71,35 @@ class TestVIFHandler(test_base.TestCase):
        self._get_project.return_value = self._project_id
        self._get_subnets.return_value = self._subnets
        self._get_security_groups.return_value = self._security_groups
        self._set_vif_driver.return_value = mock.Mock(
            spec=drivers.PodVIFDriver)

    @mock.patch.object(drivers.VIFPoolDriver, 'set_vif_driver')
    @mock.patch.object(drivers.VIFPoolDriver, 'get_instance')
    @mock.patch.object(drivers.PodVIFDriver, 'get_instance')
    @mock.patch.object(drivers.PodSecurityGroupsDriver, 'get_instance')
    @mock.patch.object(drivers.PodSubnetsDriver, 'get_instance')
    @mock.patch.object(drivers.PodProjectDriver, 'get_instance')
    def test_init(self, m_get_project_driver, m_get_subnets_driver,
-                 m_get_sg_driver, m_get_vif_driver):
+                 m_get_sg_driver, m_get_vif_driver, m_get_vif_pool_driver,
+                 m_set_vif_driver):
        project_driver = mock.sentinel.project_driver
        subnets_driver = mock.sentinel.subnets_driver
        sg_driver = mock.sentinel.sg_driver
        vif_driver = mock.sentinel.vif_driver
        vif_pool_driver = mock.Mock(spec=drivers.VIFPoolDriver)
        m_get_project_driver.return_value = project_driver
        m_get_subnets_driver.return_value = subnets_driver
        m_get_sg_driver.return_value = sg_driver
        m_get_vif_driver.return_value = vif_driver
        m_get_vif_pool_driver.return_value = vif_pool_driver

        handler = h_vif.VIFHandler()

        self.assertEqual(project_driver, handler._drv_project)
        self.assertEqual(subnets_driver, handler._drv_subnets)
        self.assertEqual(sg_driver, handler._drv_sg)
        self.assertEqual(vif_driver, handler._drv_vif)
        self.assertEqual(vif_pool_driver, handler._drv_vif_pool)

    def test_is_host_network(self):
        self._pod['spec']['hostNetwork'] = True


@@ -64,6 +64,9 @@ kuryr_kubernetes.controller.drivers.pod_vif =
kuryr_kubernetes.controller.drivers.endpoints_lbaas =
    lbaasv2 = kuryr_kubernetes.controller.drivers.lbaasv2:LBaaSv2Driver
kuryr_kubernetes.controller.drivers.vif_pool =
    noop = kuryr_kubernetes.controller.drivers.vif_pool:NoopVIFPool

[files]
packages =
    kuryr_kubernetes
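
The new kuryr_kubernetes.controller.drivers.vif_pool namespace is what lets
the vif_pool_driver option select an implementation by its alias. As a
minimal standalone sketch, such an entry point can be resolved with
stevedore; the in-tree DriverBase.get_instance() performs a similar lookup,
so the snippet below only illustrates the mechanism rather than reproducing
that code:

    from stevedore import driver as stv_driver

    # Load the class registered as 'noop' in the vif_pool namespace
    # (NoopVIFPool from the setup.cfg entry above) and instantiate it.
    manager = stv_driver.DriverManager(
        namespace='kuryr_kubernetes.controller.drivers.vif_pool',
        name='noop',
        invoke_on_load=True)
    vif_pool = manager.driver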