Generic vif driver extension to enable port reuse

In order to speed up container creation and deletion, a new generic-vif
driver is proposed that builds upon the port pool driver to ensure that
already-created ports can be reused in the future.

Note this removes the neutron.create_port call from the container creation
process. As measured in the performance evaluation in [0], this call takes
around 2 seconds on average.

[0] https://blog.russellbryant.net/2016/12/19/comparing-openstack-neutron-ml2ovs-and-ovn-control-plane/

Partially Implements blueprint ports-pool
Change-Id: Ib127735570470850dde452c453eac3d5545f7a43
Luis Tomas Bolivar 2017-02-22 11:10:05 +01:00
parent 3c7afdc733
commit e01b4d559b
7 changed files with 447 additions and 6 deletions
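
Before the per-file changes, a minimal standalone sketch of the reuse flow
the driver introduces (names are illustrative, not the driver's actual API):

import collections

# Ready-to-use Neutron port IDs, keyed by (host IP, project, security groups).
_pools = collections.defaultdict(collections.deque)

def request_port(pool_key, create_port):
    """Return a pooled port if one exists; otherwise fall back to Neutron."""
    try:
        # Fast path: reusing a pooled port avoids the ~2 s neutron.create_port.
        return _pools[pool_key].popleft()
    except IndexError:
        return create_port()  # slow path: create a brand-new port

def release_port(pool_key, port_id):
    """Instead of deleting the port, park it for the next pod."""
    _pools[pool_key].append(port_id)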


@@ -220,7 +220,7 @@ class PodVIFDriver(DriverBase):
        raise NotImplementedError()

    @abc.abstractmethod
-    def release_vif(self, pod, vif):
+    def release_vif(self, pod, vif, project_id=None, security_groups=None):
        """Unlinks Neutron port corresponding to VIF object from pod.

        Implementing drivers must ensure the port is either deleted or made
@@ -228,6 +228,10 @@ class PodVIFDriver(DriverBase):

        :param pod: dict containing Kubernetes Pod object
        :param vif: VIF object as returned by `PodVIFDriver.request_vif`
        :param project_id: OpenStack project ID
        :param security_groups: list containing security groups'
                                IDs as returned by
                                `PodSecurityGroupsDriver.get_security_groups`
        """
        raise NotImplementedError()
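
For illustration, a hedged sketch of a non-pooling driver honoring the widened
signature (the class name is hypothetical and only release_vif is fleshed out;
the other abstract methods are stubbed):

from kuryr_kubernetes import clients
from kuryr_kubernetes.controller.drivers import base

class DeletingPodVIFDriver(base.PodVIFDriver):
    """Hypothetical driver that always deletes ports on release."""

    def request_vif(self, pod, project_id, subnets, security_groups):
        raise NotImplementedError()  # elided; not relevant here

    def activate_vif(self, pod, vif):
        raise NotImplementedError()  # elided; not relevant here

    def release_vif(self, pod, vif, project_id=None, security_groups=None):
        # Non-pooling drivers can ignore the two new optional arguments;
        # pooling drivers use them to rebuild the pool key for the port.
        neutron = clients.get_neutron_client()
        neutron.delete_port(vif.id)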


@@ -12,8 +12,27 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import collections

from kuryr.lib._i18n import _
from neutronclient.common import exceptions as n_exc
from oslo_config import cfg as oslo_cfg
from oslo_log import log as logging

from kuryr_kubernetes import clients
from kuryr_kubernetes.controller.drivers import base
from kuryr_kubernetes import exceptions

LOG = logging.getLogger(__name__)

# Moved out from neutron_default group
vif_pool_driver_opts = [
    oslo_cfg.IntOpt('ports_pool_max',
                    help=_("Set a maximum number of ports per pool. "
                           "0 to disable"),
                    default=0),
]

oslo_cfg.CONF.register_opts(vif_pool_driver_opts, "vif_pool")


class NoopVIFPool(base.VIFPoolDriver):
@@ -26,8 +45,123 @@ class NoopVIFPool(base.VIFPoolDriver):
        return self._drv_vif.request_vif(pod, project_id, subnets,
                                         security_groups)

-    def release_vif(self, pod, vif):
+    def release_vif(self, pod, vif, *argv):
        self._drv_vif.release_vif(pod, vif)

    def activate_vif(self, pod, vif):
        self._drv_vif.activate_vif(pod, vif)


class GenericVIFPool(base.VIFPoolDriver):
    """Manages VIFs for Bare Metal Kubernetes Pods.

    In order to handle the pools of ports, a few dicts are used:

    _available_ports_pools is a dictionary with the ready-to-use Neutron
    ports information. The keys are the 'pool_key' and the values the
    'port_id's.

    _existing_vifs is a dictionary containing the port vif objects. The keys
    are the 'port_id' and the values are the vif objects.

    _recyclable_ports is a dictionary with the Neutron ports to be
    recycled. The keys are the 'port_id' and their values are the 'pool_key'.

    The following driver configuration options exist:

    - ports_pool_max: it specifies how many ports can be kept at each pool.
      If the pool has already reached the specified size, the ports to be
      recycled are deleted instead. If set to 0, the limit is disabled and
      ports are always recycled.
    """

    _available_ports_pools = collections.defaultdict(collections.deque)
    _existing_vifs = collections.defaultdict(collections.defaultdict)
    _recyclable_ports = collections.defaultdict(collections.defaultdict)

    def set_vif_driver(self, driver):
        self._drv_vif = driver

    def request_vif(self, pod, project_id, subnets, security_groups):
        try:
            host_addr = pod['status']['hostIP']
        except KeyError:
            LOG.error("Pod has not been scheduled yet.")
            raise

        pool_key = (host_addr, project_id, tuple(security_groups))

        try:
            return self._get_port_from_pool(pool_key, pod)
        except exceptions.ResourceNotReady:
            LOG.error("Ports pool does not have available ports!")
            # TODO(ltomasbo): This is to be removed in the next patch when the
            # pre-creation of several ports in a bulk request is included.
            vif = self._drv_vif.request_vif(pod, project_id, subnets,
                                            security_groups)
            self._existing_vifs[vif.id] = vif
            return vif

    def _get_port_from_pool(self, pool_key, pod):
        try:
            port_id = self._available_ports_pools[pool_key].popleft()
        except IndexError:
            raise exceptions.ResourceNotReady(pod)
        neutron = clients.get_neutron_client()
        neutron.update_port(port_id,
            {
                "port": {
                    'name': pod['metadata']['name'],
                    'device_id': pod['metadata']['uid']
                }
            })
        return self._existing_vifs[port_id]

    def release_vif(self, pod, vif, project_id, security_groups):
        host_addr = pod['status']['hostIP']
        pool_key = (host_addr, project_id, tuple(security_groups))

        self._recyclable_ports[vif.id] = pool_key
        # TODO(ltomasbo): Make the port update in another thread
        self._return_ports_to_pool()

    def activate_vif(self, pod, vif):
        self._drv_vif.activate_vif(pod, vif)

    def _get_pool_size(self, pool_key=None):
        return len(self._available_ports_pools.get(pool_key, []))

    def _return_ports_to_pool(self):
        """Recycle ports to be reused by future pods.

        For each port in the recyclable_ports dict it reapplies the
        security groups and changes the port name to 'available-port'.
        Upon successful port update, the port_id is included in the dict
        with the available ports.

        If a maximum number of ports per pool is set, the port will be
        deleted if the maximum has already been reached.
        """
        neutron = clients.get_neutron_client()
        for port_id, pool_key in self._recyclable_ports.copy().items():
            if (not oslo_cfg.CONF.vif_pool.ports_pool_max or
                    self._get_pool_size(pool_key) <
                    oslo_cfg.CONF.vif_pool.ports_pool_max):
                try:
                    neutron.update_port(port_id,
                        {
                            "port": {
                                'name': 'available-port',
                                'device_id': '',
                                'security_groups': list(pool_key[2])
                            }
                        })
                except n_exc.NeutronClientException:
                    LOG.warning("Error preparing port %s to be reused; "
                                "putting it back on the cleanable pool.",
                                port_id)
                    continue
                self._available_ports_pools.setdefault(pool_key, []).append(
                    port_id)
            else:
                try:
                    del self._existing_vifs[port_id]
                    neutron.delete_port(port_id)
                except n_exc.PortNotFoundClient:
                    LOG.debug('Unable to release port %s as it no longer '
                              'exists.', port_id)
                except KeyError:
                    LOG.debug('Port %s is not in the ports list.', port_id)
            del self._recyclable_ports[port_id]
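
The recycle-or-delete decision above reduces to a small predicate; here is a
sketch of its semantics (helper name illustrative; 0 disables the cap):

def should_recycle(pool_size, ports_pool_max):
    # ports_pool_max == 0 disables the limit, so ports are always recycled;
    # otherwise a port is recycled only while its pool is below the cap.
    return ports_pool_max == 0 or pool_size < ports_pool_max

assert should_recycle(pool_size=5, ports_pool_max=0)        # limit disabled
assert should_recycle(pool_size=5, ports_pool_max=10)       # below the cap
assert not should_recycle(pool_size=10, ports_pool_max=10)  # full: delete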


@@ -73,7 +73,8 @@ class VIFHandler(k8s_base.ResourceEventHandler):
                LOG.debug("Failed to set annotation: %s", ex)
                # FIXME(ivc): improve granularity of K8sClient exceptions:
                # only resourceVersion conflict should be ignored
-                self._drv_vif_pool.release_vif(pod, vif)
+                self._drv_vif_pool.release_vif(pod, vif, project_id,
+                                               security_groups)
        elif not vif.active:
            self._drv_vif_pool.activate_vif(pod, vif)
            self._set_vif(pod, vif)
@@ -85,7 +86,10 @@ class VIFHandler(k8s_base.ResourceEventHandler):
        vif = self._get_vif(pod)

        if vif:
-            self._drv_vif_pool.release_vif(pod, vif)
+            project_id = self._drv_project.get_project(pod)
+            security_groups = self._drv_sg.get_security_groups(pod, project_id)
+            self._drv_vif_pool.release_vif(pod, vif, project_id,
+                                           security_groups)

    @staticmethod
    def _is_host_network(pod):


@@ -16,12 +16,14 @@ from oslo_log import _options

from kuryr.lib import opts as lib_opts
from kuryr_kubernetes import config
from kuryr_kubernetes.controller.drivers import nested_vif
from kuryr_kubernetes.controller.drivers import vif_pool

_kuryr_k8s_opts = [
    ('kubernetes', config.k8s_opts),
    ('kuryr-kubernetes', config.kuryr_k8s_opts),
    ('neutron_defaults', config.neutron_defaults),
    ('pod_vif_nested', nested_vif.nested_vif_driver_opts),
    ('vif_pool', vif_pool.vif_pool_driver_opts),
]
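
With the option registered under the vif_pool group, the pool cap can be
tuned in kuryr.conf; an illustrative excerpt (the value is an example):

[vif_pool]
# Keep at most 10 recycled ports per (host, project, security groups) pool;
# ports returned beyond that are deleted. 0 (the default) disables the cap.
ports_pool_max = 10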


@@ -0,0 +1,292 @@
# Copyright 2017 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import collections

import ddt
import mock
from neutronclient.common import exceptions as n_exc
from oslo_config import cfg as oslo_cfg
from os_vif.objects import vif as osv_vif

from kuryr_kubernetes.controller.drivers import neutron_vif
from kuryr_kubernetes.controller.drivers import vif_pool
from kuryr_kubernetes import exceptions
from kuryr_kubernetes.tests import base as test_base
from kuryr_kubernetes.tests.unit import kuryr_fixtures as k_fix


@ddt.ddt
class GenericVIFPool(test_base.TestCase):

    def _get_pod_obj(self):
        return {
            'status': {
                'qosClass': 'BestEffort',
                'hostIP': '192.168.1.2',
            },
            'kind': 'Pod',
            'spec': {
                'schedulerName': 'default-scheduler',
                'containers': [{
                    'name': 'busybox',
                    'image': 'busybox',
                    'resources': {}
                }],
                'nodeName': 'kuryr-devstack'
            },
            'metadata': {
                'name': 'busybox-sleep1',
                'namespace': 'default',
                'resourceVersion': '53808',
                'selfLink': '/api/v1/namespaces/default/pods/busybox-sleep1',
                'uid': '452176db-4a85-11e7-80bd-fa163e29dbbb'
            }}

    def test_request_vif(self):
        cls = vif_pool.GenericVIFPool
        m_driver = mock.MagicMock(spec=cls)

        pod = self._get_pod_obj()
        project_id = mock.sentinel.project_id
        subnets = mock.sentinel.subnets
        security_groups = [mock.sentinel.security_groups]
        vif = mock.sentinel.vif

        m_driver._get_port_from_pool.return_value = vif

        self.assertEqual(vif, cls.request_vif(m_driver, pod, project_id,
                                              subnets, security_groups))

    def test_request_vif_empty_pool(self):
        cls = vif_pool.GenericVIFPool
        m_driver = mock.MagicMock(spec=cls)
        m_driver._existing_vifs = {}
        cls_vif_driver = neutron_vif.NeutronPodVIFDriver
        vif_driver = mock.MagicMock(spec=cls_vif_driver)
        m_driver._drv_vif = vif_driver

        pod = self._get_pod_obj()
        project_id = mock.sentinel.project_id
        subnets = mock.sentinel.subnets
        security_groups = [mock.sentinel.security_groups]
        vif = osv_vif.VIFOpenVSwitch(id='0fa0e837-d34e-4580-a6c4-04f5f607d93e')

        m_driver._get_port_from_pool.side_effect = exceptions.ResourceNotReady(
            pod)
        m_driver._drv_vif.request_vif.return_value = vif

        self.assertEqual(vif, cls.request_vif(m_driver, pod, project_id,
                                              subnets, security_groups))
        m_driver._drv_vif.request_vif.assert_called_with(
            pod, project_id, subnets, security_groups)

    def test_request_vif_pod_without_host_id(self):
        cls = vif_pool.GenericVIFPool
        m_driver = mock.MagicMock(spec=cls)

        pod = self._get_pod_obj()
        del pod['status']['hostIP']
        project_id = mock.sentinel.project_id
        subnets = mock.sentinel.subnets
        security_groups = [mock.sentinel.security_groups]

        self.assertRaises(KeyError, cls.request_vif, m_driver, pod, project_id,
                          subnets, security_groups)

    def test__get_port_from_pool(self):
        cls = vif_pool.GenericVIFPool
        m_driver = mock.MagicMock(spec=cls)
        neutron = self.useFixture(k_fix.MockNeutronClient()).client

        pool_key = mock.sentinel.pool_key
        port_id = mock.sentinel.port_id
        port = mock.sentinel.port
        pod = self._get_pod_obj()

        m_driver._available_ports_pools = {
            pool_key: collections.deque([port_id])}
        m_driver._existing_vifs = {port_id: port}

        self.assertEqual(port, cls._get_port_from_pool(
            m_driver, pool_key, pod))

        neutron.update_port.assert_called_once_with(port_id,
            {
                "port": {
                    'name': pod['metadata']['name'],
                    'device_id': pod['metadata']['uid']
                }
            })

    def test__get_port_from_pool_empty_pool(self):
        cls = vif_pool.GenericVIFPool
        m_driver = mock.MagicMock(spec=cls)
        neutron = self.useFixture(k_fix.MockNeutronClient()).client

        pod = self._get_pod_obj()
        pool_key = mock.sentinel.pool_key

        m_driver._available_ports_pools = {pool_key: collections.deque([])}

        self.assertRaises(exceptions.ResourceNotReady, cls._get_port_from_pool,
                          m_driver, pool_key, pod)

        neutron.update_port.assert_not_called()

    def test_release_vif(self):
        cls = vif_pool.GenericVIFPool
        m_driver = mock.MagicMock(spec=cls)
        m_driver._recyclable_ports = {}

        pod = self._get_pod_obj()
        project_id = mock.sentinel.project_id
        security_groups = [mock.sentinel.security_groups]
        vif = osv_vif.VIFOpenVSwitch(id='0fa0e837-d34e-4580-a6c4-04f5f607d93e')

        m_driver._return_ports_to_pool.return_value = None

        cls.release_vif(m_driver, pod, vif, project_id, security_groups)

        m_driver._return_ports_to_pool.assert_called_once()

    @ddt.data((0), (10))
    def test__return_ports_to_pool(self, max_pool):
        cls = vif_pool.GenericVIFPool
        m_driver = mock.MagicMock(spec=cls)
        neutron = self.useFixture(k_fix.MockNeutronClient()).client

        pool_key = ('node_ip', 'project_id', tuple(['security_group']))
        port_id = mock.sentinel.port_id
        pool_length = 5

        m_driver._recyclable_ports = {port_id: pool_key}
        m_driver._available_ports_pools = {}
        oslo_cfg.CONF.set_override('ports_pool_max',
                                   max_pool,
                                   group='vif_pool')
        m_driver._get_pool_size.return_value = pool_length

        cls._return_ports_to_pool(m_driver)

        neutron.update_port.assert_called_once_with(port_id,
            {
                "port": {
                    'name': 'available-port',
                    'device_id': '',
                    'security_groups': ['security_group']
                }
            })
        neutron.delete_port.assert_not_called()

    def test__return_ports_to_pool_delete_port(self):
        cls = vif_pool.GenericVIFPool
        m_driver = mock.MagicMock(spec=cls)
        neutron = self.useFixture(k_fix.MockNeutronClient()).client

        pool_key = ('node_ip', 'project_id', tuple(['security_group']))
        port_id = mock.sentinel.port_id
        pool_length = 10
        vif = mock.sentinel.vif

        m_driver._recyclable_ports = {port_id: pool_key}
        m_driver._available_ports_pools = {}
        m_driver._existing_vifs = {port_id: vif}
        oslo_cfg.CONF.set_override('ports_pool_max',
                                   10,
                                   group='vif_pool')
        m_driver._get_pool_size.return_value = pool_length

        cls._return_ports_to_pool(m_driver)

        neutron.update_port.assert_not_called()
        neutron.delete_port.assert_called_once_with(port_id)

    def test__return_ports_to_pool_update_exception(self):
        cls = vif_pool.GenericVIFPool
        m_driver = mock.MagicMock(spec=cls)
        neutron = self.useFixture(k_fix.MockNeutronClient()).client

        pool_key = ('node_ip', 'project_id', tuple(['security_group']))
        port_id = mock.sentinel.port_id
        pool_length = 5

        m_driver._recyclable_ports = {port_id: pool_key}
        m_driver._available_ports_pools = {}
        oslo_cfg.CONF.set_override('ports_pool_max',
                                   0,
                                   group='vif_pool')
        m_driver._get_pool_size.return_value = pool_length
        neutron.update_port.side_effect = n_exc.NeutronClientException

        cls._return_ports_to_pool(m_driver)

        neutron.update_port.assert_called_once_with(port_id,
            {
                "port": {
                    'name': 'available-port',
                    'device_id': '',
                    'security_groups': ['security_group']
                }
            })
        neutron.delete_port.assert_not_called()

    def test__return_ports_to_pool_delete_exception(self):
        cls = vif_pool.GenericVIFPool
        m_driver = mock.MagicMock(spec=cls)
        neutron = self.useFixture(k_fix.MockNeutronClient()).client

        pool_key = ('node_ip', 'project_id', tuple(['security_group']))
        port_id = mock.sentinel.port_id
        pool_length = 10
        vif = mock.sentinel.vif

        m_driver._recyclable_ports = {port_id: pool_key}
        m_driver._available_ports_pools = {}
        m_driver._existing_vifs = {port_id: vif}
        oslo_cfg.CONF.set_override('ports_pool_max',
                                   5,
                                   group='vif_pool')
        m_driver._get_pool_size.return_value = pool_length
        neutron.delete_port.side_effect = n_exc.PortNotFoundClient

        cls._return_ports_to_pool(m_driver)

        neutron.update_port.assert_not_called()
        neutron.delete_port.assert_called_once_with(port_id)

    def test__return_ports_to_pool_delete_key_error(self):
        cls = vif_pool.GenericVIFPool
        m_driver = mock.MagicMock(spec=cls)
        neutron = self.useFixture(k_fix.MockNeutronClient()).client

        pool_key = ('node_ip', 'project_id', tuple(['security_group']))
        port_id = mock.sentinel.port_id
        pool_length = 10

        m_driver._recyclable_ports = {port_id: pool_key}
        m_driver._available_ports_pools = {}
        m_driver._existing_vifs = {}
        oslo_cfg.CONF.set_override('ports_pool_max',
                                   5,
                                   group='vif_pool')
        m_driver._get_pool_size.return_value = pool_length

        cls._return_ports_to_pool(m_driver)

        neutron.update_port.assert_not_called()
        neutron.delete_port.assert_not_called()
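
The @ddt.data decorator above makes test__return_ports_to_pool run once per
datum; a minimal self-contained sketch of the pattern (names illustrative):

import ddt
import unittest

@ddt.ddt
class PoolMaxData(unittest.TestCase):

    @ddt.data(0, 10)
    def test_cap(self, max_pool):
        # One generated test per value: 0 exercises the "no limit" branch,
        # 10 the capped branch.
        self.assertIn(max_pool, (0, 10))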


@@ -182,14 +182,18 @@ class TestVIFHandler(test_base.TestCase):
        self._request_vif.assert_called_once_with(
            self._pod, self._project_id, self._subnets, self._security_groups)
        self._set_vif.assert_called_once_with(self._pod, self._vif)
-        self._release_vif.assert_called_once_with(self._pod, self._vif)
+        self._release_vif.assert_called_once_with(self._pod, self._vif,
+                                                  self._project_id,
+                                                  self._security_groups)
        self._activate_vif.assert_not_called()

    def test_on_deleted(self):
        h_vif.VIFHandler.on_deleted(self._handler, self._pod)

        self._get_vif.assert_called_once_with(self._pod)
-        self._release_vif.assert_called_once_with(self._pod, self._vif)
+        self._release_vif.assert_called_once_with(self._pod, self._vif,
+                                                  self._project_id,
+                                                  self._security_groups)

    def test_on_deleted_host_network(self):
        self._is_host_network.return_value = True


@@ -66,6 +66,7 @@ kuryr_kubernetes.controller.drivers.endpoints_lbaas =
kuryr_kubernetes.controller.drivers.vif_pool =
    noop = kuryr_kubernetes.controller.drivers.vif_pool:NoopVIFPool
    generic = kuryr_kubernetes.controller.drivers.vif_pool:GenericVIFPool

[files]
packages =
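
These entry points are what the controller's driver loader resolves at
startup. Assuming the existing vif_pool_driver option under the [kubernetes]
section (it is not part of this diff), the new driver would be selected with:

[kubernetes]
# Hypothetical excerpt: pick the pooling driver by its entry-point name.
vif_pool_driver = generic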