Add multi pools support
This patch adds support for nodes with different vif drivers, as well as
different pool drivers for each vif driver type.

Closes-Bug: 1747406
Change-Id: I842fd4b513a5f325d598d677e5008f9ea51adab9
commit a83eba5fa1
parent 7061f4abac
@@ -90,3 +90,38 @@ Note there is a developers tool available at `contrib/pools-management` to
 create/delete ports in the desired pool(s) as well as to control the amount of
 existing ports loaded into each pool. For more details on this read the readme
 file on that folder.
+
+Multi pod-vif drivers support with pools
+----------------------------------------
+
+There is a multi pool driver that supports hybrid environments where some
+nodes are Bare Metal while others are running inside VMs, therefore having
+different VIF drivers (e.g., neutron and nested-vlan).
+
+This new multi pool driver is the default pool driver, used even if a
+different vif_pool_driver is set in the config options. However, if the
+mapping between pools and pod vif drivers is not provided through the
+pools_vif_drivers option in the vif_pool configuration section, only one
+pool driver will be loaded -- the one selected by the standard
+vif_pool_driver and pod_vif_driver options in kuryr.conf.
+
+To enable different pools depending on the node's pod vif types, state the
+type of pool that you want for each pod vif driver, e.g.:
+
+.. code-block:: ini
+
+   [vif_pool]
+   pools_vif_drivers=nested:nested-vlan,neutron:neutron-vif
+
+This will use a pool driver nested to handle the pods whose vif driver is
+nested-vlan, and a pool driver neutron to handle the pods whose vif driver is
+neutron-vif. When the controller requests a vif for a pod on node X, it will
+first read the node's pod_vif label to learn which pod vif driver to use,
+e.g., pod_vif: nested-vlan, and then use the corresponding pool driver --
+which has the right pod-vif driver set.
+
+Note that if no label is set on a node, the default pod_vif_driver is used.
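To make the dispatch concrete, here is a minimal, self-contained sketch (an
editor's illustration, not part of the patch; FakePool is a hypothetical
stand-in for the stevedore-loaded pool drivers):

    # Editor's sketch, not part of the patch: FakePool is a hypothetical
    # stand-in for the stevedore-loaded pool drivers.
    pools_vif_drivers = {'nested': 'nested-vlan', 'neutron': 'neutron-vif'}

    class FakePool(object):
        def __init__(self, pool_name):
            self.pool_name = pool_name

        def request_vif(self):
            return 'vif-from-%s-pool' % self.pool_name

    # Keyed by pod vif driver name, mirroring MultiVIFPool._vif_drvs.
    vif_drvs = {pod_drv: FakePool(pool_drv)
                for pool_drv, pod_drv in pools_vif_drivers.items()}

    # The node's pod_vif label (or the configured default) selects the pool.
    node_pod_vif = 'nested-vlan'
    print(vif_drvs[node_pod_vif].request_vif())  # vif-from-nested-pool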
@@ -50,20 +50,21 @@ class DriverBase(object):
     """

     @classmethod
-    def get_instance(cls):
+    def get_instance(cls, driver_alias=None):
         """Get an implementing driver instance."""

         alias = cls.ALIAS
+        driver_name = alias + '_driver' if not driver_alias else driver_alias
         try:
-            manager = _DRIVER_MANAGERS[alias]
+            manager = _DRIVER_MANAGERS[driver_name]
         except KeyError:
-            name = config.CONF.kubernetes[alias + '_driver']
+            name = (config.CONF.kubernetes[driver_name] if not driver_alias
+                    else driver_alias)
             manager = stv_driver.DriverManager(
                 namespace="%s.%s" % (_DRIVER_NAMESPACE_BASE, alias),
                 name=name,
                 invoke_on_load=True)
-            _DRIVER_MANAGERS[alias] = manager
+            _DRIVER_MANAGERS[driver_name] = manager

         driver = manager.driver
         if not isinstance(driver, cls):
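The essential change above is that the driver-manager cache is keyed by
driver_name instead of the class ALIAS, so two pool drivers can coexist. A
minimal sketch of the lookup logic (editor's illustration with stevedore
stubbed out; get_instance is simplified to a free function):

    # Editor's sketch: the real method loads stevedore managers; here the
    # manager is just a string so the keying behaviour is easy to see.
    _DRIVER_MANAGERS = {}

    def get_instance(alias, configured_name, driver_alias=None):
        # Without driver_alias the cache key is '<alias>_driver' and the
        # entry-point name comes from config; with driver_alias it is both
        # the cache key and the entry-point name.
        driver_name = alias + '_driver' if not driver_alias else driver_alias
        if driver_name not in _DRIVER_MANAGERS:
            name = configured_name if not driver_alias else driver_alias
            _DRIVER_MANAGERS[driver_name] = 'manager(%s)' % name
        return _DRIVER_MANAGERS[driver_name]

    get_instance('vif_pool', 'neutron')                # key 'vif_pool_driver'
    get_instance('vif_pool', 'neutron', 'multi_pool')  # key 'multi_pool'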
@@ -22,6 +22,7 @@ import time
 from kuryr.lib._i18n import _
 from kuryr.lib import constants as kl_const
 from neutronclient.common import exceptions as n_exc
+from oslo_cache import core as cache
 from oslo_config import cfg as oslo_cfg
 from oslo_log import log as logging
 from oslo_serialization import jsonutils
@@ -34,6 +35,7 @@ from kuryr_kubernetes.controller.drivers import default_subnet
 from kuryr_kubernetes.controller.managers import pool
 from kuryr_kubernetes import exceptions
 from kuryr_kubernetes import os_vif_util as ovu
+from kuryr_kubernetes import utils

 LOG = logging.getLogger(__name__)

@@ -53,10 +55,31 @@ vif_pool_driver_opts = [
                  help=_("Minimun interval (in seconds) "
                         "between pool updates"),
                  default=20),
+    oslo_cfg.DictOpt('pools_vif_drivers',
+                     help=_("Dict with the pool driver and pod driver to be "
+                            "used. If not set, it will take them from the "
+                            "kubernetes driver options for pool and pod "
+                            "drivers respectively"),
+                     default={}),
 ]

 oslo_cfg.CONF.register_opts(vif_pool_driver_opts, "vif_pool")

+node_vif_driver_caching_opts = [
+    oslo_cfg.BoolOpt('caching', default=True),
+    oslo_cfg.IntOpt('cache_time', default=3600),
+]
+
+oslo_cfg.CONF.register_opts(node_vif_driver_caching_opts,
+                            "node_driver_caching")
+
+cache.configure(oslo_cfg.CONF)
+node_driver_cache_region = cache.create_region()
+MEMOIZE = cache.get_memoization_decorator(
+    oslo_cfg.CONF, node_driver_cache_region, "node_driver_caching")
+
+cache.configure_cache_region(oslo_cfg.CONF, node_driver_cache_region)
+
+
 class NoopVIFPool(base.VIFPoolDriver):
     """No pool VIFs for Kubernetes Pods"""
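The caching block above follows the usual oslo.cache memoization pattern. A
condensed, self-contained sketch of the same wiring (the [node_driver_caching]
group and its options come from the patch; the decorated function body is
hypothetical):

    from oslo_cache import core as cache
    from oslo_config import cfg as oslo_cfg

    # The same knobs the patch registers under [node_driver_caching].
    oslo_cfg.CONF.register_opts([oslo_cfg.BoolOpt('caching', default=True),
                                 oslo_cfg.IntOpt('cache_time', default=3600)],
                                'node_driver_caching')

    cache.configure(oslo_cfg.CONF)
    region = cache.create_region()
    MEMOIZE = cache.get_memoization_decorator(
        oslo_cfg.CONF, region, 'node_driver_caching')
    cache.configure_cache_region(oslo_cfg.CONF, region)

    @MEMOIZE
    def node_vif_driver(node_name):
        # Hypothetical expensive lookup; with caching enabled the result is
        # reused for cache_time seconds instead of being recomputed.
        return 'nested-vlan'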
@@ -618,3 +641,68 @@ class NestedVIFPool(BaseVIFPool):
         no trunk is specified).
         """
         self._remove_precreated_ports(trunk_ips)
+
+
+class MultiVIFPool(base.VIFPoolDriver):
+    """Manages pools with different VIF types.
+
+    It manages hybrid deployments containing both Bare Metal and Nested
+    Kubernetes Pods. To do that it creates a pool per node with a different
+    pool driver depending on the vif driver that the node is using.
+
+    It assumes a pod_vif label is added to each node to inform about the
+    driver set for that node. If no label is added, it assumes the default
+    pod vif: the one specified at kuryr.conf.
+    """
+
+    def set_vif_driver(self):
+        self._vif_drvs = {}
+        pools_vif_drivers = oslo_cfg.CONF.vif_pool.pools_vif_drivers
+        if not pools_vif_drivers:
+            pod_vif = oslo_cfg.CONF.kubernetes.pod_vif_driver
+            drv_vif = base.PodVIFDriver.get_instance()
+            drv_pool = base.VIFPoolDriver.get_instance()
+            drv_pool.set_vif_driver(drv_vif)
+            self._vif_drvs[pod_vif] = drv_pool
+            return
+        for pool_driver, pod_driver in pools_vif_drivers.items():
+            if not utils.check_suitable_multi_pool_driver_opt(pool_driver,
+                                                              pod_driver):
+                LOG.error("The pool and pod driver selected are not "
+                          "compatible.")
+                raise exceptions.MultiPodDriverPoolConfigurationNotSupported()
+            drv_vif = base.PodVIFDriver.get_instance(driver_alias=pod_driver)
+            drv_pool = base.VIFPoolDriver.get_instance(
+                driver_alias=pool_driver)
+            drv_pool.set_vif_driver(drv_vif)
+            self._vif_drvs[pod_driver] = drv_pool
+
+    def request_vif(self, pod, project_id, subnets, security_groups):
+        pod_vif_type = self._get_pod_vif_type(pod)
+        return self._vif_drvs[pod_vif_type].request_vif(
+            pod, project_id, subnets, security_groups)
+
+    def release_vif(self, pod, vif, *argv):
+        pod_vif_type = self._get_pod_vif_type(pod)
+        self._vif_drvs[pod_vif_type].release_vif(pod, vif, *argv)
+
+    def activate_vif(self, pod, vif):
+        pod_vif_type = self._get_pod_vif_type(pod)
+        self._vif_drvs[pod_vif_type].activate_vif(pod, vif)
+
+    def _get_pod_vif_type(self, pod):
+        node_name = pod['spec']['nodeName']
+        return self._get_node_vif_driver(node_name)
+
+    @MEMOIZE
+    def _get_node_vif_driver(self, node_name):
+        kubernetes = clients.get_kubernetes_client()
+        node_info = kubernetes.get(
+            constants.K8S_API_BASE + '/nodes/' + node_name)
+
+        labels = node_info['metadata'].get('labels', None)
+        if labels:
+            pod_vif = labels.get('pod_vif',
+                                 oslo_cfg.CONF.kubernetes.pod_vif_driver)
+            return pod_vif
+        return oslo_cfg.CONF.kubernetes.pod_vif_driver
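_get_node_vif_driver only inspects the node's labels. An editor's
illustration of the slice of the node object it consumes (the node payload is
hypothetical; such a label could be set with, e.g.,
kubectl label node worker-1 pod_vif=nested-vlan):

    # Hypothetical payload, as returned by GET /api/v1/nodes/worker-1.
    node_info = {'metadata': {'labels': {'pod_vif': 'nested-vlan'}}}

    default_driver = 'neutron-vif'  # stands in for kubernetes.pod_vif_driver
    labels = node_info['metadata'].get('labels', None)
    pod_vif = labels.get('pod_vif', default_driver) if labels else default_driver
    assert pod_vif == 'nested-vlan'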
@@ -44,13 +44,13 @@ class VIFHandler(k8s_base.ResourceEventHandler):
         self._drv_project = drivers.PodProjectDriver.get_instance()
         self._drv_subnets = drivers.PodSubnetsDriver.get_instance()
         self._drv_sg = drivers.PodSecurityGroupsDriver.get_instance()
-        self._drv_vif = drivers.PodVIFDriver.get_instance()
         # REVISIT(ltomasbo): The VIF Handler should not be aware of the pool
         # directly. Due to the lack of a mechanism to load and set the
         # VIFHandler driver, for now it is aware of the pool driver, but this
         # will be reverted as soon as a mechanism is in place.
-        self._drv_vif_pool = drivers.VIFPoolDriver.get_instance()
-        self._drv_vif_pool.set_vif_driver(self._drv_vif)
+        self._drv_vif_pool = drivers.VIFPoolDriver.get_instance(
+            driver_alias='multi_pool')
+        self._drv_vif_pool.set_vif_driver()

     def on_present(self, pod):
         if self._is_host_network(pod) or not self._is_pending_node(pod):
@@ -51,3 +51,14 @@ class AllowedAddressAlreadyPresent(Exception):
     'allowed address pair' on a port is made. Such a condition likely indicates
     a bad program state or a programming bug.
     """
+
+
+class MultiPodDriverPoolConfigurationNotSupported(Exception):
+    """Exception indicates a wrong configuration of the multi pod driver pool
+
+    This exception is raised when the multi pod driver pool is not properly
+    configured. This could be due to three different reasons:
+    1. One of the pool drivers is not supported
+    2. One of the pod drivers is not supported
+    3. One of the pod drivers is not supported by its selected pool driver
+    """
@@ -31,6 +31,7 @@ _kuryr_k8s_opts = [
     ('octavia_defaults', config.octavia_defaults),
     ('cache_defaults', config.cache_defaults),
    ('subnet_caching', default_subnet.subnet_caching_opts),
+    ('node_driver_caching', vif_pool.node_vif_driver_caching_opts),
     ('pool_manager', pool.pool_manager_opts),
     ('cni_daemon', config.daemon_opts),
     ('health_server', health.health_server_opts),
@@ -64,7 +64,7 @@ class TestDriverBase(test_base.TestCase):
         m_cfg.kubernetes.__getitem__.assert_called_with(cfg_name)
         m_stv_mgr.assert_called_with(namespace=namespace, name=drv_name,
                                      invoke_on_load=True)
-        m_mgrs.__setitem__.assert_called_once_with(alias, m_mgr)
+        m_mgrs.__setitem__.assert_called_once_with(cfg_name, m_mgr)

     @mock.patch.object(d_base, '_DRIVER_MANAGERS')
     @mock.patch('kuryr_kubernetes.config.CONF')
@@ -15,6 +15,13 @@ from oslo_serialization import jsonutils

 CONF = cfg.CONF

+VALID_MULTI_POD_POOLS_OPTS = {'noop': ['neutron-vif',
+                                       'nested-vlan',
+                                       'nested-macvlan'],
+                              'neutron': ['neutron-vif'],
+                              'nested': ['nested-vlan'],
+                              }
+

 def utf8_json_decoder(byte_data):
     """Deserializes the bytes into UTF-8 encoded JSON.
@@ -50,3 +57,7 @@ def get_pod_unique_name(pod):
     :returns: String with namespace/name of the pod
     """
     return "%(namespace)s/%(name)s" % pod['metadata']
+
+
+def check_suitable_multi_pool_driver_opt(pool_driver, pod_driver):
+    return pod_driver in VALID_MULTI_POD_POOLS_OPTS.get(pool_driver, [])
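Given the VALID_MULTI_POD_POOLS_OPTS table above, the helper is a plain
membership test, e.g.:

    check_suitable_multi_pool_driver_opt('nested', 'nested-vlan')    # True
    check_suitable_multi_pool_driver_opt('neutron', 'nested-vlan')   # False
    check_suitable_multi_pool_driver_opt('unknown', 'neutron-vif')   # False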
releasenotes/notes/multivif-pools-1cec757c77a8c4f8.yaml (new file, 34 lines)
@@ -0,0 +1,34 @@
+---
+features:
+  - |
+    Adds a new multi pool driver to support hybrid environments where some
+    nodes are Bare Metal while others are running inside VMs, therefore
+    having different VIF drivers (e.g., neutron and nested-vlan).
+
+    This new multi pool driver is the default pool driver, used even if a
+    different vif_pool_driver is set in the config options. However, if the
+    mapping between pools and pod vif drivers is not provided through the
+    pools_vif_drivers option in the vif_pool configuration section, only
+    one pool driver will be loaded -- the one selected by the standard
+    vif_pool_driver and pod_vif_driver options in kuryr.conf.
+
+    To enable different pools depending on the node's pod vif types, state
+    the type of pool that you want for each pod vif driver, e.g.:
+
+    .. code-block:: ini
+
+       [vif_pool]
+       pools_vif_drivers=nested:nested-vlan,neutron:neutron-vif
+
+    This will use a pool driver nested to handle the pods whose vif driver
+    is nested-vlan, and a pool driver neutron to handle the pods whose vif
+    driver is neutron-vif. When the controller requests a vif for a pod on
+    node X, it will first read the node's pod_vif label to learn which pod
+    vif driver to use, e.g., pod_vif: nested-vlan, and then use the
+    corresponding pool driver -- which has the right pod-vif driver set.
+
+    Note that if no label is set on a node, the default pod_vif_driver is
+    used.
@@ -73,6 +73,7 @@ kuryr_kubernetes.controller.drivers.vif_pool =
     noop = kuryr_kubernetes.controller.drivers.vif_pool:NoopVIFPool
     neutron = kuryr_kubernetes.controller.drivers.vif_pool:NeutronVIFPool
     nested = kuryr_kubernetes.controller.drivers.vif_pool:NestedVIFPool
+    multi_pool = kuryr_kubernetes.controller.drivers.vif_pool:MultiVIFPool

 [files]
 packages =
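With this entry point in place, the multi_pool alias requested by the
VIFHandler resolves through stevedore. A hedged sketch of that resolution
(runnable only with kuryr-kubernetes installed; the namespace string matches
the setup.cfg section above):

    from stevedore import driver as stv_driver

    # DriverManager loads and instantiates the class behind the
    # 'multi_pool' entry point, just as DriverBase.get_instance does.
    manager = stv_driver.DriverManager(
        namespace='kuryr_kubernetes.controller.drivers.vif_pool',
        name='multi_pool',
        invoke_on_load=True)
    print(type(manager.driver))  # MultiVIFPool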