Populate pools upon namespace creation

When the namespace subnet driver is used, a new subnet is created for
each new namespace. As pools are created per subnet, this patch
ensures that, in the nested case, new ports are created for the pools
belonging to the new subnet.

Note that this feature depends on resource tagging to filter out trunk
ports when multiple clusters are deployed on the same OpenStack project
or when other trunks are present. Otherwise all the existing trunks are
considered, regardless of whether they belong to the Kubernetes
cluster.
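
For example, a per-cluster tag can be set through the existing
resource_tags option in kuryr.conf (the tag value below is only an
illustration):

    [neutron_defaults]
    resource_tags = my-k8s-cluster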

NOTE: this applies only to the nested case, which is where pooling
shows the greatest improvements, as ports are already ACTIVE.

Change-Id: Id014cf49da8d4cbe0c1795e47765fcf2f0684c09
Luis Tomas Bolivar 2019-05-03 17:54:55 +02:00
parent 34fc8f114b
commit 3f9c80e6e6
11 changed files with 265 additions and 28 deletions

View File

@ -13,6 +13,14 @@ the next steps are needed:
enabled_handlers=vif,lb,lbaasspec,namespace
Note that if you also want to enable prepopulation of ports pools upon new
namespace creation, you need to add the kuryrnet handler (more details on
:doc:`./ports-pool`)::
[kubernetes]
enabled_handlers=vif,lb,lbaasspec,namespace,kuryrnet
2. Enable the namespace subnet driver by modifying the default
pod_subnet_driver option at kuryr.conf::
@ -45,7 +53,7 @@ the next steps are needed:
pod_router = ROUTER_ID
Note if a new router is created, it must ensure the connectivity
Note that if a new router is created, it must satisfy the connectivity
requirements between the pod, service and public subnets, as in the case of
the default subnet driver.

View File

@ -9,6 +9,15 @@ handlers at kuryr.conf (further info on how to do this can be found at
[kubernetes]
enabled_handlers=vif,lb,lbaasspec,policy,pod_label,namespace,kuryrnetpolicy
Note that if you also want to enable prepopulation of ports pools upon new
namespace creation, you need to add the kuryrnet handler (more details on
:doc:`./ports-pool`)::
[kubernetes]
enabled_handlers=vif,lb,lbaasspec,policy,pod_label,namespace,kuryrnetpolicy,kuryrnet
After that, enable also the security group drivers for policies::
[kubernetes]

View File

@ -137,3 +137,21 @@ the right pod-vif driver set.
Note that if no annotation is set on a node, the default pod_vif_driver is
used.
Populate pools on subnet creation for namespace subnet driver
-------------------------------------------------------------
When the namespace subnet driver is used (either for namespace isolation or
for network policies), a new subnet is created for each namespace. The ports
associated with each namespace will therefore be in different pools. In order
to prepopulate the pools associated with a newly created namespace (i.e.,
subnet), the following handler needs to be enabled::
[kubernetes]
enabled_handlers=vif,lb,lbaasspec,namespace,kuryrnet
This can be enabled at devstack deployment time by adding the following to
local.conf::
KURYR_ENABLED_HANDLERS=vif,lb,lbaasspec,namespace,kuryrnet
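
The number of ports created on each pool population is driven by the existing
ports_pool_batch option (the value shown is just an example)::

    [vif_pool]
    ports_pool_batch = 10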

View File

@ -28,7 +28,6 @@ K8S_OBJ_POD = 'Pod'
K8S_OBJ_SERVICE = 'Service'
K8S_OBJ_ENDPOINTS = 'Endpoints'
K8S_OBJ_POLICY = 'NetworkPolicy'
K8S_OBJ_KURYRNET = 'KuryrNet'
K8S_OBJ_KURYRNETPOLICY = 'KuryrNetPolicy'

View File

@ -44,11 +44,14 @@ class NamespacePodSubnetDriver(default_subnet.DefaultPodSubnetDriver):
def get_subnets(self, pod, project_id):
pod_namespace = pod['metadata']['namespace']
subnet_id = self._get_namespace_subnet(pod_namespace)
return self.get_namespace_subnet(pod_namespace)
def get_namespace_subnet(self, namespace, subnet_id=None):
if not subnet_id:
subnet_id = self._get_namespace_subnet_id(namespace)
return {subnet_id: utils.get_subnet(subnet_id)}
def _get_namespace_subnet(self, namespace):
def _get_namespace_subnet_id(self, namespace):
kubernetes = clients.get_kubernetes_client()
try:
ns = kubernetes.get('%s/namespaces/%s' % (constants.K8S_API_BASE,
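
As a hedged usage sketch (the driver instance and namespace name are
hypothetical), the new method resolves the subnet mapping directly from a
namespace name::

    # Returns {subnet_id: subnet}, looking the id up via the
    # Kubernetes API when subnet_id is not supplied.
    subnets = driver.get_namespace_subnet('my-namespace')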

View File

@ -828,14 +828,28 @@ class NestedVIFPool(BaseVIFPool):
LOG.debug('Port %s is not in the available ports '
'pool.', kuryr_subport['id'])
@lockutils.synchronized('return_to_pool_nested')
def populate_pool(self, trunk_ip, project_id, subnets, security_groups,
num_ports=None):
if not hasattr(self, '_available_ports_pools'):
LOG.info("Kuryr-controller not yet ready to populate pools.")
raise exceptions.ResourceNotReady(trunk_ip)
pool_key = self._get_pool_key(trunk_ip, project_id, None, subnets)
pools = self._available_ports_pools.get(pool_key)
if not pools:
self.force_populate_pool(trunk_ip, project_id, subnets,
security_groups, num_ports)
def force_populate_pool(self, trunk_ip, project_id, subnets,
security_groups, num_ports):
security_groups, num_ports=None):
"""Create a given amount of subports at a given trunk port.
This function creates a given amount of subports and attaches them to
the specified trunk, adding them to the related subports pool
regardless of the amount of subports already available in the pool.
"""
if not num_ports:
num_ports = oslo_cfg.CONF.vif_pool.ports_pool_batch
vifs = self._drv_vif.request_vifs(
pod=[],
project_id=project_id,
@ -998,3 +1012,8 @@ class MultiVIFPool(base.VIFPoolDriver):
vif_pool_mapping[pod_driver] = pool_driver
return vif_pool_mapping
def populate_pool(self, node_ip, project_id, subnets, sg_id):
for vif_drv in self._vif_drvs.values():
if str(vif_drv) == 'NestedVIFPool':
vif_drv.populate_pool(node_ip, project_id, subnets, sg_id)
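
For illustration, forcing a batch of subports onto a node's trunk could look
like the following (a sketch only; the pool instance and all argument values
are hypothetical)::

    # `pool` is assumed to be a configured NestedVIFPool instance
    pool.force_populate_pool(
        trunk_ip='10.0.0.5',           # IP of the node's trunk port
        project_id='PROJECT_ID',
        subnets={'SUBNET_ID': subnet},
        security_groups=['SG_ID'],
        num_ports=5)                   # defaults to ports_pool_batch if omitted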

View File

@ -0,0 +1,59 @@
# Copyright 2019 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo_log import log as logging
from kuryr_kubernetes import constants
from kuryr_kubernetes.controller.drivers import base as drivers
from kuryr_kubernetes.handlers import k8s_base
from kuryr_kubernetes import utils
LOG = logging.getLogger(__name__)
class KuryrNetHandler(k8s_base.ResourceEventHandler):
"""Controller side of KuryrNet process for Kubernetes pods.
`KuryrNetHandler` runs on the Kuryr-Kubernetes controller and is
responsible for populating pools for newly created namespaces.
"""
OBJECT_KIND = constants.K8S_OBJ_KURYRNET
OBJECT_WATCH_PATH = constants.K8S_API_CRD_KURYRNETS
def __init__(self):
super(KuryrNetHandler, self).__init__()
self._drv_project = drivers.NamespaceProjectDriver.get_instance()
self._drv_subnets = drivers.PodSubnetsDriver.get_instance()
self._drv_vif_pool = drivers.VIFPoolDriver.get_instance(
specific_driver='multi_pool')
self._drv_vif_pool.set_vif_driver()
def on_added(self, kuryrnet_crd):
namespace = kuryrnet_crd['metadata']['annotations'].get(
'namespaceName')
# NOTE(ltomasbo): using the namespace name instead of the namespace
# object, as the full object is not required
project_id = self._drv_project.get_project(namespace)
subnet_id = kuryrnet_crd['spec'].get('subnetId')
subnets = self._drv_subnets.get_namespace_subnet(namespace, subnet_id)
sg_id = kuryrnet_crd['spec'].get('sgId', [])
nodes = utils.get_nodes_ips()
# TODO(ltomasbo): Skip the master node where pods are not usually
# allocated.
for node_ip in nodes:
LOG.debug("Populating subnet pool %s at node %s", subnet_id,
node_ip)
self._drv_vif_pool.populate_pool(node_ip, project_id, subnets,
sg_id)
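
For reference, the KuryrNet objects consumed by on_added carry the namespace
name as an annotation and the subnet and security group details in the spec,
as in this minimal example (same shape as the unit-test fixture below; values
are hypothetical)::

    kuryrnet_crd = {
        'metadata': {
            'annotations': {
                'namespaceName': 'my-namespace'
            }},
        'spec': {
            'subnetId': 'SUBNET_ID',
            'sgId': []
        }
    }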

View File

@ -67,7 +67,6 @@ class TestNamespacePodSubnetDriver(test_base.TestCase):
@mock.patch('kuryr_kubernetes.utils.get_subnet')
def test_get_subnets(self, m_get_subnet):
project_id = mock.sentinel.project_id
pod = get_pod_obj()
pod_namespace = pod['metadata']['namespace']
subnet_id = mock.sentinel.subnet_id
@ -76,34 +75,35 @@ class TestNamespacePodSubnetDriver(test_base.TestCase):
cls = subnet_drv.NamespacePodSubnetDriver
m_driver = mock.MagicMock(spec=cls)
m_driver._get_namespace_subnet.return_value = subnet_id
m_driver._get_namespace_subnet_id.return_value = subnet_id
m_get_subnet.return_value = subnet
subnets = cls.get_subnets(m_driver, pod, project_id)
subnets = cls.get_namespace_subnet(m_driver, pod_namespace)
self.assertEqual({subnet_id: subnet}, subnets)
m_driver._get_namespace_subnet.assert_called_once_with(pod_namespace)
m_driver._get_namespace_subnet_id.assert_called_once_with(
pod_namespace)
m_get_subnet.assert_called_once_with(subnet_id)
@mock.patch('kuryr_kubernetes.utils.get_subnet')
def test_get_subnets_namespace_not_ready(self, m_get_subnet):
project_id = mock.sentinel.project_id
pod = get_pod_obj()
pod_namespace = pod['metadata']['namespace']
cls = subnet_drv.NamespacePodSubnetDriver
m_driver = mock.MagicMock(spec=cls)
m_driver._get_namespace_subnet.side_effect = k_exc.ResourceNotReady(
m_driver._get_namespace_subnet_id.side_effect = (
k_exc.ResourceNotReady(pod_namespace))
self.assertRaises(k_exc.ResourceNotReady, cls.get_namespace_subnet,
m_driver, pod_namespace)
m_driver._get_namespace_subnet_id.assert_called_once_with(
pod_namespace)
self.assertRaises(k_exc.ResourceNotReady, cls.get_subnets, m_driver,
pod, project_id)
m_driver._get_namespace_subnet.assert_called_once_with(pod_namespace)
m_get_subnet.assert_not_called()
def test__get_namespace_subnets(self):
def test__get_namespace_subnet_id(self):
cls = subnet_drv.NamespacePodSubnetDriver
m_driver = mock.MagicMock(spec=cls)
@ -119,11 +119,11 @@ class TestNamespacePodSubnetDriver(test_base.TestCase):
kubernetes = self.useFixture(k_fix.MockK8sClient()).client
kubernetes.get.side_effect = [ns, crd]
subnet_id_resp = cls._get_namespace_subnet(m_driver, namespace)
subnet_id_resp = cls._get_namespace_subnet_id(m_driver, namespace)
kubernetes.get.assert_called()
self.assertEqual(subnet_id, subnet_id_resp)
def test__get_namespace_subnets_get_namespace_exception(self):
def test__get_namespace_subnet_id_get_namespace_exception(self):
cls = subnet_drv.NamespacePodSubnetDriver
m_driver = mock.MagicMock(spec=cls)
@ -132,11 +132,11 @@ class TestNamespacePodSubnetDriver(test_base.TestCase):
kubernetes = self.useFixture(k_fix.MockK8sClient()).client
kubernetes.get.side_effect = k_exc.K8sClientException
self.assertRaises(k_exc.ResourceNotReady, cls._get_namespace_subnet,
m_driver, namespace)
self.assertRaises(k_exc.ResourceNotReady,
cls._get_namespace_subnet_id, m_driver, namespace)
kubernetes.get.assert_called_once()
def test__get_namespace_subnets_missing_annotation(self):
def test__get_namespace_subnet_id_missing_annotation(self):
cls = subnet_drv.NamespacePodSubnetDriver
m_driver = mock.MagicMock(spec=cls)
@ -153,11 +153,11 @@ class TestNamespacePodSubnetDriver(test_base.TestCase):
kubernetes = self.useFixture(k_fix.MockK8sClient()).client
kubernetes.get.side_effect = [ns, crd]
self.assertRaises(k_exc.ResourceNotReady, cls._get_namespace_subnet,
m_driver, namespace)
self.assertRaises(k_exc.ResourceNotReady,
cls._get_namespace_subnet_id, m_driver, namespace)
kubernetes.get.assert_called_once()
def test__get_namespace_subnets_get_crd_exception(self):
def test__get_namespace_subnet_id_get_crd_exception(self):
cls = subnet_drv.NamespacePodSubnetDriver
m_driver = mock.MagicMock(spec=cls)
@ -167,8 +167,8 @@ class TestNamespacePodSubnetDriver(test_base.TestCase):
kubernetes = self.useFixture(k_fix.MockK8sClient()).client
kubernetes.get.side_effect = [ns, k_exc.K8sClientException]
self.assertRaises(k_exc.K8sClientException, cls._get_namespace_subnet,
m_driver, namespace)
self.assertRaises(k_exc.K8sClientException,
cls._get_namespace_subnet_id, m_driver, namespace)
kubernetes.get.assert_called()
def test_delete_namespace_subnet(self):

View File

@ -0,0 +1,92 @@
# Copyright 2019 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
from kuryr_kubernetes.controller.drivers import base as drivers
from kuryr_kubernetes.controller.drivers import namespace_subnet as subnet_drv
from kuryr_kubernetes.controller.drivers import vif_pool
from kuryr_kubernetes.controller.handlers import kuryrnet
from kuryr_kubernetes.tests import base as test_base
from kuryr_kubernetes import utils
class TestKuryrNetHandler(test_base.TestCase):
def setUp(self):
super(TestKuryrNetHandler, self).setUp()
self._project_id = mock.sentinel.project_id
self._subnets = mock.sentinel.subnets
self._kuryrnet_crd = {
'metadata': {
'annotations': {
'namespaceName': 'test-namespace'
}},
'spec': {
'subnetId': 'test-subnet'
}
}
self._handler = mock.MagicMock(spec=kuryrnet.KuryrNetHandler)
self._handler._drv_project = mock.Mock(spec=drivers.PodProjectDriver)
# NOTE(ltomasbo): The KuryrNet handler is associated with the use of the
# namespace subnet driver.
self._handler._drv_subnets = mock.Mock(
spec=subnet_drv.NamespacePodSubnetDriver)
self._handler._drv_vif_pool = mock.MagicMock(
spec=vif_pool.MultiVIFPool)
self._get_project = self._handler._drv_project.get_project
self._get_namespace_subnet = (
self._handler._drv_subnets.get_namespace_subnet)
self._set_vif_driver = self._handler._drv_vif_pool.set_vif_driver
self._populate_pool = self._handler._drv_vif_pool.populate_pool
self._get_project.return_value = self._project_id
self._get_namespace_subnet.return_value = self._subnets
@mock.patch.object(drivers.VIFPoolDriver, 'get_instance')
@mock.patch.object(drivers.PodSubnetsDriver, 'get_instance')
@mock.patch.object(drivers.NamespaceProjectDriver, 'get_instance')
def test_init(self, m_get_project_driver, m_get_subnet_driver,
m_get_vif_pool_driver):
project_driver = mock.sentinel.project_driver
subnet_driver = mock.sentinel.subnet_driver
vif_pool_driver = mock.Mock(spec=vif_pool.MultiVIFPool)
m_get_project_driver.return_value = project_driver
m_get_subnet_driver.return_value = subnet_driver
m_get_vif_pool_driver.return_value = vif_pool_driver
handler = kuryrnet.KuryrNetHandler()
self.assertEqual(project_driver, handler._drv_project)
self.assertEqual(subnet_driver, handler._drv_subnets)
self.assertEqual(vif_pool_driver, handler._drv_vif_pool)
@mock.patch.object(utils, 'get_nodes_ips')
def test_on_added(self, m_get_nodes_ips):
m_get_nodes_ips.return_value = ['node-ip']
kuryrnet.KuryrNetHandler.on_added(self._handler, self._kuryrnet_crd)
self._get_project.assert_called_once()
self._get_namespace_subnet.assert_called_once_with(
self._kuryrnet_crd['metadata']['annotations']['namespaceName'],
self._kuryrnet_crd['spec']['subnetId'])
self._populate_pool.assert_called_once_with('node-ip',
self._project_id,
self._subnets,
[])

View File

@ -48,15 +48,25 @@ subnet_caching_opts = [
cfg.IntOpt('cache_time', default=3600),
]
nodes_caching_opts = [
cfg.BoolOpt('caching', default=True),
cfg.IntOpt('cache_time', default=3600),
]
CONF.register_opts(subnet_caching_opts, "subnet_caching")
CONF.register_opts(nodes_caching_opts, "nodes_caching")
cache.configure(CONF)
subnet_cache_region = cache.create_region()
MEMOIZE = cache.get_memoization_decorator(
CONF, subnet_cache_region, "subnet_caching")
cache.configure_cache_region(CONF, subnet_cache_region)
nodes_cache_region = cache.create_region()
MEMOIZE_NODE = cache.get_memoization_decorator(
CONF, nodes_cache_region, "nodes_caching")
cache.configure_cache_region(CONF, nodes_cache_region)
def utf8_json_decoder(byte_data):
"""Deserializes the bytes into UTF-8 encoded JSON.
@ -148,6 +158,25 @@ def get_leader_name():
return None
@MEMOIZE_NODE
def get_nodes_ips():
"""Get the IPs of the trunk ports associated to the deployment."""
trunk_ips = []
neutron = clients.get_neutron_client()
tags = CONF.neutron_defaults.resource_tags
if tags:
ports = neutron.list_ports(status='ACTIVE',
tags=CONF.neutron_defaults.resource_tags)
else:
# NOTE(ltomasbo): if tags are not used, assume all the trunk ports are
# part of the kuryr deployment
ports = neutron.list_ports(status='ACTIVE')
for port in ports.get('ports'):
if port.get('trunk_details'):
trunk_ips.append(port['fixed_ips'][0]['ip_address'])
return trunk_ips
@MEMOIZE
def get_subnet(subnet_id):
neutron = clients.get_neutron_client()
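
A quick usage sketch for the new helper (IPs are made up; it assumes two
ACTIVE trunk ports exist)::

    from kuryr_kubernetes import utils

    # First fixed IP of every ACTIVE port with trunk_details, filtered
    # by resource_tags when configured; results are cached per the
    # [nodes_caching] settings.
    node_ips = utils.get_nodes_ips()   # e.g. ['10.0.0.5', '10.0.0.6']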

View File

@ -104,6 +104,7 @@ kuryr_kubernetes.controller.handlers =
policy = kuryr_kubernetes.controller.handlers.policy:NetworkPolicyHandler
pod_label = kuryr_kubernetes.controller.handlers.pod_label:PodLabelHandler
kuryrnetpolicy = kuryr_kubernetes.controller.handlers.kuryrnetpolicy:KuryrNetPolicyHandler
kuryrnet = kuryr_kubernetes.controller.handlers.kuryrnet:KuryrNetHandler
test_handler = kuryr_kubernetes.tests.unit.controller.handlers.test_fake_handler:TestHandler
kuryr_kubernetes.controller.drivers.multi_vif =