Avoid port update neutron call during pods boot up

This patch removes the need to update the port name when it is
associated to a pod. It still allows it by enabling the port_debug
option in kuryr.conf. By default it is set to False, which means it
will not set any name for the ports being used by the pods, nor
for the ports available in the port pools. This makes it similar to
VMs booted with Nova, where unless a predefined port is used, the
port will be created and used without setting any name.

By disabling port_debug, a call to Neutron to update the
port name is saved, with the consequent speed-up in container
boot-up time, as well as reduced congestion on the Neutron side
when many containers are being created at the same time.

Partially Implements blueprint port-creation-control-plane-perf
Change-Id: I674fd948569558d3cc8f13f920c1793e056dfd92
This commit is contained in:
Luis Tomas Bolivar 2017-09-18 11:22:22 +00:00
parent aaa5150252
commit f5e8f487e7
8 changed files with 141 additions and 45 deletions

View File

@ -84,6 +84,10 @@ k8s_opts = [
help=_("The driver that manages VIFs pools for "
"Kubernetes Pods."),
default='noop'),
cfg.BoolOpt('port_debug',
help=_('Enable port debug to force kuryr port names to be '
'set to their corresponding pod names.'),
default=False),
]
neutron_defaults = [

View File

@ -32,3 +32,5 @@ K8S_OS_VIF_NOOP_PLUGIN = "noop"
CNI_EXCEPTION_CODE = 100
CNI_TIMEOUT_CODE = 200
KURYR_PORT_NAME = 'kuryr-pool-port'

View File

@ -20,6 +20,8 @@ from neutronclient.common import exceptions as n_exc
from oslo_log import log as logging
from kuryr_kubernetes import clients
from kuryr_kubernetes import config
from kuryr_kubernetes import constants
from kuryr_kubernetes.controller.drivers import nested_vif
from kuryr_kubernetes import exceptions as k_exc
from kuryr_kubernetes import os_vif_util as ovu
@ -124,10 +126,12 @@ class NestedVlanPodVIFDriver(nested_vif.NestedPodVIFDriver):
'device_owner': kl_const.DEVICE_OWNER,
'admin_state_up': True}
if unbound:
port_req_body['name'] = 'available-port'
else:
port_req_body['name'] = self._get_port_name(pod)
# only set name if port_debug is enabled
if config.CONF.kubernetes.port_debug:
if unbound:
port_req_body['name'] = constants.KURYR_PORT_NAME
else:
port_req_body['name'] = self._get_port_name(pod)
if security_groups:
port_req_body['security_groups'] = security_groups

View File

@ -18,6 +18,8 @@ from neutronclient.common import exceptions as n_exc
from oslo_log import log as logging
from kuryr_kubernetes import clients
from kuryr_kubernetes import config
from kuryr_kubernetes import constants
from kuryr_kubernetes.controller.drivers import base
from kuryr_kubernetes import exceptions as k_exc
from kuryr_kubernetes import os_vif_util as ovu
@ -93,20 +95,23 @@ class NeutronPodVIFDriver(base.PodVIFDriver):
def _get_port_request(self, pod, project_id, subnets, security_groups,
unbound=False):
port_req_body = {'project_id': project_id,
'name': self._get_port_name(pod),
'network_id': self._get_network_id(subnets),
'fixed_ips': ovu.osvif_to_neutron_fixed_ips(subnets),
'device_owner': kl_const.DEVICE_OWNER,
'device_id': self._get_device_id(pod),
'admin_state_up': True,
'binding:host_id': self._get_host_id(pod)}
# if unbound argument is set to true, it means the port requested
# should not be bound and not associated to the pod. Thus the port dict
# is filled with a generic name (available-port) and without device_id
if unbound:
port_req_body['name'] = 'available-port'
port_req_body['device_id'] = ''
# is filled with a generic name (constants.KURYR_PORT_NAME) if
# port_debug is enabled, and without device_id
if unbound and config.CONF.kubernetes.port_debug:
port_req_body['name'] = constants.KURYR_PORT_NAME
else:
# only set the name if port_debug is enabled
if config.CONF.kubernetes.port_debug:
port_req_body['name'] = self._get_port_name(pod)
port_req_body['device_id'] = self._get_device_id(pod)
if security_groups:
port_req_body['security_groups'] = security_groups

View File

@ -24,8 +24,11 @@ from kuryr.lib import constants as kl_const
from neutronclient.common import exceptions as n_exc
from oslo_config import cfg as oslo_cfg
from oslo_log import log as logging
from oslo_serialization import jsonutils
from kuryr_kubernetes import clients
from kuryr_kubernetes import config
from kuryr_kubernetes import constants
from kuryr_kubernetes.controller.drivers import base
from kuryr_kubernetes.controller.drivers import default_subnet
from kuryr_kubernetes import exceptions
@ -173,6 +176,16 @@ class BaseVIFPool(base.VIFPoolDriver):
ports = neutron.list_ports(**attrs)
return ports['ports']
def _get_in_use_ports(self):
kubernetes = clients.get_kubernetes_client()
in_use_ports = []
running_pods = kubernetes.get(constants.K8S_API_BASE + '/pods')
for pod in running_pods['items']:
annotations = jsonutils.loads(pod['metadata']['annotations'][
constants.K8S_ANNOTATION_VIF])
in_use_ports.append(annotations['versioned_object.data']['id'])
return in_use_ports
class NeutronVIFPool(BaseVIFPool):
"""Manages VIFs for Bare Metal Kubernetes Pods."""
@ -182,15 +195,16 @@ class NeutronVIFPool(BaseVIFPool):
port_id = self._available_ports_pools[pool_key].pop()
except IndexError:
raise exceptions.ResourceNotReady(pod)
neutron = clients.get_neutron_client()
neutron.update_port(
port_id,
{
"port": {
'name': pod['metadata']['name'],
'device_id': pod['metadata']['uid']
}
})
if config.CONF.kubernetes.port_debug:
neutron = clients.get_neutron_client()
neutron.update_port(
port_id,
{
"port": {
'name': pod['metadata']['name'],
'device_id': pod['metadata']['uid']
}
})
# check if the pool needs to be populated
if (self._get_pool_size(pool_key) <
oslo_cfg.CONF.vif_pool.ports_pool_min):
@ -214,12 +228,15 @@ class NeutronVIFPool(BaseVIFPool):
if (not oslo_cfg.CONF.vif_pool.ports_pool_max or
self._get_pool_size(pool_key) <
oslo_cfg.CONF.vif_pool.ports_pool_max):
port_name = (constants.KURYR_PORT_NAME
if config.CONF.kubernetes.port_debug
else '')
try:
neutron.update_port(
port_id,
{
"port": {
'name': 'available-port',
'name': port_name,
'device_id': '',
'security_groups': list(pool_key[2])
}
@ -250,8 +267,16 @@ class NeutronVIFPool(BaseVIFPool):
def _cleanup_precreated_ports(self):
neutron = clients.get_neutron_client()
available_ports = self._get_ports_by_attrs(
name='available-port', device_owner=kl_const.DEVICE_OWNER)
if config.CONF.kubernetes.port_debug:
available_ports = self._get_ports_by_attrs(
name=constants.KURYR_PORT_NAME, device_owner=[
kl_const.DEVICE_OWNER])
else:
kuryr_ports = self._get_ports_by_attrs(
device_owner=kl_const.DEVICE_OWNER)
in_use_ports = self._get_in_use_ports()
available_ports = [port for port in kuryr_ports
if port['id'] not in in_use_ports]
for port in available_ports:
neutron.delete_port(port['id'])
@ -271,14 +296,15 @@ class NestedVIFPool(BaseVIFPool):
port_id = self._available_ports_pools[pool_key].pop()
except IndexError:
raise exceptions.ResourceNotReady(pod)
neutron = clients.get_neutron_client()
neutron.update_port(
port_id,
{
"port": {
'name': pod['metadata']['name'],
}
})
if config.CONF.kubernetes.port_debug:
neutron = clients.get_neutron_client()
neutron.update_port(
port_id,
{
"port": {
'name': pod['metadata']['name'],
}
})
# check if the pool needs to be populated
if (self._get_pool_size(pool_key) <
oslo_cfg.CONF.vif_pool.ports_pool_min):
@ -302,12 +328,15 @@ class NestedVIFPool(BaseVIFPool):
if (not oslo_cfg.CONF.vif_pool.ports_pool_max or
self._get_pool_size(pool_key) <
oslo_cfg.CONF.vif_pool.ports_pool_max):
port_name = (constants.KURYR_PORT_NAME
if config.CONF.kubernetes.port_debug
else '')
try:
neutron.update_port(
port_id,
{
"port": {
'name': 'available-port',
'name': port_name,
'security_groups': list(pool_key[2])
}
})
@ -349,6 +378,7 @@ class NestedVIFPool(BaseVIFPool):
def _recover_precreated_ports(self):
self._precreated_ports(action='recover')
LOG.info("PORTS POOL: pools updated with pre-created ports")
def _remove_precreated_ports(self, trunk_ips=None):
self._precreated_ports(action='free', trunk_ips=trunk_ips)
@ -368,9 +398,16 @@ class NestedVIFPool(BaseVIFPool):
# when a port is attached to a trunk. However, that is not the case
# for other ML2 drivers, such as ODL. So we also need to look for
# compute:kuryr
available_ports = self._get_ports_by_attrs(
name='available-port', device_owner=['trunk:subport',
kl_const.DEVICE_OWNER])
if config.CONF.kubernetes.port_debug:
available_ports = self._get_ports_by_attrs(
name=constants.KURYR_PORT_NAME, device_owner=[
'trunk:subport', kl_const.DEVICE_OWNER])
else:
kuryr_subports = self._get_ports_by_attrs(
device_owner=['trunk:subport', kl_const.DEVICE_OWNER])
in_use_ports = self._get_in_use_ports()
available_ports = [subport for subport in kuryr_subports
if subport['id'] not in in_use_ports]
if not available_ports:
return

View File

@ -15,7 +15,9 @@ import mock
from kuryr.lib import constants as kl_const
from kuryr.lib import exceptions as kl_exc
from neutronclient.common import exceptions as n_exc
from oslo_config import cfg as oslo_cfg
from kuryr_kubernetes import constants
from kuryr_kubernetes.controller.drivers import nested_vlan_vif
from kuryr_kubernetes import exceptions as k_exc
from kuryr_kubernetes.tests import base as test_base
@ -339,6 +341,10 @@ class TestNestedVlanPodVIFDriver(test_base.TestCase):
m_driver._get_network_id.return_value = network_id
m_to_fips.return_value = fixed_ips
oslo_cfg.CONF.set_override('port_debug',
True,
group='kubernetes')
expected = {'port': {'project_id': project_id,
'name': port_name,
'network_id': network_id,
@ -350,7 +356,7 @@ class TestNestedVlanPodVIFDriver(test_base.TestCase):
expected['port']['security_groups'] = security_groups
if unbound:
expected['port']['name'] = 'available-port'
expected['port']['name'] = constants.KURYR_PORT_NAME
ret = cls._get_port_request(m_driver, pod, project_id, subnets,
security_groups, unbound)

View File

@ -17,7 +17,9 @@ import mock
from kuryr.lib import constants as kl_const
from neutronclient.common import exceptions as n_exc
from oslo_config import cfg as oslo_cfg
from kuryr_kubernetes import constants
from kuryr_kubernetes.controller.drivers import neutron_vif
from kuryr_kubernetes import exceptions as k_exc
from kuryr_kubernetes.tests import base as test_base
@ -209,12 +211,15 @@ class NeutronPodVIFDriver(test_base.TestCase):
m_driver._get_device_id.return_value = device_id
m_driver._get_host_id.return_value = host_id
oslo_cfg.CONF.set_override('port_debug',
True,
group='kubernetes')
expected = {'port': {'project_id': project_id,
'name': port_name,
'network_id': network_id,
'fixed_ips': fixed_ips,
'device_owner': kl_const.DEVICE_OWNER,
'device_id': device_id,
'admin_state_up': True,
'binding:host_id': host_id}}
@ -222,17 +227,19 @@ class NeutronPodVIFDriver(test_base.TestCase):
expected['port']['security_groups'] = security_groups
if unbound:
expected['port']['name'] = 'available-port'
expected['port']['device_id'] = ''
expected['port']['name'] = constants.KURYR_PORT_NAME
else:
expected['port']['device_id'] = device_id
ret = cls._get_port_request(m_driver, pod, project_id, subnets,
security_groups, unbound)
self.assertEqual(expected, ret)
m_driver._get_port_name.assert_called_once_with(pod)
m_driver._get_network_id.assert_called_once_with(subnets)
m_to_fips.assert_called_once_with(subnets)
m_driver._get_device_id.assert_called_once_with(pod)
if not unbound:
m_driver._get_port_name.assert_called_once_with(pod)
m_driver._get_device_id.assert_called_once_with(pod)
m_driver._get_host_id.assert_called_once_with(pod)
@mock.patch('kuryr_kubernetes.os_vif_util.osvif_to_neutron_fixed_ips')

View File

@ -21,6 +21,7 @@ from oslo_config import cfg as oslo_cfg
from os_vif.objects import vif as osv_vif
from kuryr_kubernetes import constants
from kuryr_kubernetes.controller.drivers import nested_vlan_vif
from kuryr_kubernetes.controller.drivers import neutron_vif
from kuryr_kubernetes.controller.drivers import vif_pool
@ -82,7 +83,7 @@ def get_port_obj(port_id=None, device_owner=None, ip_address=None):
'description': '',
'tags': [],
'device_id': '',
'name': 'available-port',
'name': constants.KURYR_PORT_NAME,
'admin_state_up': True,
'network_id': 'ba44f957-c467-412b-b985-ae720514bc46',
'tenant_id': 'b6e8fb2bde594673923afc19cf168f3a',
@ -259,6 +260,12 @@ class NeutronVIFPool(test_base.TestCase):
oslo_cfg.CONF.set_override('ports_pool_min',
5,
group='vif_pool')
oslo_cfg.CONF.set_override('port_debug',
True,
group='kubernetes')
oslo_cfg.CONF.set_override('port_debug',
True,
group='kubernetes')
pool_length = 5
m_driver._get_pool_size.return_value = pool_length
@ -295,6 +302,9 @@ class NeutronVIFPool(test_base.TestCase):
oslo_cfg.CONF.set_override('ports_pool_min',
5,
group='vif_pool')
oslo_cfg.CONF.set_override('port_debug',
True,
group='kubernetes')
pool_length = 3
m_driver._get_pool_size.return_value = pool_length
@ -359,6 +369,9 @@ class NeutronVIFPool(test_base.TestCase):
oslo_cfg.CONF.set_override('ports_pool_max',
max_pool,
group='vif_pool')
oslo_cfg.CONF.set_override('port_debug',
True,
group='kubernetes')
m_driver._get_pool_size.return_value = pool_length
self.assertRaises(SystemExit, cls._return_ports_to_pool, m_driver)
@ -367,7 +380,7 @@ class NeutronVIFPool(test_base.TestCase):
port_id,
{
"port": {
'name': 'available-port',
'name': constants.KURYR_PORT_NAME,
'device_id': '',
'security_groups': ['security_group']
}
@ -413,6 +426,12 @@ class NeutronVIFPool(test_base.TestCase):
oslo_cfg.CONF.set_override('ports_pool_max',
0,
group='vif_pool')
oslo_cfg.CONF.set_override('port_debug',
True,
group='kubernetes')
oslo_cfg.CONF.set_override('port_debug',
True,
group='kubernetes')
m_driver._get_pool_size.return_value = pool_length
neutron.update_port.side_effect = n_exc.NeutronClientException
@ -422,7 +441,7 @@ class NeutronVIFPool(test_base.TestCase):
port_id,
{
"port": {
'name': 'available-port',
'name': constants.KURYR_PORT_NAME,
'device_id': '',
'security_groups': ['security_group']
}
@ -684,6 +703,9 @@ class NestedVIFPool(test_base.TestCase):
oslo_cfg.CONF.set_override('ports_pool_min',
5,
group='vif_pool')
oslo_cfg.CONF.set_override('port_debug',
True,
group='kubernetes')
pool_length = 5
m_driver._get_pool_size.return_value = pool_length
@ -719,6 +741,9 @@ class NestedVIFPool(test_base.TestCase):
oslo_cfg.CONF.set_override('ports_pool_min',
5,
group='vif_pool')
oslo_cfg.CONF.set_override('port_debug',
True,
group='kubernetes')
pool_length = 3
m_driver._get_pool_size.return_value = pool_length
@ -782,6 +807,9 @@ class NestedVIFPool(test_base.TestCase):
oslo_cfg.CONF.set_override('ports_pool_max',
max_pool,
group='vif_pool')
oslo_cfg.CONF.set_override('port_debug',
True,
group='kubernetes')
m_driver._get_pool_size.return_value = pool_length
self.assertRaises(SystemExit, cls._return_ports_to_pool, m_driver)
@ -790,7 +818,7 @@ class NestedVIFPool(test_base.TestCase):
port_id,
{
"port": {
'name': 'available-port',
'name': constants.KURYR_PORT_NAME,
'security_groups': ['security_group']
}
})
@ -849,6 +877,9 @@ class NestedVIFPool(test_base.TestCase):
oslo_cfg.CONF.set_override('ports_pool_max',
0,
group='vif_pool')
oslo_cfg.CONF.set_override('port_debug',
True,
group='kubernetes')
m_driver._get_pool_size.return_value = pool_length
neutron.update_port.side_effect = n_exc.NeutronClientException
@ -858,7 +889,7 @@ class NestedVIFPool(test_base.TestCase):
port_id,
{
"port": {
'name': 'available-port',
'name': constants.KURYR_PORT_NAME,
'security_groups': ['security_group']
}
})