# Copyright (c) 2017 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import abc
import collections
import os
import threading

import eventlet
from kuryr.lib._i18n import _
from kuryr.lib import constants as kl_const
from openstack import exceptions as os_exc
from oslo_cache import core as cache
from oslo_concurrency import lockutils
from oslo_config import cfg as oslo_cfg
from oslo_log import log as logging
from oslo_log import versionutils

from kuryr_kubernetes import clients
from kuryr_kubernetes import config
from kuryr_kubernetes import constants
from kuryr_kubernetes.controller.drivers import base
from kuryr_kubernetes.controller.drivers import utils as c_utils
from kuryr_kubernetes.controller.managers import pool
from kuryr_kubernetes import exceptions
from kuryr_kubernetes import os_vif_util as ovu
from kuryr_kubernetes import utils

LOG = logging.getLogger(__name__)


# Moved out from neutron_default group
vif_pool_driver_opts = [
    oslo_cfg.IntOpt('ports_pool_max',
                    help=_("Set a maximum amount of ports per pool. "
                           "0 to disable"),
                    default=0),
    oslo_cfg.IntOpt('ports_pool_min',
                    help=_("Set a target minimum size of the pool of ports"),
                    default=5),
    oslo_cfg.IntOpt('ports_pool_batch',
                    help=_("Number of ports to be created in a bulk request"),
                    default=10),
    oslo_cfg.IntOpt('ports_pool_update_frequency',
                    help=_("Minimum interval (in seconds) "
                           "between pool updates"),
                    default=20),
    oslo_cfg.DictOpt('pools_vif_drivers',
                     help=_("Dict with the pool driver and pod driver to be "
                            "used. If not set, it will take them from the "
                            "kubernetes driver options for pool and pod "
                            "drivers respectively"),
                     default={}, deprecated_for_removal=True,
                     deprecated_since="Stein",
                     deprecated_reason=_(
                         "Mapping from pool->vif does not allow different "
                         "vifs to use the same pool driver. "
                         "Use vif_pool_mapping instead.")),
    oslo_cfg.DictOpt('vif_pool_mapping',
                     help=_("Dict with the pod driver and the corresponding "
                            "pool driver to be used. If not set, it will take "
                            "them from the kubernetes driver options for pool "
                            "and pod drivers respectively"),
                     default={}),
]

oslo_cfg.CONF.register_opts(vif_pool_driver_opts, "vif_pool")

node_vif_driver_caching_opts = [
    oslo_cfg.BoolOpt('caching', default=True,
                     help=_('Enable caching of vifs.')),
    oslo_cfg.IntOpt('cache_time', default=3600,
                    help=_('TTL, in seconds, for cached vifs')),
]

oslo_cfg.CONF.register_opts(node_vif_driver_caching_opts,
                            "node_driver_caching")

cache.configure(oslo_cfg.CONF)
node_driver_cache_region = cache.create_region()
MEMOIZE = cache.get_memoization_decorator(
    oslo_cfg.CONF, node_driver_cache_region, "node_driver_caching")

cache.configure_cache_region(oslo_cfg.CONF, node_driver_cache_region)
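
# Illustrative only: with the option groups registered above, a kuryr.conf
# tuning the ports pools and the node-driver cache could look roughly like
# the sketch below (values are examples, not recommendations; the
# vif_pool_mapping pairing shown is an assumption for illustration):
#
#   [vif_pool]
#   ports_pool_min = 5
#   ports_pool_max = 0
#   ports_pool_batch = 10
#   ports_pool_update_frequency = 20
#   vif_pool_mapping = nested-vlan:nested
#
#   [node_driver_caching]
#   caching = True
#   cache_time = 3600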


VIF_TYPE_TO_DRIVER_MAPPING = {
    'VIFOpenVSwitch': 'neutron-vif',
    'VIFBridge': 'neutron-vif',
    'VIFVlanNested': 'nested-vlan',
    'VIFMacvlanNested': 'nested-macvlan',
    'VIFDPDKNested': 'nested-dpdk',
    'VIFVHostUser': 'neutron-vif',
}
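# The keys above are VIF object class names and the values are pool driver
# aliases. Illustrative lookup (not executed here): for a released
# VIFVlanNested instance,
#   VIF_TYPE_TO_DRIVER_MAPPING[type(vif).__name__]  # -> 'nested-vlan'
# lets a caller resolve which pool driver alias manages that kind of VIF.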


NODE_PORTS_CLEAN_FREQUENCY = 600  # seconds
POPULATE_POOL_TIMEOUT = 420  # seconds
BULK_PORTS_CREATION_REQUESTS = 20


class NoopVIFPool(base.VIFPoolDriver):
    """No pool VIFs for Kubernetes Pods"""

    def set_vif_driver(self, driver):
        self._drv_vif = driver

    def request_vif(self, pod, project_id, subnets, security_groups):
        return self._drv_vif.request_vif(pod, project_id, subnets,
                                         security_groups)

    def release_vif(self, pod, vif, *argv):
        self._drv_vif.release_vif(pod, vif, *argv)

    def activate_vif(self, vif, **kwargs):
        self._drv_vif.activate_vif(vif, **kwargs)

    def update_vif_sgs(self, pod, sgs):
        self._drv_vif.update_vif_sgs(pod, sgs)

    def remove_sg_from_pools(self, sg_id, net_id):
        pass

    def sync_pools(self):
        pass


class BaseVIFPool(base.VIFPoolDriver, metaclass=abc.ABCMeta):
    """Skeletal pool driver.

    In order to handle the pools of ports, a few dicts are used:

    _available_ports_pools is a dictionary with the ready to use Neutron
    ports. The keys are the 'pool_key' and the values are dicts keyed by the
    security groups tuple, each holding a list of 'port_id's.

    _existing_vifs is a dictionary containing the port vif objects. The keys
    are the 'port_id' and the values are the vif objects.

    _recyclable_ports is a dictionary with the Neutron ports to be recycled.
    The keys are the 'port_id' and their values are the 'pool_key'.

    _populate_pool_lock is a dict with the pool_key as key and a lock as
    value. Also, there is a _lock to control access to the
    _populate_pool_lock dict.

    The following driver configuration options exist:

    - ports_pool_max: it specifies how many ports can be kept at each pool.
    If the pool already reached the specified size, the ports to be recycled
    are deleted instead. If set to 0, the limit is disabled and ports are
    always recycled.
    - ports_pool_min: minimum desired number of ready to use ports at
    populated pools. Should be smaller than ports_pool_max (if enabled).
    - ports_pool_batch: target number of ports to be created in bulk requests
    when populating pools.
    - ports_pool_update_frequency: interval in seconds between ports pool
    updates for recycling ports.

    Also, it has a Semaphore _create_ports_semaphore to restrict the number
    of bulk port creation calls running in parallel.
    """

    def __init__(self):
        # Note(ltomasbo) Execute the port recycling periodic actions in a
        # background thread
        self._recovered_pools = False
        eventlet.spawn(self._return_ports_to_pool)
        eventlet.spawn(self._cleanup_removed_nodes)

    def set_vif_driver(self, driver):
        self._drv_vif = driver

    def activate_vif(self, vif, **kwargs):
        self._drv_vif.activate_vif(vif, **kwargs)

    def update_vif_sgs(self, pod, sgs):
        self._drv_vif.update_vif_sgs(pod, sgs)

    def _get_pool_size(self, pool_key):
        pool = self._available_ports_pools.get(pool_key, {})
        pool_members = []
        for port_list in pool.values():
            pool_members.extend(port_list)
        return len(pool_members)

    def _get_host_addr(self, pod):
        return pod['status']['hostIP']

    def _get_pool_key(self, host, project_id, net_id=None, subnets=None):
        if not net_id and subnets:
            net_obj = list(subnets.values())[0]
            net_id = net_obj.id
        pool_key = (host, project_id, net_id)
        return pool_key

    def _get_pool_key_net(self, pool_key):
        return pool_key[2]

    def request_vif(self, pod, project_id, subnets, security_groups):
        if not self._recovered_pools:
            LOG.debug("Kuryr-controller not yet ready to handle new pods.")
            raise exceptions.ResourceNotReady(pod)
        try:
            host_addr = self._get_host_addr(pod)
        except KeyError:
            return None

        pool_key = self._get_pool_key(host_addr, project_id, None, subnets)

        # NOTE(maysams): It's possible that more recent Pods will retrieve
        # the Ports from the pool that older Pods were waiting for. In case
        # this happens, the event will be retried.
        try:
            return self._get_port_from_pool(pool_key, pod, subnets,
                                            tuple(sorted(security_groups)))
        except exceptions.ResourceNotReady:
            LOG.debug("Ports pool does not have available ports: %s", pool_key)
            if self._populate_pool(pool_key, pod, subnets,
                                   tuple(sorted(security_groups))):
                return self._get_port_from_pool(
                    pool_key, pod, subnets, tuple(sorted(security_groups)))
            raise

    def _set_port_debug(self, port_id, pod):
        """Sets a name on the port to simplify debugging."""
        raise NotImplementedError()

    def _get_populate_pool_lock(self, pool_key):
        with self._lock:
            return self._populate_pool_lock[pool_key]

    def _get_port_from_pool(self, pool_key, pod, subnets, security_groups):
        try:
            pool_ports = self._available_ports_pools[pool_key]
        except (KeyError, AttributeError):
            raise exceptions.ResourceNotReady(pod)

        try:
            port_id = pool_ports[security_groups].pop()
        except (KeyError, IndexError):
            # Get another port from the pool and update the SG to the
            # appropriate one. It uses a port from the group that was updated
            # the longest time ago - these will be at the front of the
            # OrderedDict.
            for sg_group, ports in pool_ports.items():
                try:
                    port_id = pool_ports[sg_group].pop()
                    break
                except (IndexError, KeyError):
                    continue
            else:
                # pool is empty, no port to reuse
                raise exceptions.ResourceNotReady(pod)
            os_net = clients.get_network_client()
            os_net.update_port(port_id, security_groups=list(security_groups))
        if config.CONF.kubernetes.port_debug:
            self._set_port_debug(port_id, pod)
        eventlet.spawn(self._populate_pool, pool_key, pod, subnets,
                       security_groups)
        # Add protection from port_id not in existing_vifs
        try:
            port = self._existing_vifs[port_id]
        except KeyError:
            LOG.debug('Missing port on existing_vifs, this should not happen.'
                      ' Retrying.')
            raise exceptions.ResourceNotReady(pod)
        return port

    def _populate_pool(self, pool_key, pod, subnets, security_groups):
        # REVISIT(ltomasbo): Drop the subnets parameter and get the
        # information from the pool_key, which will be required when
        # multi-network is supported
        kubernetes = clients.get_kubernetes_client()

        if not self._recovered_pools:
            LOG.debug("Kuryr-controller not yet ready to populate pools.")
            return False
        ports_pool_min = oslo_cfg.CONF.vif_pool.ports_pool_min
        lock = self._get_populate_pool_lock(pool_key)
        # NOTE(maysams): Only allow one VIF request per pool at a time and
        # time out if acquiring the lock takes longer than
        # POPULATE_POOL_TIMEOUT seconds.
        if lock.acquire(timeout=POPULATE_POOL_TIMEOUT):
            pool_size = self._get_pool_size(pool_key)
            try:
                if pool_size < ports_pool_min:
                    num_ports = max(oslo_cfg.CONF.vif_pool.ports_pool_batch,
                                    ports_pool_min - pool_size)
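                    # Worked example (with the option defaults above):
                    # ports_pool_min=5, ports_pool_batch=10 and a pool
                    # currently holding 2 ready ports gives
                    # max(10, 5 - 2) == 10 ports in a single bulk request.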
                    try:
                        vifs = self._drv_vif.request_vifs(
                            pod=pod,
                            project_id=pool_key[1],
                            subnets=subnets,
                            security_groups=security_groups,
                            num_ports=num_ports,
                            semaphore=self._create_ports_semaphore)
                    except os_exc.SDKException as exc:
                        kubernetes.add_event(
                            pod, 'FailToPopulateVIFPool',
                            f'There was an error during populating VIF pool '
                            f'for pod: {exc.message}', type_='Warning')
                        raise

                    for vif in vifs:
                        self._existing_vifs[vif.id] = vif
                        self._available_ports_pools[pool_key].setdefault(
                            security_groups, []).append(vif.id)
                    if vifs:
                        # Mark it as updated most recently.
                        self._available_ports_pools[pool_key].move_to_end(
                            security_groups)
            finally:
                lock.release()
        else:
            return False
        return True

    def release_vif(self, pod, vif, project_id, host_addr=None):
        if not self._recovered_pools:
            LOG.debug("Kuryr-controller not yet ready to remove pods.")
            raise exceptions.ResourceNotReady(pod)
        if not host_addr:
            host_addr = self._get_host_addr(pod)

        pool_key = self._get_pool_key(host_addr, project_id, vif.network.id,
                                      None)

        try:
            if not self._existing_vifs.get(vif.id):
                self._existing_vifs[vif.id] = vif
            self._recyclable_ports[vif.id] = pool_key
        except AttributeError:
            LOG.debug("Kuryr-controller is not ready to handle the pools yet.")
            raise exceptions.ResourceNotReady(pod)

    def _return_ports_to_pool(self):
        raise NotImplementedError()

    def _recover_precreated_ports(self):
        raise NotImplementedError()

    def _get_in_use_ports_info(self):
        kubernetes = clients.get_kubernetes_client()
        in_use_ports = []
        networks = {}
        kuryr_ports = kubernetes.get(constants.K8S_API_CRD_KURYRPORTS)
        for kp in kuryr_ports['items']:
            vifs = c_utils.get_vifs(kp)
            for data in vifs.values():
                in_use_ports.append(data.id)
                networks[data.network.id] = data.network
        return in_use_ports, networks

    def list_pools(self):
        return self._available_ports_pools

    def show_pool(self, pool_key):
        return self._available_ports_pools.get(pool_key)

    def delete_network_pools(self, net_id):
        raise NotImplementedError()

    def remove_sg_from_pools(self, sg_id, net_id):
        os_net = clients.get_network_client()
        for pool_key, pool_ports in list(self._available_ports_pools.items()):
            if self._get_pool_key_net(pool_key) != net_id:
                continue
            for sg_key, ports in list(pool_ports.items()):
                if sg_id not in sg_key:
                    continue
                # remove the pool associated to that SG
                try:
                    del self._available_ports_pools[pool_key][sg_key]
                except KeyError:
                    LOG.debug("SG already removed from the pool. Ports "
                              "already re-used, no need to change their "
                              "associated SGs.")
                    continue
                for port_id in ports:
                    # remove all SGs from the port to be reused
                    os_net.update_port(port_id, security_groups=None)
                    # add the port to the default pool
                    self._available_ports_pools[pool_key].setdefault(
                        (), []).append(port_id)
                # NOTE(ltomasbo): as these ports were not created for this
                # pool, ensure they are used first by marking them as the
                # most outdated
                self._available_ports_pools[pool_key].move_to_end(
                    (), last=False)

    def _create_healthcheck_file(self):
        # Note(ltomasbo): Create a health check file when the pre-created
        # ports are loaded into their corresponding pools. This file is used
        # by the readiness probe when the controller is deployed in
        # containerized mode. This way the controller pod will not be ready
        # until all the pre-created ports have been loaded
        try:
            with open('/tmp/pools_loaded', 'a'):
                LOG.debug("Health check file created for readiness probe")
        except IOError:
            LOG.exception("I/O error creating the health check file.")

    @lockutils.synchronized('return_to_pool_baremetal')
    @lockutils.synchronized('return_to_pool_nested')
    def sync_pools(self):
        # NOTE(ltomasbo): Ensure readiness probe is not set to true until the
        # pools sync is completed in case of controller restart
        try:
            os.remove('/tmp/pools_loaded')
        except OSError:
            pass

        self._available_ports_pools = collections.defaultdict(
            collections.OrderedDict)
        self._existing_vifs = collections.defaultdict()
        self._recyclable_ports = collections.defaultdict()
        self._lock = threading.Lock()
        self._populate_pool_lock = collections.defaultdict(threading.Lock)
        semaphore = eventlet.semaphore.Semaphore(BULK_PORTS_CREATION_REQUESTS)
        self._create_ports_semaphore = semaphore

    def _get_trunks_info(self):
        """Returns information about trunks and their subports.

        This method searches for parent ports and subports among the active
        neutron ports.
        To find the parent ports it filters the ones that have trunk_details,
        i.e., the ones that are the parent port of a trunk.
        To find the subports to recover, it filters out the ports that are
        already in use by running kubernetes pods. It also filters out the
        ports whose device_owner is not related to subports, i.e., the ports
        that are not attached to trunks, such as active ports allocated to
        running VMs.
        At the same time it collects information about ports subnets to
        minimize the number of interactions with the Neutron API.

        It returns three dictionaries with the needed information about the
        parent ports, subports and subnets

        :return: 3 dicts with the trunk details (Key: trunk_id; Value: dict
        containing ip and subports), subport details (Key: port_id; Value:
        port_object), and subnet details (Key: subnet_id; Value: subnet dict)
        """
        # REVISIT(ltomasbo): there is no need to recover the subports
        # belonging to trunk ports whose parent port is DOWN as that means no
        # pods can be scheduled there. We may need to update this if we allow
        # lively extending the kubernetes cluster with VMs that already have
        # precreated subports. For instance by shutting down and up a
        # kubernetes Worker VM with subports already attached, and the
        # controller is restarted in between.
        os_net = clients.get_network_client()
        parent_ports = {}
        subports = {}
        subnets = {}

        attrs = {'status': 'ACTIVE'}
        tags = config.CONF.neutron_defaults.resource_tags
        if tags:
            attrs['tags'] = tags

        all_active_ports = os_net.ports(**attrs)
        in_use_ports, in_use_networks = self._get_in_use_ports_info()

        for port in all_active_ports:
            # Parent port
            # NOTE(dulek): We do not filter by worker_nodes_subnets here
            # meaning that we might include some unrelated trunks,
            # but the consequence is only memory usage.
            if port.trunk_details and port.fixed_ips:
                parent_ports[port.trunk_details['trunk_id']] = {
                    'ip': port.fixed_ips[0]['ip_address'],
                    'subports': port.trunk_details['sub_ports']}
            else:
                # Filter to only get subports that are not in use
                if (port.id not in in_use_ports and
                        port.device_owner in ['trunk:subport',
                                              kl_const.DEVICE_OWNER]):
                    subports[port.id] = port
                    # NOTE(ltomasbo): _get_subnet can be costly as it
                    # needs to call neutron to get network and subnet
                    # information. This ensures it is only called once
                    # per subnet in use
                    subnet_id = port.fixed_ips[0]['subnet_id']
                    if not subnets.get(subnet_id):
                        # NOTE(maysams): Avoid calling Neutron by
                        # getting the Network and Subnet info from
                        # Network defined on an existing KuryrPort CR.
                        # This assumes only one Subnet exists per Network.
                        network = in_use_networks.get(port.network_id)
                        if network:
                            subnets[subnet_id] = {subnet_id: network}
                        else:
                            subnets[subnet_id] = {
                                subnet_id: utils.get_subnet(subnet_id)}
        return parent_ports, subports, subnets

    def _cleanup_leftover_ports(self):
        os_net = clients.get_network_client()
        existing_ports = os_net.ports(device_owner=kl_const.DEVICE_OWNER,
                                      status='DOWN')

        tags = config.CONF.neutron_defaults.resource_tags
        if tags:
            nets = os_net.networks(tags=tags)
            nets_ids = [n.id for n in nets]
            for port in existing_ports:
                net_id = port.network_id
                if net_id in nets_ids:
                    if port.binding_host_id:
                        if set(tags).difference(set(port.tags)):
                            # delete the port if it has binding details, it
                            # belongs to the deployment subnet and it does not
                            # have the right tags
                            try:
                                os_net.delete_port(port.id)
                            except os_exc.SDKException:
                                LOG.debug("Problem deleting leftover port %s. "
                                          "Skipping.", port.id)
                    else:
                        # delete port if they have no binding but belong to
                        # the deployment networks, regardless of their tagging
                        try:
                            os_net.delete_port(port.id)
                        except os_exc.SDKException:
                            LOG.debug("Problem deleting leftover port %s. "
                                      "Skipping.", port.id)
                            continue
        else:
            c_utils.delete_ports([p for p in existing_ports
                                  if not p.binding_host_id])

    def _cleanup_removed_nodes(self):
        """Remove ports associated to removed nodes."""
        previous_ports_to_remove = []
        while True:
            # NOTE(ltomasbo): Nodes are not expected to be removed
            # frequently, so there is no need to execute this frequently
            # either
            eventlet.sleep(NODE_PORTS_CLEAN_FREQUENCY)
            try:
                self._trigger_removed_nodes_ports_cleanup(
                    previous_ports_to_remove)
            except Exception:
                LOG.exception('Error while removing the ports associated to '
                              'deleted nodes. It will be retried in %s '
                              'seconds', NODE_PORTS_CLEAN_FREQUENCY)

    def _trigger_removed_nodes_ports_cleanup(self, previous_ports_to_remove):
        """Remove ports associated to removed nodes.

        There are two types of ports pool, one for neutron and one for
        nested. For the nested case, the ports lose their device_owner after
        being detached, i.e., after the node they belong to got removed. This
        means we cannot find them unless they have been tagged.

        For the neutron ones, we rely on them having the kuryr device owner
        and not having binding information, thus ensuring they are not
        attached to any node. However, to avoid the case where those ports
        are being created at the same time as the cleanup process, we don't
        delete them unless we have seen them for 2 iterations.
        """
        if not self._recovered_pools:
            LOG.debug("Kuryr-controller not yet ready to perform nodes"
                      " cleanup.")
            return
        os_net = clients.get_network_client()
        tags = config.CONF.neutron_defaults.resource_tags
        if tags:
            subnetpool_id = config.CONF.namespace_subnet.pod_subnet_pool
            if subnetpool_id:
                subnets = os_net.subnets(tags=tags,
                                         subnetpool_id=subnetpool_id)
                subnets_ids = [s.id for s in subnets]
            else:
                subnets_ids = [config.CONF.neutron_defaults.pod_subnet]

            # NOTE(ltomasbo): Detached subports get their device_owner unset
            detached_subports = os_net.ports(status='DOWN', tags=tags)
            for subport in detached_subports:
                # FIXME(ltomasbo): Looking for trunk:subport is only needed
                # due to a bug in neutron that does not reset the
                # device_owner after the port is detached from the trunk
                if subport.device_owner not in ['', 'trunk:subport']:
                    continue
                if subport.id not in previous_ports_to_remove:
                    # FIXME(ltomasbo): While the above problem persists, we
                    # need to add protection for recently created ports that
                    # are still being attached
                    previous_ports_to_remove.append(subport.id)
                    continue
                # check if port belonged to kuryr and it was a subport
                # FIXME(ltomasbo): Assuming single stack
                if len(subport.fixed_ips) != 1:
                    # This should never happen as there is no option to create
                    # ports without IPs in Neutron, yet we hit it. So adding
                    # protection from it
                    continue
                if subport.fixed_ips[0].get('subnet_id') not in subnets_ids:
                    continue
                try:
                    del self._existing_vifs[subport.id]
                except KeyError:
                    LOG.debug('Port %s is not in the ports list.', subport.id)
                port_deleted = c_utils.delete_port(subport)
                if port_deleted:
                    previous_ports_to_remove.remove(subport.id)

            # normal ports, or subports not yet attached
            existing_ports = os_net.ports(
                device_owner=kl_const.DEVICE_OWNER,
                status='DOWN',
                tags=tags)
        else:
            # normal ports, or subports not yet attached
            existing_ports = os_net.ports(
                device_owner=kl_const.DEVICE_OWNER,
                status='DOWN')

        for port in existing_ports:
            # NOTE(ltomasbo): It may be that the port got just created and it
            # is still being attached and/or being tagged.
            if port.id not in previous_ports_to_remove:
                previous_ports_to_remove.append(port.id)
                continue

            if not port.binding_host_id:
                try:
                    del self._existing_vifs[port.id]
                except KeyError:
                    LOG.debug('Port %s is not in the ports list.', port.id)
                try:
                    os_net.delete_port(port.id)
                except os_exc.SDKException:
                    LOG.debug("Problem deleting leftover port %s. "
                              "Skipping.", port.id)
                else:
                    previous_ports_to_remove.remove(port.id)


class NeutronVIFPool(BaseVIFPool):
    """Manages VIFs for Bare Metal Kubernetes Pods."""

    def _get_host_addr(self, pod):
        return pod['spec']['nodeName']

    def _set_port_debug(self, port_id, pod):
        os_net = clients.get_network_client()
        os_net.update_port(port_id, name=c_utils.get_port_name(pod),
                           device_id=pod['metadata']['uid'])

    def _return_ports_to_pool(self):
        """Recycle ports to be reused by future pods.

        For each port in the recyclable_ports dict it reapplies the
        security group if it has been changed and it changes the port
        name to available_port if the port_debug option is enabled.
        Then the port_id is included in the dict with the available_ports.

        If a maximum number of ports per pool is set, the port will be
        deleted if the maximum has been already reached.
        """
        while True:
            eventlet.sleep(oslo_cfg.CONF.vif_pool.ports_pool_update_frequency)
            try:
                self._trigger_return_to_pool()
            except Exception:
                LOG.exception(
                    'Error while returning ports to pool. '
                    'It will be retried in %s seconds',
                    oslo_cfg.CONF.vif_pool.ports_pool_update_frequency)
@lockutils.synchronized('return_to_pool_baremetal')
|
2018-04-25 09:31:10 +00:00
|
|
|
def _trigger_return_to_pool(self):
|
2020-08-28 15:50:06 +02:00
|
|
|
if not self._recovered_pools:
|
2020-09-04 13:41:38 +02:00
|
|
|
LOG.debug("Kuryr-controller not yet ready to return ports to "
|
|
|
|
"pools.")
|
2018-10-29 16:30:17 +01:00
|
|
|
return
|
2019-11-29 07:30:03 +01:00
|
|
|
os_net = clients.get_network_client()
|
2018-04-25 09:31:10 +00:00
|
|
|
sg_current = {}
|
|
|
|
if not config.CONF.kubernetes.port_debug:
|
2019-08-05 12:21:50 +02:00
|
|
|
attrs = {'device_owner': kl_const.DEVICE_OWNER}
|
|
|
|
tags = config.CONF.neutron_defaults.resource_tags
|
|
|
|
if tags:
|
|
|
|
attrs['tags'] = tags
|
2019-12-02 16:37:55 +01:00
|
|
|
|
|
|
|
for port in os_net.ports(**attrs):
|
|
|
|
if port.id in self._recyclable_ports:
|
|
|
|
sg_current[port.id] = tuple(sorted(
|
|
|
|
port.security_group_ids))
|
2018-04-25 09:31:10 +00:00
|
|
|
|
2020-01-09 11:52:47 +01:00
|
|
|
for port_id, pool_key in list(self._recyclable_ports.items()):
|
2018-04-25 09:31:10 +00:00
|
|
|
if (not oslo_cfg.CONF.vif_pool.ports_pool_max or
|
|
|
|
self._get_pool_size(pool_key) <
|
|
|
|
oslo_cfg.CONF.vif_pool.ports_pool_max):
|
|
|
|
port_name = (constants.KURYR_PORT_NAME
|
|
|
|
if config.CONF.kubernetes.port_debug
|
|
|
|
else '')
|
2019-02-04 12:32:12 +01:00
|
|
|
if config.CONF.kubernetes.port_debug:
|
2017-02-22 12:00:48 +01:00
|
|
|
try:
|
2019-11-29 07:30:03 +01:00
|
|
|
os_net.update_port(port_id, name=port_name,
|
|
|
|
device_id='')
|
|
|
|
except os_exc.SDKException:
|
2019-02-04 12:32:12 +01:00
|
|
|
LOG.warning("Error changing name for port %s to be "
|
2018-04-25 09:31:10 +00:00
|
|
|
"reused, put back on the cleanable "
|
|
|
|
"pool.", port_id)
|
|
|
|
continue
|
2021-12-06 16:58:27 +01:00
|
|
|
sg = sg_current.get(port_id)
|
|
|
|
self._available_ports_pools[pool_key].setdefault(
|
|
|
|
sg, []).append(port_id)
|
|
|
|
# Move it to the end of ports to update the SG.
|
|
|
|
self._available_ports_pools[pool_key].move_to_end(sg)
|
2018-04-25 09:31:10 +00:00
|
|
|
else:
|
|
|
|
try:
|
|
|
|
del self._existing_vifs[port_id]
|
2019-11-29 07:30:03 +01:00
|
|
|
os_net.delete_port(port_id)
|
2018-04-25 09:31:10 +00:00
|
|
|
except KeyError:
|
|
|
|
LOG.debug('Port %s is not in the ports list.', port_id)
|
|
|
|
try:
|
2017-02-22 12:00:48 +01:00
|
|
|
del self._recyclable_ports[port_id]
|
2018-04-25 09:31:10 +00:00
|
|
|
except KeyError:
|
|
|
|
LOG.debug('Port already recycled: %s', port_id)
|
2017-02-22 12:44:46 +01:00
|
|
|
|
2019-11-26 14:09:32 +01:00
|
|
|
def sync_pools(self):
|
|
|
|
super(NeutronVIFPool, self).sync_pools()
|
|
|
|
# NOTE(ltomasbo): Ensure previously created ports are recovered into
|
|
|
|
# their respective pools
|
|
|
|
self._cleanup_leftover_ports()
|
|
|
|
self._recover_precreated_ports()
|
|
|
|
self._recovered_pools = True
|
|
|
|
|
2017-06-15 13:07:41 +00:00
|
|
|
def _recover_precreated_ports(self):
|
2019-12-02 16:37:55 +01:00
|
|
|
os_net = clients.get_network_client()
|
2019-08-05 12:21:50 +02:00
|
|
|
attrs = {'device_owner': kl_const.DEVICE_OWNER}
|
|
|
|
tags = config.CONF.neutron_defaults.resource_tags
|
|
|
|
if tags:
|
|
|
|
attrs['tags'] = tags
|
|
|
|
|
2017-09-18 11:22:22 +00:00
|
|
|
if config.CONF.kubernetes.port_debug:
|
2019-08-05 12:21:50 +02:00
|
|
|
attrs['name'] = constants.KURYR_PORT_NAME
|
2019-12-02 16:37:55 +01:00
|
|
|
available_ports = os_net.ports(**attrs)
|
2017-09-18 11:22:22 +00:00
|
|
|
else:
|
2019-12-02 16:37:55 +01:00
|
|
|
kuryr_ports = os_net.ports(**attrs)
|
2021-02-10 00:11:55 +00:00
|
|
|
in_use_ports, _ = self._get_in_use_ports_info()
|
2017-09-18 11:22:22 +00:00
|
|
|
available_ports = [port for port in kuryr_ports
|
2019-12-02 16:37:55 +01:00
|
|
|
if port.id not in in_use_ports]
|
2017-09-27 17:02:32 +00:00
|
|
|
|
2018-12-17 17:38:44 +01:00
|
|
|
_, available_subports, _ = self._get_trunks_info()
|
2017-06-15 13:07:41 +00:00
|
|
|
for port in available_ports:
|
2018-12-17 17:38:44 +01:00
|
|
|
# NOTE(ltomasbo): ensure subports are not considered for
|
|
|
|
# recovering in the case of multi pools
|
2019-12-02 16:37:55 +01:00
|
|
|
if available_subports.get(port.id):
|
2018-12-17 17:38:44 +01:00
|
|
|
continue
|
2019-12-02 16:37:55 +01:00
|
|
|
if not port.binding_vif_type or not port.binding_host_id:
|
2018-12-17 17:38:44 +01:00
|
|
|
# NOTE(ltomasbo): kuryr-controller is running without the
|
|
|
|
# rights to get the needed information to recover the ports.
|
|
|
|
# Thus, removing the port instead
|
2019-11-29 07:30:03 +01:00
|
|
|
os_net = clients.get_network_client()
|
2019-12-02 16:37:55 +01:00
|
|
|
os_net.delete_port(port.id)
|
2018-12-17 17:38:44 +01:00
|
|
|
continue
|
2019-12-02 16:37:55 +01:00
|
|
|
subnet_id = port.fixed_ips[0]['subnet_id']
|
2017-09-27 17:02:32 +00:00
|
|
|
subnet = {
|
2018-08-09 17:24:17 +08:00
|
|
|
subnet_id: utils.get_subnet(subnet_id)}
|
2019-12-02 16:37:55 +01:00
|
|
|
vif = ovu.neutron_to_osvif_vif(port.binding_vif_type, port, subnet)
|
2018-02-28 17:22:44 +00:00
|
|
|
net_obj = subnet[subnet_id]
|
2019-12-02 16:37:55 +01:00
|
|
|
pool_key = self._get_pool_key(port.binding_host_id,
|
|
|
|
port.project_id,
|
2018-02-28 17:22:44 +00:00
|
|
|
net_obj.id, None)
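# NOTE: the pool key is derived from the host, project and network of the
# port; within each pool the ports are further grouped by the sorted tuple
# of their security group IDs (see the setdefault() call below).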
|
2017-09-27 17:02:32 +00:00
|
|
|
|
2019-12-02 16:37:55 +01:00
|
|
|
self._existing_vifs[port.id] = vif
|
2021-12-06 16:58:27 +01:00
|
|
|
self._available_ports_pools[pool_key].setdefault(
|
2019-12-02 16:37:55 +01:00
|
|
|
tuple(sorted(port.security_group_ids)), []).append(port.id)
|
2017-09-27 17:02:32 +00:00
|
|
|
|
|
|
|
LOG.info("PORTS POOL: pools updated with pre-created ports")
|
2017-11-08 10:40:32 +01:00
|
|
|
self._create_healthcheck_file()
|
2017-06-15 13:07:41 +00:00
|
|
|
|
2018-04-25 09:31:10 +00:00
|
|
|
def delete_network_pools(self, net_id):
|
2019-11-26 14:09:32 +01:00
|
|
|
if not self._recovered_pools:
|
2020-09-04 13:41:38 +02:00
|
|
|
LOG.debug("Kuryr-controller not yet ready to delete network "
|
|
|
|
"pools.")
|
2018-10-29 16:30:17 +01:00
|
|
|
raise exceptions.ResourceNotReady(net_id)
|
2022-04-04 11:38:20 +02:00
|
|
|
|
|
|
|
epool = eventlet.GreenPool(constants.LEFTOVER_RM_POOL_SIZE)
|
2018-08-10 12:14:37 +02:00
|
|
|
|
|
|
|
# NOTE(ltomasbo): The pods should already be deleted, but their
|
|
|
|
# associated ports may not have been recycled yet and therefore may
|
|
|
|
# not be in the available_ports_pools dict. The next call forces
|
|
|
|
# them into that dict before cleaning it up
|
2018-04-25 09:31:10 +00:00
|
|
|
self._trigger_return_to_pool()
|
2020-01-09 11:52:47 +01:00
|
|
|
for pool_key, ports in list(self._available_ports_pools.items()):
|
2018-04-25 09:31:10 +00:00
|
|
|
if self._get_pool_key_net(pool_key) != net_id:
|
|
|
|
continue
|
2019-02-04 12:32:12 +01:00
|
|
|
ports_id = []
|
|
|
|
for sg_ports in ports.values():
|
|
|
|
ports_id.extend(sg_ports)
|
2018-04-25 09:31:10 +00:00
|
|
|
for port_id in ports_id:
|
|
|
|
try:
|
|
|
|
del self._existing_vifs[port_id]
|
|
|
|
except KeyError:
|
|
|
|
LOG.debug('Port %s is not in the ports list.', port_id)
|
2019-11-29 07:30:03 +01:00
|
|
|
# NOTE(gryf): openstack client doesn't return information if
|
|
|
|
# the port does not exist
|
2022-04-04 11:38:20 +02:00
|
|
|
|
|
|
|
# Delete ports concurrently
|
|
|
|
for result in epool.imap(c_utils.delete_neutron_port, ports_id):
|
|
|
|
if result:
|
|
|
|
LOG.error('During Neutron port deletion an error occurred: '
|
|
|
|
'%s', result)
|
|
|
|
raise result
|
2019-11-29 07:30:03 +01:00
|
|
|
|
2021-12-06 16:58:27 +01:00
|
|
|
del self._available_ports_pools[pool_key]
|
2021-12-03 09:24:45 +00:00
|
|
|
with self._lock:
|
|
|
|
try:
|
|
|
|
del self._populate_pool_lock[pool_key]
|
|
|
|
except KeyError:
|
|
|
|
pass
|
2018-04-25 09:31:10 +00:00
|
|
|
|
2017-02-22 12:44:46 +01:00
|
|
|
|
|
|
|
class NestedVIFPool(BaseVIFPool):
|
|
|
|
"""Manages VIFs for nested Kubernetes Pods.
|
|
|
|
|
|
|
|
In order to handle the pools of ports for nested Pods, an extra dict is
|
|
|
|
used:
|
|
|
|
_known_trunk_ids is a dictionary that keeps the trunk port ids associated
|
|
|
|
to each pool_key to skip calls to neutron to get the trunk information.
|
|
|
|
"""
|
2017-02-22 12:51:35 +01:00
|
|
|
_known_trunk_ids = collections.defaultdict(str)
|
2017-02-22 12:44:46 +01:00
|
|
|
|
2017-08-29 08:43:34 +02:00
|
|
|
def __init__(self):
|
|
|
|
super(NestedVIFPool, self).__init__()
|
|
|
|
# Start the pool manager so that pools can be populated/freed on
|
|
|
|
# demand
|
|
|
|
if config.CONF.kubernetes.enable_manager:
|
|
|
|
self._pool_manager = pool.PoolManager()
|
|
|
|
|
|
|
|
def set_vif_driver(self, driver):
|
|
|
|
self._drv_vif = driver
|
|
|
|
|
2022-08-23 11:54:04 +02:00
|
|
|
def release_vif(self, pod, vif, project_id):
|
2020-08-28 15:50:06 +02:00
|
|
|
if not self._recovered_pools:
|
2020-09-04 13:41:38 +02:00
|
|
|
LOG.debug("Kuryr-controller not yet ready to remove pods.")
|
2020-08-28 15:50:06 +02:00
|
|
|
raise exceptions.ResourceNotReady(pod)
|
2019-10-15 12:12:04 +02:00
|
|
|
try:
|
|
|
|
host_addr = self._get_host_addr(pod)
|
|
|
|
except KeyError:
|
|
|
|
name = pod['metadata']['name']
|
|
|
|
LOG.warning("Pod %s does not have status.hostIP field set when "
|
|
|
|
"getting deleted. This is unusual. Trying to "
|
|
|
|
"determine the IP by calling Neutron.",
|
|
|
|
name)
|
|
|
|
|
2022-08-23 11:54:04 +02:00
|
|
|
parent_id = utils.get_parent_port_id(vif)
|
2019-10-15 12:12:04 +02:00
|
|
|
if not parent_id:
|
|
|
|
LOG.warning("Port %s not found, ignoring its release request.",
|
|
|
|
vif.id)
|
|
|
|
return
|
|
|
|
|
2022-08-23 11:54:04 +02:00
|
|
|
host_addr = utils.get_parent_port_ip(parent_id)
|
2019-10-15 12:12:04 +02:00
|
|
|
LOG.debug("Determined hostIP for pod %s is %s", name, host_addr)
|
|
|
|
|
|
|
|
super(NestedVIFPool, self).release_vif(
|
2022-08-23 11:54:04 +02:00
|
|
|
pod, vif, project_id, host_addr=host_addr)
|
2019-10-15 12:12:04 +02:00
|
|
|
|
2021-12-06 16:58:27 +01:00
|
|
|
def _set_port_debug(self, port_id, pod):
|
2019-11-29 07:30:03 +01:00
|
|
|
os_net = clients.get_network_client()
|
2021-12-06 16:58:27 +01:00
|
|
|
os_net.update_port(port_id, name=c_utils.get_port_name(pod))
|
2017-02-22 12:44:46 +01:00
|
|
|
|
|
|
|
def _return_ports_to_pool(self):
|
|
|
|
"""Recycle ports to be reused by future pods.
|
|
|
|
|
2019-11-29 07:30:03 +01:00
|
|
|
For each port in the recyclable_ports dict it reapplies
|
2017-09-20 11:40:16 +00:00
|
|
|
the security groups if they have been changed and it changes the port
|
|
|
|
name to available_port if the port_debug option is enabled.
|
|
|
|
Then the port_id is included in the dict with the available_ports.
|
2017-02-22 12:44:46 +01:00
|
|
|
|
2019-06-06 12:52:59 +03:00
|
|
|
If a maximum number of ports per pool is set, the port will be
|
|
|
|
deleted if the maximum has been already reached.
|
2017-02-22 12:44:46 +01:00
|
|
|
"""
|
2017-02-22 12:51:35 +01:00
|
|
|
while True:
|
2018-04-25 09:31:10 +00:00
|
|
|
eventlet.sleep(oslo_cfg.CONF.vif_pool.ports_pool_update_frequency)
|
2020-09-22 10:55:43 +02:00
|
|
|
try:
|
|
|
|
self._trigger_return_to_pool()
|
|
|
|
except Exception:
|
|
|
|
LOG.exception(
|
|
|
|
'Error while returning ports to pool. '
|
|
|
|
'It will be retried in %s seconds',
|
|
|
|
oslo_cfg.CONF.vif_pool.ports_pool_update_frequency)
|
2018-04-25 09:31:10 +00:00
|
|
|
|
2018-08-10 12:14:37 +02:00
|
|
|
@lockutils.synchronized('return_to_pool_nested')
|
2018-04-25 09:31:10 +00:00
|
|
|
def _trigger_return_to_pool(self):
|
2020-08-28 15:50:06 +02:00
|
|
|
if not self._recovered_pools:
|
2020-09-04 13:41:38 +02:00
|
|
|
LOG.debug("Kuryr-controller not yet ready to return ports to "
|
|
|
|
"pools.")
|
2018-10-29 16:30:17 +01:00
|
|
|
return
|
2019-11-29 07:30:03 +01:00
|
|
|
os_net = clients.get_network_client()
|
2018-04-25 09:31:10 +00:00
|
|
|
sg_current = {}
|
|
|
|
if not config.CONF.kubernetes.port_debug:
|
2019-08-05 12:21:50 +02:00
|
|
|
attrs = {'device_owner': ['trunk:subport', kl_const.DEVICE_OWNER]}
|
|
|
|
tags = config.CONF.neutron_defaults.resource_tags
|
|
|
|
if tags:
|
|
|
|
attrs['tags'] = tags
|
2019-12-02 16:37:55 +01:00
|
|
|
kuryr_subports = os_net.ports(**attrs)
|
2018-04-25 09:31:10 +00:00
|
|
|
for subport in kuryr_subports:
|
2019-12-02 16:37:55 +01:00
|
|
|
if subport.id in self._recyclable_ports:
|
|
|
|
sg_current[subport.id] = tuple(sorted(
|
|
|
|
subport.security_group_ids))
|
2018-04-25 09:31:10 +00:00
|
|
|
|
2020-01-09 11:52:47 +01:00
|
|
|
for port_id, pool_key in list(self._recyclable_ports.items()):
|
2018-04-25 09:31:10 +00:00
|
|
|
if (not oslo_cfg.CONF.vif_pool.ports_pool_max or
|
|
|
|
self._get_pool_size(pool_key) <
|
|
|
|
oslo_cfg.CONF.vif_pool.ports_pool_max):
|
|
|
|
port_name = (constants.KURYR_PORT_NAME
|
|
|
|
if config.CONF.kubernetes.port_debug
|
|
|
|
else '')
|
2019-02-04 12:32:12 +01:00
|
|
|
if config.CONF.kubernetes.port_debug:
|
2017-02-22 12:51:35 +01:00
|
|
|
try:
|
2019-11-29 07:30:03 +01:00
|
|
|
os_net.update_port(port_id, name=port_name)
|
|
|
|
except os_exc.SDKException:
|
2019-02-04 12:32:12 +01:00
|
|
|
LOG.warning("Error changing name for port %s to be "
|
2018-04-25 09:31:10 +00:00
|
|
|
"reused, put back on the cleanable "
|
|
|
|
"pool.", port_id)
|
2017-02-22 12:51:35 +01:00
|
|
|
continue
|
2021-12-06 16:58:27 +01:00
|
|
|
sg = sg_current.get(port_id)
|
|
|
|
self._available_ports_pools[pool_key].setdefault(
|
|
|
|
sg, []).append(port_id)
|
|
|
|
# Move it to the end of ports to update the SG.
|
|
|
|
self._available_ports_pools[pool_key].move_to_end(sg)
|
2018-04-25 09:31:10 +00:00
|
|
|
else:
|
2019-12-18 20:40:21 +01:00
|
|
|
trunk_id = self._get_trunk_id(pool_key)
|
2018-04-25 09:31:10 +00:00
|
|
|
try:
|
2019-12-18 20:40:21 +01:00
|
|
|
self._drv_vif._remove_subport(trunk_id, port_id)
|
2018-04-25 09:31:10 +00:00
|
|
|
self._drv_vif._release_vlan_id(
|
|
|
|
self._existing_vifs[port_id].vlan_id)
|
|
|
|
del self._existing_vifs[port_id]
|
2019-11-29 07:30:03 +01:00
|
|
|
os_net.delete_port(port_id)
|
2018-04-25 09:31:10 +00:00
|
|
|
except KeyError:
|
|
|
|
LOG.debug('Port %s is not in the ports list.', port_id)
|
2019-11-29 07:30:03 +01:00
|
|
|
except (os_exc.SDKException, os_exc.HttpException):
|
2018-04-25 09:31:10 +00:00
|
|
|
LOG.warning('Error removing the subport %s', port_id)
|
|
|
|
continue
|
|
|
|
try:
|
2017-02-22 12:51:35 +01:00
|
|
|
del self._recyclable_ports[port_id]
|
2018-04-25 09:31:10 +00:00
|
|
|
except KeyError:
|
|
|
|
LOG.debug('Port already recycled: %s', port_id)
|
|
|
|
|
2019-12-18 20:40:21 +01:00
|
|
|
def _get_trunk_id(self, pool_key):
|
2018-04-25 09:31:10 +00:00
|
|
|
trunk_id = self._known_trunk_ids.get(pool_key, None)
|
|
|
|
if not trunk_id:
|
2019-12-18 20:40:21 +01:00
|
|
|
p_port = self._drv_vif._get_parent_port_by_host_ip(pool_key[0])
|
2018-04-25 09:31:10 +00:00
|
|
|
trunk_id = self._drv_vif._get_trunk_id(p_port)
|
|
|
|
self._known_trunk_ids[pool_key] = trunk_id
|
|
|
|
return trunk_id
|
2017-06-15 13:07:41 +00:00
|
|
|
|
2019-11-26 14:09:32 +01:00
|
|
|
def sync_pools(self):
|
|
|
|
super(NestedVIFPool, self).sync_pools()
|
|
|
|
# NOTE(ltomasbo): Ensure previously created ports are recovered into
|
|
|
|
# their respective pools
|
|
|
|
self._recover_precreated_ports()
|
|
|
|
self._recovered_pools = True
|
|
|
|
eventlet.spawn(self._cleanup_leftover_ports)
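# NOTE: unlike the baremetal pool, leftover-port cleanup is spawned in a
# background greenthread once the pools have been recovered, so it does
# not delay marking the pools as ready.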
|
|
|
|
|
2017-06-15 13:07:41 +00:00
|
|
|
def _recover_precreated_ports(self):
|
2017-08-28 18:24:04 +02:00
|
|
|
self._precreated_ports(action='recover')
|
2017-09-18 11:22:22 +00:00
|
|
|
LOG.info("PORTS POOL: pools updated with pre-created ports")
|
2017-11-08 10:40:32 +01:00
|
|
|
self._create_healthcheck_file()
|
2017-08-28 18:24:04 +02:00
|
|
|
|
|
|
|
def _remove_precreated_ports(self, trunk_ips=None):
|
|
|
|
self._precreated_ports(action='free', trunk_ips=trunk_ips)
|
|
|
|
|
|
|
|
def _precreated_ports(self, action, trunk_ips=None):
|
|
|
|
"""Removes or recovers pre-created subports at given pools
|
|
|
|
|
|
|
|
This function handles the pre-created ports based on the given action:
|
|
|
|
- If action is `free` it will remove all the subports from the given
|
|
|
|
trunk ports, or from all the trunk ports if no trunk_ips are passed.
|
|
|
|
- If action is `recover` it will discover the existing subports in the
|
|
|
|
given trunk ports (or in all of them if none are passed) and will add
|
|
|
|
them (and the needed information) to the respective pools.
|
|
|
|
"""
|
2019-11-29 07:30:03 +01:00
|
|
|
os_net = clients.get_network_client()
|
2017-08-28 11:34:49 +02:00
|
|
|
# Note(ltomasbo): ML2/OVS changes the device_owner to trunk:subport
|
|
|
|
# when a port is attached to a trunk. However, that is not the case
|
|
|
|
# for other ML2 drivers, such as ODL. So we also need to look for
|
|
|
|
# compute:kuryr
|
2017-06-15 13:07:41 +00:00
|
|
|
|
2017-10-06 15:09:00 +00:00
|
|
|
parent_ports, available_subports, subnets = self._get_trunks_info()
|
2017-06-15 13:07:41 +00:00
|
|
|
|
2017-10-06 15:09:00 +00:00
|
|
|
if not available_subports:
|
|
|
|
return
|
2017-06-15 13:07:41 +00:00
|
|
|
|
2019-11-08 12:15:21 +01:00
|
|
|
# FIXME(ltomasbo): Workaround for ports already detached from trunks
|
|
|
|
# whose status is ACTIVE
|
|
|
|
trunks_subports = [subport_id['port_id']
|
|
|
|
for p_port in parent_ports.values()
|
|
|
|
for subport_id in p_port['subports']]
|
2019-12-02 16:37:55 +01:00
|
|
|
port_ids_to_delete = [p_id for p_id in available_subports
|
2019-11-08 12:15:21 +01:00
|
|
|
if p_id not in trunks_subports]
|
|
|
|
for port_id in port_ids_to_delete:
|
|
|
|
LOG.debug("Deleting port with wrong status: %s", port_id)
|
|
|
|
try:
|
2019-11-29 07:30:03 +01:00
|
|
|
os_net.delete_port(port_id)
|
|
|
|
except os_exc.SDKException:
|
2019-11-08 12:15:21 +01:00
|
|
|
LOG.exception('Error removing the port %s', port_id)
|
|
|
|
|
2017-10-06 15:09:00 +00:00
|
|
|
for trunk_id, parent_port in parent_ports.items():
|
|
|
|
host_addr = parent_port.get('ip')
|
2017-08-28 18:24:04 +02:00
|
|
|
if trunk_ips and host_addr not in trunk_ips:
|
|
|
|
continue
|
|
|
|
|
2017-10-06 15:09:00 +00:00
|
|
|
for subport in parent_port.get('subports'):
|
|
|
|
kuryr_subport = available_subports.get(subport['port_id'])
|
2019-12-02 16:37:55 +01:00
|
|
|
if not kuryr_subport:
|
|
|
|
continue
|
|
|
|
|
|
|
|
subnet_id = kuryr_subport.fixed_ips[0]['subnet_id']
|
|
|
|
subnet = subnets[subnet_id]
|
|
|
|
net_obj = subnet[subnet_id]
|
|
|
|
pool_key = self._get_pool_key(host_addr,
|
|
|
|
kuryr_subport.project_id,
|
|
|
|
net_obj.id, None)
|
|
|
|
|
|
|
|
if action == 'recover':
|
|
|
|
vif = ovu.neutron_to_osvif_vif_nested_vlan(
|
|
|
|
kuryr_subport, subnet, subport['segmentation_id'])
|
|
|
|
|
|
|
|
self._existing_vifs[kuryr_subport.id] = vif
|
2021-12-06 16:58:27 +01:00
|
|
|
self._available_ports_pools[pool_key].setdefault(
|
|
|
|
tuple(sorted(kuryr_subport.security_group_ids)),
|
|
|
|
[]).append(kuryr_subport.id)
|
2019-12-02 16:37:55 +01:00
|
|
|
|
|
|
|
elif action == 'free':
|
|
|
|
try:
|
|
|
|
self._drv_vif._remove_subport(trunk_id,
|
|
|
|
kuryr_subport.id)
|
|
|
|
os_net.delete_port(kuryr_subport.id)
|
|
|
|
self._drv_vif._release_vlan_id(
|
|
|
|
subport['segmentation_id'])
|
|
|
|
del self._existing_vifs[kuryr_subport.id]
|
|
|
|
self._available_ports_pools[pool_key][
|
|
|
|
tuple(sorted(kuryr_subport.security_group_ids
|
|
|
|
))].remove(kuryr_subport.id)
|
|
|
|
except KeyError:
|
|
|
|
LOG.debug('Port %s is not in the ports list.',
|
|
|
|
kuryr_subport.id)
|
|
|
|
except (os_exc.SDKException, os_exc.HttpException):
|
|
|
|
LOG.warning('Error removing the subport %s',
|
|
|
|
kuryr_subport.id)
|
|
|
|
except ValueError:
|
|
|
|
LOG.debug('Port %s is not in the available ports '
|
|
|
|
'pool.', kuryr_subport.id)
|
2017-08-28 18:24:04 +02:00
|
|
|
|
2019-05-03 17:54:55 +02:00
|
|
|
@lockutils.synchronized('return_to_pool_nested')
|
2019-07-02 14:02:43 +02:00
|
|
|
def populate_pool(self, trunk_ip, project_id, subnets, security_groups):
|
2019-11-26 14:09:32 +01:00
|
|
|
if not self._recovered_pools:
|
2020-09-04 13:41:38 +02:00
|
|
|
LOG.debug("Kuryr-controller not yet ready to populate pools.")
|
2019-05-03 17:54:55 +02:00
|
|
|
raise exceptions.ResourceNotReady(trunk_ip)
|
2022-06-10 15:45:38 +02:00
|
|
|
|
2019-05-03 17:54:55 +02:00
|
|
|
pool_key = self._get_pool_key(trunk_ip, project_id, None, subnets)
|
2022-06-10 15:45:38 +02:00
|
|
|
lock = self._get_populate_pool_lock(pool_key)
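# NOTE: only one caller populates a given pool at a time. The others wait
# up to POPULATE_POOL_TIMEOUT and, on timeout, raise ResourceNotReady so
# that the event is retried later.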
|
|
|
|
|
|
|
|
if lock.acquire(timeout=POPULATE_POOL_TIMEOUT):
|
|
|
|
try:
|
|
|
|
pools = self._available_ports_pools.get(pool_key)
|
|
|
|
if not pools:
|
|
|
|
# NOTE(ltomasbo): If the amount of nodes is large the
|
|
|
|
# repopulation actions may take too long. Using half of the
|
|
|
|
# batch to prevent the problem
|
|
|
|
num_ports = int(max(oslo_cfg.CONF.vif_pool
|
|
|
|
.ports_pool_batch/2,
|
|
|
|
oslo_cfg.CONF.vif_pool.ports_pool_min))
|
|
|
|
self.force_populate_pool(trunk_ip, project_id, subnets,
|
|
|
|
security_groups, num_ports)
|
|
|
|
finally:
|
|
|
|
lock.release()
|
|
|
|
else:
|
|
|
|
LOG.debug("Kuryr-controller timed out waiting for it turn to "
|
|
|
|
"populate pool, retrying.")
|
|
|
|
raise exceptions.ResourceNotReady(trunk_ip)
|
2019-05-03 17:54:55 +02:00
|
|
|
|
2017-08-28 18:24:04 +02:00
|
|
|
def force_populate_pool(self, trunk_ip, project_id, subnets,
|
2019-05-03 17:54:55 +02:00
|
|
|
security_groups, num_ports=None):
|
2017-08-28 18:24:04 +02:00
|
|
|
"""Create a given amount of subports at a given trunk port.
|
|
|
|
|
|
|
|
This function creates a given amount of subports and attaches them to
|
|
|
|
the specified trunk, adding them to the related subports pool
|
|
|
|
regardless of the amount of subports already available in the pool.
|
|
|
|
"""
|
2019-05-03 17:54:55 +02:00
|
|
|
if not num_ports:
|
|
|
|
num_ports = oslo_cfg.CONF.vif_pool.ports_pool_batch
|
2017-08-28 18:24:04 +02:00
|
|
|
vifs = self._drv_vif.request_vifs(
|
|
|
|
pod=[],
|
|
|
|
project_id=project_id,
|
|
|
|
subnets=subnets,
|
|
|
|
security_groups=security_groups,
|
|
|
|
num_ports=num_ports,
|
2021-12-13 21:53:52 +00:00
|
|
|
trunk_ip=trunk_ip,
|
|
|
|
semaphore=self._create_ports_semaphore)
|
2017-08-28 18:24:04 +02:00
|
|
|
|
2019-02-04 12:32:12 +01:00
|
|
|
pool_key = self._get_pool_key(trunk_ip, project_id, None, subnets)
|
2017-08-28 18:24:04 +02:00
|
|
|
for vif in vifs:
|
|
|
|
self._existing_vifs[vif.id] = vif
|
2021-12-06 16:58:27 +01:00
|
|
|
self._available_ports_pools[pool_key].setdefault(
|
2019-02-04 12:32:12 +01:00
|
|
|
tuple(sorted(security_groups)), []).append(vif.id)
|
2017-08-28 18:24:04 +02:00
|
|
|
|
|
|
|
def free_pool(self, trunk_ips=None):
|
|
|
|
"""Removes subports from the pool and deletes neutron port resource.
|
|
|
|
|
|
|
|
This function empties the pool of available subports and removes the
|
|
|
|
neutron port resources of the specified trunk port (or all of them if
|
|
|
|
no trunk is specified).
|
|
|
|
"""
|
|
|
|
self._remove_precreated_ports(trunk_ips)
|
2017-12-15 17:56:07 +01:00
|
|
|
|
2018-04-25 09:31:10 +00:00
|
|
|
def delete_network_pools(self, net_id):
|
2019-11-26 14:09:32 +01:00
|
|
|
if not self._recovered_pools:
|
2020-09-04 13:41:38 +02:00
|
|
|
LOG.debug("Kuryr-controller not yet ready to delete network "
|
|
|
|
"pools.")
|
2018-10-29 16:30:17 +01:00
|
|
|
raise exceptions.ResourceNotReady(net_id)
|
2022-04-04 11:38:20 +02:00
|
|
|
|
2022-07-28 15:11:43 +02:00
|
|
|
epool = eventlet.GreenPool(constants.LEFTOVER_RM_POOL_SIZE)
|
2022-04-04 11:38:20 +02:00
|
|
|
ports_to_remove = []
|
|
|
|
|
2018-08-10 12:14:37 +02:00
|
|
|
# NOTE(ltomasbo): The pods should already be deleted, but their
|
|
|
|
# associated ports may not have been recycled yet and therefore may
|
|
|
|
# not be in the available_ports_pools dict. The next call forces
|
|
|
|
# them into that dict before cleaning it up
|
2018-04-25 09:31:10 +00:00
|
|
|
self._trigger_return_to_pool()
|
2020-01-09 11:52:47 +01:00
|
|
|
for pool_key, ports in list(self._available_ports_pools.items()):
|
2018-04-25 09:31:10 +00:00
|
|
|
if self._get_pool_key_net(pool_key) != net_id:
|
|
|
|
continue
|
2019-12-18 20:40:21 +01:00
|
|
|
trunk_id = self._get_trunk_id(pool_key)
|
2019-02-04 12:32:12 +01:00
|
|
|
ports_id = [p_id for sg_ports in ports.values()
|
|
|
|
for p_id in sg_ports]
|
2018-04-25 09:31:10 +00:00
|
|
|
try:
|
2019-12-18 20:40:21 +01:00
|
|
|
self._drv_vif._remove_subports(trunk_id, ports_id)
|
2022-05-11 17:25:26 +02:00
|
|
|
except os_exc.NotFoundException:
|
|
|
|
# We don't know which subport was already removed, but we'll
|
|
|
|
# attempt a manual detach on DELETE error, so just continue.
|
|
|
|
pass
|
2019-11-29 07:30:03 +01:00
|
|
|
except (os_exc.SDKException, os_exc.HttpException):
|
2018-04-25 09:31:10 +00:00
|
|
|
LOG.exception('Error removing subports from trunk: %s',
|
|
|
|
trunk_id)
|
2022-07-28 15:11:43 +02:00
|
|
|
raise exceptions.ResourceNotReady(net_id)
|
2018-04-25 09:31:10 +00:00
|
|
|
|
2019-02-04 12:32:12 +01:00
|
|
|
for port_id in ports_id:
|
2018-04-25 09:31:10 +00:00
|
|
|
try:
|
|
|
|
self._drv_vif._release_vlan_id(
|
|
|
|
self._existing_vifs[port_id].vlan_id)
|
|
|
|
del self._existing_vifs[port_id]
|
|
|
|
except KeyError:
|
|
|
|
LOG.debug('Port %s is not in the ports list.', port_id)
|
2022-04-04 11:38:20 +02:00
|
|
|
ports_to_remove.append(port_id)
|
2019-11-29 07:30:03 +01:00
|
|
|
|
2021-12-06 16:58:27 +01:00
|
|
|
del self._available_ports_pools[pool_key]
|
2021-12-03 09:24:45 +00:00
|
|
|
with self._lock:
|
|
|
|
try:
|
|
|
|
del self._populate_pool_lock[pool_key]
|
|
|
|
except KeyError:
|
|
|
|
pass
|
2018-04-25 09:31:10 +00:00
|
|
|
|
2022-07-28 15:11:43 +02:00
|
|
|
# Parallelize Ports deletion. At this point the Ports
|
|
|
|
# should have been detached from the trunk and, if not, the operation
|
|
|
|
# will be retried
|
|
|
|
for result in epool.imap(c_utils.delete_neutron_port, ports_to_remove):
|
|
|
|
if result:
|
|
|
|
LOG.error('During Neutron port deletion an error occurred: %s',
|
|
|
|
result)
|
|
|
|
raise exceptions.ResourceNotReady(net_id)
|
2022-04-04 11:38:20 +02:00
|
|
|
|
2017-12-15 17:56:07 +01:00
|
|
|
|
|
|
|
class MultiVIFPool(base.VIFPoolDriver):
|
|
|
|
"""Manages pools with different VIF types.
|
|
|
|
|
|
|
|
It manages hybrid deployments containing both Bare Metal and Nested
|
|
|
|
Kubernetes Pods. To do that it creates a pool per node with a different
|
|
|
|
pool driver depending on the vif driver that the node is using.
|
|
|
|
|
|
|
|
It assumes a label pod_vif is added to each node to inform about the
|
|
|
|
driver set for that node. If no label is added, it assumes the default pod
|
|
|
|
vif: the one specified at kuryr.conf
|
|
|
|
"""
|
|
|
|
|
|
|
|
def set_vif_driver(self):
|
|
|
|
self._vif_drvs = {}
|
2018-08-27 17:43:27 +09:00
|
|
|
vif_pool_mapping = self._get_vif_pool_mapping()
|
|
|
|
|
|
|
|
if not vif_pool_mapping:
|
2017-12-15 17:56:07 +01:00
|
|
|
pod_vif = oslo_cfg.CONF.kubernetes.pod_vif_driver
|
|
|
|
drv_vif = base.PodVIFDriver.get_instance()
|
|
|
|
drv_pool = base.VIFPoolDriver.get_instance()
|
|
|
|
drv_pool.set_vif_driver(drv_vif)
|
|
|
|
self._vif_drvs[pod_vif] = drv_pool
|
|
|
|
return
|
2018-08-27 17:43:27 +09:00
|
|
|
for pod_driver, pool_driver in vif_pool_mapping.items():
|
2017-12-15 17:56:07 +01:00
|
|
|
if not utils.check_suitable_multi_pool_driver_opt(pool_driver,
|
|
|
|
pod_driver):
|
2019-07-10 10:01:55 +03:00
|
|
|
LOG.error("The pool(%s) and pod(%s) driver selected are not "
|
|
|
|
"compatible.", pool_driver, pod_driver)
|
2017-12-15 17:56:07 +01:00
|
|
|
raise exceptions.MultiPodDriverPoolConfigurationNotSupported()
|
2018-08-27 17:09:38 +09:00
|
|
|
drv_vif = base.PodVIFDriver.get_instance(
|
|
|
|
specific_driver=pod_driver)
|
2017-12-15 17:56:07 +01:00
|
|
|
drv_pool = base.VIFPoolDriver.get_instance(
|
2018-08-27 17:43:27 +09:00
|
|
|
specific_driver=pool_driver, scope='for:{}'.format(pod_driver))
|
2017-12-15 17:56:07 +01:00
|
|
|
drv_pool.set_vif_driver(drv_vif)
|
|
|
|
self._vif_drvs[pod_driver] = drv_pool
|
|
|
|
|
|
|
|
def request_vif(self, pod, project_id, subnets, security_groups):
|
2022-01-07 14:38:33 +00:00
|
|
|
pod_info = "%s/%s" % (pod['metadata']['namespace'],
|
|
|
|
pod['metadata']['name'])
|
|
|
|
try:
|
|
|
|
pod_vif_type = self._get_pod_vif_type(pod)
|
|
|
|
except KeyError:
|
|
|
|
# NOTE(maysams): No nodeName set. Event should be skipped
|
|
|
|
LOG.warning("Pod %s has no .spec.nodeName set. This is unexpected "
|
|
|
|
"as it's supposed to be scheduled. Ignoring event.",
|
|
|
|
pod_info)
|
|
|
|
return None
|
2017-12-15 17:56:07 +01:00
|
|
|
return self._vif_drvs[pod_vif_type].request_vif(
|
|
|
|
pod, project_id, subnets, security_groups)
|
|
|
|
|
|
|
|
def release_vif(self, pod, vif, *argv):
|
2018-08-11 02:54:02 -04:00
|
|
|
vif_drv_alias = self._get_vif_drv_alias(vif)
|
|
|
|
self._vif_drvs[vif_drv_alias].release_vif(pod, vif, *argv)
|
2017-12-15 17:56:07 +01:00
|
|
|
|
2021-06-07 09:41:36 +02:00
|
|
|
def activate_vif(self, vif, **kwargs):
|
2018-08-11 02:54:02 -04:00
|
|
|
vif_drv_alias = self._get_vif_drv_alias(vif)
|
2021-06-07 09:41:36 +02:00
|
|
|
self._vif_drvs[vif_drv_alias].activate_vif(vif, **kwargs)
|
2017-12-15 17:56:07 +01:00
|
|
|
|
2018-11-07 18:46:07 +01:00
|
|
|
def update_vif_sgs(self, pod, sgs):
|
|
|
|
pod_vif_type = self._get_pod_vif_type(pod)
|
|
|
|
self._vif_drvs[pod_vif_type].update_vif_sgs(pod, sgs)
|
|
|
|
|
2019-10-04 13:11:56 +02:00
|
|
|
def remove_sg_from_pools(self, sg_id, net_id):
|
|
|
|
for vif_drv in self._vif_drvs.values():
|
|
|
|
if str(vif_drv) == 'NoopVIFPool':
|
|
|
|
continue
|
|
|
|
vif_drv.remove_sg_from_pools(sg_id, net_id)
|
|
|
|
|
2018-04-25 09:31:10 +00:00
|
|
|
def delete_network_pools(self, net_id):
|
|
|
|
for vif_drv in self._vif_drvs.values():
|
|
|
|
if str(vif_drv) == 'NoopVIFPool':
|
|
|
|
continue
|
|
|
|
vif_drv.delete_network_pools(net_id)
|
|
|
|
|
2018-08-20 17:49:52 +02:00
|
|
|
def sync_pools(self):
|
|
|
|
for vif_drv in self._vif_drvs.values():
|
|
|
|
vif_drv.sync_pools()
|
|
|
|
|
2017-12-15 17:56:07 +01:00
|
|
|
def _get_pod_vif_type(self, pod):
|
|
|
|
node_name = pod['spec']['nodeName']
|
|
|
|
return self._get_node_vif_driver(node_name)
|
|
|
|
|
|
|
|
@MEMOIZE
|
|
|
|
def _get_node_vif_driver(self, node_name):
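# NOTE: results are memoized (see @MEMOIZE above), so the node object is
# not fetched from the Kubernetes API on every call.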
|
|
|
|
kubernetes = clients.get_kubernetes_client()
|
|
|
|
node_info = kubernetes.get(
|
|
|
|
constants.K8S_API_BASE + '/nodes/' + node_name)
|
|
|
|
|
|
|
|
labels = node_info['metadata'].get('labels', None)
|
|
|
|
if labels:
|
|
|
|
pod_vif = labels.get('pod_vif',
|
|
|
|
oslo_cfg.CONF.kubernetes.pod_vif_driver)
|
|
|
|
return pod_vif
|
|
|
|
return oslo_cfg.CONF.kubernetes.pod_vif_driver
|
2018-08-11 02:54:02 -04:00
|
|
|
|
|
|
|
def _get_vif_drv_alias(self, vif):
|
|
|
|
vif_type_name = type(vif).__name__
|
|
|
|
return VIF_TYPE_TO_DRIVER_MAPPING[vif_type_name]
|
2018-08-27 17:43:27 +09:00
|
|
|
|
|
|
|
def _get_vif_pool_mapping(self):
|
|
|
|
vif_pool_mapping = oslo_cfg.CONF.vif_pool.vif_pool_mapping
|
|
|
|
|
|
|
|
if not vif_pool_mapping:
|
|
|
|
pools_vif_drivers = oslo_cfg.CONF.vif_pool.pools_vif_drivers
|
|
|
|
|
|
|
|
if pools_vif_drivers:
|
|
|
|
msg = ("Config option vif_pool.pools_vif_drivers is "
|
|
|
|
"deprecated in favour of vif_pool.vif_pool_mapping, "
|
|
|
|
"and will be removed in a future release")
|
|
|
|
versionutils.report_deprecated_feature(LOG, msg)
|
|
|
|
|
|
|
|
for pool_driver, pod_driver in pools_vif_drivers.items():
|
|
|
|
vif_pool_mapping[pod_driver] = pool_driver
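# NOTE: the legacy pools_vif_drivers option is keyed by pool driver,
# while vif_pool_mapping is keyed by pod driver, hence the inversion
# above.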
|
|
|
|
|
|
|
|
return vif_pool_mapping
|
2019-05-03 17:54:55 +02:00
|
|
|
|
|
|
|
def populate_pool(self, node_ip, project_id, subnets, sg_id):
|
|
|
|
for vif_drv in self._vif_drvs.values():
|
|
|
|
if str(vif_drv) == 'NestedVIFPool':
|
|
|
|
vif_drv.populate_pool(node_ip, project_id, subnets, sg_id)
|