Support dual stack service subnets

This commit implements Services support for K8s clusters running dual
stack, i.e. having IPv4 and IPv6 services. This means that
[neutron_defaults]service_subnet option is deprecated in favor of
service_subnets and Kuryr will choose the correct one based on the
Service's `.spec.ipFamily`.

Change-Id: Iad79a2b766b7c71986a9ea79143384cc029c1f79
Implements: blueprint dual-stack
This commit is contained in:
Michał Dulko 2021-03-03 15:57:23 +01:00
parent 1c1d1d463a
commit 80500f26a0
16 changed files with 126 additions and 94 deletions

View File

@ -217,8 +217,11 @@ neutron_defaults = [
cfg.StrOpt('ovs_bridge',
help=_("Default OpenVSwitch integration bridge"),
sample_default="br-int"),
cfg.ListOpt('service_subnets',
            help=_("Neutron subnet IDs for Kubernetes services"),
            default=[],
            # Migrated from the old singular StrOpt; the old option lived in
            # the same group, which is spelled 'neutron_defaults' (with the
            # trailing 's') everywhere else in this file.
            deprecated_name='service_subnet',
            deprecated_group='neutron_defaults'),
cfg.StrOpt('external_svc_net',
help=_("Default external network ID for Kubernetes services")),
cfg.StrOpt('external_svc_subnet',

View File

@ -17,6 +17,7 @@ from oslo_config import cfg
from kuryr_kubernetes import config
from kuryr_kubernetes.controller.drivers import base
from kuryr_kubernetes import exceptions as k_exc
from kuryr_kubernetes import utils
@ -34,21 +35,32 @@ class DefaultPodSubnetDriver(base.PodSubnetsDriver):
raise cfg.RequiredOptError('pod_subnet',
cfg.OptGroup('neutron_defaults'))
return {subnet_id: utils.get_subnet(subnet_id)}
return {subnet_id: utils.get_os_vif_network(subnet_id)}
class DefaultServiceSubnetDriver(base.ServiceSubnetsDriver):
    """Provides subnet for Service's LBaaS based on a configuration option."""

    def get_subnets(self, service, project_id):
        """Return {subnet_id: os-vif network} matching the Service IP family.

        :param service: K8s Service dict; `.spec.ipFamily` selects IPv4/IPv6.
        :param project_id: unused here, part of the driver interface.
        :raises cfg.RequiredOptError: when service_subnets is not configured.
        :raises k_exc.InvalidKuryrConfig: when no configured subnet matches
                                          the Service's IP family.
        """
        subnets_ids = config.CONF.neutron_defaults.service_subnets
        if not subnets_ids:
            # NOTE(ivc): this option is only required for
            # DefaultServiceSubnetDriver and its subclasses, but it may be
            # optional for other drivers (e.g. when each namespace has own
            # subnet)
            raise cfg.RequiredOptError('service_subnets',
                                       cfg.OptGroup('neutron_defaults'))

        # TODO(dulek): We should probably check the IPs instead?
        ip_version = 4
        if service['spec'].get('ipFamily') == 'IPv6':
            ip_version = 6

        for subnet_id in subnets_ids:
            if ip_version == utils.get_subnet_ip_version(subnet_id):
                return {subnet_id: utils.get_os_vif_network(subnet_id)}

        # Interpolate with %; Exception('%s ...', arg) would keep the two
        # args separate and never format the message.
        raise k_exc.InvalidKuryrConfig(
            'Cannot find %s subnet in [neutron_defaults]service_subnets.'
            % service['spec'].get('ipFamily'))

View File

@ -267,9 +267,13 @@ class LBaaSv2Driver(base.LBaaSDriver):
else:
sgs = loadbalancer['security_groups']
sg_rule_ethertype = k_const.IPv4
if utils.get_service_subnet_version() == k_const.IP_VERSION_6:
sg_rule_ethertype = k_const.IPv6
sg_rule_ethertypes = []
for ip_version in utils.get_service_subnets_ip_versions():
ethertype = k_const.IPv4
if ip_version == k_const.IP_VERSION_6:
ethertype = k_const.IPv6
sg_rule_ethertypes.append(ethertype)
# Check if Network Policy allows listener on the pods
for sg in sgs:
if sg != lb_sg:
@ -301,7 +305,7 @@ class LBaaSv2Driver(base.LBaaSDriver):
lb_sg)
os_net.create_security_group_rule(
direction='ingress',
ether_type=sg_rule_ethertype,
ether_type=rule.ether_type,
port_range_min=port,
port_range_max=port,
protocol=protocol,
@ -330,13 +334,12 @@ class LBaaSv2Driver(base.LBaaSDriver):
if add_default_rules:
try:
LOG.debug("Restoring default LBaaS sg rule for sg: %r", lb_sg)
os_net.create_security_group_rule(direction='ingress',
ether_type=sg_rule_ethertype,
port_range_min=port,
port_range_max=port,
protocol=protocol,
security_group_id=lb_sg,
description=sg_rule_name)
for sg_rule_ethertype in sg_rule_ethertypes:
os_net.create_security_group_rule(
direction='ingress', ether_type=sg_rule_ethertype,
port_range_min=port, port_range_max=port,
protocol=protocol, security_group_id=lb_sg,
description=sg_rule_name)
except os_exc.ConflictException:
pass
except os_exc.SDKException:

View File

@ -84,7 +84,7 @@ class NPWGMultiVIFDriver(base.MultiVIFDriver):
LOG.debug("Default subnet mapping in config file "
"doesn't contain any subnet for %s driver "
"alias. Default pod_subnet was used.", alias)
subnet = {subnet_id: utils.get_subnet(subnet_id)}
subnet = {subnet_id: utils.get_os_vif_network(subnet_id)}
vif = vif_drv.request_vif(pod, project_id, subnet, security_groups)
if vif:
vifs.append(vif)

View File

@ -50,7 +50,7 @@ class NamespacePodSubnetDriver(default_subnet.DefaultPodSubnetDriver):
def get_namespace_subnet(self, namespace, subnet_id=None):
if not subnet_id:
subnet_id = self._get_namespace_subnet_id(namespace)
return {subnet_id: utils.get_subnet(subnet_id)}
return {subnet_id: utils.get_os_vif_network(subnet_id)}
def _get_namespace_subnet_id(self, namespace):
kubernetes = clients.get_kubernetes_client()

View File

@ -145,8 +145,8 @@ class NetworkPolicyDriver(base.NetworkPolicyDriver):
rules = []
default_cidrs = []
if CONF.octavia_defaults.enforce_sg_rules:
default_cidrs.append(utils.get_subnet_cidr(
CONF.neutron_defaults.service_subnet))
for subnet_id in CONF.neutron_defaults.service_subnets:
default_cidrs.append(utils.get_subnet_cidr(subnet_id))
worker_subnet_ids = self.nodes_subnets_driver.get_nodes_subnets()
default_cidrs.extend(utils.get_subnets_cidrs(worker_subnet_ids))
@ -205,8 +205,8 @@ class NetworkPolicyDriver(base.NetworkPolicyDriver):
allowed_cidrs = utils.get_subnetpool_cidrs(
CONF.namespace_subnet.pod_subnet_pool)
if CONF.octavia_defaults.enforce_sg_rules:
allowed_cidrs.append(utils.get_subnet_cidr(
CONF.neutron_defaults.service_subnet))
for subnet in CONF.neutron_defaults.service_subnets:
allowed_cidrs.append(utils.get_subnet_cidr(subnet))
elif namespace_selector:
selectors = True
if pod_selector:
@ -521,12 +521,12 @@ class NetworkPolicyDriver(base.NetworkPolicyDriver):
resource=None, port=None, protocol=None):
services = driver_utils.get_services()
if not resource:
svc_subnet = utils.get_subnet_cidr(
CONF.neutron_defaults.service_subnet)
rule = driver_utils.create_security_group_rule_body(
'egress', port, protocol=protocol, cidr=svc_subnet)
if rule not in sg_rule_body_list:
sg_rule_body_list.append(rule)
for subnet_id in CONF.neutron_defaults.service_subnets:
svc_subnet = utils.get_subnet_cidr(subnet_id)
rule = driver_utils.create_security_group_rule_body(
'egress', port, protocol=protocol, cidr=svc_subnet)
if rule not in sg_rule_body_list:
sg_rule_body_list.append(rule)
return
for service in services.get('items'):

View File

@ -438,7 +438,7 @@ class BaseVIFPool(base.VIFPoolDriver, metaclass=abc.ABCMeta):
port.network_id)}
else:
subnets[subnet_id] = {
subnet_id: utils.get_subnet(subnet_id)}
subnet_id: utils.get_os_vif_network(subnet_id)}
return parent_ports, subports, subnets
def _cleanup_leftover_ports(self):
@ -755,7 +755,7 @@ class NeutronVIFPool(BaseVIFPool):
continue
subnet_id = port.fixed_ips[0]['subnet_id']
subnet = {
subnet_id: utils.get_subnet(subnet_id)}
subnet_id: utils.get_os_vif_network(subnet_id)}
vif = ovu.neutron_to_osvif_vif(port.binding_vif_type, port, subnet)
net_obj = subnet[subnet_id]
pool_key = self._get_pool_key(port.binding_host_id,

View File

@ -101,7 +101,11 @@ class AllowedAddressAlreadyPresent(Exception):
"""
class InvalidKuryrConfig(Exception):
    """Exception indicates problem with Kuryr configuration (kuryr.conf)"""
class MultiPodDriverPoolConfigurationNotSupported(InvalidKuryrConfig):
"""Exception indicates a wrong configuration of the multi pod driver pool
This exception is raised when the multi pod driver pool is not properly

View File

@ -23,56 +23,56 @@ from kuryr_kubernetes.tests import base as test_base
class TestDefaultPodSubnetDriver(test_base.TestCase):
@mock.patch('kuryr_kubernetes.utils.get_subnet')
@mock.patch('kuryr_kubernetes.utils.get_os_vif_network')
@mock.patch('kuryr_kubernetes.config.CONF')
def test_get_subnets(self, m_cfg, m_get_subnet):
def test_get_subnets(self, m_cfg, m_get_network):
subnet_id = mock.sentinel.subnet_id
subnet = mock.sentinel.subnet
pod = mock.sentinel.pod
project_id = mock.sentinel.project_id
m_cfg.neutron_defaults.pod_subnet = subnet_id
m_get_subnet.return_value = subnet
m_get_network.return_value = subnet
driver = default_subnet.DefaultPodSubnetDriver()
subnets = driver.get_subnets(pod, project_id)
self.assertEqual({subnet_id: subnet}, subnets)
m_get_subnet.assert_called_once_with(subnet_id)
m_get_network.assert_called_once_with(subnet_id)
@mock.patch('kuryr_kubernetes.utils.get_subnet')
def test_get_subnets_not_set(self, m_get_subnet):
@mock.patch('kuryr_kubernetes.utils.get_os_vif_network')
def test_get_subnets_not_set(self, m_get_network):
pod = mock.sentinel.pod
project_id = mock.sentinel.project_id
driver = default_subnet.DefaultPodSubnetDriver()
self.assertRaises(cfg.RequiredOptError, driver.get_subnets,
pod, project_id)
m_get_subnet.assert_not_called()
m_get_network.assert_not_called()
class TestDefaultServiceSubnetDriver(test_base.TestCase):
@mock.patch('kuryr_kubernetes.utils.get_subnet')
@mock.patch('kuryr_kubernetes.utils.get_os_vif_network')
@mock.patch('kuryr_kubernetes.config.CONF')
def test_get_subnets(self, m_cfg, m_get_subnet):
def test_get_subnets(self, m_cfg, m_get_network):
subnet_id = mock.sentinel.subnet_id
subnet = mock.sentinel.subnet
service = mock.sentinel.service
service = {'spec': {}}
project_id = mock.sentinel.project_id
m_cfg.neutron_defaults.service_subnet = subnet_id
m_get_subnet.return_value = subnet
m_cfg.neutron_defaults.service_subnets = [subnet_id]
m_get_network.return_value = subnet
driver = default_subnet.DefaultServiceSubnetDriver()
subnets = driver.get_subnets(service, project_id)
self.assertEqual({subnet_id: subnet}, subnets)
m_get_subnet.assert_called_once_with(subnet_id)
m_get_network.assert_called_once_with(subnet_id)
@mock.patch('kuryr_kubernetes.utils.get_subnet')
def test_get_subnets_not_set(self, m_get_subnet):
@mock.patch('kuryr_kubernetes.utils.get_os_vif_network')
def test_get_subnets_not_set(self, m_get_network):
service = mock.sentinel.service
project_id = mock.sentinel.project_id
driver = default_subnet.DefaultPodSubnetDriver()
self.assertRaises(cfg.RequiredOptError, driver.get_subnets,
service, project_id)
m_get_subnet.assert_not_called()
m_get_network.assert_not_called()

View File

@ -1252,8 +1252,8 @@ class TestLBaaSv2AppyMembersSecurityGroup(test_base.TestCase):
security_group_id=self.vip.security_group_ids[0],
description=self.sg_rule_name)
@mock.patch("kuryr_kubernetes.utils.get_service_subnet_version",
return_value=k_const.IP_VERSION_6)
@mock.patch("kuryr_kubernetes.utils.get_service_subnets_ip_versions",
return_value=[k_const.IP_VERSION_6])
def test__apply_members_security_groups_ipv6_add_default(self, gssv):
os_net = self.useFixture(k_fix.MockNetworkClient()).client
cls = d_lbaasv2.LBaaSv2Driver
@ -1284,6 +1284,8 @@ class TestLBaaSv2AppyMembersSecurityGroup(test_base.TestCase):
security_group_id=self.vip.security_group_ids[0],
description=self.sg_rule_name)
@mock.patch("kuryr_kubernetes.utils.get_service_subnets_ip_versions",
mock.Mock(return_value=[k_const.IP_VERSION_4]))
def test__apply_members_security_groups_add_default_conflict(self):
os_net = self.useFixture(k_fix.MockNetworkClient()).client
cls = d_lbaasv2.LBaaSv2Driver
@ -1316,6 +1318,8 @@ class TestLBaaSv2AppyMembersSecurityGroup(test_base.TestCase):
security_group_id=self.vip.security_group_ids[0],
description=self.sg_rule_name)
@mock.patch("kuryr_kubernetes.utils.get_service_subnets_ip_versions",
mock.Mock(return_value=[k_const.IP_VERSION_4]))
def test__apply_members_security_groups_add_default_sdk_exception(self):
os_net = self.useFixture(k_fix.MockNetworkClient()).client
cls = d_lbaasv2.LBaaSv2Driver

View File

@ -134,17 +134,17 @@ class TestNPWGMultiVIFDriver(test_base.TestCase):
specific_driver='multi_pool')
m_set_vifs_driver.assert_called_once()
@mock.patch('kuryr_kubernetes.utils.get_subnet')
@mock.patch('kuryr_kubernetes.utils.get_os_vif_network')
@mock.patch('kuryr_kubernetes.clients.get_kubernetes_client')
def test_request_additional_vifs(self, m_get_client, m_get_subnet):
def test_request_additional_vifs(self, m_get_client, m_get_network):
vifs = [mock.sentinel.vif_a, mock.sentinel.vif_b, mock.sentinel.vif_c]
self._request_vif.side_effect = vifs
net_crds = get_crd_objs()
client = mock.Mock()
m_get_client.return_value = client
m_get_subnet.side_effect = [mock.sentinel.subneta,
mock.sentinel.subnetb,
mock.sentinel.subnetc]
m_get_network.side_effect = [mock.sentinel.subneta,
mock.sentinel.subnetb,
mock.sentinel.subnetc]
client.get = mock.Mock()
client.get.side_effect = net_crds
self._drv._get_networks.return_value = get_nets()

View File

@ -54,8 +54,8 @@ def get_pod_obj():
class TestNamespacePodSubnetDriver(test_base.TestCase):
@mock.patch('kuryr_kubernetes.utils.get_subnet')
def test_get_subnets(self, m_get_subnet):
@mock.patch('kuryr_kubernetes.utils.get_os_vif_network')
def test_get_subnets(self, m_get_network):
pod = get_pod_obj()
pod_namespace = pod['metadata']['namespace']
subnet_id = mock.sentinel.subnet_id
@ -65,17 +65,17 @@ class TestNamespacePodSubnetDriver(test_base.TestCase):
m_driver = mock.MagicMock(spec=cls)
m_driver._get_namespace_subnet_id.return_value = subnet_id
m_get_subnet.return_value = subnet
m_get_network.return_value = subnet
subnets = cls.get_namespace_subnet(m_driver, pod_namespace)
self.assertEqual({subnet_id: subnet}, subnets)
m_driver._get_namespace_subnet_id.assert_called_once_with(
pod_namespace)
m_get_subnet.assert_called_once_with(subnet_id)
m_get_network.assert_called_once_with(subnet_id)
@mock.patch('kuryr_kubernetes.utils.get_subnet')
def test_get_subnets_namespace_not_ready(self, m_get_subnet):
@mock.patch('kuryr_kubernetes.utils.get_os_vif_network')
def test_get_subnets_namespace_not_ready(self, m_get_network):
pod = get_pod_obj()
pod_namespace = pod['metadata']['namespace']
@ -90,7 +90,7 @@ class TestNamespacePodSubnetDriver(test_base.TestCase):
m_driver._get_namespace_subnet_id.assert_called_once_with(
pod_namespace)
m_get_subnet.assert_not_called()
m_get_network.assert_not_called()
def test__get_namespace_subnet_id(self):
cls = subnet_drv.NamespacePodSubnetDriver

View File

@ -100,7 +100,7 @@ class TestSriovVIFDriver(test_base.TestCase):
vif = mock.sentinel.vif
m_to_vif.return_value = vif
os_net.create_port.return_value = port
utils.get_subnet.return_value = subnets
utils.get_os_vif_network.return_value = subnets
self.assertEqual(vif, cls.request_vif(m_driver, self._pod, project_id,
subnets, security_groups))

View File

@ -838,8 +838,8 @@ class NeutronVIFPool(test_base.TestCase):
os_net.delete_port.assert_not_called()
@mock.patch('kuryr_kubernetes.os_vif_util.neutron_to_osvif_vif')
@mock.patch('kuryr_kubernetes.utils.get_subnet')
def test__recover_precreated_ports(self, m_get_subnet, m_to_osvif):
@mock.patch('kuryr_kubernetes.utils.get_os_vif_network')
def test__recover_precreated_ports(self, m_get_network, m_to_osvif):
cls = vif_pool.NeutronVIFPool
m_driver = mock.MagicMock(spec=cls)
os_net = self.useFixture(k_fix.MockNetworkClient()).client
@ -870,7 +870,7 @@ class NeutronVIFPool(test_base.TestCase):
'provider_network_type': None})
network = ovu.neutron_to_osvif_network(_net)
subnet = {subnet_id: network}
m_get_subnet.return_value = network
m_get_network.return_value = network
vif = mock.sentinel.vif
m_to_osvif.return_value = vif
m_driver._get_in_use_ports_info.return_value = [], {}
@ -882,7 +882,7 @@ class NeutronVIFPool(test_base.TestCase):
cls._recover_precreated_ports(m_driver)
os_net.ports.assert_called_once()
m_get_subnet.assert_called_with(subnet_id)
m_get_network.assert_called_with(subnet_id)
m_to_osvif.assert_called_once_with(vif_plugin, port, subnet)
self.assertEqual(m_driver._existing_vifs[port_id], vif)
@ -890,8 +890,8 @@ class NeutronVIFPool(test_base.TestCase):
{tuple(port.security_group_ids): [port_id]})
@mock.patch('kuryr_kubernetes.os_vif_util.neutron_to_osvif_vif')
@mock.patch('kuryr_kubernetes.utils.get_subnet')
def test__recover_precreated_ports_empty(self, m_get_subnet, m_to_osvif):
@mock.patch('kuryr_kubernetes.utils.get_os_vif_network')
def test__recover_precreated_ports_empty(self, m_get_network, m_to_osvif):
cls = vif_pool.NeutronVIFPool
m_driver = mock.MagicMock(spec=cls)
os_net = self.useFixture(k_fix.MockNetworkClient()).client
@ -908,7 +908,7 @@ class NeutronVIFPool(test_base.TestCase):
cls._recover_precreated_ports(m_driver)
os_net.ports.assert_called_once()
m_get_subnet.assert_not_called()
m_get_network.assert_not_called()
m_to_osvif.assert_not_called()
def test_delete_network_pools(self):
@ -1426,8 +1426,8 @@ class NestedVIFPool(test_base.TestCase):
self.assertEqual(ip_address, cls._get_parent_port_ip(m_driver,
port_id))
@mock.patch('kuryr_kubernetes.utils.get_subnet')
def test__get_trunk_info(self, m_get_subnet):
@mock.patch('kuryr_kubernetes.utils.get_os_vif_network')
def test__get_trunk_info(self, m_get_network):
cls = vif_pool.NestedVIFPool
m_driver = mock.MagicMock(spec=cls)
os_net = self.useFixture(k_fix.MockNetworkClient()).client
@ -1449,7 +1449,7 @@ class NestedVIFPool(test_base.TestCase):
os_net.ports.return_value = [trunk_port, subport]
m_driver._get_in_use_ports_info.return_value = [], {}
subnet = mock.sentinel.subnet
m_get_subnet.return_value = subnet
m_get_network.return_value = subnet
exp_p_ports = {trunk_id: {
'ip': trunk_port.fixed_ips[0]['ip_address'],

View File

@ -65,7 +65,7 @@ class TestUtils(test_base.TestCase):
@mock.patch('kuryr_kubernetes.os_vif_util.neutron_to_osvif_network')
@mock.patch('kuryr_kubernetes.os_vif_util.neutron_to_osvif_subnet')
def test_get_subnet(self, m_osv_subnet, m_osv_network):
def test_get_os_vif_network(self, m_osv_subnet, m_osv_network):
os_net = self.useFixture(k_fix.MockNetworkClient()).client
subnet = mock.MagicMock()
@ -82,7 +82,7 @@ class TestUtils(test_base.TestCase):
m_osv_subnet.return_value = subnet
m_osv_network.return_value = network
ret = utils.get_subnet(subnet_id)
ret = utils.get_os_vif_network(subnet_id)
self.assertEqual(network, ret)
os_net.get_subnet.assert_called_once_with(subnet_id)

View File

@ -277,10 +277,20 @@ def get_nodes_ips(node_subnets):
@MEMOIZE
def _get_subnet(subnet_id):
    """Fetch the Neutron subnet object for *subnet_id* (memoized).

    :raises os_exc.ResourceNotFound: re-raised after logging when the
                                     subnet does not exist.
    """
    os_net = clients.get_network_client()
    try:
        return os_net.get_subnet(subnet_id)
    except os_exc.ResourceNotFound:
        LOG.exception("Subnet %s not found!", subnet_id)
        raise
@MEMOIZE
def get_os_vif_network(subnet_id):
os_net = clients.get_network_client()
n_subnet = os_net.get_subnet(subnet_id)
n_subnet = _get_subnet(subnet_id)
n_network = os_net.get_network(n_subnet.network_id)
subnet = os_vif_util.neutron_to_osvif_subnet(n_subnet)
@ -291,13 +301,12 @@ def get_subnet(subnet_id):
@MEMOIZE
def get_subnet_cidr(subnet_id):
    """Return the CIDR (string) of the given Neutron subnet."""
    # _get_subnet() already logs and re-raises ResourceNotFound.
    return _get_subnet(subnet_id).cidr


@MEMOIZE
def get_subnet_ip_version(subnet_id):
    """Return the IP version (4 or 6) of the given Neutron subnet."""
    return _get_subnet(subnet_id).ip_version
def get_subnet_id(**filters):
@ -524,16 +533,13 @@ def get_service_ports(service):
for port in service['spec']['ports']]
# TODO(dulek): Update this.
@MEMOIZE
def get_service_subnets_ip_versions():
    """Return the set of IP versions (4 and/or 6) of the service subnets.

    Reads [neutron_defaults]service_subnets; duplicates collapse into the
    set, so a dual-stack deployment yields {4, 6}.
    """
    return {_get_subnet(subnet_id).ip_version
            for subnet_id in CONF.neutron_defaults.service_subnets}
def clean_lb_crd_status(loadbalancer_name):