Support dual stack service subnets
This commit implements Services support for K8s clusters running dual stack, i.e. having IPv4 and IPv6 services. This means that the [neutron_defaults]service_subnet option is deprecated in favor of service_subnets, and Kuryr will choose the correct one based on the Service's `.spec.ipFamily`. Change-Id: Iad79a2b766b7c71986a9ea79143384cc029c1f79 Implements: blueprint dual-stack
This commit is contained in:
parent
1c1d1d463a
commit
2a6fda9cc4
|
@ -81,7 +81,7 @@ script. Below is the list of available variables:
|
|||
* ``$KURYR_K8S_PROJECT_ID`` - ``[neutron]k8s_project_id``
|
||||
* ``$KURYR_K8S_POD_SUBNET_ID`` - ``[neutron_defaults]pod_subnet_id``
|
||||
* ``$KURYR_K8S_POD_SG`` - ``[neutron_defaults]pod_sg``
|
||||
* ``$KURYR_K8S_SERVICE_SUBNET_ID`` - ``[neutron_defaults]service_subnet_id``
|
||||
* ``$KURYR_K8S_SERVICE_SUBNET_IDS`` - ``[neutron_defaults]service_subnets``
|
||||
* ``$KURYR_K8S_WORKER_NODES_SUBNETS`` - ``[pod_vif_nested]worker_nodes_subnets``
|
||||
* ``$KURYR_K8S_BINDING_DRIVER`` - ``[binding]driver`` (default:
|
||||
``kuryr.lib.binding.drivers.vlan``)
|
||||
|
|
|
@ -57,7 +57,7 @@ Edit ``kuryr.conf``:
|
|||
pod_security_groups = {id_of_security_group_for_pods}
|
||||
pod_subnet = {id_of_subnet_for_pods}
|
||||
project = {id_of_project}
|
||||
service_subnet = {id_of_subnet_for_k8s_services}
|
||||
service_subnets = {id_of_subnet_for_k8s_services}
|
||||
|
||||
.. note::
|
||||
|
||||
|
@ -115,7 +115,7 @@ Edit ``kuryr.conf``:
|
|||
You can generate ``ServiceAccount`` definition with correct ``ClusterRole``
|
||||
using instructions on :ref:`containerized-generate` page.
|
||||
|
||||
Note that the service_subnet and the pod_subnet *should be routable* and that
|
||||
Note that the service_subnets and the pod_subnet *should be routable* and that
|
||||
the pods should allow service subnet access.
|
||||
|
||||
Octavia supports two ways of performing the load balancing between the
|
||||
|
|
|
@ -390,7 +390,7 @@ The services and pods subnets should be created.
|
|||
|
||||
[neutron_defaults]
|
||||
pod_subnet = e0a888ab-9915-4685-a600-bffe240dc58b
|
||||
service_subnet = d6438a81-22fa-4a88-9b05-c4723662ef36
|
||||
service_subnets = d6438a81-22fa-4a88-9b05-c4723662ef36
|
||||
|
||||
#. Configure Kubernetes API server to use only a subset of the service
|
||||
addresses, **10.2.0.0/17**. The rest will be used for loadbalancer *vrrp*
|
||||
|
@ -565,7 +565,7 @@ of doing the following:
|
|||
|
||||
[neutron_defaults]
|
||||
pod_subnet = 3a1df0d9-f738-4293-8de6-6c624f742980
|
||||
service_subnet = 3a1df0d9-f738-4293-8de6-6c624f742980
|
||||
service_subnets = 3a1df0d9-f738-4293-8de6-6c624f742980
|
||||
|
||||
#. Configure Kubernetes API server to use only a subset of the addresses for
|
||||
services, **10.0.0.0/18**. The rest will be used for pods. To configure
|
||||
|
|
|
@ -217,8 +217,11 @@ neutron_defaults = [
|
|||
cfg.StrOpt('ovs_bridge',
|
||||
help=_("Default OpenVSwitch integration bridge"),
|
||||
sample_default="br-int"),
|
||||
cfg.StrOpt('service_subnet',
|
||||
help=_("Default Neutron subnet ID for Kubernetes services")),
|
||||
cfg.ListOpt('service_subnets',
|
||||
help=_("Neutron subnet IDs for Kubernetes services"),
|
||||
default=[],
|
||||
deprecated_name='service_subnet',
|
||||
deprecated_group='neutron_defaults'),
|
||||
cfg.StrOpt('external_svc_net',
|
||||
help=_("Default external network ID for Kubernetes services")),
|
||||
cfg.StrOpt('external_svc_subnet',
|
||||
|
|
|
@ -13,12 +13,18 @@
|
|||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import netaddr
|
||||
|
||||
from oslo_config import cfg
|
||||
from oslo_log import log as logging
|
||||
|
||||
from kuryr_kubernetes import config
|
||||
from kuryr_kubernetes.controller.drivers import base
|
||||
from kuryr_kubernetes import exceptions as k_exc
|
||||
from kuryr_kubernetes import utils
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class DefaultPodSubnetDriver(base.PodSubnetsDriver):
|
||||
"""Provides subnet for Pod port based on a configuration option."""
|
||||
|
@ -34,21 +40,49 @@ class DefaultPodSubnetDriver(base.PodSubnetsDriver):
|
|||
raise cfg.RequiredOptError('pod_subnet',
|
||||
cfg.OptGroup('neutron_defaults'))
|
||||
|
||||
return {subnet_id: utils.get_subnet(subnet_id)}
|
||||
return {subnet_id: utils.get_os_vif_network(subnet_id)}
|
||||
|
||||
|
||||
class DefaultServiceSubnetDriver(base.ServiceSubnetsDriver):
    """Provides subnet for Service's LBaaS based on a configuration option."""

    def get_subnets(self, service, project_id):
        """Return {subnet_id: os-vif network} for the given Service.

        Picks a subnet from ``[neutron_defaults]service_subnets`` whose IP
        version matches the one requested by the Service (derived from
        ``.spec.clusterIP``), or the first configured subnet when no version
        preference can be determined.

        :param service: K8s Service dict.
        :param project_id: Neutron project ID (unused by this driver).
        :raises cfg.RequiredOptError: when service_subnets is not set.
        :raises NotImplementedError: when the Service requires dual stack.
        :raises k_exc.InvalidKuryrConfig: when no configured subnet matches
            the IP version requested by the Service.
        """
        subnets_ids = config.CONF.neutron_defaults.service_subnets

        if not subnets_ids:
            # NOTE(ivc): this option is only required for
            # DefaultServiceSubnetDriver and its subclasses, but it may be
            # optional for other drivers (e.g. when each namespace has own
            # subnet)
            raise cfg.RequiredOptError('service_subnets',
                                       cfg.OptGroup('neutron_defaults'))

        ip_version = None  # This means we don't care.

        # This is to support vanilla K8s and 1.19 with IPv6DualStack feature
        # gate enabled. According to API reference even if ipFamilies field is
        # set we're still supposed to get the IP version from clusterIP field.
        cluster_ip = service['spec'].get('clusterIP')
        if cluster_ip:
            ip_version = netaddr.IPAddress(cluster_ip).version

        # This is 1.20+ with IPv6DualStack feature gate enabled.
        ip_family_policy = service['spec'].get('ipFamilyPolicy', 'SingleStack')
        if ip_family_policy == 'RequireDualStack':
            # NOTE: exceptions do not %-format their args the way logging
            # calls do, so interpolate the message explicitly.
            raise NotImplementedError(
                'Kuryr does not support RequireDualStack set for '
                'Service %s.' % utils.get_res_unique_name(service))
        elif ip_family_policy == 'PreferDualStack':
            LOG.warning('Kuryr does not support dual stack Services, ignoring '
                        'PreferDualStack set for Service %s.',
                        utils.get_res_unique_name(service))

        # Return the first configured subnet matching the requested version
        # (or simply the first one when the version is unknown).
        for subnet_id in subnets_ids:
            if (not ip_version or
                    ip_version == utils.get_subnet_ip_version(subnet_id)):
                return {subnet_id: utils.get_os_vif_network(subnet_id)}

        raise k_exc.InvalidKuryrConfig(
            '[neutron_defaults]service_subnets does not list IPv%d subnet '
            'requested by Service %s.' % (
                ip_version, utils.get_res_unique_name(service)))
|
||||
|
|
|
@ -267,9 +267,13 @@ class LBaaSv2Driver(base.LBaaSDriver):
|
|||
else:
|
||||
sgs = loadbalancer['security_groups']
|
||||
|
||||
sg_rule_ethertype = k_const.IPv4
|
||||
if utils.get_service_subnet_version() == k_const.IP_VERSION_6:
|
||||
sg_rule_ethertype = k_const.IPv6
|
||||
sg_rule_ethertypes = []
|
||||
for ip_version in utils.get_service_subnets_ip_versions():
|
||||
ethertype = k_const.IPv4
|
||||
if ip_version == k_const.IP_VERSION_6:
|
||||
ethertype = k_const.IPv6
|
||||
sg_rule_ethertypes.append(ethertype)
|
||||
|
||||
# Check if Network Policy allows listener on the pods
|
||||
for sg in sgs:
|
||||
if sg != lb_sg:
|
||||
|
@ -301,7 +305,7 @@ class LBaaSv2Driver(base.LBaaSDriver):
|
|||
lb_sg)
|
||||
os_net.create_security_group_rule(
|
||||
direction='ingress',
|
||||
ether_type=sg_rule_ethertype,
|
||||
ether_type=rule.ether_type,
|
||||
port_range_min=port,
|
||||
port_range_max=port,
|
||||
protocol=protocol,
|
||||
|
@ -330,13 +334,12 @@ class LBaaSv2Driver(base.LBaaSDriver):
|
|||
if add_default_rules:
|
||||
try:
|
||||
LOG.debug("Restoring default LBaaS sg rule for sg: %r", lb_sg)
|
||||
os_net.create_security_group_rule(direction='ingress',
|
||||
ether_type=sg_rule_ethertype,
|
||||
port_range_min=port,
|
||||
port_range_max=port,
|
||||
protocol=protocol,
|
||||
security_group_id=lb_sg,
|
||||
description=sg_rule_name)
|
||||
for sg_rule_ethertype in sg_rule_ethertypes:
|
||||
os_net.create_security_group_rule(
|
||||
direction='ingress', ether_type=sg_rule_ethertype,
|
||||
port_range_min=port, port_range_max=port,
|
||||
protocol=protocol, security_group_id=lb_sg,
|
||||
description=sg_rule_name)
|
||||
except os_exc.ConflictException:
|
||||
pass
|
||||
except os_exc.SDKException:
|
||||
|
|
|
@ -84,7 +84,7 @@ class NPWGMultiVIFDriver(base.MultiVIFDriver):
|
|||
LOG.debug("Default subnet mapping in config file "
|
||||
"doesn't contain any subnet for %s driver "
|
||||
"alias. Default pod_subnet was used.", alias)
|
||||
subnet = {subnet_id: utils.get_subnet(subnet_id)}
|
||||
subnet = {subnet_id: utils.get_os_vif_network(subnet_id)}
|
||||
vif = vif_drv.request_vif(pod, project_id, subnet, security_groups)
|
||||
if vif:
|
||||
vifs.append(vif)
|
||||
|
|
|
@ -50,7 +50,7 @@ class NamespacePodSubnetDriver(default_subnet.DefaultPodSubnetDriver):
|
|||
def get_namespace_subnet(self, namespace, subnet_id=None):
|
||||
if not subnet_id:
|
||||
subnet_id = self._get_namespace_subnet_id(namespace)
|
||||
return {subnet_id: utils.get_subnet(subnet_id)}
|
||||
return {subnet_id: utils.get_os_vif_network(subnet_id)}
|
||||
|
||||
def _get_namespace_subnet_id(self, namespace):
|
||||
kubernetes = clients.get_kubernetes_client()
|
||||
|
|
|
@ -145,8 +145,8 @@ class NetworkPolicyDriver(base.NetworkPolicyDriver):
|
|||
rules = []
|
||||
default_cidrs = []
|
||||
if CONF.octavia_defaults.enforce_sg_rules:
|
||||
default_cidrs.append(utils.get_subnet_cidr(
|
||||
CONF.neutron_defaults.service_subnet))
|
||||
for subnet_id in CONF.neutron_defaults.service_subnets:
|
||||
default_cidrs.append(utils.get_subnet_cidr(subnet_id))
|
||||
worker_subnet_ids = self.nodes_subnets_driver.get_nodes_subnets()
|
||||
default_cidrs.extend(utils.get_subnets_cidrs(worker_subnet_ids))
|
||||
|
||||
|
@ -205,8 +205,8 @@ class NetworkPolicyDriver(base.NetworkPolicyDriver):
|
|||
allowed_cidrs = utils.get_subnetpool_cidrs(
|
||||
CONF.namespace_subnet.pod_subnet_pool)
|
||||
if CONF.octavia_defaults.enforce_sg_rules:
|
||||
allowed_cidrs.append(utils.get_subnet_cidr(
|
||||
CONF.neutron_defaults.service_subnet))
|
||||
for subnet in CONF.neutron_defaults.service_subnets:
|
||||
allowed_cidrs.append(utils.get_subnet_cidr(subnet))
|
||||
elif namespace_selector:
|
||||
selectors = True
|
||||
if pod_selector:
|
||||
|
@ -521,12 +521,12 @@ class NetworkPolicyDriver(base.NetworkPolicyDriver):
|
|||
resource=None, port=None, protocol=None):
|
||||
services = driver_utils.get_services()
|
||||
if not resource:
|
||||
svc_subnet = utils.get_subnet_cidr(
|
||||
CONF.neutron_defaults.service_subnet)
|
||||
rule = driver_utils.create_security_group_rule_body(
|
||||
'egress', port, protocol=protocol, cidr=svc_subnet)
|
||||
if rule not in sg_rule_body_list:
|
||||
sg_rule_body_list.append(rule)
|
||||
for subnet_id in CONF.neutron_defaults.service_subnets:
|
||||
svc_subnet = utils.get_subnet_cidr(subnet_id)
|
||||
rule = driver_utils.create_security_group_rule_body(
|
||||
'egress', port, protocol=protocol, cidr=svc_subnet)
|
||||
if rule not in sg_rule_body_list:
|
||||
sg_rule_body_list.append(rule)
|
||||
return
|
||||
|
||||
for service in services.get('items'):
|
||||
|
|
|
@ -438,7 +438,7 @@ class BaseVIFPool(base.VIFPoolDriver, metaclass=abc.ABCMeta):
|
|||
port.network_id)}
|
||||
else:
|
||||
subnets[subnet_id] = {
|
||||
subnet_id: utils.get_subnet(subnet_id)}
|
||||
subnet_id: utils.get_os_vif_network(subnet_id)}
|
||||
return parent_ports, subports, subnets
|
||||
|
||||
def _cleanup_leftover_ports(self):
|
||||
|
@ -755,7 +755,7 @@ class NeutronVIFPool(BaseVIFPool):
|
|||
continue
|
||||
subnet_id = port.fixed_ips[0]['subnet_id']
|
||||
subnet = {
|
||||
subnet_id: utils.get_subnet(subnet_id)}
|
||||
subnet_id: utils.get_os_vif_network(subnet_id)}
|
||||
vif = ovu.neutron_to_osvif_vif(port.binding_vif_type, port, subnet)
|
||||
net_obj = subnet[subnet_id]
|
||||
pool_key = self._get_pool_key(port.binding_host_id,
|
||||
|
|
|
@ -101,7 +101,11 @@ class AllowedAddressAlreadyPresent(Exception):
|
|||
"""
|
||||
|
||||
|
||||
class MultiPodDriverPoolConfigurationNotSupported(Exception):
|
||||
class InvalidKuryrConfig(Exception):
|
||||
"""Exception indicates problem with Kuryr configuration (kuryr.conf)"""
|
||||
|
||||
|
||||
class MultiPodDriverPoolConfigurationNotSupported(InvalidKuryrConfig):
|
||||
"""Exception indicates a wrong configuration of the multi pod driver pool
|
||||
|
||||
This exception is raised when the multi pod driver pool is not properly
|
||||
|
|
|
@ -18,61 +18,154 @@ from unittest import mock
|
|||
from oslo_config import cfg
|
||||
|
||||
from kuryr_kubernetes.controller.drivers import default_subnet
|
||||
from kuryr_kubernetes import exceptions as k_exc
|
||||
from kuryr_kubernetes.tests import base as test_base
|
||||
|
||||
|
||||
class TestDefaultPodSubnetDriver(test_base.TestCase):
    """Tests for DefaultPodSubnetDriver.get_subnets()."""

    @mock.patch('kuryr_kubernetes.utils.get_os_vif_network')
    @mock.patch('kuryr_kubernetes.config.CONF')
    def test_get_subnets(self, m_cfg, m_get_network):
        # Happy path: the configured pod_subnet is resolved to a network.
        subnet_id = 'subnet-uuid'
        subnet = mock.sentinel.subnet
        pod = mock.sentinel.pod
        project_id = mock.sentinel.project_id
        m_cfg.neutron_defaults.pod_subnet = subnet_id
        m_get_network.return_value = subnet
        driver = default_subnet.DefaultPodSubnetDriver()

        subnets = driver.get_subnets(pod, project_id)

        self.assertEqual({subnet_id: subnet}, subnets)
        m_get_network.assert_called_once_with(subnet_id)

    @mock.patch('kuryr_kubernetes.utils.get_os_vif_network')
    def test_get_subnets_not_set(self, m_get_network):
        # Missing [neutron_defaults]pod_subnet must raise RequiredOptError.
        pod = mock.sentinel.pod
        project_id = mock.sentinel.project_id
        driver = default_subnet.DefaultPodSubnetDriver()

        self.assertRaises(cfg.RequiredOptError, driver.get_subnets,
                          pod, project_id)
        m_get_network.assert_not_called()
|
||||
|
||||
|
||||
class TestDefaultServiceSubnetDriver(test_base.TestCase):
    """Tests for DefaultServiceSubnetDriver.get_subnets()."""

    def setUp(self):
        super().setUp()
        # Minimal IPv4 single-stack Service fixture shared by the tests.
        self.service = {
            'metadata': {'name': 'foo', 'namespace': 'bar'},
            'spec': {'clusterIP': '172.30.0.1'},
        }

    @mock.patch('kuryr_kubernetes.utils.get_os_vif_network')
    @mock.patch('kuryr_kubernetes.utils.get_subnet_ip_version')
    def test_get_subnets(self, m_get_version, m_get_network):
        # Happy path: single IPv4 subnet matching the clusterIP version.
        subnet_id = 'subnet-uuid'
        subnet = mock.sentinel.subnet
        project_id = mock.sentinel.project_id
        cfg.CONF.set_override('service_subnets', [subnet_id],
                              group='neutron_defaults')
        m_get_version.return_value = 4
        m_get_network.return_value = subnet
        driver = default_subnet.DefaultServiceSubnetDriver()

        subnets = driver.get_subnets(self.service, project_id)

        self.assertEqual({subnet_id: subnet}, subnets)
        m_get_version.assert_called_once_with(subnet_id)
        m_get_network.assert_called_once_with(subnet_id)

    @mock.patch('kuryr_kubernetes.utils.get_os_vif_network')
    @mock.patch('kuryr_kubernetes.utils.get_subnet_ip_version')
    def test_get_subnets_deprecated_opt(self, m_get_version, m_get_network):
        # Setting the deprecated service_subnet option must still work
        # through the service_subnets ListOpt deprecation machinery.
        subnet_id = 'subnet-uuid'
        subnet = mock.sentinel.subnet
        project_id = mock.sentinel.project_id
        m_get_version.return_value = 4
        m_get_network.return_value = subnet
        cfg.CONF.set_override('service_subnet', subnet_id,
                              group='neutron_defaults')
        driver = default_subnet.DefaultServiceSubnetDriver()

        subnets = driver.get_subnets(self.service, project_id)

        self.assertEqual({subnet_id: subnet}, subnets)
        m_get_version.assert_called_once_with(subnet_id)
        m_get_network.assert_called_once_with(subnet_id)

    @mock.patch('kuryr_kubernetes.utils.get_os_vif_network')
    @mock.patch('kuryr_kubernetes.utils.get_subnet_ip_version')
    def test_get_subnets_wrong_version(self, m_get_version, m_get_network):
        # Service requests IPv4 (clusterIP) but only an IPv6 subnet is
        # configured - configuration error.
        subnet_id = 'subnet-uuid'
        subnet = mock.sentinel.subnet
        project_id = mock.sentinel.project_id
        cfg.CONF.set_override('service_subnets', [subnet_id],
                              group='neutron_defaults')
        m_get_version.return_value = 6
        m_get_network.return_value = subnet
        driver = default_subnet.DefaultServiceSubnetDriver()

        self.assertRaises(k_exc.InvalidKuryrConfig,
                          driver.get_subnets, self.service, project_id)

        m_get_version.assert_called_once_with(subnet_id)
        m_get_network.assert_not_called()

    @mock.patch('kuryr_kubernetes.utils.get_os_vif_network')
    @mock.patch('kuryr_kubernetes.utils.get_subnet_ip_version')
    def test_get_subnets_choose_version(self, m_get_version, m_get_network):
        # With an IPv6 clusterIP the driver must skip the IPv4 subnet and
        # pick the IPv6 one.
        subnet_id1 = 'subnet-uuid1'
        subnet_id2 = 'subnet-uuid2'
        subnet2 = mock.sentinel.subnet2
        project_id = mock.sentinel.project_id
        cfg.CONF.set_override('service_subnets', [subnet_id1, subnet_id2],
                              group='neutron_defaults')
        m_get_version.side_effect = (4, 6)
        m_get_network.return_value = subnet2
        self.service['spec']['clusterIP'] = (
            '2001:0db8:85a3:0000:0000:8a2e:0370:7334')
        driver = default_subnet.DefaultServiceSubnetDriver()

        subnets = driver.get_subnets(self.service, project_id)

        self.assertEqual({subnet_id2: subnet2}, subnets)
        m_get_version.assert_has_calls([mock.call(subnet_id1),
                                        mock.call(subnet_id2)])
        m_get_network.assert_called_once_with(subnet_id2)

    @mock.patch('kuryr_kubernetes.utils.get_os_vif_network')
    @mock.patch('kuryr_kubernetes.utils.get_subnet_ip_version')
    def test_get_subnets_require_dual_stack(self, m_get_version,
                                            m_get_network):
        # RequireDualStack is not supported by Kuryr.
        subnet_id = 'subnet-uuid'
        subnet = mock.sentinel.subnet
        project_id = mock.sentinel.project_id
        cfg.CONF.set_override('service_subnets', [subnet_id],
                              group='neutron_defaults')
        m_get_version.return_value = 6
        m_get_network.return_value = subnet
        driver = default_subnet.DefaultServiceSubnetDriver()

        self.service['spec']['ipFamilyPolicy'] = 'RequireDualStack'

        self.assertRaises(NotImplementedError,
                          driver.get_subnets, self.service, project_id)

    @mock.patch('kuryr_kubernetes.utils.get_os_vif_network')
    def test_get_subnets_not_set(self, m_get_network):
        # Both unset (None) and empty list service_subnets must raise.
        service = mock.sentinel.service
        project_id = mock.sentinel.project_id
        driver = default_subnet.DefaultServiceSubnetDriver()

        cfg.CONF.set_override('service_subnets', None,
                              group='neutron_defaults')
        self.assertRaises(cfg.RequiredOptError, driver.get_subnets,
                          service, project_id)
        m_get_network.assert_not_called()

        cfg.CONF.set_override('service_subnets', [], group='neutron_defaults')
        self.assertRaises(cfg.RequiredOptError, driver.get_subnets,
                          service, project_id)
        m_get_network.assert_not_called()
|
||||
|
|
|
@ -1252,8 +1252,8 @@ class TestLBaaSv2AppyMembersSecurityGroup(test_base.TestCase):
|
|||
security_group_id=self.vip.security_group_ids[0],
|
||||
description=self.sg_rule_name)
|
||||
|
||||
@mock.patch("kuryr_kubernetes.utils.get_service_subnet_version",
|
||||
return_value=k_const.IP_VERSION_6)
|
||||
@mock.patch("kuryr_kubernetes.utils.get_service_subnets_ip_versions",
|
||||
return_value=[k_const.IP_VERSION_6])
|
||||
def test__apply_members_security_groups_ipv6_add_default(self, gssv):
|
||||
os_net = self.useFixture(k_fix.MockNetworkClient()).client
|
||||
cls = d_lbaasv2.LBaaSv2Driver
|
||||
|
@ -1284,6 +1284,8 @@ class TestLBaaSv2AppyMembersSecurityGroup(test_base.TestCase):
|
|||
security_group_id=self.vip.security_group_ids[0],
|
||||
description=self.sg_rule_name)
|
||||
|
||||
@mock.patch("kuryr_kubernetes.utils.get_service_subnets_ip_versions",
|
||||
mock.Mock(return_value=[k_const.IP_VERSION_4]))
|
||||
def test__apply_members_security_groups_add_default_conflict(self):
|
||||
os_net = self.useFixture(k_fix.MockNetworkClient()).client
|
||||
cls = d_lbaasv2.LBaaSv2Driver
|
||||
|
@ -1316,6 +1318,8 @@ class TestLBaaSv2AppyMembersSecurityGroup(test_base.TestCase):
|
|||
security_group_id=self.vip.security_group_ids[0],
|
||||
description=self.sg_rule_name)
|
||||
|
||||
@mock.patch("kuryr_kubernetes.utils.get_service_subnets_ip_versions",
|
||||
mock.Mock(return_value=[k_const.IP_VERSION_4]))
|
||||
def test__apply_members_security_groups_add_default_sdk_exception(self):
|
||||
os_net = self.useFixture(k_fix.MockNetworkClient()).client
|
||||
cls = d_lbaasv2.LBaaSv2Driver
|
||||
|
|
|
@ -134,17 +134,17 @@ class TestNPWGMultiVIFDriver(test_base.TestCase):
|
|||
specific_driver='multi_pool')
|
||||
m_set_vifs_driver.assert_called_once()
|
||||
|
||||
@mock.patch('kuryr_kubernetes.utils.get_subnet')
|
||||
@mock.patch('kuryr_kubernetes.utils.get_os_vif_network')
|
||||
@mock.patch('kuryr_kubernetes.clients.get_kubernetes_client')
|
||||
def test_request_additional_vifs(self, m_get_client, m_get_subnet):
|
||||
def test_request_additional_vifs(self, m_get_client, m_get_network):
|
||||
vifs = [mock.sentinel.vif_a, mock.sentinel.vif_b, mock.sentinel.vif_c]
|
||||
self._request_vif.side_effect = vifs
|
||||
net_crds = get_crd_objs()
|
||||
client = mock.Mock()
|
||||
m_get_client.return_value = client
|
||||
m_get_subnet.side_effect = [mock.sentinel.subneta,
|
||||
mock.sentinel.subnetb,
|
||||
mock.sentinel.subnetc]
|
||||
m_get_network.side_effect = [mock.sentinel.subneta,
|
||||
mock.sentinel.subnetb,
|
||||
mock.sentinel.subnetc]
|
||||
client.get = mock.Mock()
|
||||
client.get.side_effect = net_crds
|
||||
self._drv._get_networks.return_value = get_nets()
|
||||
|
|
|
@ -54,8 +54,8 @@ def get_pod_obj():
|
|||
|
||||
class TestNamespacePodSubnetDriver(test_base.TestCase):
|
||||
|
||||
@mock.patch('kuryr_kubernetes.utils.get_subnet')
|
||||
def test_get_subnets(self, m_get_subnet):
|
||||
@mock.patch('kuryr_kubernetes.utils.get_os_vif_network')
|
||||
def test_get_subnets(self, m_get_network):
|
||||
pod = get_pod_obj()
|
||||
pod_namespace = pod['metadata']['namespace']
|
||||
subnet_id = mock.sentinel.subnet_id
|
||||
|
@ -65,17 +65,17 @@ class TestNamespacePodSubnetDriver(test_base.TestCase):
|
|||
m_driver = mock.MagicMock(spec=cls)
|
||||
|
||||
m_driver._get_namespace_subnet_id.return_value = subnet_id
|
||||
m_get_subnet.return_value = subnet
|
||||
m_get_network.return_value = subnet
|
||||
|
||||
subnets = cls.get_namespace_subnet(m_driver, pod_namespace)
|
||||
|
||||
self.assertEqual({subnet_id: subnet}, subnets)
|
||||
m_driver._get_namespace_subnet_id.assert_called_once_with(
|
||||
pod_namespace)
|
||||
m_get_subnet.assert_called_once_with(subnet_id)
|
||||
m_get_network.assert_called_once_with(subnet_id)
|
||||
|
||||
@mock.patch('kuryr_kubernetes.utils.get_subnet')
|
||||
def test_get_subnets_namespace_not_ready(self, m_get_subnet):
|
||||
@mock.patch('kuryr_kubernetes.utils.get_os_vif_network')
|
||||
def test_get_subnets_namespace_not_ready(self, m_get_network):
|
||||
pod = get_pod_obj()
|
||||
pod_namespace = pod['metadata']['namespace']
|
||||
|
||||
|
@ -90,7 +90,7 @@ class TestNamespacePodSubnetDriver(test_base.TestCase):
|
|||
|
||||
m_driver._get_namespace_subnet_id.assert_called_once_with(
|
||||
pod_namespace)
|
||||
m_get_subnet.assert_not_called()
|
||||
m_get_network.assert_not_called()
|
||||
|
||||
def test__get_namespace_subnet_id(self):
|
||||
cls = subnet_drv.NamespacePodSubnetDriver
|
||||
|
|
|
@ -100,7 +100,7 @@ class TestSriovVIFDriver(test_base.TestCase):
|
|||
vif = mock.sentinel.vif
|
||||
m_to_vif.return_value = vif
|
||||
os_net.create_port.return_value = port
|
||||
utils.get_subnet.return_value = subnets
|
||||
utils.get_os_vif_network.return_value = subnets
|
||||
|
||||
self.assertEqual(vif, cls.request_vif(m_driver, self._pod, project_id,
|
||||
subnets, security_groups))
|
||||
|
|
|
@ -838,8 +838,8 @@ class NeutronVIFPool(test_base.TestCase):
|
|||
os_net.delete_port.assert_not_called()
|
||||
|
||||
@mock.patch('kuryr_kubernetes.os_vif_util.neutron_to_osvif_vif')
|
||||
@mock.patch('kuryr_kubernetes.utils.get_subnet')
|
||||
def test__recover_precreated_ports(self, m_get_subnet, m_to_osvif):
|
||||
@mock.patch('kuryr_kubernetes.utils.get_os_vif_network')
|
||||
def test__recover_precreated_ports(self, m_get_network, m_to_osvif):
|
||||
cls = vif_pool.NeutronVIFPool
|
||||
m_driver = mock.MagicMock(spec=cls)
|
||||
os_net = self.useFixture(k_fix.MockNetworkClient()).client
|
||||
|
@ -870,7 +870,7 @@ class NeutronVIFPool(test_base.TestCase):
|
|||
'provider_network_type': None})
|
||||
network = ovu.neutron_to_osvif_network(_net)
|
||||
subnet = {subnet_id: network}
|
||||
m_get_subnet.return_value = network
|
||||
m_get_network.return_value = network
|
||||
vif = mock.sentinel.vif
|
||||
m_to_osvif.return_value = vif
|
||||
m_driver._get_in_use_ports_info.return_value = [], {}
|
||||
|
@ -882,7 +882,7 @@ class NeutronVIFPool(test_base.TestCase):
|
|||
cls._recover_precreated_ports(m_driver)
|
||||
|
||||
os_net.ports.assert_called_once()
|
||||
m_get_subnet.assert_called_with(subnet_id)
|
||||
m_get_network.assert_called_with(subnet_id)
|
||||
m_to_osvif.assert_called_once_with(vif_plugin, port, subnet)
|
||||
|
||||
self.assertEqual(m_driver._existing_vifs[port_id], vif)
|
||||
|
@ -890,8 +890,8 @@ class NeutronVIFPool(test_base.TestCase):
|
|||
{tuple(port.security_group_ids): [port_id]})
|
||||
|
||||
@mock.patch('kuryr_kubernetes.os_vif_util.neutron_to_osvif_vif')
|
||||
@mock.patch('kuryr_kubernetes.utils.get_subnet')
|
||||
def test__recover_precreated_ports_empty(self, m_get_subnet, m_to_osvif):
|
||||
@mock.patch('kuryr_kubernetes.utils.get_os_vif_network')
|
||||
def test__recover_precreated_ports_empty(self, m_get_network, m_to_osvif):
|
||||
cls = vif_pool.NeutronVIFPool
|
||||
m_driver = mock.MagicMock(spec=cls)
|
||||
os_net = self.useFixture(k_fix.MockNetworkClient()).client
|
||||
|
@ -908,7 +908,7 @@ class NeutronVIFPool(test_base.TestCase):
|
|||
cls._recover_precreated_ports(m_driver)
|
||||
|
||||
os_net.ports.assert_called_once()
|
||||
m_get_subnet.assert_not_called()
|
||||
m_get_network.assert_not_called()
|
||||
m_to_osvif.assert_not_called()
|
||||
|
||||
def test_delete_network_pools(self):
|
||||
|
@ -1426,8 +1426,8 @@ class NestedVIFPool(test_base.TestCase):
|
|||
self.assertEqual(ip_address, cls._get_parent_port_ip(m_driver,
|
||||
port_id))
|
||||
|
||||
@mock.patch('kuryr_kubernetes.utils.get_subnet')
|
||||
def test__get_trunk_info(self, m_get_subnet):
|
||||
@mock.patch('kuryr_kubernetes.utils.get_os_vif_network')
|
||||
def test__get_trunk_info(self, m_get_network):
|
||||
cls = vif_pool.NestedVIFPool
|
||||
m_driver = mock.MagicMock(spec=cls)
|
||||
os_net = self.useFixture(k_fix.MockNetworkClient()).client
|
||||
|
@ -1449,7 +1449,7 @@ class NestedVIFPool(test_base.TestCase):
|
|||
os_net.ports.return_value = [trunk_port, subport]
|
||||
m_driver._get_in_use_ports_info.return_value = [], {}
|
||||
subnet = mock.sentinel.subnet
|
||||
m_get_subnet.return_value = subnet
|
||||
m_get_network.return_value = subnet
|
||||
|
||||
exp_p_ports = {trunk_id: {
|
||||
'ip': trunk_port.fixed_ips[0]['ip_address'],
|
||||
|
|
|
@ -65,7 +65,7 @@ class TestUtils(test_base.TestCase):
|
|||
|
||||
@mock.patch('kuryr_kubernetes.os_vif_util.neutron_to_osvif_network')
|
||||
@mock.patch('kuryr_kubernetes.os_vif_util.neutron_to_osvif_subnet')
|
||||
def test_get_subnet(self, m_osv_subnet, m_osv_network):
|
||||
def test_get_os_vif_network(self, m_osv_subnet, m_osv_network):
|
||||
os_net = self.useFixture(k_fix.MockNetworkClient()).client
|
||||
|
||||
subnet = mock.MagicMock()
|
||||
|
@ -82,7 +82,7 @@ class TestUtils(test_base.TestCase):
|
|||
m_osv_subnet.return_value = subnet
|
||||
m_osv_network.return_value = network
|
||||
|
||||
ret = utils.get_subnet(subnet_id)
|
||||
ret = utils.get_os_vif_network(subnet_id)
|
||||
|
||||
self.assertEqual(network, ret)
|
||||
os_net.get_subnet.assert_called_once_with(subnet_id)
|
||||
|
|
|
@ -277,10 +277,20 @@ def get_nodes_ips(node_subnets):
|
|||
|
||||
|
||||
@MEMOIZE
def _get_subnet(subnet_id):
    """Fetch the Neutron subnet object for *subnet_id* (memoized).

    Shared by the public subnet helpers so the Neutron GET is issued only
    once per subnet ID. Logs and re-raises ResourceNotFound so callers get
    a clear error for a misconfigured subnet ID.
    """
    os_net = clients.get_network_client()
    try:
        return os_net.get_subnet(subnet_id)
    except os_exc.ResourceNotFound:
        LOG.exception("Subnet %s not found!", subnet_id)
        raise
||||
|
||||
|
||||
@MEMOIZE
|
||||
def get_os_vif_network(subnet_id):
|
||||
os_net = clients.get_network_client()
|
||||
|
||||
n_subnet = os_net.get_subnet(subnet_id)
|
||||
n_subnet = _get_subnet(subnet_id)
|
||||
n_network = os_net.get_network(n_subnet.network_id)
|
||||
|
||||
subnet = os_vif_util.neutron_to_osvif_subnet(n_subnet)
|
||||
|
@ -291,13 +301,12 @@ def get_subnet(subnet_id):
|
|||
|
||||
@MEMOIZE
def get_subnet_cidr(subnet_id):
    """Return the CIDR string of the Neutron subnet *subnet_id*.

    :raises os_exc.ResourceNotFound: when the subnet does not exist.
    """
    return _get_subnet(subnet_id).cidr
|
||||
|
||||
|
||||
@MEMOIZE
def get_subnet_ip_version(subnet_id):
    """Return the IP version (4 or 6) of the Neutron subnet *subnet_id*.

    :raises os_exc.ResourceNotFound: when the subnet does not exist.
    """
    return _get_subnet(subnet_id).ip_version
|
||||
|
||||
|
||||
def get_subnet_id(**filters):
|
||||
|
@ -524,16 +533,13 @@ def get_service_ports(service):
|
|||
for port in service['spec']['ports']]
|
||||
|
||||
|
||||
@MEMOIZE
def get_service_subnets_ip_versions():
    """Return the set of IP versions (4 and/or 6) of the service subnets.

    Iterates over [neutron_defaults]service_subnets; an empty option yields
    an empty set.
    """
    return {_get_subnet(subnet_id).ip_version
            for subnet_id in CONF.neutron_defaults.service_subnets}
|
||||
|
||||
|
||||
def clean_lb_crd_status(loadbalancer_name):
|
||||
|
|
|
@ -0,0 +1,8 @@
|
|||
---
|
||||
deprecations:
|
||||
- |
|
||||
``[neutron_defaults]service_subnet`` option is getting deprecated in favor
|
||||
of ``[neutron_defaults]service_subnets`` that will accept the list of
|
||||
subnet IDs. This is in order to support dual stack K8s clusters. Please
|
||||
note that setting deprecated option will have the same meaning as the new
|
||||
one with just one element.
|
|
@ -33,7 +33,7 @@ if [ -z $CONF_PATH ]; then
|
|||
k8s_project_id=${KURYR_K8S_PROJECT_ID}
|
||||
pod_subnet_id=${KURYR_K8S_POD_SUBNET_ID}
|
||||
pod_sg=${KURYR_K8S_POD_SG}
|
||||
service_subnet_id=${KURYR_K8S_SERVICE_SUBNET_ID}
|
||||
service_subnet_ids=${KURYR_K8S_SERVICE_SUBNET_IDS:-${KURYR_K8S_SERVICE_SUBNET_ID}}
|
||||
worker_nodes_subnets=${KURYR_K8S_WORKER_NODES_SUBNETS:-${KURYR_K8S_WORKER_NODES_SUBNET}}
|
||||
binding_driver=${KURYR_K8S_BINDING_DRIVER:-kuryr.lib.binding.drivers.vlan}
|
||||
binding_iface=${KURYR_K8S_BINDING_IFACE:-eth0}
|
||||
|
@ -68,7 +68,7 @@ EOF
|
|||
cat >> $CONF_PATH << EOF
|
||||
[neutron_defaults]
|
||||
ovs_bridge = br-int
|
||||
service_subnet = $service_subnet_id
|
||||
service_subnet = $service_subnet_ids
|
||||
pod_security_groups = $pod_sg
|
||||
pod_subnet = $pod_subnet_id
|
||||
project = $k8s_project_id
|
||||
|
|
Loading…
Reference in New Issue