Support for multiple worker nodes subnets

This commit deprecates `[pod_vif_nested]worker_nodes_subnet` in favor of
`[pod_vif_nested]worker_nodes_subnets`, which accepts a list instead.
All code using the deprecated option is updated to expect a list and to
iterate over the possible nodes subnets.

Change-Id: I7671fb06863d58b58905bec43555d8f21626f640
Michał Dulko 2020-12-14 10:01:51 +01:00
parent 31b96f3ecc
commit b3814a33d6
13 changed files with 134 additions and 59 deletions


@@ -82,7 +82,7 @@ script. Below is the list of available variables:
 * ``$KURYR_K8S_POD_SUBNET_ID`` - ``[neutron_defaults]pod_subnet_id``
 * ``$KURYR_K8S_POD_SG`` - ``[neutron_defaults]pod_sg``
 * ``$KURYR_K8S_SERVICE_SUBNET_ID`` - ``[neutron_defaults]service_subnet_id``
-* ``$KURYR_K8S_WORKER_NODES_SUBNET`` - ``[pod_vif_nested]worker_nodes_subnet``
+* ``$KURYR_K8S_WORKER_NODES_SUBNETS`` - ``[pod_vif_nested]worker_nodes_subnets``
 * ``$KURYR_K8S_BINDING_DRIVER`` - ``[binding]driver`` (default:
   ``kuryr.lib.binding.drivers.vlan``)
 * ``$KURYR_K8S_BINDING_IFACE`` - ``[binding]link_iface`` (default: eth0)


@@ -28,7 +28,7 @@ nested MACVLAN driver rather than VLAN and trunk ports.
   .. code-block:: ini

      [pod_vif_nested]
-     worker_nodes_subnet = <UNDERCLOUD_SUBNET_WORKER_NODES_UUID>
+     worker_nodes_subnets = <UNDERCLOUD_SUBNET_WORKER_NODES_UUID>

 - Configure "pod_vif_driver" as "nested-macvlan":


@@ -64,7 +64,7 @@ for the VM:
   .. code-block:: ini

      [pod_vif_nested]
-     worker_nodes_subnet = <UNDERCLOUD_SUBNET_WORKER_NODES_UUID>
+     worker_nodes_subnets = <UNDERCLOUD_SUBNET_WORKER_NODES_UUID>

 - Configure binding section:


@@ -51,8 +51,9 @@ You need to set several options in the kuryr.conf:
    vif_pool_driver = nested  # If using port pools.

    [pod_vif_nested]
-   # ID of the subnet in which worker node VMs are running.
-   worker_nodes_subnet = <id>
+   # IDs of the subnets in which worker node VMs are running (if multiple,
+   # join them with commas).
+   worker_nodes_subnets = <id>

 Also if you want to run several Kubernetes clusters in one OpenStack tenant you
 need to make sure Kuryr-Kubernetes instances are able to distinguish their own


@@ -268,9 +268,11 @@ cache_defaults = [
 ]

 nested_vif_driver_opts = [
-    cfg.StrOpt('worker_nodes_subnet',
-               help=_("Neutron subnet ID for k8s worker node vms."),
-               default=''),
+    cfg.ListOpt('worker_nodes_subnets',
+                help=_("Neutron subnet IDs for k8s worker node VMs."),
+                default=[],
+                deprecated_name='worker_nodes_subnet',
+                deprecated_group='pod_vif_nested'),
     cfg.IntOpt('rev_update_attempts',
                help=_("How many times to try to re-update the neutron "
                       "resource when revision has been changed by other "
                       "thread"),


@@ -23,6 +23,7 @@ from kuryr_kubernetes import clients
 from kuryr_kubernetes.controller.drivers import neutron_vif

+CONF = oslo_cfg.CONF
 LOG = logging.getLogger(__name__)

@@ -32,26 +33,30 @@ class NestedPodVIFDriver(neutron_vif.NeutronPodVIFDriver,

     def _get_parent_port_by_host_ip(self, node_fixed_ip):
         os_net = clients.get_network_client()
-        node_subnet_id = oslo_cfg.CONF.pod_vif_nested.worker_nodes_subnet
-        if not node_subnet_id:
+        node_subnet_ids = oslo_cfg.CONF.pod_vif_nested.worker_nodes_subnets
+        if not node_subnet_ids:
             raise oslo_cfg.RequiredOptError(
-                'worker_nodes_subnet', oslo_cfg.OptGroup('pod_vif_nested'))
+                'worker_nodes_subnets', oslo_cfg.OptGroup('pod_vif_nested'))

+        fixed_ips = ['ip_address=%s' % str(node_fixed_ip)]
+        filters = {'fixed_ips': fixed_ips}
+        tags = CONF.neutron_defaults.resource_tags
+        if tags:
+            filters['tags'] = tags
         try:
-            fixed_ips = ['subnet_id=%s' % str(node_subnet_id),
-                         'ip_address=%s' % str(node_fixed_ip)]
-            ports = os_net.ports(fixed_ips=fixed_ips)
+            ports = os_net.ports(**filters)
         except os_exc.SDKException:
-            LOG.error("Parent vm port with fixed ips %s not found!",
-                      fixed_ips)
+            LOG.error("Parent VM port with fixed IPs %s not found!", fixed_ips)
             raise

-        try:
-            return next(ports)
-        except StopIteration:
-            LOG.error("Neutron port for vm port with fixed ips %s not found!",
-                      fixed_ips)
-            raise kl_exc.NoResourceException
+        for port in ports:
+            for fip in port.fixed_ips:
+                if fip.get('subnet_id') in node_subnet_ids:
+                    return port
+
+        LOG.error("Neutron port for VM port with fixed IPs %s not found!",
+                  fixed_ips)
+        raise kl_exc.NoResourceException()

     def _get_parent_port(self, pod):
         try:
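The lookup no longer asks Neutron to filter by subnet; it fetches ports matching the node's fixed IP (plus optional resource tags) and picks the first one plugged into any configured subnet. A standalone sketch of that selection step, not the driver code itself, with plain dicts standing in for openstacksdk Port objects:

```python
def pick_parent_port(ports, node_subnet_ids):
    """Return the first port with a fixed IP on any configured subnet."""
    for port in ports:
        for fip in port['fixed_ips']:
            if fip.get('subnet_id') in node_subnet_ids:
                return port
    return None


ports = [
    {'id': 'p1', 'fixed_ips': [{'subnet_id': 'subnet-a'}]},
    {'id': 'p2', 'fixed_ips': [{'subnet_id': 'subnet-b'}]},
]
assert pick_parent_port(ports, ['subnet-b', 'subnet-c'])['id'] == 'p2'
assert pick_parent_port(ports, ['subnet-x']) is None
```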


@@ -147,9 +147,9 @@ class NetworkPolicyDriver(base.NetworkPolicyDriver):
         if CONF.octavia_defaults.enforce_sg_rules:
             default_cidrs.append(utils.get_subnet_cidr(
                 CONF.neutron_defaults.service_subnet))
-        worker_subnet_id = CONF.pod_vif_nested.worker_nodes_subnet
-        if worker_subnet_id:
-            default_cidrs.append(utils.get_subnet_cidr(worker_subnet_id))
+        worker_subnet_ids = CONF.pod_vif_nested.worker_nodes_subnets
+        default_cidrs.extend(utils.get_subnets_cidrs(worker_subnet_ids))
+
         for cidr in default_cidrs:
             ethertype = constants.IPv4
             if ipaddress.ip_network(cidr).version == constants.IP_VERSION_6:


@@ -50,8 +50,8 @@ class KuryrLoadBalancerHandler(k8s_base.ResourceEventHandler):
         self._drv_service_pub_ip = drv_base.ServicePubIpDriver.get_instance()
         self._drv_svc_project = drv_base.ServiceProjectDriver.get_instance()
         self._drv_sg = drv_base.ServiceSecurityGroupsDriver.get_instance()
-        self._nodes_subnet = utils.get_subnet_cidr(
-            CONF.pod_vif_nested.worker_nodes_subnet)
+        self._nodes_subnets = utils.get_subnets_id_cidrs(
+            CONF.pod_vif_nested.worker_nodes_subnets)

     def on_present(self, loadbalancer_crd):
         if self._should_ignore(loadbalancer_crd):

@@ -261,8 +261,8 @@ class KuryrLoadBalancerHandler(k8s_base.ResourceEventHandler):
             target_namespace = target_ref['namespace']
         # Avoid to point to a Pod on hostNetwork
         # that isn't the one to be added as Member.
-        if not target_ref and utils.is_ip_on_subnet(
-                self._nodes_subnet, target_ip):
+        if not target_ref and utils.get_subnet_by_ip(
+                self._nodes_subnets, target_ip):
             target_pod = {}
         else:
             target_pod = utils.get_pod_by_ip(

@@ -357,10 +357,11 @@ class KuryrLoadBalancerHandler(k8s_base.ResourceEventHandler):
         if (CONF.octavia_defaults.member_mode ==
                 k_const.OCTAVIA_L2_MEMBER_MODE):
             if target_pod:
-                subnet_id = self._get_pod_subnet(
-                    target_pod, target_ip)
-            elif utils.is_ip_on_subnet(self._nodes_subnet, target_ip):
-                subnet_id = CONF.pod_vif_nested.worker_nodes_subnet
-            else:
-                # We use the service subnet id so that the connectivity
-                # from VIP to pods happens in layer 3 mode, i.e.,
+                subnet_id = self._get_pod_subnet(target_pod, target_ip)
+            else:
+                subnet = utils.get_subnet_by_ip(self._nodes_subnets,
+                                                target_ip)
+                if subnet:
+                    subnet_id = subnet[0]
+                else:
+                    # We use the service subnet id so that the connectivity
+                    # from VIP to pods happens in layer 3 mode, i.e.,

@@ -377,10 +378,16 @@ class KuryrLoadBalancerHandler(k8s_base.ResourceEventHandler):
         if subnet_ids:
             return subnet_ids[0]
         else:
-            # NOTE(ltomasbo): We are assuming that if ip is not on the
-            # pod subnet is because the member is using hostnetworking. In
-            # this worker_nodes_subnet will be used
-            return config.CONF.pod_vif_nested.worker_nodes_subnet
+            # NOTE(ltomasbo): We are assuming that if the IP is not on the
+            # pod subnet it's because the member is using hostNetworking. In
+            # this case we look for the IP in worker_nodes_subnets.
+            subnet = utils.get_subnet_by_ip(self._nodes_subnets, ip)
+            if subnet:
+                return subnet[0]
+            else:
+                # This shouldn't ever happen but let's return just the first
+                # worker_nodes_subnet id.
+                return self._nodes_subnets[0][0]

     def _get_port_in_pool(self, pool, loadbalancer_crd):
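In L2 member mode the handler now resolves a member's subnet in three steps: the pod's own subnet when the member is a pod, otherwise the worker nodes subnet containing the IP, with the first configured subnet as a last-resort fallback. A condensed, self-contained sketch of that decision; the function name and arguments are illustrative, not the handler's API:

```python
import ipaddress

# nodes_subnets mirrors self._nodes_subnets: a list of (id, cidr) tuples.
def pick_member_subnet_id(pod_subnet_ids, nodes_subnets, target_ip):
    if pod_subnet_ids:
        return pod_subnet_ids[0]  # Pod member: use its own subnet.
    ip = ipaddress.ip_address(target_ip)
    for subnet_id, cidr in nodes_subnets:
        if ip in ipaddress.ip_network(cidr):
            return subnet_id      # hostNetwork member on a node subnet.
    return nodes_subnets[0][0]    # Fallback; shouldn't ever happen.


nodes_subnets = [('subnet-a', '10.0.0.0/24'), ('subnet-b', '10.0.1.0/24')]
assert pick_member_subnet_id([], nodes_subnets, '10.0.1.7') == 'subnet-b'
assert pick_member_subnet_id(['pod-net'], nodes_subnets, '10.0.1.7') == 'pod-net'
```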


@@ -45,28 +45,55 @@ class TestNestedPodVIFDriver(test_base.TestCase):
         m_driver = mock.Mock(spec=cls)
         os_net = self.useFixture(k_fix.MockNetworkClient()).client

-        node_subnet_id = mock.sentinel.node_subnet_id
-        oslo_cfg.CONF.set_override('worker_nodes_subnet',
-                                   node_subnet_id,
+        node_subnet_id1 = 'node_subnet_id1'
+        node_subnet_id2 = 'node_subnet_id2'
+        oslo_cfg.CONF.set_override('worker_nodes_subnets',
+                                   [node_subnet_id2],
                                    group='pod_vif_nested')
         node_fixed_ip = mock.sentinel.node_fixed_ip

-        port = mock.sentinel.port
-        ports = (p for p in [port])
-        os_net.ports.return_value = ports
+        ports = [
+            mock.Mock(fixed_ips=[{'subnet_id': node_subnet_id1}]),
+            mock.Mock(fixed_ips=[{'subnet_id': node_subnet_id2}]),
+        ]
+        os_net.ports.return_value = iter(ports)

-        self.assertEqual(port, cls._get_parent_port_by_host_ip(
+        self.assertEqual(ports[1], cls._get_parent_port_by_host_ip(
             m_driver, node_fixed_ip))
-        fixed_ips = ['subnet_id=%s' % str(node_subnet_id),
-                     'ip_address=%s' % str(node_fixed_ip)]
+        fixed_ips = ['ip_address=%s' % str(node_fixed_ip)]
         os_net.ports.assert_called_once_with(fixed_ips=fixed_ips)

+    def test_get_parent_port_by_host_ip_multiple(self):
+        cls = nested_vif.NestedPodVIFDriver
+        m_driver = mock.Mock(spec=cls)
+        os_net = self.useFixture(k_fix.MockNetworkClient()).client
+
+        node_subnet_id1 = 'node_subnet_id1'
+        node_subnet_id2 = 'node_subnet_id2'
+        node_subnet_id3 = 'node_subnet_id3'
+        oslo_cfg.CONF.set_override('worker_nodes_subnets',
+                                   [node_subnet_id3, node_subnet_id2],
+                                   group='pod_vif_nested')
+        node_fixed_ip = mock.sentinel.node_fixed_ip
+
+        ports = [
+            mock.Mock(fixed_ips=[{'subnet_id': node_subnet_id1}]),
+            mock.Mock(fixed_ips=[{'subnet_id': node_subnet_id2}]),
+        ]
+        os_net.ports.return_value = (p for p in ports)
+
+        self.assertEqual(ports[1], cls._get_parent_port_by_host_ip(
+            m_driver, node_fixed_ip))
+        fixed_ips = ['ip_address=%s' % str(node_fixed_ip)]
+        os_net.ports.assert_called_with(fixed_ips=fixed_ips)
+
     def test_get_parent_port_by_host_ip_subnet_id_not_configured(self):
         cls = nested_vif.NestedPodVIFDriver
         m_driver = mock.Mock(spec=cls)
         self.useFixture(k_fix.MockNetworkClient()).client
-        oslo_cfg.CONF.set_override('worker_nodes_subnet',
+        oslo_cfg.CONF.set_override('worker_nodes_subnets',
                                    '',
                                    group='pod_vif_nested')
         node_fixed_ip = mock.sentinel.node_fixed_ip

@@ -79,10 +106,10 @@ class TestNestedPodVIFDriver(test_base.TestCase):
         m_driver = mock.Mock(spec=cls)
         os_net = self.useFixture(k_fix.MockNetworkClient()).client

-        node_subnet_id = mock.sentinel.node_subnet_id
-        oslo_cfg.CONF.set_override('worker_nodes_subnet',
-                                   node_subnet_id,
+        node_subnet_id = 'node_subnet_id'
+        oslo_cfg.CONF.set_override('worker_nodes_subnets',
+                                   [node_subnet_id],
                                    group='pod_vif_nested')
         node_fixed_ip = mock.sentinel.node_fixed_ip

@@ -93,6 +120,5 @@ class TestNestedPodVIFDriver(test_base.TestCase):
         self.assertRaises(kl_exc.NoResourceException,
                           cls._get_parent_port_by_host_ip, m_driver,
                           node_fixed_ip)
-        fixed_ips = ['subnet_id=%s' % str(node_subnet_id),
-                     'ip_address=%s' % str(node_fixed_ip)]
+        fixed_ips = ['ip_address=%s' % str(node_fixed_ip)]
         os_net.ports.assert_called_once_with(fixed_ips=fixed_ips)


@@ -190,8 +190,9 @@ class FakeLBaaSDriver(drv_base.LBaaSDriver):
         }


+@mock.patch('kuryr_kubernetes.utils.get_subnets_id_cidrs',
+            mock.Mock(return_value=[('id', 'cidr')]))
 class TestKuryrLoadBalancerHandler(test_base.TestCase):

     @mock.patch('kuryr_kubernetes.utils.get_subnet_cidr')
     @mock.patch('kuryr_kubernetes.controller.drivers.base.'
                 'ServiceProjectDriver.get_instance')


@@ -296,6 +296,24 @@ def get_subnet_cidr(subnet_id):
     return subnet_obj.cidr


+@MEMOIZE
+def get_subnets_id_cidrs(subnet_ids):
+    os_net = clients.get_network_client()
+    subnets = os_net.subnets()
+    cidrs = [(subnet.id, subnet.cidr) for subnet in subnets
+             if subnet.id in subnet_ids]
+    if len(cidrs) != len(subnet_ids):
+        existing = {subnet.id for subnet in subnets}
+        missing = set(subnet_ids) - existing
+        LOG.exception("CIDRs of subnets %s not found!", missing)
+        raise os_exc.ResourceNotFound()
+    return cidrs
+
+
+def get_subnets_cidrs(subnet_ids):
+    return [x[1] for x in get_subnets_id_cidrs(subnet_ids)]
+
+
 @MEMOIZE
 def get_subnetpool_version(subnetpool_id):
     os_net = clients.get_network_client()

@@ -571,7 +589,10 @@ def get_current_endpoints_target(ep, port, spec_ports, ep_name):
             spec_ports.get(port.get('name')))


-def is_ip_on_subnet(nodes_subnet, target_ip):
-    return (nodes_subnet and
-            (ipaddress.ip_address(target_ip) in
-             ipaddress.ip_network(nodes_subnet)))
+def get_subnet_by_ip(nodes_subnets, target_ip):
+    ip = ipaddress.ip_address(target_ip)
+    for nodes_subnet in nodes_subnets:
+        if ip in ipaddress.ip_network(nodes_subnet[1]):
+            return nodes_subnet
+
+    return None
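Since `get_subnet_by_ip` returns the whole `(id, cidr)` tuple rather than a boolean, callers can both branch on the match and recover the subnet ID. A quick usage sketch of the function above; the subnet IDs and CIDRs are made up:

```python
import ipaddress

def get_subnet_by_ip(nodes_subnets, target_ip):
    ip = ipaddress.ip_address(target_ip)
    for nodes_subnet in nodes_subnets:
        if ip in ipaddress.ip_network(nodes_subnet[1]):
            return nodes_subnet
    return None


nodes_subnets = [('subnet-a', '10.0.0.0/24'), ('subnet-b', 'fd00::/64')]

match = get_subnet_by_ip(nodes_subnets, 'fd00::5')
assert match == ('subnet-b', 'fd00::/64')  # works for IPv6 subnets too
assert get_subnet_by_ip(nodes_subnets, '192.0.2.1') is None
```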


@@ -0,0 +1,12 @@
+---
+features:
+  - |
+    Kuryr will now support nested mode with node VMs running in multiple
+    subnets. In order to use that functionality a new option
+    ``[pod_vif_nested]worker_nodes_subnets`` is introduced and accepts a
+    list of subnet IDs.
+deprecations:
+  - |
+    Option ``[pod_vif_nested]worker_nodes_subnet`` is deprecated in favor of
+    ``[pod_vif_nested]worker_nodes_subnets``, which accepts a list instead
+    of a single ID.


@@ -34,7 +34,7 @@ if [ -z $CONF_PATH ]; then
     pod_subnet_id=${KURYR_K8S_POD_SUBNET_ID}
     pod_sg=${KURYR_K8S_POD_SG}
     service_subnet_id=${KURYR_K8S_SERVICE_SUBNET_ID}
-    worker_nodes_subnet=${KURYR_K8S_WORKER_NODES_SUBNET}
+    worker_nodes_subnets=${KURYR_K8S_WORKER_NODES_SUBNETS:-${KURYR_K8S_WORKER_NODES_SUBNET}}
     binding_driver=${KURYR_K8S_BINDING_DRIVER:-kuryr.lib.binding.drivers.vlan}
     binding_iface=${KURYR_K8S_BINDING_IFACE:-eth0}
     pod_subnet_pool=${KURYR_NEUTRON_DEFAULT_SUBNETPOOL_ID}

@@ -86,7 +86,7 @@ EOF
 if [ ! -z $binding_driver ]; then
     cat >> $CONF_PATH << EOF
 [pod_vif_nested]
-worker_nodes_subnet = $worker_nodes_subnet
+worker_nodes_subnets = $worker_nodes_subnets
 [binding]
 driver = $binding_driver
 link_iface = $binding_iface