diff --git a/doc/source/installation/containerized.rst b/doc/source/installation/containerized.rst
index d0b276895..e55e00236 100644
--- a/doc/source/installation/containerized.rst
+++ b/doc/source/installation/containerized.rst
@@ -82,7 +82,7 @@ script. Below is the list of available variables:
 * ``$KURYR_K8S_POD_SUBNET_ID`` - ``[neutron_defaults]pod_subnet_id``
 * ``$KURYR_K8S_POD_SG`` - ``[neutron_defaults]pod_sg``
 * ``$KURYR_K8S_SERVICE_SUBNET_ID`` - ``[neutron_defaults]service_subnet_id``
-* ``$KURYR_K8S_WORKER_NODES_SUBNET`` - ``[pod_vif_nested]worker_nodes_subnet``
+* ``$KURYR_K8S_WORKER_NODES_SUBNETS`` - ``[pod_vif_nested]worker_nodes_subnets``
 * ``$KURYR_K8S_BINDING_DRIVER`` - ``[binding]driver`` (default: ``kuryr.lib.binding.drivers.vlan``)
 * ``$KURYR_K8S_BINDING_IFACE`` - ``[binding]link_iface`` (default: eth0)
diff --git a/doc/source/installation/devstack/nested-macvlan.rst b/doc/source/installation/devstack/nested-macvlan.rst
index c1fcfab8e..dcbe043ae 100644
--- a/doc/source/installation/devstack/nested-macvlan.rst
+++ b/doc/source/installation/devstack/nested-macvlan.rst
@@ -28,7 +28,7 @@ nested MACVLAN driver rather than VLAN and trunk ports.
   .. code-block:: ini

      [pod_vif_nested]
-     worker_nodes_subnet =
+     worker_nodes_subnets =

- Configure "pod_vif_driver" as "nested-macvlan":
diff --git a/doc/source/installation/devstack/nested-vlan.rst b/doc/source/installation/devstack/nested-vlan.rst
index 18e9c73cc..7c749e5b5 100644
--- a/doc/source/installation/devstack/nested-vlan.rst
+++ b/doc/source/installation/devstack/nested-vlan.rst
@@ -64,7 +64,7 @@ for the VM:
   .. code-block:: ini

      [pod_vif_nested]
-     worker_nodes_subnet =
+     worker_nodes_subnets =

- Configure binding section:
diff --git a/doc/source/nested_vlan_mode.rst b/doc/source/nested_vlan_mode.rst
index 079058b55..c32be36c9 100644
--- a/doc/source/nested_vlan_mode.rst
+++ b/doc/source/nested_vlan_mode.rst
@@ -51,8 +51,9 @@ You need to set several options in the kuryr.conf:
     vif_pool_driver = nested # If using port pools.

     [pod_vif_nested]
-    # ID of the subnet in which worker node VMs are running.
-    worker_nodes_subnet =
+    # IDs of the subnets in which worker node VMs are running (if there is
+    # more than one, separate them with commas).
+    worker_nodes_subnets =

 Also if you want to run several Kubernetes clusters in one OpenStack tenant
 you need to make sure Kuryr-Kubernetes instances are able to distinguish their own
diff --git a/kuryr_kubernetes/config.py b/kuryr_kubernetes/config.py
index d1f3b0403..cd3d34aec 100644
--- a/kuryr_kubernetes/config.py
+++ b/kuryr_kubernetes/config.py
@@ -268,9 +268,11 @@ cache_defaults = [
 ]

 nested_vif_driver_opts = [
-    cfg.StrOpt('worker_nodes_subnet',
-               help=_("Neutron subnet ID for k8s worker node vms."),
-               default=''),
+    cfg.ListOpt('worker_nodes_subnets',
+                help=_("Neutron subnet IDs for k8s worker node VMs."),
+                default=[],
+                deprecated_name='worker_nodes_subnet',
+                deprecated_group='pod_vif_nested'),
     cfg.IntOpt('rev_update_attempts',
                help=_("How many time to try to re-update the neutron resource "
                       "when revision has been changed by other thread"),
diff --git a/kuryr_kubernetes/controller/drivers/nested_vif.py b/kuryr_kubernetes/controller/drivers/nested_vif.py
index f22081925..7e5205d06 100644
--- a/kuryr_kubernetes/controller/drivers/nested_vif.py
+++ b/kuryr_kubernetes/controller/drivers/nested_vif.py
@@ -23,6 +23,7 @@ from kuryr_kubernetes import clients
 from kuryr_kubernetes.controller.drivers import neutron_vif


+CONF = oslo_cfg.CONF
 LOG = logging.getLogger(__name__)


@@ -32,26 +33,30 @@ class NestedPodVIFDriver(neutron_vif.NeutronPodVIFDriver,

     def _get_parent_port_by_host_ip(self, node_fixed_ip):
         os_net = clients.get_network_client()
-        node_subnet_id = oslo_cfg.CONF.pod_vif_nested.worker_nodes_subnet
-        if not node_subnet_id:
+        node_subnet_ids = oslo_cfg.CONF.pod_vif_nested.worker_nodes_subnets
+        if not node_subnet_ids:
             raise oslo_cfg.RequiredOptError(
-                'worker_nodes_subnet', oslo_cfg.OptGroup('pod_vif_nested'))
+                'worker_nodes_subnets', oslo_cfg.OptGroup('pod_vif_nested'))

+        fixed_ips = ['ip_address=%s' % str(node_fixed_ip)]
+        filters = {'fixed_ips': fixed_ips}
+        tags = CONF.neutron_defaults.resource_tags
+        if tags:
+            filters['tags'] = tags
         try:
-            fixed_ips = ['subnet_id=%s' % str(node_subnet_id),
-                         'ip_address=%s' % str(node_fixed_ip)]
-            ports = os_net.ports(fixed_ips=fixed_ips)
+            ports = os_net.ports(**filters)
         except os_exc.SDKException:
-            LOG.error("Parent vm port with fixed ips %s not found!",
-                      fixed_ips)
+            LOG.error("Parent VM port with fixed IPs %s not found!", fixed_ips)
             raise

-        try:
-            return next(ports)
-        except StopIteration:
-            LOG.error("Neutron port for vm port with fixed ips %s not found!",
-                      fixed_ips)
-            raise kl_exc.NoResourceException
+        for port in ports:
+            for fip in port.fixed_ips:
+                if fip.get('subnet_id') in node_subnet_ids:
+                    return port
+
+        LOG.error("Neutron port for VM port with fixed IPs %s not found!",
+                  fixed_ips)
+        raise kl_exc.NoResourceException()

     def _get_parent_port(self, pod):
         try:
diff --git a/kuryr_kubernetes/controller/drivers/network_policy.py b/kuryr_kubernetes/controller/drivers/network_policy.py
index 54a8f2d98..ee9d8eb9d 100644
--- a/kuryr_kubernetes/controller/drivers/network_policy.py
+++ b/kuryr_kubernetes/controller/drivers/network_policy.py
@@ -147,9 +147,9 @@ class NetworkPolicyDriver(base.NetworkPolicyDriver):
         if CONF.octavia_defaults.enforce_sg_rules:
             default_cidrs.append(utils.get_subnet_cidr(
                 CONF.neutron_defaults.service_subnet))
-        worker_subnet_id = CONF.pod_vif_nested.worker_nodes_subnet
-        if worker_subnet_id:
-            default_cidrs.append(utils.get_subnet_cidr(worker_subnet_id))
+        worker_subnet_ids = CONF.pod_vif_nested.worker_nodes_subnets
+        default_cidrs.extend(utils.get_subnets_cidrs(worker_subnet_ids))
+
         for cidr in default_cidrs:
             ethertype = constants.IPv4
             if ipaddress.ip_network(cidr).version == constants.IP_VERSION_6:
diff --git a/kuryr_kubernetes/controller/handlers/loadbalancer.py b/kuryr_kubernetes/controller/handlers/loadbalancer.py
index 66244ab61..e230f98af 100644
--- a/kuryr_kubernetes/controller/handlers/loadbalancer.py
+++ b/kuryr_kubernetes/controller/handlers/loadbalancer.py
@@ -50,8 +50,8 @@ class KuryrLoadBalancerHandler(k8s_base.ResourceEventHandler):
         self._drv_service_pub_ip = drv_base.ServicePubIpDriver.get_instance()
         self._drv_svc_project = drv_base.ServiceProjectDriver.get_instance()
         self._drv_sg = drv_base.ServiceSecurityGroupsDriver.get_instance()
-        self._nodes_subnet = utils.get_subnet_cidr(
-            CONF.pod_vif_nested.worker_nodes_subnet)
+        self._nodes_subnets = utils.get_subnets_id_cidrs(
+            CONF.pod_vif_nested.worker_nodes_subnets)

     def on_present(self, loadbalancer_crd):
         if self._should_ignore(loadbalancer_crd):
@@ -261,8 +261,8 @@ class KuryrLoadBalancerHandler(k8s_base.ResourceEventHandler):
             target_namespace = target_ref['namespace']
         # Avoid to point to a Pod on hostNetwork
         # that isn't the one to be added as Member.
-        if not target_ref and utils.is_ip_on_subnet(
-                self._nodes_subnet, target_ip):
+        if not target_ref and utils.get_subnet_by_ip(
+                self._nodes_subnets, target_ip):
             target_pod = {}
         else:
             target_pod = utils.get_pod_by_ip(
@@ -357,10 +357,11 @@ class KuryrLoadBalancerHandler(k8s_base.ResourceEventHandler):
         if (CONF.octavia_defaults.member_mode ==
                 k_const.OCTAVIA_L2_MEMBER_MODE):
             if target_pod:
-                subnet_id = self._get_pod_subnet(
-                    target_pod, target_ip)
-            elif utils.is_ip_on_subnet(self._nodes_subnet, target_ip):
-                subnet_id = CONF.pod_vif_nested.worker_nodes_subnet
+                subnet_id = self._get_pod_subnet(target_pod, target_ip)
+            else:
+                subnet = utils.get_subnet_by_ip(self._nodes_subnets, target_ip)
+                if subnet:
+                    subnet_id = subnet[0]
         else:
             # We use the service subnet id so that the connectivity
             # from VIP to pods happens in layer 3 mode, i.e.,
@@ -377,10 +378,16 @@ class KuryrLoadBalancerHandler(k8s_base.ResourceEventHandler):
         if subnet_ids:
             return subnet_ids[0]
         else:
-            # NOTE(ltomasbo): We are assuming that if ip is not on the
-            # pod subnet is because the member is using hostnetworking. In
-            # this worker_nodes_subnet will be used
-            return config.CONF.pod_vif_nested.worker_nodes_subnet
+            # NOTE(ltomasbo): We are assuming that if IP is not on the
+            # pod subnet it's because the member is using hostNetworking. In
+            # this case we look for the IP in worker_nodes_subnets.
+            subnet = utils.get_subnet_by_ip(self._nodes_subnets, ip)
+            if subnet:
+                return subnet[0]
+            else:
+                # This shouldn't ever happen, but let's return just the first
+                # of the worker_nodes_subnets IDs.
+                return self._nodes_subnets[0][0]

     def _get_port_in_pool(self, pool, loadbalancer_crd):
diff --git a/kuryr_kubernetes/tests/unit/controller/drivers/test_nested_vif.py b/kuryr_kubernetes/tests/unit/controller/drivers/test_nested_vif.py
index 0d09fb160..35419c1c7 100644
--- a/kuryr_kubernetes/tests/unit/controller/drivers/test_nested_vif.py
+++ b/kuryr_kubernetes/tests/unit/controller/drivers/test_nested_vif.py
@@ -45,28 +45,55 @@ class TestNestedPodVIFDriver(test_base.TestCase):
         m_driver = mock.Mock(spec=cls)
         os_net = self.useFixture(k_fix.MockNetworkClient()).client

-        node_subnet_id = mock.sentinel.node_subnet_id
-        oslo_cfg.CONF.set_override('worker_nodes_subnet',
-                                   node_subnet_id,
+        node_subnet_id1 = 'node_subnet_id1'
+        node_subnet_id2 = 'node_subnet_id2'
+        oslo_cfg.CONF.set_override('worker_nodes_subnets',
+                                   [node_subnet_id2],
                                    group='pod_vif_nested')

         node_fixed_ip = mock.sentinel.node_fixed_ip

-        port = mock.sentinel.port
-        ports = (p for p in [port])
-        os_net.ports.return_value = ports
+        ports = [
+            mock.Mock(fixed_ips=[{'subnet_id': node_subnet_id1}]),
+            mock.Mock(fixed_ips=[{'subnet_id': node_subnet_id2}]),
+        ]
+        os_net.ports.return_value = iter(ports)

-        self.assertEqual(port, cls._get_parent_port_by_host_ip(
+        self.assertEqual(ports[1], cls._get_parent_port_by_host_ip(
             m_driver, node_fixed_ip))
-        fixed_ips = ['subnet_id=%s' % str(node_subnet_id),
-                     'ip_address=%s' % str(node_fixed_ip)]
+        fixed_ips = ['ip_address=%s' % str(node_fixed_ip)]
         os_net.ports.assert_called_once_with(fixed_ips=fixed_ips)

+    def test_get_parent_port_by_host_ip_multiple(self):
+        cls = nested_vif.NestedPodVIFDriver
+        m_driver = mock.Mock(spec=cls)
+        os_net = self.useFixture(k_fix.MockNetworkClient()).client
+
+        node_subnet_id1 = 'node_subnet_id1'
+        node_subnet_id2 = 'node_subnet_id2'
+        node_subnet_id3 = 'node_subnet_id3'
+        oslo_cfg.CONF.set_override('worker_nodes_subnets',
+                                   [node_subnet_id3, node_subnet_id2],
+                                   group='pod_vif_nested')
+
+        node_fixed_ip = mock.sentinel.node_fixed_ip
+
+        ports = [
+            mock.Mock(fixed_ips=[{'subnet_id': node_subnet_id1}]),
+            mock.Mock(fixed_ips=[{'subnet_id': node_subnet_id2}]),
+        ]
+        os_net.ports.return_value = (p for p in ports)
+
+        self.assertEqual(ports[1], cls._get_parent_port_by_host_ip(
+            m_driver, node_fixed_ip))
+        fixed_ips = ['ip_address=%s' % str(node_fixed_ip)]
+        os_net.ports.assert_called_with(fixed_ips=fixed_ips)
+
     def test_get_parent_port_by_host_ip_subnet_id_not_configured(self):
         cls = nested_vif.NestedPodVIFDriver
         m_driver = mock.Mock(spec=cls)
         self.useFixture(k_fix.MockNetworkClient()).client
-        oslo_cfg.CONF.set_override('worker_nodes_subnet',
+        oslo_cfg.CONF.set_override('worker_nodes_subnets',
                                    '',
                                    group='pod_vif_nested')

         node_fixed_ip = mock.sentinel.node_fixed_ip
@@ -79,10 +106,10 @@ class TestNestedPodVIFDriver(test_base.TestCase):
         m_driver = mock.Mock(spec=cls)
         os_net = self.useFixture(k_fix.MockNetworkClient()).client

-        node_subnet_id = mock.sentinel.node_subnet_id
+        node_subnet_id = 'node_subnet_id'

-        oslo_cfg.CONF.set_override('worker_nodes_subnet',
-                                   node_subnet_id,
+        oslo_cfg.CONF.set_override('worker_nodes_subnets',
+                                   [node_subnet_id],
                                    group='pod_vif_nested')

         node_fixed_ip = mock.sentinel.node_fixed_ip
@@ -93,6 +120,5 @@ class TestNestedPodVIFDriver(test_base.TestCase):

         self.assertRaises(kl_exc.NoResourceException,
                           cls._get_parent_port_by_host_ip,
                           m_driver, node_fixed_ip)
-        fixed_ips = ['subnet_id=%s' % str(node_subnet_id),
-                     'ip_address=%s' % str(node_fixed_ip)]
+        fixed_ips = ['ip_address=%s' % str(node_fixed_ip)]
         os_net.ports.assert_called_once_with(fixed_ips=fixed_ips)
diff --git a/kuryr_kubernetes/tests/unit/controller/handlers/test_loadbalancer.py b/kuryr_kubernetes/tests/unit/controller/handlers/test_loadbalancer.py
index 7b48a6046..59705dbd1 100644
--- a/kuryr_kubernetes/tests/unit/controller/handlers/test_loadbalancer.py
+++ b/kuryr_kubernetes/tests/unit/controller/handlers/test_loadbalancer.py
@@ -190,8 +190,9 @@ class FakeLBaaSDriver(drv_base.LBaaSDriver):
         }


+@mock.patch('kuryr_kubernetes.utils.get_subnets_id_cidrs',
+            mock.Mock(return_value=[('id', 'cidr')]))
 class TestKuryrLoadBalancerHandler(test_base.TestCase):

-    @mock.patch('kuryr_kubernetes.utils.get_subnet_cidr')
     @mock.patch('kuryr_kubernetes.controller.drivers.base.'
                 'ServiceProjectDriver.get_instance')
diff --git a/kuryr_kubernetes/utils.py b/kuryr_kubernetes/utils.py
index 7f73483f9..23aae679c 100644
--- a/kuryr_kubernetes/utils.py
+++ b/kuryr_kubernetes/utils.py
@@ -296,6 +296,24 @@ def get_subnet_cidr(subnet_id):
     return subnet_obj.cidr


+@MEMOIZE
+def get_subnets_id_cidrs(subnet_ids):
+    os_net = clients.get_network_client()
+    subnets = os_net.subnets()
+    cidrs = [(subnet.id, subnet.cidr) for subnet in subnets
+             if subnet.id in subnet_ids]
+    if len(cidrs) != len(subnet_ids):
+        existing = {subnet.id for subnet in subnets}
+        missing = set(subnet_ids) - existing
+        LOG.error("CIDRs of subnets %s not found!", missing)
+        raise os_exc.ResourceNotFound()
+    return cidrs
+
+
+def get_subnets_cidrs(subnet_ids):
+    return [x[1] for x in get_subnets_id_cidrs(subnet_ids)]
+
+
 @MEMOIZE
 def get_subnetpool_version(subnetpool_id):
     os_net = clients.get_network_client()
@@ -571,7 +589,10 @@ def get_current_endpoints_target(ep, port, spec_ports, ep_name):
             spec_ports.get(port.get('name')))


-def is_ip_on_subnet(nodes_subnet, target_ip):
-    return (nodes_subnet and
-            (ipaddress.ip_address(target_ip) in
-             ipaddress.ip_network(nodes_subnet)))
+def get_subnet_by_ip(nodes_subnets, target_ip):
+    ip = ipaddress.ip_address(target_ip)
+    for nodes_subnet in nodes_subnets:
+        if ip in ipaddress.ip_network(nodes_subnet[1]):
+            return nodes_subnet
+
+    return None
diff --git a/releasenotes/notes/deprecate-worker-nodes-subnet-e452c84df5b5ed5c.yaml b/releasenotes/notes/deprecate-worker-nodes-subnet-e452c84df5b5ed5c.yaml
new file mode 100644
index 000000000..a4cfcca0e
--- /dev/null
+++ b/releasenotes/notes/deprecate-worker-nodes-subnet-e452c84df5b5ed5c.yaml
@@ -0,0 +1,12 @@
+---
+features:
+  - |
+    Kuryr now supports nested mode with node VMs running in multiple
+    subnets. To use that functionality, a new option
+    `[pod_vif_nested]worker_nodes_subnets` is introduced; it accepts a list
+    of subnet IDs.
+deprecations:
+  - |
+    Option `[pod_vif_nested]worker_nodes_subnet` is deprecated in favor of
+    `[pod_vif_nested]worker_nodes_subnets`, which accepts a list instead of
+    a single ID.
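
The backwards compatibility promised in the release note comes from the
deprecated_name/deprecated_group arguments on the new ListOpt in config.py.
A minimal sketch of how oslo.config resolves the old key, assuming a
kuryr.conf in the working directory that still sets it (the file path is a
placeholder, not part of the patch):

    # sketch: an old single-ID option is read into the new list option
    from oslo_config import cfg

    opts = [
        cfg.ListOpt('worker_nodes_subnets',
                    default=[],
                    deprecated_name='worker_nodes_subnet',
                    deprecated_group='pod_vif_nested'),
    ]
    conf = cfg.ConfigOpts()
    conf.register_opts(opts, group='pod_vif_nested')
    conf(['--config-file', 'kuryr.conf'])  # may still set worker_nodes_subnet
    # A single ID set under the deprecated name arrives as a one-element list:
    print(conf.pod_vif_nested.worker_nodes_subnets)
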
diff --git a/tools/generate_k8s_resource_definitions.sh b/tools/generate_k8s_resource_definitions.sh
index b5199570f..89583dbc8 100755
--- a/tools/generate_k8s_resource_definitions.sh
+++ b/tools/generate_k8s_resource_definitions.sh
@@ -34,7 +34,7 @@ if [ -z $CONF_PATH ]; then
     pod_subnet_id=${KURYR_K8S_POD_SUBNET_ID}
     pod_sg=${KURYR_K8S_POD_SG}
     service_subnet_id=${KURYR_K8S_SERVICE_SUBNET_ID}
-    worker_nodes_subnet=${KURYR_K8S_WORKER_NODES_SUBNET}
+    worker_nodes_subnets=${KURYR_K8S_WORKER_NODES_SUBNETS:-${KURYR_K8S_WORKER_NODES_SUBNET}}
     binding_driver=${KURYR_K8S_BINDING_DRIVER:-kuryr.lib.binding.drivers.vlan}
     binding_iface=${KURYR_K8S_BINDING_IFACE:-eth0}
     pod_subnet_pool=${KURYR_NEUTRON_DEFAULT_SUBNETPOOL_ID}
@@ -86,7 +86,7 @@ EOF
 if [ ! -z $binding_driver ]; then
     cat >> $CONF_PATH << EOF
 [pod_vif_nested]
-worker_nodes_subnet = $worker_nodes_subnet
+worker_nodes_subnets = $worker_nodes_subnets
 [binding]
 driver = $binding_driver
 link_iface = $binding_iface
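
For reference, a standalone sketch of the matching logic behind the new
utils.get_subnet_by_ip() helper added above; the subnet IDs and CIDRs below
are made up for illustration:

    import ipaddress

    def get_subnet_by_ip(nodes_subnets, target_ip):
        # nodes_subnets is a list of (subnet_id, cidr) tuples, as returned
        # by get_subnets_id_cidrs(); the first subnet containing the IP wins.
        ip = ipaddress.ip_address(target_ip)
        for nodes_subnet in nodes_subnets:
            if ip in ipaddress.ip_network(nodes_subnet[1]):
                return nodes_subnet
        return None

    subnets = [('subnet-a', '10.0.0.0/26'), ('subnet-b', '10.0.0.64/26')]
    print(get_subnet_by_ip(subnets, '10.0.0.70'))    # ('subnet-b', '10.0.0.64/26')
    print(get_subnet_by_ip(subnets, '192.168.0.1'))  # None

Returning the whole (id, cidr) tuple is what lets the loadbalancer handler
pick the member's subnet ID directly (subnet[0]) instead of assuming a single
worker_nodes_subnet as before.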