diff --git a/lower-constraints.txt b/lower-constraints.txt
index f671fa08af7..4baf3d96887 100644
--- a/lower-constraints.txt
+++ b/lower-constraints.txt
@@ -75,7 +75,7 @@ oslo.rootwrap==5.8.0
 oslo.serialization==2.25.0
 oslo.service==1.24.0
 oslo.upgradecheck==0.1.0
-oslo.utils==3.36.0
+oslo.utils==4.4.0
 oslo.versionedobjects==1.35.1
 oslotest==3.2.0
 osprofiler==2.3.0
diff --git a/neutron/agent/metadata/agent.py b/neutron/agent/metadata/agent.py
index 7ea1968066c..d3a68dff4a7 100644
--- a/neutron/agent/metadata/agent.py
+++ b/neutron/agent/metadata/agent.py
@@ -27,6 +27,7 @@ from oslo_log import log as logging
 import oslo_messaging
 from oslo_service import loopingcall
 from oslo_utils import encodeutils
+from oslo_utils import netutils
 import requests
 import webob
 
@@ -108,18 +109,26 @@ class MetadataProxyHandler(object):
             return webob.exc.HTTPInternalServerError(explanation=explanation)
 
     def _get_ports_from_server(self, router_id=None, ip_address=None,
-                               networks=None):
+                               networks=None, mac_address=None):
         """Get ports from server."""
-        filters = self._get_port_filters(router_id, ip_address, networks)
+        filters = self._get_port_filters(
+            router_id, ip_address, networks, mac_address)
         return self.plugin_rpc.get_ports(self.context, filters)
 
     def _get_port_filters(self, router_id=None, ip_address=None,
-                          networks=None):
+                          networks=None, mac_address=None):
         filters = {}
         if router_id:
             filters['device_id'] = [router_id]
             filters['device_owner'] = constants.ROUTER_INTERFACE_OWNERS
-        if ip_address:
+        # We either get an IP assigned (and therefore known) by neutron
+        # via X-Forwarded-For or that header contained a link-local
+        # IPv6 address of which neutron only knows the MAC address encoded
+        # in it. In the latter case the IPv6 address in X-Forwarded-For
+        # is not a fixed ip of the port.
+        if mac_address:
+            filters['mac_address'] = [mac_address]
+        elif ip_address:
             filters['fixed_ips'] = {'ip_address': [ip_address]}
         if networks:
             filters['network_id'] = networks
@@ -134,7 +143,8 @@ class MetadataProxyHandler(object):
 
     @cache.cache_method_results
     def _get_ports_for_remote_address(self, remote_address, networks,
-                                      skip_cache=False):
+                                      skip_cache=False,
+                                      remote_mac=None):
         """Get list of ports that has given ip address and are part of
         given networks.
 
@@ -144,10 +154,11 @@ class MetadataProxyHandler(object):
 
         """
         return self._get_ports_from_server(networks=networks,
-                                           ip_address=remote_address)
+                                           ip_address=remote_address,
+                                           mac_address=remote_mac)
 
     def _get_ports(self, remote_address, network_id=None, router_id=None,
-                   skip_cache=False):
+                   skip_cache=False, remote_mac=None):
         """Search for all ports that contain passed ip address and belongs to
         given network.
 
@@ -167,7 +178,8 @@ class MetadataProxyHandler(object):
                               " must be passed to _get_ports method."))
 
         return self._get_ports_for_remote_address(remote_address, networks,
-                                                  skip_cache=skip_cache)
+                                                  skip_cache=skip_cache,
+                                                  remote_mac=remote_mac)
 
     def _get_instance_and_tenant_id(self, req, skip_cache=False):
         forwarded_for = req.headers.get('X-Forwarded-For')
@@ -181,15 +193,23 @@ class MetadataProxyHandler(object):
                       "dropping")
             return None, None
 
+        remote_mac = None
         remote_ip = netaddr.IPAddress(forwarded_for)
         if remote_ip.version == constants.IP_VERSION_6:
             if remote_ip.is_ipv4_mapped():
                 # When haproxy listens on v4 AND v6 then it inserts ipv4
                 # addresses as ipv4-mapped v6 addresses into X-Forwarded-For.
                 forwarded_for = str(remote_ip.ipv4())
+            if remote_ip.is_link_local():
+                # When haproxy sees an ipv6 link-local client address
+                # (and sends that to us in X-Forwarded-For) we must rely
+                # on the EUI encoded in it, because that's all we can
+                # recognize.
+                remote_mac = str(netutils.get_mac_addr_by_ipv6(remote_ip))
 
-        ports = self._get_ports(forwarded_for, network_id, router_id,
-                                skip_cache=skip_cache)
+        ports = self._get_ports(
+            forwarded_for, network_id, router_id,
+            skip_cache=skip_cache, remote_mac=remote_mac)
         LOG.debug("Gotten ports for remote_address %(remote_address)s, "
                   "network_id %(network_id)s, router_id %(router_id)s are: "
                   "%(ports)s",
diff --git a/neutron/tests/unit/agent/metadata/test_agent.py b/neutron/tests/unit/agent/metadata/test_agent.py
index 2848da7051f..f23fa8e74ff 100644
--- a/neutron/tests/unit/agent/metadata/test_agent.py
+++ b/neutron/tests/unit/agent/metadata/test_agent.py
@@ -15,6 +15,7 @@
 from unittest import mock
 
 import ddt
+import netaddr
 from neutron_lib import constants as n_const
 import testtools
 import webob
@@ -22,6 +23,7 @@ import webob
 from oslo_config import cfg
 from oslo_config import fixture as config_fixture
 from oslo_utils import fileutils
+from oslo_utils import netutils
 
 from neutron.agent.linux import utils as agent_utils
 from neutron.agent.metadata import agent
@@ -84,6 +86,18 @@ class TestMetadataProxyHandlerRpc(TestMetadataProxyHandlerBase):
         actual = self.handler._get_port_filters(router_id, ip, networks)
         self.assertEqual(expected, actual)
 
+    def test_get_port_filters_mac(self):
+        router_id = 'test_router_id'
+        networks = ('net_id1', 'net_id2')
+        mac = '11:22:33:44:55:66'
+        expected = {'device_id': [router_id],
+                    'device_owner': n_const.ROUTER_INTERFACE_OWNERS,
+                    'network_id': networks,
+                    'mac_address': [mac]}
+        actual = self.handler._get_port_filters(
+            router_id=router_id, networks=networks, mac_address=mac)
+        self.assertEqual(expected, actual)
+
     def test_get_router_networks(self):
         router_id = 'router-id'
         expected = ('network_id1', 'network_id2')
@@ -215,6 +229,7 @@ class _TestMetadataProxyHandlerCacheMixin(object):
                                             router_id)
             mock_get_ip_addr.assert_called_once_with(remote_address, networks,
+                                                     remote_mac=None,
                                                      skip_cache=False)
             self.assertFalse(mock_get_router_networks.called)
 
         self.assertEqual(expected, ports)
@@ -237,7 +252,7 @@ class _TestMetadataProxyHandlerCacheMixin(object):
             mock_get_router_networks.assert_called_once_with(
                 router_id, skip_cache=False)
             mock_get_ip_addr.assert_called_once_with(
-                remote_address, networks, skip_cache=False)
+                remote_address, networks, remote_mac=None, skip_cache=False)
         self.assertEqual(expected, ports)
 
     def test_get_ports_no_id(self):
@@ -269,19 +284,29 @@ class _TestMetadataProxyHandlerCacheMixin(object):
             )
         )
 
-        expected.append(
-            mock.call(
-                mock.ANY,
-                {'network_id': networks,
-                 'fixed_ips': {'ip_address': ['192.168.1.1']}}
+        remote_ip = netaddr.IPAddress(remote_address)
+        if remote_ip.is_link_local():
+            expected.append(
+                mock.call(
+                    mock.ANY,
+                    {'network_id': networks,
+                     'mac_address': [netutils.get_mac_addr_by_ipv6(remote_ip)]}
+                )
+            )
+        else:
+            expected.append(
+                mock.call(
+                    mock.ANY,
+                    {'network_id': networks,
+                     'fixed_ips': {'ip_address': ['192.168.1.1']}}
+                )
             )
-        )
 
         self.handler.plugin_rpc.get_ports.assert_has_calls(expected)
 
         return (instance_id, tenant_id)
 
-    @ddt.data('192.168.1.1', '::ffff:192.168.1.1')
+    @ddt.data('192.168.1.1', '::ffff:192.168.1.1', 'fe80::5054:ff:fede:5bbf')
     def test_get_instance_id_router_id(self, remote_address):
         router_id = 'the_id'
         headers = {
@@ -302,7 +327,7 @@ class _TestMetadataProxyHandlerCacheMixin(object):
             remote_address=remote_address)
         )
 
-    @ddt.data('192.168.1.1', '::ffff:192.168.1.1')
+    @ddt.data('192.168.1.1', '::ffff:192.168.1.1', 'fe80::5054:ff:fede:5bbf')
     def test_get_instance_id_router_id_no_match(self, remote_address):
         router_id = 'the_id'
         headers = {
@@ -321,7 +346,7 @@ class _TestMetadataProxyHandlerCacheMixin(object):
             remote_address=remote_address)
         )
 
-    @ddt.data('192.168.1.1', '::ffff:192.168.1.1')
+    @ddt.data('192.168.1.1', '::ffff:192.168.1.1', 'fe80::5054:ff:fede:5bbf')
     def test_get_instance_id_network_id(self, remote_address):
         network_id = 'the_id'
         headers = {
@@ -341,7 +366,7 @@ class _TestMetadataProxyHandlerCacheMixin(object):
             remote_address=remote_address)
         )
 
-    @ddt.data('192.168.1.1', '::ffff:192.168.1.1')
+    @ddt.data('192.168.1.1', '::ffff:192.168.1.1', 'fe80::5054:ff:fede:5bbf')
     def test_get_instance_id_network_id_no_match(self, remote_address):
         network_id = 'the_id'
         headers = {
@@ -357,7 +382,7 @@ class _TestMetadataProxyHandlerCacheMixin(object):
             remote_address=remote_address)
         )
 
-    @ddt.data('192.168.1.1', '::ffff:192.168.1.1')
+    @ddt.data('192.168.1.1', '::ffff:192.168.1.1', 'fe80::5054:ff:fede:5bbf')
     def test_get_instance_id_network_id_and_router_id_invalid(
             self, remote_address):
         network_id = 'the_nid'
diff --git a/releasenotes/notes/metadata-ipv6-b3607f6932da7226.yaml b/releasenotes/notes/metadata-ipv6-b3607f6932da7226.yaml
new file mode 100644
index 00000000000..af223c32f55
--- /dev/null
+++ b/releasenotes/notes/metadata-ipv6-b3607f6932da7226.yaml
@@ -0,0 +1,39 @@
+---
+features:
+  - |
+    Make the metadata service available over the IPv6 link-local
+    address ``fe80::a9fe:a9fe``. Metadata over IPv6 works on both
+    isolated networks and networks with an IPv6 subnet connected
+    to a Neutron router as well as on dual-stack and on IPv6-only
+    networks. There are no new config options. The usual config
+    options (``enable_isolated_metadata``, ``force_metadata``,
+    ``enable_metadata_proxy``) now control the metadata service over
+    both IPv4 and IPv6. This change only affects the guests' access to
+    the metadata service over tenant networks. This feature changes
+    nothing about how the metadata-agent talks to Nova's metadata service.
+    The guest OS is expected to pick up routes from Router Advertisements
+    for this feature to work on networks connected to a router.
+    At least the following IPv6 subnet modes work:
+
+    * ``--ipv6-ra-mode slaac --ipv6-address-mode slaac``
+    * ``--ipv6-ra-mode dhcpv6-stateless --ipv6-address-mode dhcpv6-stateless``
+    * ``--ipv6-ra-mode dhcpv6-stateful --ipv6-address-mode dhcpv6-stateful``
+
+    Please note that the metadata IPv6 address (being link-local)
+    is not complete without a zone identifier (in a Linux guest
+    that is usually the interface name concatenated after a percent
+    sign). Please also note that in URLs you should URL-encode
+    the percent sign itself. For example, assuming that the primary
+    network interface in the guest is ``eth0`` the base metadata URL is
+    ``http://[fe80::a9fe:a9fe%25eth0]:80/``.
+upgrade:
+  - |
+    The metadata over IPv6 feature makes each dhcp-agent restart
+    trigger a quick restart of dhcp-agent-controlled metadata-proxies,
+    so they can pick up their new config making them also bind to
+    ``fe80::a9fe:a9fe``. These restarts make the metadata service
+    transiently unavailable. This is done in order to enable the metadata
+    service on pre-existing isolated networks during an upgrade. Please
+    also note that pre-existing instances may need to re-acquire all
+    information acquired over Router Discovery and/or DHCP for this
+    feature to start working.
diff --git a/requirements.txt b/requirements.txt
index f476931b35a..92724b34922 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -40,7 +40,7 @@ oslo.rootwrap>=5.8.0 # Apache-2.0
 oslo.serialization>=2.25.0 # Apache-2.0
 oslo.service!=1.28.1,>=1.24.0 # Apache-2.0
 oslo.upgradecheck>=0.1.0 # Apache-2.0
-oslo.utils>=3.36.0 # Apache-2.0
+oslo.utils>=4.4.0 # Apache-2.0
 oslo.versionedobjects>=1.35.1 # Apache-2.0
 osprofiler>=2.3.0 # Apache-2.0
 os-ken >= 0.3.0 # Apache-2.0
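
Note for reviewers: below is a minimal standalone sketch (not part of the patch) of the lookup-key derivation the new ``mac_address`` filter relies on. A link-local IPv6 client address seen in X-Forwarded-For cannot be matched against a port's fixed IPs, so the agent recovers the MAC embedded in the EUI-64 interface identifier via ``oslo_utils.netutils.get_mac_addr_by_ipv6`` (the reason for the oslo.utils>=4.4.0 bump). The address below is the one used in the new test cases; the expected MAC in the comment assumes the function's default MAC dialect.

```python
# Illustrative only, not part of the patch: mirrors the filter selection
# added to MetadataProxyHandler._get_port_filters().
import netaddr
from oslo_utils import netutils  # get_mac_addr_by_ipv6 needs oslo.utils >= 4.4.0

remote_ip = netaddr.IPAddress('fe80::5054:ff:fede:5bbf')  # address used in the tests

if remote_ip.version == 6 and remote_ip.is_link_local():
    # EUI-64 reversal: drop the embedded ff:fe and flip the universal/local bit
    remote_mac = str(netutils.get_mac_addr_by_ipv6(remote_ip))
    filters = {'mac_address': [remote_mac]}   # expected: ['52:54:00:de:5b:bf']
else:
    filters = {'fixed_ips': {'ip_address': [str(remote_ip)]}}

print(filters)
```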
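
Likewise, a small guest-side illustration (hypothetical, assuming ``eth0`` is the guest's primary interface) of the URL format described in the release note: the link-local metadata address needs a zone identifier, and the ``%`` separator itself has to be percent-encoded when it is placed in a URL.

```python
# Building the metadata base URL from inside a guest, per the release note.
from urllib.parse import quote

METADATA_V6_ADDR = 'fe80::a9fe:a9fe'
iface = 'eth0'  # assumption: the guest's primary network interface name

# quote('%') == '%25', so the zone-id separator ends up URL-encoded
base_url = 'http://[{}{}{}]:80/'.format(METADATA_V6_ADDR, quote('%'), iface)
print(base_url)  # http://[fe80::a9fe:a9fe%25eth0]:80/
```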