[OVN] Move OVN metadata agent

This patch imports the OVN metadata agent code from networking-ovn.

Previous paths in networking-ovn tree:
./networking_ovn/agent/metadata -> ./neutron/agent/ovn/metadata
./networking_ovn/conf/agent/metadata -> ./neutron/conf/agent/ovn/metadata.py

Change-Id: If0c46909ec0d83df1be98300badeb36753b1002b
Related-Blueprint: neutron-ovn-merge
Signed-off-by: Lucas Alvares Gomes <lucasagomes@gmail.com>
Author: Lucas Alvares Gomes
Date: 2019-11-28 11:38:14 +00:00
Commit: 88153e7013 (parent: fdb3f05055)
17 changed files with 1775 additions and 0 deletions
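
For consumers of these modules, the move amounts to an import path change
along these lines (illustrative sketch following the path mapping above):

    # networking-ovn tree (old):
    from networking_ovn.agent.metadata import agent
    # neutron tree (new, as used by the files below):
    from neutron.agent.ovn.metadata import agent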

@@ -0,0 +1,465 @@
# Copyright 2017 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import re
from neutron.agent.linux import external_process
from neutron.agent.linux import ip_lib
from neutron.agent.ovn.metadata import driver as metadata_driver
from neutron.agent.ovn.metadata import ovsdb
from neutron.agent.ovn.metadata import server as metadata_server
from neutron.common.ovn import constants as ovn_const
from neutron.common import utils
from neutron.conf.plugins.ml2.drivers.ovn import ovn_conf as config
from neutron_lib import constants as n_const
from oslo_concurrency import lockutils
from oslo_log import log
from oslo_utils import uuidutils
from ovsdbapp.backend.ovs_idl import event as row_event
from ovsdbapp.backend.ovs_idl import vlog
import six
LOG = log.getLogger(__name__)
_SYNC_STATE_LOCK = lockutils.ReaderWriterLock()
CHASSIS_METADATA_LOCK = 'chassis_metadata_lock'
NS_PREFIX = 'ovnmeta-'
MAC_PATTERN = re.compile(r'([0-9A-F]{2}[:-]){5}([0-9A-F]{2})', re.I)
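# (Matches a MAC address such as 'aa:bb:cc:dd:ee:ff', with ':' or '-'
# separators, case-insensitive.)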
OVN_VIF_PORT_TYPES = ("", "external", )
MetadataPortInfo = collections.namedtuple('MetadataPortInfo', ['mac',
'ip_addresses'])
def _sync_lock(f):
"""Decorator to block all operations for a global sync call."""
@six.wraps(f)
def wrapped(*args, **kwargs):
with _SYNC_STATE_LOCK.write_lock():
return f(*args, **kwargs)
return wrapped
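# NOTE: sync() and resync() below take the writer side of this lock via
# @_sync_lock, while PortBindingChassisEvent.run() takes the reader side,
# so a full sync and per-port datapath updates never run concurrently.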
class ConfigException(Exception):
"""Misconfiguration of the agent
This exception is raised when the agent detects a wrong configuration.
Typically the agent should resync when this is raised.
"""
class PortBindingChassisEvent(row_event.RowEvent):
def __init__(self, metadata_agent):
self.agent = metadata_agent
table = 'Port_Binding'
events = (self.ROW_UPDATE,)
super(PortBindingChassisEvent, self).__init__(
events, table, None)
self.event_name = self.__class__.__name__
def run(self, event, row, old):
# Check if the port has been bound to or unbound from our chassis and
# update the metadata namespace accordingly.
resync = False
if row.type not in OVN_VIF_PORT_TYPES:
return
new_chassis = getattr(row, 'chassis', [])
old_chassis = getattr(old, 'chassis', [])
with _SYNC_STATE_LOCK.read_lock():
try:
if new_chassis and new_chassis[0].name == self.agent.chassis:
LOG.info("Port %s in datapath %s bound to our chassis",
row.logical_port, str(row.datapath.uuid))
self.agent.update_datapath(str(row.datapath.uuid))
elif old_chassis and old_chassis[0].name == self.agent.chassis:
LOG.info("Port %s in datapath %s unbound from our chassis",
row.logical_port, str(row.datapath.uuid))
self.agent.update_datapath(str(row.datapath.uuid))
except ConfigException:
# We're now in reader lock mode; we need to exit this
# context and then take the writer lock.
resync = True
if resync:
self.agent.resync()
class ChassisCreateEvent(row_event.RowEvent):
"""Row create event - Chassis name == our_chassis.
On connection, we get a dump of all chassis, so if we catch a creation
of our own chassis it has to be a reconnection. In this case, we need
to do a full sync to make sure that we capture all changes while the
connection to OVSDB was down.
"""
def __init__(self, metadata_agent):
self.agent = metadata_agent
self.first_time = True
table = 'Chassis'
events = (self.ROW_CREATE,)
super(ChassisCreateEvent, self).__init__(
events, table, (('name', '=', self.agent.chassis),))
self.event_name = self.__class__.__name__
def run(self, event, row, old):
if self.first_time:
self.first_time = False
else:
# NOTE(lucasagomes): Re-register the ovn metadata agent
# with the local chassis in case its entry was re-created
# (happens when restarting the ovn-controller)
self.agent.register_metadata_agent()
LOG.info("Connection to OVSDB established, doing a full sync")
self.agent.sync()
class SbGlobalUpdateEvent(row_event.RowEvent):
"""Row update event on SB_Global table."""
def __init__(self, metadata_agent):
self.agent = metadata_agent
table = 'SB_Global'
events = (self.ROW_UPDATE,)
super(SbGlobalUpdateEvent, self).__init__(events, table, None)
self.event_name = self.__class__.__name__
def run(self, event, row, old):
self.agent.sb_idl.update_metadata_health_status(
self.agent.chassis, row.nb_cfg).execute()
class MetadataAgent(object):
def __init__(self, conf):
self.conf = conf
vlog.use_python_logger(max_level=config.get_ovn_ovsdb_log_level())
self._process_monitor = external_process.ProcessMonitor(
config=self.conf,
resource_type='metadata')
def _load_config(self):
self.chassis = self._get_own_chassis_name()
self.ovn_bridge = self._get_ovn_bridge()
LOG.debug("Loaded chassis %s and ovn bridge %s.",
self.chassis, self.ovn_bridge)
@_sync_lock
def resync(self):
"""Resync the agent.
Reload the configuration and sync the agent again.
"""
self._load_config()
self.sync()
def start(self):
# Launch the server that will act as a proxy between the VMs and Nova.
proxy = metadata_server.UnixDomainMetadataProxy(self.conf)
proxy.run()
# Open the connection to OVS database
self.ovs_idl = ovsdb.MetadataAgentOvsIdl().start()
self._load_config()
# Open the connection to OVN SB database.
self.sb_idl = ovsdb.MetadataAgentOvnSbIdl(
[PortBindingChassisEvent(self), ChassisCreateEvent(self),
SbGlobalUpdateEvent(self)]).start()
# Do the initial sync.
self.sync()
# Register the agent with its corresponding Chassis
self.register_metadata_agent()
proxy.wait()
def register_metadata_agent(self):
# NOTE(lucasagomes): db_add() will not overwrite the UUID if
# it's already set.
ext_ids = {
ovn_const.OVN_AGENT_METADATA_ID_KEY: uuidutils.generate_uuid()}
self.sb_idl.db_add('Chassis', self.chassis, 'external_ids',
ext_ids).execute(check_error=True)
def _get_own_chassis_name(self):
"""Return the external_ids:system-id value of the Open_vSwitch table.
As long as ovn-controller is running on this node, the key is
guaranteed to exist and will include the chassis name.
"""
ext_ids = self.ovs_idl.db_get(
'Open_vSwitch', '.', 'external_ids').execute()
return ext_ids['system-id']
def _get_ovn_bridge(self):
"""Return the external_ids:ovn-bridge value of the Open_vSwitch table.
This is the OVS bridge the metadata ports are plugged into.
If the key doesn't exist, this method will return 'br-int' as a default.
"""
ext_ids = self.ovs_idl.db_get(
'Open_vSwitch', '.', 'external_ids').execute()
try:
return ext_ids['ovn-bridge']
except KeyError:
LOG.warning("Can't read ovn-bridge external-id from OVSDB. Using "
"br-int instead.")
return 'br-int'
@_sync_lock
def sync(self):
"""Agent sync.
This function will make sure that all networks with ports in our
chassis are serving metadata. Also, it will tear down those namespaces
which were serving metadata but are no longer needed.
"""
metadata_namespaces = self.ensure_all_networks_provisioned()
system_namespaces = tuple(
ns.decode('utf-8') if isinstance(ns, bytes) else ns
for ns in ip_lib.list_network_namespaces())
unused_namespaces = [ns for ns in system_namespaces if
ns.startswith(NS_PREFIX) and
ns not in metadata_namespaces]
for ns in unused_namespaces:
self.teardown_datapath(self._get_datapath_name(ns))
@staticmethod
def _get_veth_name(datapath):
return ['{}{}{}'.format(n_const.TAP_DEVICE_PREFIX,
datapath[:10], i) for i in [0, 1]]
@staticmethod
def _get_datapath_name(namespace):
return namespace[len(NS_PREFIX):]
@staticmethod
def _get_namespace_name(datapath):
return NS_PREFIX + datapath
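# Naming example (hypothetical datapath uuid '51f1ab23-9c...'): the VETH
# pair is named 'tap51f1ab23-90' and 'tap51f1ab23-91' (TAP prefix + first
# 10 characters of the uuid + index) and the namespace is
# 'ovnmeta-51f1ab23-9c...'.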
def _vif_ports(self, ports):
return (p for p in ports if p.type in OVN_VIF_PORT_TYPES)
def teardown_datapath(self, datapath):
"""Unprovision this datapath to stop serving metadata.
This function will shut down the metadata proxy if it's running and
delete the VETH pair, the OVS port and the namespace.
"""
self.update_chassis_metadata_networks(datapath, remove=True)
namespace = self._get_namespace_name(datapath)
ip = ip_lib.IPWrapper(namespace)
# If the namespace doesn't exist, return
if not ip.netns.exists(namespace):
return
LOG.info("Cleaning up %s namespace which is not needed anymore",
namespace)
metadata_driver.MetadataDriver.destroy_monitored_metadata_proxy(
self._process_monitor, datapath, self.conf, namespace)
veth_name = self._get_veth_name(datapath)
self.ovs_idl.del_port(veth_name[0]).execute()
if ip_lib.device_exists(veth_name[0]):
ip_lib.IPWrapper().del_veth(veth_name[0])
ip.garbage_collect_namespace()
def update_datapath(self, datapath):
"""Update the metadata service for this datapath.
This function will:
* Provision the namespace if it wasn't already in place.
* Update the namespace if it was already serving metadata (for example,
after binding/unbinding the first/last port of a subnet in our
chassis).
* Tear down the namespace if there are no more ports in our chassis
for this datapath.
"""
ports = self.sb_idl.get_ports_on_chassis(self.chassis)
datapath_ports = [p for p in self._vif_ports(ports) if
str(p.datapath.uuid) == datapath]
if datapath_ports:
self.provision_datapath(datapath)
else:
self.teardown_datapath(datapath)
def provision_datapath(self, datapath):
"""Provision the datapath so that it can serve metadata.
This function will create the namespace and VETH pair if needed
and assign the IP addresses to the interface corresponding to the
metadata port of the network. It will also remove existing IP
addresses that are no longer needed.
:return: The metadata namespace name of this datapath
"""
LOG.debug("Provisioning datapath %s", datapath)
port = self.sb_idl.get_metadata_port_network(datapath)
# If there's no metadata port, or it doesn't have a MAC address or IP
# addresses, then tear the namespace down if needed. This might happen
# when no subnets have been created yet, so the metadata port doesn't
# have an IP address.
if not (port and port.mac and
port.external_ids.get(ovn_const.OVN_CIDRS_EXT_ID_KEY, None)):
LOG.debug("There is no metadata port for datapath %s or it has no "
"MAC or IP addresses configured, tearing the namespace "
"down if needed", datapath)
self.teardown_datapath(datapath)
return
# First entry of the mac field must be the MAC address.
match = MAC_PATTERN.match(port.mac[0].split(' ')[0])
# If it is not, we can't provision the namespace. Tear it down if
# needed and log the error.
if not match:
LOG.error("Metadata port for datapath %s doesn't have a MAC "
"address, tearing the namespace down if needed",
datapath)
self.teardown_datapath(datapath)
return
mac = match.group()
ip_addresses = set(
port.external_ids[ovn_const.OVN_CIDRS_EXT_ID_KEY].split(' '))
ip_addresses.add(ovn_const.METADATA_DEFAULT_CIDR)
metadata_port = MetadataPortInfo(mac, ip_addresses)
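# For illustration: OVN_CIDRS_EXT_ID_KEY ('neutron:cidrs') holds the
# port's CIDRs space-separated (see the unit tests for an example), and
# the well-known 169.254.169.254/16 metadata CIDR is always added on top.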
# Create the VETH pair if it's not already created. The add_veth
# function will also create the namespace for us.
namespace = self._get_namespace_name(datapath)
veth_name = self._get_veth_name(datapath)
ip1 = ip_lib.IPDevice(veth_name[0])
if ip_lib.device_exists(veth_name[1], namespace):
ip2 = ip_lib.IPDevice(veth_name[1], namespace)
else:
LOG.debug("Creating VETH %s in %s namespace", veth_name[1],
namespace)
# It might happen that the end in the root namespace exists even
# though the other end doesn't. Make sure we delete it first if
# that's the case.
if ip1.exists():
ip1.link.delete()
ip1, ip2 = ip_lib.IPWrapper().add_veth(
veth_name[0], veth_name[1], namespace)
# Make sure both ends of the VETH are up
ip1.link.set_up()
ip2.link.set_up()
# Configure the MAC address.
ip2.link.set_address(metadata_port.mac)
dev_info = ip2.addr.list()
# Configure the IP addresses on the VETH pair and remove those
# that we no longer need.
current_cidrs = {dev['cidr'] for dev in dev_info}
for ipaddr in current_cidrs - metadata_port.ip_addresses:
ip2.addr.delete(ipaddr)
for ipaddr in metadata_port.ip_addresses - current_cidrs:
# NOTE(dalvarez): metadata only works on IPv4. We're doing this
# extra check here because it could be that the metadata port has
# an IPv6 address if there's an IPv6 subnet with SLAAC in its
# network. Neutron IPAM will autoallocate an IPv6 address for every
# port in the network.
if utils.get_ip_version(ipaddr) == 4:
ip2.addr.add(ipaddr)
# Check that this port is not attached to any other OVS bridge. This
# can happen when the OVN bridge changes (for example, during a
# migration from ML2/OVS).
ovs_bridges = set(self.ovs_idl.list_br().execute())
try:
ovs_bridges.remove(self.ovn_bridge)
except KeyError:
LOG.warning("Configured OVN bridge %s cannot be found in "
"the system. Resyncing the agent.", self.ovn_bridge)
raise ConfigException()
if ovs_bridges:
with self.ovs_idl.transaction() as txn:
for br in ovs_bridges:
txn.add(self.ovs_idl.del_port(veth_name[0], bridge=br,
if_exists=True))
# Configure the OVS port and add external_ids:iface-id so that it
# can be tracked by OVN.
self.ovs_idl.add_port(self.ovn_bridge,
veth_name[0]).execute()
self.ovs_idl.db_set(
'Interface', veth_name[0],
('external_ids', {'iface-id': port.logical_port})).execute()
# Spawn metadata proxy if it's not already running.
metadata_driver.MetadataDriver.spawn_monitored_metadata_proxy(
self._process_monitor, namespace, ovn_const.METADATA_PORT,
self.conf, bind_address=ovn_const.METADATA_DEFAULT_IP,
network_id=datapath)
self.update_chassis_metadata_networks(datapath)
return namespace
def ensure_all_networks_provisioned(self):
"""Ensure that all datapaths are provisioned.
This function will make sure that all datapaths with ports bound to
our chassis have their namespace, VETH pair and OVS port created and
that the metadata proxy is up and running.
:return: A list with the namespaces that are currently serving
metadata
"""
# Retrieve all VIF ports in our Chassis
ports = self.sb_idl.get_ports_on_chassis(self.chassis)
datapaths = {str(p.datapath.uuid) for p in self._vif_ports(ports)}
namespaces = []
# Make sure that all those datapaths are serving metadata
for datapath in datapaths:
netns = self.provision_datapath(datapath)
if netns:
namespaces.append(netns)
return namespaces
# NOTE(lucasagomes): Even though the metadata agent is a multi-process
# application, there's only one Southbound database IDL instance in
# the agent which handles the OVSDB events, therefore we do not need
# the external=True parameter in the @synchronized decorator.
@lockutils.synchronized(CHASSIS_METADATA_LOCK)
def update_chassis_metadata_networks(self, datapath, remove=False):
"""Update metadata networks hosted in this chassis.
Add or remove a datapath from the list of datapaths for which we're
currently serving metadata.
"""
current_dps = self.sb_idl.get_chassis_metadata_networks(self.chassis)
updated = False
if remove:
if datapath in current_dps:
current_dps.remove(datapath)
updated = True
else:
if datapath not in current_dps:
current_dps.append(datapath)
updated = True
if updated:
with self.sb_idl.create_transaction(check_error=True) as txn:
txn.add(self.sb_idl.set_chassis_metadata_networks(
self.chassis, current_dps))

@@ -0,0 +1,224 @@
# Copyright 2017 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import errno
import grp
import os
import pwd
from neutron.agent.linux import external_process
from neutron_lib import exceptions
from oslo_config import cfg
from oslo_log import log as logging
from neutron._i18n import _
LOG = logging.getLogger(__name__)
METADATA_SERVICE_NAME = 'metadata-proxy'
HAPROXY_SERVICE = 'haproxy'
PROXY_CONFIG_DIR = "ovn-metadata-proxy"
_HAPROXY_CONFIG_TEMPLATE = """
global
log /dev/log local0 %(log_level)s
user %(user)s
group %(group)s
maxconn 1024
pidfile %(pidfile)s
daemon
defaults
log global
mode http
option httplog
option dontlognull
option http-server-close
option forwardfor
retries 3
timeout http-request 30s
timeout connect 30s
timeout client 32s
timeout server 32s
timeout http-keep-alive 30s
listen listener
bind %(host)s:%(port)s
server metadata %(unix_socket_path)s
http-request add-header X-OVN-%(res_type)s-ID %(res_id)s
"""
class InvalidUserOrGroupException(Exception):
pass
class HaproxyConfigurator(object):
def __init__(self, network_id, router_id, unix_socket_path, host,
port, user, group, state_path, pid_file):
self.network_id = network_id
self.router_id = router_id
if network_id is None and router_id is None:
raise exceptions.NetworkIdOrRouterIdRequiredError()
self.host = host
self.port = port
self.user = user
self.group = group
self.state_path = state_path
self.unix_socket_path = unix_socket_path
self.pidfile = pid_file
self.log_level = (
'debug' if logging.is_debug_enabled(cfg.CONF) else 'info')
def create_config_file(self):
"""Create the config file for haproxy."""
# Need to convert uid/gid into username/group
try:
username = pwd.getpwuid(int(self.user)).pw_name
except (ValueError, KeyError):
try:
username = pwd.getpwnam(self.user).pw_name
except KeyError:
raise InvalidUserOrGroupException(
_("Invalid user/uid: '%s'") % self.user)
try:
groupname = grp.getgrgid(int(self.group)).gr_name
except (ValueError, KeyError):
try:
groupname = grp.getgrnam(self.group).gr_name
except KeyError:
raise InvalidUserOrGroupException(
_("Invalid group/gid: '%s'") % self.group)
cfg_info = {
'host': self.host,
'port': self.port,
'unix_socket_path': self.unix_socket_path,
'user': username,
'group': groupname,
'pidfile': self.pidfile,
'log_level': self.log_level
}
if self.network_id:
cfg_info['res_type'] = 'Network'
cfg_info['res_id'] = self.network_id
else:
cfg_info['res_type'] = 'Router'
cfg_info['res_id'] = self.router_id
haproxy_cfg = _HAPROXY_CONFIG_TEMPLATE % cfg_info
LOG.debug("haproxy_cfg = %s", haproxy_cfg)
cfg_dir = self.get_config_path(self.state_path)
# uuid has to be included somewhere in the command line so that it can
# be tracked by process_monitor.
self.cfg_path = os.path.join(cfg_dir, "%s.conf" % cfg_info['res_id'])
if not os.path.exists(cfg_dir):
os.makedirs(cfg_dir)
with open(self.cfg_path, "w") as cfg_file:
cfg_file.write(haproxy_cfg)
@staticmethod
def get_config_path(state_path):
return os.path.join(state_path or cfg.CONF.state_path,
PROXY_CONFIG_DIR)
@staticmethod
def cleanup_config_file(uuid, state_path):
"""Delete config file created when metadata proxy was spawned."""
# Delete config file if it exists
cfg_path = os.path.join(
HaproxyConfigurator.get_config_path(state_path),
"%s.conf" % uuid)
try:
os.unlink(cfg_path)
except OSError as ex:
# It can happen that this function is called but the metadata proxy
# was never spawned, so its config file won't exist.
if ex.errno != errno.ENOENT:
raise
class MetadataDriver(object):
monitors = {}
@classmethod
def _get_metadata_proxy_user_group(cls, conf):
user = conf.metadata_proxy_user or str(os.geteuid())
group = conf.metadata_proxy_group or str(os.getegid())
return user, group
@classmethod
def _get_metadata_proxy_callback(cls, bind_address, port, conf,
network_id=None, router_id=None):
def callback(pid_file):
metadata_proxy_socket = conf.metadata_proxy_socket
user, group = (
cls._get_metadata_proxy_user_group(conf))
haproxy = HaproxyConfigurator(network_id,
router_id,
metadata_proxy_socket,
bind_address,
port,
user,
group,
conf.state_path,
pid_file)
haproxy.create_config_file()
proxy_cmd = [HAPROXY_SERVICE,
'-f', haproxy.cfg_path]
return proxy_cmd
return callback
@classmethod
def spawn_monitored_metadata_proxy(cls, monitor, ns_name, port, conf,
bind_address="0.0.0.0", network_id=None,
router_id=None):
uuid = network_id or router_id
callback = cls._get_metadata_proxy_callback(
bind_address, port, conf, network_id=network_id,
router_id=router_id)
pm = cls._get_metadata_proxy_process_manager(uuid, conf,
ns_name=ns_name,
callback=callback)
pm.enable()
monitor.register(uuid, METADATA_SERVICE_NAME, pm)
cls.monitors[uuid] = pm
@classmethod
def destroy_monitored_metadata_proxy(cls, monitor, uuid, conf, ns_name):
monitor.unregister(uuid, METADATA_SERVICE_NAME)
pm = cls._get_metadata_proxy_process_manager(uuid, conf,
ns_name=ns_name)
pm.disable()
# Delete metadata proxy config file
HaproxyConfigurator.cleanup_config_file(uuid, cfg.CONF.state_path)
cls.monitors.pop(uuid, None)
@classmethod
def _get_metadata_proxy_process_manager(cls, router_id, conf, ns_name=None,
callback=None):
return external_process.ProcessManager(
conf=conf,
uuid=router_id,
namespace=ns_name,
service=HAPROXY_SERVICE,
default_cmd_callback=callback)

@@ -0,0 +1,69 @@
# Copyright 2017 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ovs.db import idl
from ovsdbapp.backend.ovs_idl import connection
from ovsdbapp.backend.ovs_idl import idlutils
from ovsdbapp.schema.open_vswitch import impl_idl as idl_ovs
import tenacity
from neutron.conf.plugins.ml2.drivers.ovn import ovn_conf as config
from neutron.plugins.ml2.drivers.ovn.mech_driver.ovsdb import impl_idl_ovn
from neutron.plugins.ml2.drivers.ovn.mech_driver.ovsdb import ovsdb_monitor
class MetadataAgentOvnSbIdl(ovsdb_monitor.OvnIdl):
SCHEMA = 'OVN_Southbound'
def __init__(self, events=None):
connection_string = config.get_ovn_sb_connection()
ovsdb_monitor._check_and_set_ssl_files(self.SCHEMA)
helper = self._get_ovsdb_helper(connection_string)
tables = ('Chassis', 'Encap', 'Port_Binding', 'Datapath_Binding',
'SB_Global')
for table in tables:
helper.register_table(table)
super(MetadataAgentOvnSbIdl, self).__init__(
None, connection_string, helper)
if events:
self.notify_handler.watch_events(events)
@tenacity.retry(
wait=tenacity.wait_exponential(max=180),
reraise=True)
def _get_ovsdb_helper(self, connection_string):
return idlutils.get_schema_helper(connection_string, self.SCHEMA)
def start(self):
conn = connection.Connection(
self, timeout=config.get_ovn_ovsdb_timeout())
return impl_idl_ovn.OvsdbSbOvnIdl(conn)
class MetadataAgentOvsIdl(object):
def start(self):
connection_string = config.cfg.CONF.ovs.ovsdb_connection
helper = idlutils.get_schema_helper(connection_string,
'Open_vSwitch')
tables = ('Open_vSwitch', 'Bridge', 'Port', 'Interface')
for table in tables:
helper.register_table(table)
ovs_idl = idl.Idl(connection_string, helper)
ovs_idl._session.reconnect.set_probe_interval(
config.get_ovn_ovsdb_probe_interval())
conn = connection.Connection(
ovs_idl, timeout=config.cfg.CONF.ovs.ovsdb_connection_timeout)
return idl_ovs.OvsdbIdl(conn)

@@ -0,0 +1,197 @@
# Copyright 2017 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import hashlib
import hmac
from neutron._i18n import _
from neutron.agent.linux import utils as agent_utils
from neutron.agent.ovn.metadata import ovsdb
from neutron.common import ipv6_utils
from neutron.common.ovn import constants as ovn_const
from neutron.conf.agent.metadata import config
from neutron_lib.callbacks import events
from neutron_lib.callbacks import registry
from neutron_lib.callbacks import resources
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import encodeutils
import requests
import six
from six.moves import urllib
import webob
LOG = logging.getLogger(__name__)
MODE_MAP = {
config.USER_MODE: 0o644,
config.GROUP_MODE: 0o664,
config.ALL_MODE: 0o666,
}
class MetadataProxyHandler(object):
def __init__(self, conf):
self.conf = conf
self.subscribe()
def subscribe(self):
registry.subscribe(self.post_fork_initialize,
resources.PROCESS,
events.AFTER_INIT)
def post_fork_initialize(self, resource, event, trigger, payload=None):
# We need to open a connection to the OVN Southbound database for
# each worker so that we can process the metadata requests.
self.sb_idl = ovsdb.MetadataAgentOvnSbIdl().start()
@webob.dec.wsgify(RequestClass=webob.Request)
def __call__(self, req):
try:
LOG.debug("Request: %s", req)
instance_id, project_id = self._get_instance_and_project_id(req)
if instance_id:
return self._proxy_request(instance_id, project_id, req)
else:
return webob.exc.HTTPNotFound()
except Exception:
LOG.exception("Unexpected error.")
msg = _('An unknown error has occurred. '
'Please try your request again.')
explanation = six.text_type(msg)
return webob.exc.HTTPInternalServerError(explanation=explanation)
def _get_instance_and_project_id(self, req):
remote_address = req.headers.get('X-Forwarded-For')
network_id = req.headers.get('X-OVN-Network-ID')
ports = self.sb_idl.get_network_port_bindings_by_ip(network_id,
remote_address)
if len(ports) == 1:
external_ids = ports[0].external_ids
return (external_ids[ovn_const.OVN_DEVID_EXT_ID_KEY],
external_ids[ovn_const.OVN_PROJID_EXT_ID_KEY])
return None, None
def _proxy_request(self, instance_id, tenant_id, req):
headers = {
'X-Forwarded-For': req.headers.get('X-Forwarded-For'),
'X-Instance-ID': instance_id,
'X-Tenant-ID': tenant_id,
'X-Instance-ID-Signature': self._sign_instance_id(instance_id)
}
nova_host_port = ipv6_utils.valid_ipv6_url(
self.conf.nova_metadata_host,
self.conf.nova_metadata_port)
url = urllib.parse.urlunsplit((
self.conf.nova_metadata_protocol,
nova_host_port,
req.path_info,
req.query_string,
''))
disable_ssl_certificate_validation = self.conf.nova_metadata_insecure
if self.conf.auth_ca_cert and not disable_ssl_certificate_validation:
verify_cert = self.conf.auth_ca_cert
else:
verify_cert = not disable_ssl_certificate_validation
client_cert = None
if self.conf.nova_client_cert and self.conf.nova_client_priv_key:
client_cert = (self.conf.nova_client_cert,
self.conf.nova_client_priv_key)
resp = requests.request(method=req.method, url=url,
headers=headers,
data=req.body,
cert=client_cert,
verify=verify_cert)
if resp.status_code == 200:
req.response.content_type = resp.headers['content-type']
req.response.body = resp.content
LOG.debug(str(resp))
return req.response
elif resp.status_code == 403:
LOG.warning(
'The remote metadata server responded with Forbidden. This '
'response usually occurs when shared secrets do not match.'
)
return webob.exc.HTTPForbidden()
elif resp.status_code == 400:
return webob.exc.HTTPBadRequest()
elif resp.status_code == 404:
return webob.exc.HTTPNotFound()
elif resp.status_code == 409:
return webob.exc.HTTPConflict()
elif resp.status_code == 500:
msg = _(
'Remote metadata server experienced an internal server error.'
)
LOG.warning(msg)
explanation = six.text_type(msg)
return webob.exc.HTTPInternalServerError(explanation=explanation)
else:
raise Exception(_('Unexpected response code: %s') %
resp.status_code)
def _sign_instance_id(self, instance_id):
secret = self.conf.metadata_proxy_shared_secret
secret = encodeutils.to_utf8(secret)
instance_id = encodeutils.to_utf8(instance_id)
return hmac.new(secret, instance_id, hashlib.sha256).hexdigest()
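# The X-Instance-ID-Signature header produced above lets the Nova metadata
# API, configured with the same shared secret, verify that X-Instance-ID
# was set by a trusted proxy.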
class UnixDomainMetadataProxy(object):
def __init__(self, conf):
self.conf = conf
agent_utils.ensure_directory_exists_without_file(
cfg.CONF.metadata_proxy_socket)
def _get_socket_mode(self):
mode = self.conf.metadata_proxy_socket_mode
if mode == config.DEDUCE_MODE:
user = self.conf.metadata_proxy_user
if (not user or user == '0' or user == 'root' or
agent_utils.is_effective_user(user)):
# user is agent effective user or root => USER_MODE
mode = config.USER_MODE
else:
group = self.conf.metadata_proxy_group
if not group or agent_utils.is_effective_group(group):
# group is agent effective group => GROUP_MODE
mode = config.GROUP_MODE
else:
# otherwise => ALL_MODE
mode = config.ALL_MODE
return MODE_MAP[mode]
def run(self):
self.server = agent_utils.UnixDomainWSGIServer(
'networking-ovn-metadata-agent')
self.server.start(MetadataProxyHandler(self.conf),
self.conf.metadata_proxy_socket,
workers=self.conf.metadata_workers,
backlog=self.conf.metadata_backlog,
mode=self._get_socket_mode())
def wait(self):
self.server.wait()

@@ -0,0 +1,38 @@
# Copyright 2017 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
from neutron.common import config
from neutron.common import utils
from oslo_config import cfg
from oslo_log import log as logging
from neutron.agent.ovn.metadata import agent
from neutron.conf.agent.ovn.metadata import config as meta
LOG = logging.getLogger(__name__)
def main():
meta.register_meta_conf_opts(meta.SHARED_OPTS)
meta.register_meta_conf_opts(meta.UNIX_DOMAIN_METADATA_PROXY_OPTS)
meta.register_meta_conf_opts(meta.METADATA_PROXY_HANDLER_OPTS)
meta.register_meta_conf_opts(meta.OVS_OPTS, group='ovs')
config.init(sys.argv[1:])
config.setup_logging()
meta.setup_privsep()
utils.log_opt_values(LOG)
agt = agent.MetadataAgent(cfg.CONF)
agt.start()
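# NOTE: this patch does not wire main() up as a console script yet (see the
# setup.cfg TODO at the end of this change); the agent is expected to be
# launched through such an entry point once it is added.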

@@ -183,3 +183,11 @@ LSP_OPTIONS_VIRTUAL_IP_KEY = 'virtual-ip'
HA_CHASSIS_GROUP_DEFAULT_NAME = 'default_ha_chassis_group'
HA_CHASSIS_GROUP_HIGHEST_PRIORITY = 32767
# TODO(lucasagomes): Move this to neutron-lib later.
# Metadata constants
METADATA_DEFAULT_PREFIX = 16
METADATA_DEFAULT_IP = '169.254.169.254'
METADATA_DEFAULT_CIDR = '%s/%d' % (METADATA_DEFAULT_IP,
METADATA_DEFAULT_PREFIX)
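# Resolves to '169.254.169.254/16'.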
METADATA_PORT = 80

@@ -0,0 +1,58 @@
# Copyright 2015 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import itertools
import shlex
from neutron.conf.agent.metadata import config as meta_conf
from oslo_config import cfg
from oslo_privsep import priv_context
from neutron._i18n import _
OVS_OPTS = [
cfg.StrOpt('ovsdb_connection',
default='unix:/usr/local/var/run/openvswitch/db.sock',
help=_('The connection string for the native OVSDB backend.\n'
'Use tcp:IP:PORT for TCP connection.\n'
'Use unix:FILE for unix domain socket connection.')),
cfg.IntOpt('ovsdb_connection_timeout',
default=180,
help=_('Timeout in seconds for the OVSDB '
'connection transaction'))
]
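# Illustrative agent configuration consuming these options (the values
# below are examples, not recommendations):
#
#   [ovs]
#   ovsdb_connection = tcp:127.0.0.1:6640
#   ovsdb_connection_timeout = 180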
def register_meta_conf_opts(opts, cfg=cfg.CONF, group=None):
cfg.register_opts(opts, group=group)
def list_metadata_agent_opts():
return [
('DEFAULT',
itertools.chain(
meta_conf.SHARED_OPTS,
meta_conf.METADATA_PROXY_HANDLER_OPTS,
meta_conf.UNIX_DOMAIN_METADATA_PROXY_OPTS)
),
('ovs', OVS_OPTS)
]
def get_root_helper(conf):
return conf.AGENT.root_helper
def setup_privsep():
priv_context.init(root_helper=shlex.split(get_root_helper(cfg.CONF)))

@@ -0,0 +1,316 @@
# Copyright 2017 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import mock
from oslo_config import cfg
from oslo_config import fixture as config_fixture
from neutron.agent.linux import ip_lib
from neutron.agent.linux.ip_lib import IpAddrCommand as ip_addr
from neutron.agent.linux.ip_lib import IpLinkCommand as ip_link
from neutron.agent.linux.ip_lib import IpNetnsCommand as ip_netns
from neutron.agent.linux.ip_lib import IPWrapper as ip_wrap
from neutron.agent.ovn.metadata import agent
from neutron.agent.ovn.metadata import driver
from neutron.conf.agent.metadata import config as meta_conf
from neutron.conf.agent.ovn.metadata import config as ovn_meta_conf
from neutron.tests import base
OvnPortInfo = collections.namedtuple(
'OvnPortInfo', ['datapath', 'type', 'mac', 'external_ids', 'logical_port'])
DatapathInfo = collections.namedtuple('DatapathInfo', 'uuid')
def makePort(datapath=None, type='', mac=None, external_ids=None,
logical_port=None):
return OvnPortInfo(datapath, type, mac, external_ids, logical_port)
class ConfFixture(config_fixture.Config):
def setUp(self):
super(ConfFixture, self).setUp()
ovn_meta_conf.register_meta_conf_opts(meta_conf.SHARED_OPTS, self.conf)
ovn_meta_conf.register_meta_conf_opts(
meta_conf.UNIX_DOMAIN_METADATA_PROXY_OPTS, self.conf)
ovn_meta_conf.register_meta_conf_opts(
meta_conf.METADATA_PROXY_HANDLER_OPTS, self.conf)
ovn_meta_conf.register_meta_conf_opts(
ovn_meta_conf.OVS_OPTS, self.conf, group='ovs')
class TestMetadataAgent(base.BaseTestCase):
fake_conf = cfg.CONF
fake_conf_fixture = ConfFixture(fake_conf)
def setUp(self):
super(TestMetadataAgent, self).setUp()
self.useFixture(self.fake_conf_fixture)
self.log_p = mock.patch.object(agent, 'LOG')
self.log = self.log_p.start()
self.agent = agent.MetadataAgent(self.fake_conf)
self.agent.sb_idl = mock.Mock()
self.agent.ovs_idl = mock.Mock()
self.agent.ovs_idl.transaction = mock.MagicMock()
self.agent.chassis = 'chassis'
self.agent.ovn_bridge = 'br-int'
def test_sync(self):
with mock.patch.object(
self.agent, 'ensure_all_networks_provisioned') as enp,\
mock.patch.object(
ip_lib, 'list_network_namespaces') as lnn,\
mock.patch.object(
self.agent, 'teardown_datapath') as tdp:
enp.return_value = ['ovnmeta-1', 'ovnmeta-2']
lnn.return_value = ['ovnmeta-1', 'ovnmeta-2']
self.agent.sync()
enp.assert_called_once_with()
lnn.assert_called_once_with()
tdp.assert_not_called()
def test_sync_teardown_namespace(self):
"""Test that sync tears down unneeded metadata namespaces."""
with mock.patch.object(
self.agent, 'ensure_all_networks_provisioned') as enp,\
mock.patch.object(
ip_lib, 'list_network_namespaces') as lnn,\
mock.patch.object(
self.agent, 'teardown_datapath') as tdp:
enp.return_value = ['ovnmeta-1', 'ovnmeta-2']
lnn.return_value = ['ovnmeta-1', 'ovnmeta-2', 'ovnmeta-3',
'ns1', 'ns2']
self.agent.sync()
enp.assert_called_once_with()
lnn.assert_called_once_with()
tdp.assert_called_once_with('3')
def test_ensure_all_networks_provisioned(self):
"""Test networks are provisioned.
This test simulates that this chassis has the following ports:
* datapath '0': 1 port
* datapath '1': 2 ports
* datapath '2': 1 port
* datapath '3': 1 port with type 'external'
* datapath '5': 1 port with type 'unknown'
It is expected that only datapaths '0', '1', '2' and '3' are
provisioned once.
"""
ports = []
for i in range(0, 3):
ports.append(makePort(datapath=DatapathInfo(uuid=str(i))))
ports.append(makePort(datapath=DatapathInfo(uuid='1')))
ports.append(makePort(datapath=DatapathInfo(uuid='3'),
type='external'))
ports.append(makePort(datapath=DatapathInfo(uuid='5'), type='unknown'))
with mock.patch.object(self.agent, 'provision_datapath',
return_value=None) as pdp,\
mock.patch.object(self.agent.sb_idl, 'get_ports_on_chassis',
return_value=ports):
self.agent.ensure_all_networks_provisioned()
expected_calls = [mock.call(str(i)) for i in range(0, 4)]
self.assertEqual(sorted(expected_calls),
sorted(pdp.call_args_list))
def test_update_datapath_provision(self):
ports = []
for i in range(0, 3):
ports.append(makePort(datapath=DatapathInfo(uuid=str(i))))
ports.append(makePort(datapath=DatapathInfo(uuid='3'),
type='external'))
with mock.patch.object(self.agent, 'provision_datapath',
return_value=None) as pdp,\
mock.patch.object(self.agent, 'teardown_datapath') as tdp,\
mock.patch.object(self.agent.sb_idl, 'get_ports_on_chassis',
return_value=ports):
self.agent.update_datapath('1')
self.agent.update_datapath('3')
expected_calls = [mock.call('1'), mock.call('3')]
pdp.assert_has_calls(expected_calls)
tdp.assert_not_called()
def test_update_datapath_teardown(self):
ports = []
for i in range(0, 3):
ports.append(makePort(datapath=DatapathInfo(uuid=str(i))))
with mock.patch.object(self.agent, 'provision_datapath',
return_value=None) as pdp,\
mock.patch.object(self.agent, 'teardown_datapath') as tdp,\
mock.patch.object(self.agent.sb_idl, 'get_ports_on_chassis',
return_value=ports):
self.agent.update_datapath('5')
tdp.assert_called_once_with('5')
pdp.assert_not_called()
def test_teardown_datapath(self):
"""Test teardown datapath.
Check that the VETH pair, OVS port and namespace associated with this
datapath are deleted and the metadata proxy is destroyed.
"""
with mock.patch.object(self.agent,
'update_chassis_metadata_networks'),\
mock.patch.object(
ip_netns, 'exists', return_value=True),\
mock.patch.object(
ip_lib, 'device_exists', return_value=True),\
mock.patch.object(
ip_wrap, 'garbage_collect_namespace') as garbage_collect,\
mock.patch.object(
ip_wrap, 'del_veth') as del_veth,\
mock.patch.object(agent.MetadataAgent, '_get_veth_name',
return_value=['veth_0', 'veth_1']),\
mock.patch.object(
driver.MetadataDriver,
'destroy_monitored_metadata_proxy') as destroy_mdp:
self.agent.teardown_datapath('1')
destroy_mdp.assert_called_once_with(
mock.ANY, '1', mock.ANY, 'ovnmeta-1')
self.agent.ovs_idl.del_port.assert_called_once_with('veth_0')
del_veth.assert_called_once_with('veth_0')
garbage_collect.assert_called_once_with()
def test_provision_datapath(self):
"""Test datapath provisioning.
Check that the VETH pair, OVS port and namespace associated with this
datapath are created, that the interface is properly configured with
the right IP addresses and that the metadata proxy is spawned.
"""
metadata_port = makePort(mac=['aa:bb:cc:dd:ee:ff'],
external_ids={
'neutron:cidrs': '10.0.0.1/23 '
'2001:470:9:1224:5595:dd51:6ba2:e788/64'},
logical_port='port')
with mock.patch.object(self.agent.sb_idl,
'get_metadata_port_network',
return_value=metadata_port),\
mock.patch.object(
ip_lib, 'device_exists', return_value=False),\
mock.patch.object(
ip_lib.IPDevice, 'exists', return_value=False),\
mock.patch.object(agent.MetadataAgent, '_get_veth_name',
return_value=['veth_0', 'veth_1']),\
mock.patch.object(agent.MetadataAgent, '_get_namespace_name',
return_value='namespace'),\
mock.patch.object(ip_link, 'set_up') as link_set_up,\
mock.patch.object(ip_link, 'set_address') as link_set_addr,\
mock.patch.object(ip_addr, 'list', return_value=[]),\
mock.patch.object(ip_addr, 'add') as ip_addr_add,\
mock.patch.object(
ip_wrap, 'add_veth',
return_value=[ip_lib.IPDevice('ip1'),
ip_lib.IPDevice('ip2')]) as add_veth,\
mock.patch.object(
self.agent,
'update_chassis_metadata_networks') as update_chassis,\
mock.patch.object(
driver.MetadataDriver,
'spawn_monitored_metadata_proxy') as spawn_mdp:
# Simulate that the VETH pair was already present in 'br-fake'.
# We need to assert that it was deleted first.
self.agent.ovs_idl.list_br.return_value.execute.return_value = (
['br-int', 'br-fake'])
self.agent.provision_datapath('1')
# Check that the port was deleted from br-fake
self.agent.ovs_idl.del_port.assert_called_once_with(
'veth_0', bridge='br-fake', if_exists=True)
# Check that the VETH pair is created
add_veth.assert_called_once_with('veth_0', 'veth_1', 'namespace')
# Make sure that the two ends of the VETH pair have been set as up.
self.assertEqual(2, link_set_up.call_count)
link_set_addr.assert_called_once_with('aa:bb:cc:dd:ee:ff')
# Make sure that the port has been added to OVS.
self.agent.ovs_idl.add_port.assert_called_once_with(
'br-int', 'veth_0')
self.agent.ovs_idl.db_set.assert_called_once_with(
'Interface', 'veth_0', ('external_ids', {'iface-id': 'port'}))
# Check that the metadata port has the IP addresses properly
# configured and that IPv6 address has been skipped.
expected_calls = [mock.call('10.0.0.1/23'),
mock.call('169.254.169.254/16')]
self.assertEqual(sorted(expected_calls),
sorted(ip_addr_add.call_args_list))
# Check that metadata proxy has been spawned
spawn_mdp.assert_called_once_with(
mock.ANY, 'namespace', 80, mock.ANY,
bind_address='169.254.169.254', network_id='1')
# Check that the chassis has been updated with the datapath.
update_chassis.assert_called_once_with('1')
def _test_update_chassis_metadata_networks_helper(
self, dp, remove, expected_dps, txn_called=True):
current_dps = ['0', '1', '2']
with mock.patch.object(self.agent.sb_idl,
'get_chassis_metadata_networks',
return_value=current_dps),\
mock.patch.object(self.agent.sb_idl,
'set_chassis_metadata_networks',
return_value=True),\
mock.patch.object(self.agent.sb_idl,
'create_transaction') as create_txn_mock:
self.agent.update_chassis_metadata_networks(dp, remove=remove)
updated_dps = self.agent.sb_idl.get_chassis_metadata_networks(
self.agent.chassis)
self.assertEqual(updated_dps, expected_dps)
self.assertEqual(create_txn_mock.called, txn_called)
def test_update_chassis_metadata_networks_add(self):
dp = '4'
remove = False
expected_dps = ['0', '1', '2', '4']
self._test_update_chassis_metadata_networks_helper(
dp, remove, expected_dps)
def test_update_chassis_metadata_networks_remove(self):
dp = '2'
remove = True
expected_dps = ['0', '1']
self._test_update_chassis_metadata_networks_helper(
dp, remove, expected_dps)
def test_update_chassis_metadata_networks_add_dp_exists(self):
dp = '2'
remove = False
expected_dps = ['0', '1', '2']
self._test_update_chassis_metadata_networks_helper(
dp, remove, expected_dps, txn_called=False)
def test_update_chassis_metadata_networks_remove_no_dp(self):
dp = '3'
remove = True
expected_dps = ['0', '1', '2']
self._test_update_chassis_metadata_networks_helper(
dp, remove, expected_dps, txn_called=False)

@@ -0,0 +1,129 @@
# Copyright 2017 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import mock
from neutron_lib import fixture as lib_fixtures
from oslo_config import cfg
from oslo_utils import uuidutils
from neutron.agent.ovn.metadata import agent as metadata_agent
from neutron.agent.ovn.metadata import driver as metadata_driver
from neutron.conf.agent.metadata import config as meta_conf
from neutron.conf.agent.ovn.metadata import config as ovn_meta_conf
from neutron.tests import base
from neutron.tests.unit.agent.linux import test_utils
_uuid = uuidutils.generate_uuid
class TestMetadataDriverProcess(base.BaseTestCase):
EUNAME = 'neutron'
EGNAME = 'neutron'
METADATA_DEFAULT_IP = '169.254.169.254'
METADATA_PORT = 8080
METADATA_SOCKET = '/socket/path'
PIDFILE = 'pidfile'
def setUp(self):
super(TestMetadataDriverProcess, self).setUp()
mock.patch('eventlet.spawn').start()
ovn_meta_conf.register_meta_conf_opts(meta_conf.SHARED_OPTS, cfg.CONF)
def test_spawn_metadata_proxy(self):
datapath_id = _uuid()
metadata_ns = metadata_agent.NS_PREFIX + datapath_id
ip_class_path = 'neutron.agent.linux.ip_lib.IPWrapper'
cfg.CONF.set_override('metadata_proxy_user', self.EUNAME)
cfg.CONF.set_override('metadata_proxy_group', self.EGNAME)
cfg.CONF.set_override('metadata_proxy_socket', self.METADATA_SOCKET)
cfg.CONF.set_override('debug', True)
agent = metadata_agent.MetadataAgent(cfg.CONF)
with mock.patch(ip_class_path) as ip_mock,\
mock.patch(
'neutron.agent.linux.external_process.'
'ProcessManager.get_pid_file_name',
return_value=self.PIDFILE),\
mock.patch('pwd.getpwnam',
return_value=test_utils.FakeUser(self.EUNAME)),\
mock.patch('grp.getgrnam',
return_value=test_utils.FakeGroup(self.EGNAME)),\
mock.patch('os.makedirs'):
cfg_file = os.path.join(
metadata_driver.HaproxyConfigurator.get_config_path(
cfg.CONF.state_path),
"%s.conf" % datapath_id)
mock_open = self.useFixture(
lib_fixtures.OpenFixture(cfg_file)).mock_open
metadata_driver.MetadataDriver.spawn_monitored_metadata_proxy(
agent._process_monitor,
metadata_ns,
self.METADATA_PORT,
cfg.CONF,
bind_address=self.METADATA_DEFAULT_IP,
network_id=datapath_id)
netns_execute_args = [
'haproxy',
'-f', cfg_file]
cfg_contents = metadata_driver._HAPROXY_CONFIG_TEMPLATE % {
'user': self.EUNAME,
'group': self.EGNAME,
'host': self.METADATA_DEFAULT_IP,
'port': self.METADATA_PORT,
'unix_socket_path': self.METADATA_SOCKET,
'res_type': 'Network',
'res_id': datapath_id,
'pidfile': self.PIDFILE,
'log_level': 'debug'}
mock_open.assert_has_calls([
mock.call(cfg_file, 'w'),
mock.call().write(cfg_contents)],
any_order=True)
ip_mock.assert_has_calls([
mock.call(namespace=metadata_ns),
mock.call().netns.execute(netns_execute_args, addl_env=None,
run_as_root=True)
])
def test_create_config_file_wrong_user(self):
with mock.patch('pwd.getpwnam', side_effect=KeyError):
config = metadata_driver.HaproxyConfigurator(mock.ANY, mock.ANY,
mock.ANY, mock.ANY,
mock.ANY, self.EUNAME,
self.EGNAME, mock.ANY,
mock.ANY)
self.assertRaises(metadata_driver.InvalidUserOrGroupException,
config.create_config_file)
def test_create_config_file_wrong_group(self):
with mock.patch('grp.getgrnam', side_effect=KeyError),\
mock.patch('pwd.getpwnam',
return_value=test_utils.FakeUser(self.EUNAME)):
config = metadata_driver.HaproxyConfigurator(mock.ANY, mock.ANY,
mock.ANY, mock.ANY,
mock.ANY, self.EUNAME,
self.EGNAME, mock.ANY,
mock.ANY)
self.assertRaises(metadata_driver.InvalidUserOrGroupException,
config.create_config_file)

@@ -0,0 +1,269 @@
# Copyright 2017 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import mock
from oslo_config import cfg
from oslo_config import fixture as config_fixture
from oslo_utils import fileutils
import testtools
import webob
from neutron.agent.linux import utils as agent_utils
from neutron.agent.ovn.metadata import server as agent
from neutron.conf.agent.metadata import config as meta_conf
from neutron.conf.agent.ovn.metadata import config as ovn_meta_conf
from neutron.tests import base
OvnPortInfo = collections.namedtuple('OvnPortInfo', 'external_ids')
class ConfFixture(config_fixture.Config):
def setUp(self):
super(ConfFixture, self).setUp()
ovn_meta_conf.register_meta_conf_opts(
meta_conf.METADATA_PROXY_HANDLER_OPTS, self.conf)
self.config(auth_ca_cert=None,
nova_metadata_host='9.9.9.9',
nova_metadata_port=8775,
metadata_proxy_shared_secret='secret',
nova_metadata_protocol='http',
nova_metadata_insecure=True,
nova_client_cert='nova_cert',
nova_client_priv_key='nova_priv_key')
class TestMetadataProxyHandler(base.BaseTestCase):
fake_conf = cfg.CONF
fake_conf_fixture = ConfFixture(fake_conf)
def setUp(self):
super(TestMetadataProxyHandler, self).setUp()
self.useFixture(self.fake_conf_fixture)
self.log_p = mock.patch.object(agent, 'LOG')
self.log = self.log_p.start()
self.handler = agent.MetadataProxyHandler(self.fake_conf)
self.handler.sb_idl = mock.Mock()
def test_call(self):
req = mock.Mock()
with mock.patch.object(self.handler,
'_get_instance_and_project_id') as get_ids:
get_ids.return_value = ('instance_id', 'project_id')
with mock.patch.object(self.handler, '_proxy_request') as proxy:
proxy.return_value = 'value'
retval = self.handler(req)
self.assertEqual(retval, 'value')
def test_call_no_instance_match(self):
req = mock.Mock()
with mock.patch.object(self.handler,
'_get_instance_and_project_id') as get_ids:
get_ids.return_value = None, None
retval = self.handler(req)
self.assertIsInstance(retval, webob.exc.HTTPNotFound)
def test_call_internal_server_error(self):
req = mock.Mock()
with mock.patch.object(self.handler,
'_get_instance_and_project_id') as get_ids:
get_ids.side_effect = Exception
retval = self.handler(req)
self.assertIsInstance(retval, webob.exc.HTTPInternalServerError)
self.assertEqual(len(self.log.mock_calls), 2)
def _get_instance_and_project_id_helper(self, headers, list_ports_retval,
network=None):
remote_address = '192.168.1.1'
headers['X-Forwarded-For'] = remote_address
req = mock.Mock(headers=headers)
def mock_get_network_port_bindings_by_ip(*args, **kwargs):
return list_ports_retval.pop(0)
self.handler.sb_idl.get_network_port_bindings_by_ip.side_effect = (
mock_get_network_port_bindings_by_ip)
instance_id, project_id = (
self.handler._get_instance_and_project_id(req))
expected = [mock.call(network, '192.168.1.1')]
self.handler.sb_idl.get_network_port_bindings_by_ip.assert_has_calls(
expected)
return (instance_id, project_id)
def test_get_instance_id_network_id(self):
network_id = 'the_id'
headers = {
'X-OVN-Network-ID': network_id
}
ovn_port = OvnPortInfo(
external_ids={'neutron:device_id': 'device_id',
'neutron:project_id': 'project_id'})
ports = [[ovn_port]]
self.assertEqual(
self._get_instance_and_project_id_helper(headers, ports,
network='the_id'),
('device_id', 'project_id')
)
def test_get_instance_id_network_id_no_match(self):
network_id = 'the_id'
headers = {
'X-OVN-Network-ID': network_id
}
ports = [[]]
expected = (None, None)
observed = self._get_instance_and_project_id_helper(headers, ports,
network='the_id')
self.assertEqual(expected, observed)
def _proxy_request_test_helper(self, response_code=200, method='GET'):
hdrs = {'X-Forwarded-For': '8.8.8.8'}
body = 'body'
req = mock.Mock(path_info='/the_path', query_string='', headers=hdrs,
method=method, body=body)
resp = mock.MagicMock(status_code=response_code)
resp.status.__str__.side_effect = AttributeError
resp.content = 'content'
req.response = resp
with mock.patch.object(self.handler, '_sign_instance_id') as sign:
sign.return_value = 'signed'
with mock.patch('requests.request') as mock_request:
resp.headers = {'content-type': 'text/plain'}
mock_request.return_value = resp
retval = self.handler._proxy_request('the_id', 'tenant_id',
req)
mock_request.assert_called_once_with(
method=method, url='http://9.9.9.9:8775/the_path',
headers={
'X-Forwarded-For': '8.8.8.8',
'X-Instance-ID-Signature': 'signed',
'X-Instance-ID': 'the_id',
'X-Tenant-ID': 'tenant_id'
},
data=body,
cert=(self.fake_conf.nova_client_cert,
self.fake_conf.nova_client_priv_key),
verify=False)
return retval
def test_proxy_request_post(self):
response = self._proxy_request_test_helper(method='POST')
self.assertEqual(response.content_type, "text/plain")
self.assertEqual(response.body, 'content')
def test_proxy_request_200(self):
response = self._proxy_request_test_helper(200)
self.assertEqual(response.content_type, "text/plain")
self.assertEqual(response.body, 'content')
def test_proxy_request_400(self):
self.assertIsInstance(self._proxy_request_test_helper(400),
webob.exc.HTTPBadRequest)
def test_proxy_request_403(self):
self.assertIsInstance(self._proxy_request_test_helper(403),
webob.exc.HTTPForbidden)
def test_proxy_request_404(self):
self.assertIsInstance(self._proxy_request_test_helper(404),
webob.exc.HTTPNotFound)
def test_proxy_request_409(self):
self.assertIsInstance(self._proxy_request_test_helper(409),
webob.exc.HTTPConflict)
def test_proxy_request_500(self):
self.assertIsInstance(self._proxy_request_test_helper(500),
webob.exc.HTTPInternalServerError)
def test_proxy_request_other_code(self):
with testtools.ExpectedException(Exception):
self._proxy_request_test_helper(302)
def test_sign_instance_id(self):
self.assertEqual(
self.handler._sign_instance_id('foo'),
'773ba44693c7553d6ee20f61ea5d2757a9a4f4a44d2841ae4e95b52e4cd62db4'
)
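# (The expected digest above is HMAC-SHA256 keyed with the fixture's
# shared secret 'secret' over the instance id 'foo'.)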
class TestUnixDomainMetadataProxy(base.BaseTestCase):
def setUp(self):
super(TestUnixDomainMetadataProxy, self).setUp()
self.cfg_p = mock.patch.object(agent, 'cfg')
self.cfg = self.cfg_p.start()
self.cfg.CONF.metadata_proxy_socket = '/the/path'
self.cfg.CONF.metadata_workers = 0
self.cfg.CONF.metadata_backlog = 128
self.cfg.CONF.metadata_proxy_socket_mode = meta_conf.USER_MODE
@mock.patch.object(fileutils, 'ensure_tree')
def test_init_doesnot_exists(self, ensure_dir):
agent.UnixDomainMetadataProxy(mock.Mock())
ensure_dir.assert_called_once_with('/the', mode=0o755)
def test_init_exists(self):
with mock.patch('os.path.isdir') as isdir:
with mock.patch('os.unlink') as unlink:
isdir.return_value = True
agent.UnixDomainMetadataProxy(mock.Mock())
unlink.assert_called_once_with('/the/path')
def test_init_exists_unlink_no_file(self):
with mock.patch('os.path.isdir') as isdir:
with mock.patch('os.unlink') as unlink:
with mock.patch('os.path.exists') as exists:
isdir.return_value = True
exists.return_value = False
unlink.side_effect = OSError
agent.UnixDomainMetadataProxy(mock.Mock())
unlink.assert_called_once_with('/the/path')
def test_init_exists_unlink_fails_file_still_exists(self):
with mock.patch('os.path.isdir') as isdir:
with mock.patch('os.unlink') as unlink:
with mock.patch('os.path.exists') as exists:
isdir.return_value = True
exists.return_value = True
unlink.side_effect = OSError
with testtools.ExpectedException(OSError):
agent.UnixDomainMetadataProxy(mock.Mock())
unlink.assert_called_once_with('/the/path')
@mock.patch.object(agent, 'MetadataProxyHandler')
@mock.patch.object(agent_utils, 'UnixDomainWSGIServer')
@mock.patch.object(fileutils, 'ensure_tree')
def test_run(self, ensure_dir, server, handler):
p = agent.UnixDomainMetadataProxy(self.cfg.CONF)
p.run()
ensure_dir.assert_called_once_with('/the', mode=0o755)
server.assert_has_calls([
mock.call('networking-ovn-metadata-agent'),
mock.call().start(handler.return_value,
'/the/path', workers=0,
backlog=128, mode=0o644)]
)

@@ -28,6 +28,8 @@ data_files =
scripts =
bin/neutron-rootwrap-xen-dom0
# TODO(lucasagomes): Add the metadata agent (and potentially others)
# after the networking-ovn code is migrated into neutron
[entry_points]
wsgi_scripts =
neutron-api = neutron.server:get_application